comment
stringlengths
1
45k
method_body
stringlengths
23
281k
target_code
stringlengths
0
5.16k
method_body_after
stringlengths
12
281k
context_before
stringlengths
8
543k
context_after
stringlengths
8
543k
Agree, single writer anyway - Fixed
void setConfigIncGen(T config) { ConfigState<T> prev = this.config.get(); while ( !this.config.compareAndSet(prev, new ConfigState<>(true, prev.getGeneration()+1, true, config))) { prev = this.config.get(); } }
while ( !this.config.compareAndSet(prev, new ConfigState<>(true, prev.getGeneration()+1, true, config))) {
void setConfigIncGen(T config) { ConfigState<T> prev = this.config.get(); setConfig(prev.getGeneration() + 1, config); }
class of a ConfigsSubscription */ public static <T extends ConfigInstance> ConfigSubscription<T> get(ConfigKey<T> key, ConfigSubscriber subscriber, ConfigSource source, TimingValues timingValues) { String configId = key.getConfigId(); if (source instanceof RawSource || configId.startsWith("raw:")) return getRawSub(key, subscriber, source); if (source instanceof FileSource || configId.startsWith("file:")) return getFileSub(key, subscriber, source); if (source instanceof DirSource || configId.startsWith("dir:")) return getDirFileSub(key, subscriber, source); if (source instanceof JarSource || configId.startsWith("jar:")) return getJarSub(key, subscriber, source); if (source instanceof ConfigSet) return new ConfigSetSubscription<>(key, subscriber, source); if (source instanceof ConfigSourceSet) return new JRTConfigSubscription<>(key, subscriber, source, timingValues); throw new IllegalArgumentException("Unknown source type: "+source); }
class of a ConfigsSubscription */ public static <T extends ConfigInstance> ConfigSubscription<T> get(ConfigKey<T> key, ConfigSubscriber subscriber, ConfigSource source, TimingValues timingValues) { String configId = key.getConfigId(); if (source instanceof RawSource || configId.startsWith("raw:")) return getRawSub(key, subscriber, source); if (source instanceof FileSource || configId.startsWith("file:")) return getFileSub(key, subscriber, source); if (source instanceof DirSource || configId.startsWith("dir:")) return getDirFileSub(key, subscriber, source); if (source instanceof JarSource || configId.startsWith("jar:")) return getJarSub(key, subscriber, source); if (source instanceof ConfigSet) return new ConfigSetSubscription<>(key, subscriber, source); if (source instanceof ConfigSourceSet) return new JRTConfigSubscription<>(key, subscriber, source, timingValues); throw new IllegalArgumentException("Unknown source type: "+source); }
Agree, single writer anyway - Fixed
void setConfigIfChangedIncGen(T config) { ConfigState<T> prev = this.config.get(); while (!this.config.compareAndSet(prev, new ConfigState<>(true, prev.getGeneration() + 1, !config.equals(prev.getConfig()), config))) { prev = this.config.get(); } }
while (!this.config.compareAndSet(prev, new ConfigState<>(true, prev.getGeneration() + 1, !config.equals(prev.getConfig()), config))) {
void setConfigIfChangedIncGen(T config) { ConfigState<T> prev = this.config.get(); this.config.set(new ConfigState<>(true, prev.getGeneration() + 1, !config.equals(prev.getConfig()), config)); }
class of a ConfigsSubscription */ public static <T extends ConfigInstance> ConfigSubscription<T> get(ConfigKey<T> key, ConfigSubscriber subscriber, ConfigSource source, TimingValues timingValues) { String configId = key.getConfigId(); if (source instanceof RawSource || configId.startsWith("raw:")) return getRawSub(key, subscriber, source); if (source instanceof FileSource || configId.startsWith("file:")) return getFileSub(key, subscriber, source); if (source instanceof DirSource || configId.startsWith("dir:")) return getDirFileSub(key, subscriber, source); if (source instanceof JarSource || configId.startsWith("jar:")) return getJarSub(key, subscriber, source); if (source instanceof ConfigSet) return new ConfigSetSubscription<>(key, subscriber, source); if (source instanceof ConfigSourceSet) return new JRTConfigSubscription<>(key, subscriber, source, timingValues); throw new IllegalArgumentException("Unknown source type: "+source); }
class of a ConfigsSubscription */ public static <T extends ConfigInstance> ConfigSubscription<T> get(ConfigKey<T> key, ConfigSubscriber subscriber, ConfigSource source, TimingValues timingValues) { String configId = key.getConfigId(); if (source instanceof RawSource || configId.startsWith("raw:")) return getRawSub(key, subscriber, source); if (source instanceof FileSource || configId.startsWith("file:")) return getFileSub(key, subscriber, source); if (source instanceof DirSource || configId.startsWith("dir:")) return getDirFileSub(key, subscriber, source); if (source instanceof JarSource || configId.startsWith("jar:")) return getJarSub(key, subscriber, source); if (source instanceof ConfigSet) return new ConfigSetSubscription<>(key, subscriber, source); if (source instanceof ConfigSourceSet) return new JRTConfigSubscription<>(key, subscriber, source, timingValues); throw new IllegalArgumentException("Unknown source type: "+source); }
when is this useful? it seems pretty self-conflicting; the config itself may have changed, the generation has not changed, but is tagged as changed...
protected void setConfigIfChanged(T config) { ConfigState<T> prev = this.config.get(); this.config.set(new ConfigState<>(true, prev.getGeneration(), !config.equals(prev.getConfig()), config)); }
this.config.set(new ConfigState<>(true, prev.getGeneration(),
protected void setConfigIfChanged(T config) { ConfigState<T> prev = this.config.get(); this.config.set(new ConfigState<>(true, prev.getGeneration(), !config.equals(prev.getConfig()), config)); }
class of a ConfigsSubscription */ public static <T extends ConfigInstance> ConfigSubscription<T> get(ConfigKey<T> key, ConfigSubscriber subscriber, ConfigSource source, TimingValues timingValues) { String configId = key.getConfigId(); if (source instanceof RawSource || configId.startsWith("raw:")) return getRawSub(key, subscriber, source); if (source instanceof FileSource || configId.startsWith("file:")) return getFileSub(key, subscriber, source); if (source instanceof DirSource || configId.startsWith("dir:")) return getDirFileSub(key, subscriber, source); if (source instanceof JarSource || configId.startsWith("jar:")) return getJarSub(key, subscriber, source); if (source instanceof ConfigSet) return new ConfigSetSubscription<>(key, subscriber, source); if (source instanceof ConfigSourceSet) return new JRTConfigSubscription<>(key, subscriber, source, timingValues); throw new IllegalArgumentException("Unknown source type: "+source); }
class of a ConfigsSubscription */ public static <T extends ConfigInstance> ConfigSubscription<T> get(ConfigKey<T> key, ConfigSubscriber subscriber, ConfigSource source, TimingValues timingValues) { String configId = key.getConfigId(); if (source instanceof RawSource || configId.startsWith("raw:")) return getRawSub(key, subscriber, source); if (source instanceof FileSource || configId.startsWith("file:")) return getFileSub(key, subscriber, source); if (source instanceof DirSource || configId.startsWith("dir:")) return getDirFileSub(key, subscriber, source); if (source instanceof JarSource || configId.startsWith("jar:")) return getJarSub(key, subscriber, source); if (source instanceof ConfigSet) return new ConfigSetSubscription<>(key, subscriber, source); if (source instanceof ConfigSourceSet) return new JRTConfigSubscription<>(key, subscriber, source, timingValues); throw new IllegalArgumentException("Unknown source type: "+source); }
environment.getKeyStoreOptions() -> keyStoreOptions
public SslConfigServerApiImpl(Environment environment) { Security.addProvider(new BouncyCastleProvider()); this.environment = environment; this.configServerApi = new ConfigServerApiImpl( environment.getConfigServerUris(), makeSslConnectionSocketFactory(Optional.empty())); this.keyStoreRefresher = environment.getKeyStoreOptions().map(keyStoreOptions -> { Runnable connectionFactoryRefresher = () -> configServerApi.setSSLConnectionSocketFactory( makeSslConnectionSocketFactory(environment.getKeyStoreOptions())); ConfigServerKeyStoreRefresher keyStoreRefresher = new ConfigServerKeyStoreRefresher( keyStoreOptions, connectionFactoryRefresher, configServerApi); try { keyStoreRefresher.refreshKeyStoreIfNeeded(); connectionFactoryRefresher.run(); } catch (Exception e) { throw new RuntimeException("Failed to acquire certificate to config server", e); } keyStoreRefresher.start(); return keyStoreRefresher; }); }
makeSslConnectionSocketFactory(environment.getKeyStoreOptions()));
public SslConfigServerApiImpl(Environment environment) { Security.addProvider(new BouncyCastleProvider()); this.environment = environment; this.configServerApi = new ConfigServerApiImpl( environment.getConfigServerUris(), makeSslConnectionSocketFactory(Optional.empty())); this.keyStoreRefresher = environment.getKeyStoreOptions().map(keyStoreOptions -> { Runnable connectionFactoryRefresher = () -> configServerApi.setSSLConnectionSocketFactory( makeSslConnectionSocketFactory(Optional.of(keyStoreOptions))); ConfigServerKeyStoreRefresher keyStoreRefresher = new ConfigServerKeyStoreRefresher( keyStoreOptions, connectionFactoryRefresher, configServerApi); try { keyStoreRefresher.refreshKeyStoreIfNeeded(); connectionFactoryRefresher.run(); } catch (Exception e) { throw new RuntimeException("Failed to acquire certificate to config server", e); } keyStoreRefresher.start(); return keyStoreRefresher; }); }
class SslConfigServerApiImpl implements ConfigServerApi { private final ConfigServerApiImpl configServerApi; private final Environment environment; private final Optional<ConfigServerKeyStoreRefresher> keyStoreRefresher; @Override public <T> T get(String path, Class<T> wantedReturnType) { return configServerApi.get(path, wantedReturnType); } @Override public <T> T post(String path, Object bodyJsonPojo, Class<T> wantedReturnType) { return configServerApi.post(path, bodyJsonPojo, wantedReturnType); } @Override public <T> T put(String path, Optional<Object> bodyJsonPojo, Class<T> wantedReturnType) { return configServerApi.put(path, bodyJsonPojo, wantedReturnType); } @Override public <T> T patch(String path, Object bodyJsonPojo, Class<T> wantedReturnType) { return configServerApi.patch(path, bodyJsonPojo, wantedReturnType); } @Override public <T> T delete(String path, Class<T> wantedReturnType) { return configServerApi.delete(path, wantedReturnType); } @Override public void stop() { keyStoreRefresher.ifPresent(ConfigServerKeyStoreRefresher::stop); configServerApi.stop(); } private SSLConnectionSocketFactory makeSslConnectionSocketFactory(Optional<KeyStoreOptions> keyStoreOptions) { return new SSLConnectionSocketFactory(makeSslContext(keyStoreOptions), makeHostnameVerifier()); } private SSLContext makeSslContext(Optional<KeyStoreOptions> keyStoreOptions) { AthenzSslContextBuilder sslContextBuilder = new AthenzSslContextBuilder(); environment.getTrustStoreOptions().ifPresent(options -> sslContextBuilder.withTrustStore(options.path.toFile(), options.type)); keyStoreOptions.ifPresent(options -> { try { sslContextBuilder.withKeyStore(options.loadKeyStore(), options.password); } catch (Exception e) { throw new RuntimeException("Failed to read key store", e); } }); return sslContextBuilder.build(); } private HostnameVerifier makeHostnameVerifier() { return environment.getAthenzIdentity() .map(identity -> (HostnameVerifier) new 
AthenzIdentityVerifier(Collections.singleton(identity))) .orElseGet(SSLConnectionSocketFactory::getDefaultHostnameVerifier); } }
class SslConfigServerApiImpl implements ConfigServerApi { private final ConfigServerApiImpl configServerApi; private final Environment environment; private final Optional<ConfigServerKeyStoreRefresher> keyStoreRefresher; @Override public <T> T get(String path, Class<T> wantedReturnType) { return configServerApi.get(path, wantedReturnType); } @Override public <T> T post(String path, Object bodyJsonPojo, Class<T> wantedReturnType) { return configServerApi.post(path, bodyJsonPojo, wantedReturnType); } @Override public <T> T put(String path, Optional<Object> bodyJsonPojo, Class<T> wantedReturnType) { return configServerApi.put(path, bodyJsonPojo, wantedReturnType); } @Override public <T> T patch(String path, Object bodyJsonPojo, Class<T> wantedReturnType) { return configServerApi.patch(path, bodyJsonPojo, wantedReturnType); } @Override public <T> T delete(String path, Class<T> wantedReturnType) { return configServerApi.delete(path, wantedReturnType); } @Override public void close() { keyStoreRefresher.ifPresent(ConfigServerKeyStoreRefresher::stop); configServerApi.close(); } private SSLConnectionSocketFactory makeSslConnectionSocketFactory(Optional<KeyStoreOptions> keyStoreOptions) { return new SSLConnectionSocketFactory(makeSslContext(keyStoreOptions), makeHostnameVerifier()); } private SSLContext makeSslContext(Optional<KeyStoreOptions> keyStoreOptions) { AthenzSslContextBuilder sslContextBuilder = new AthenzSslContextBuilder(); environment.getTrustStoreOptions().ifPresent(options -> sslContextBuilder.withTrustStore(options.path.toFile(), options.type)); keyStoreOptions.ifPresent(options -> { try { sslContextBuilder.withKeyStore(options.loadKeyStore(), options.password); } catch (Exception e) { throw new RuntimeException("Failed to read key store", e); } }); return sslContextBuilder.build(); } private HostnameVerifier makeHostnameVerifier() { return environment.getAthenzIdentity() .map(identity -> (HostnameVerifier) new 
AthenzIdentityVerifier(Collections.singleton(identity))) .orElseGet(SSLConnectionSocketFactory::getDefaultHostnameVerifier); } }
Fixed.
public SslConfigServerApiImpl(Environment environment) { Security.addProvider(new BouncyCastleProvider()); this.environment = environment; this.configServerApi = new ConfigServerApiImpl( environment.getConfigServerUris(), makeSslConnectionSocketFactory(Optional.empty())); this.keyStoreRefresher = environment.getKeyStoreOptions().map(keyStoreOptions -> { Runnable connectionFactoryRefresher = () -> configServerApi.setSSLConnectionSocketFactory( makeSslConnectionSocketFactory(environment.getKeyStoreOptions())); ConfigServerKeyStoreRefresher keyStoreRefresher = new ConfigServerKeyStoreRefresher( keyStoreOptions, connectionFactoryRefresher, configServerApi); try { keyStoreRefresher.refreshKeyStoreIfNeeded(); connectionFactoryRefresher.run(); } catch (Exception e) { throw new RuntimeException("Failed to acquire certificate to config server", e); } keyStoreRefresher.start(); return keyStoreRefresher; }); }
makeSslConnectionSocketFactory(environment.getKeyStoreOptions()));
public SslConfigServerApiImpl(Environment environment) { Security.addProvider(new BouncyCastleProvider()); this.environment = environment; this.configServerApi = new ConfigServerApiImpl( environment.getConfigServerUris(), makeSslConnectionSocketFactory(Optional.empty())); this.keyStoreRefresher = environment.getKeyStoreOptions().map(keyStoreOptions -> { Runnable connectionFactoryRefresher = () -> configServerApi.setSSLConnectionSocketFactory( makeSslConnectionSocketFactory(Optional.of(keyStoreOptions))); ConfigServerKeyStoreRefresher keyStoreRefresher = new ConfigServerKeyStoreRefresher( keyStoreOptions, connectionFactoryRefresher, configServerApi); try { keyStoreRefresher.refreshKeyStoreIfNeeded(); connectionFactoryRefresher.run(); } catch (Exception e) { throw new RuntimeException("Failed to acquire certificate to config server", e); } keyStoreRefresher.start(); return keyStoreRefresher; }); }
class SslConfigServerApiImpl implements ConfigServerApi { private final ConfigServerApiImpl configServerApi; private final Environment environment; private final Optional<ConfigServerKeyStoreRefresher> keyStoreRefresher; @Override public <T> T get(String path, Class<T> wantedReturnType) { return configServerApi.get(path, wantedReturnType); } @Override public <T> T post(String path, Object bodyJsonPojo, Class<T> wantedReturnType) { return configServerApi.post(path, bodyJsonPojo, wantedReturnType); } @Override public <T> T put(String path, Optional<Object> bodyJsonPojo, Class<T> wantedReturnType) { return configServerApi.put(path, bodyJsonPojo, wantedReturnType); } @Override public <T> T patch(String path, Object bodyJsonPojo, Class<T> wantedReturnType) { return configServerApi.patch(path, bodyJsonPojo, wantedReturnType); } @Override public <T> T delete(String path, Class<T> wantedReturnType) { return configServerApi.delete(path, wantedReturnType); } @Override public void stop() { keyStoreRefresher.ifPresent(ConfigServerKeyStoreRefresher::stop); configServerApi.stop(); } private SSLConnectionSocketFactory makeSslConnectionSocketFactory(Optional<KeyStoreOptions> keyStoreOptions) { return new SSLConnectionSocketFactory(makeSslContext(keyStoreOptions), makeHostnameVerifier()); } private SSLContext makeSslContext(Optional<KeyStoreOptions> keyStoreOptions) { AthenzSslContextBuilder sslContextBuilder = new AthenzSslContextBuilder(); environment.getTrustStoreOptions().ifPresent(options -> sslContextBuilder.withTrustStore(options.path.toFile(), options.type)); keyStoreOptions.ifPresent(options -> { try { sslContextBuilder.withKeyStore(options.loadKeyStore(), options.password); } catch (Exception e) { throw new RuntimeException("Failed to read key store", e); } }); return sslContextBuilder.build(); } private HostnameVerifier makeHostnameVerifier() { return environment.getAthenzIdentity() .map(identity -> (HostnameVerifier) new 
AthenzIdentityVerifier(Collections.singleton(identity))) .orElseGet(SSLConnectionSocketFactory::getDefaultHostnameVerifier); } }
class SslConfigServerApiImpl implements ConfigServerApi { private final ConfigServerApiImpl configServerApi; private final Environment environment; private final Optional<ConfigServerKeyStoreRefresher> keyStoreRefresher; @Override public <T> T get(String path, Class<T> wantedReturnType) { return configServerApi.get(path, wantedReturnType); } @Override public <T> T post(String path, Object bodyJsonPojo, Class<T> wantedReturnType) { return configServerApi.post(path, bodyJsonPojo, wantedReturnType); } @Override public <T> T put(String path, Optional<Object> bodyJsonPojo, Class<T> wantedReturnType) { return configServerApi.put(path, bodyJsonPojo, wantedReturnType); } @Override public <T> T patch(String path, Object bodyJsonPojo, Class<T> wantedReturnType) { return configServerApi.patch(path, bodyJsonPojo, wantedReturnType); } @Override public <T> T delete(String path, Class<T> wantedReturnType) { return configServerApi.delete(path, wantedReturnType); } @Override public void close() { keyStoreRefresher.ifPresent(ConfigServerKeyStoreRefresher::stop); configServerApi.close(); } private SSLConnectionSocketFactory makeSslConnectionSocketFactory(Optional<KeyStoreOptions> keyStoreOptions) { return new SSLConnectionSocketFactory(makeSslContext(keyStoreOptions), makeHostnameVerifier()); } private SSLContext makeSslContext(Optional<KeyStoreOptions> keyStoreOptions) { AthenzSslContextBuilder sslContextBuilder = new AthenzSslContextBuilder(); environment.getTrustStoreOptions().ifPresent(options -> sslContextBuilder.withTrustStore(options.path.toFile(), options.type)); keyStoreOptions.ifPresent(options -> { try { sslContextBuilder.withKeyStore(options.loadKeyStore(), options.password); } catch (Exception e) { throw new RuntimeException("Failed to read key store", e); } }); return sslContextBuilder.build(); } private HostnameVerifier makeHostnameVerifier() { return environment.getAthenzIdentity() .map(identity -> (HostnameVerifier) new 
AthenzIdentityVerifier(Collections.singleton(identity))) .orElseGet(SSLConnectionSocketFactory::getDefaultHostnameVerifier); } }
Should update the other references to `/screwdriver/v1/jobreport` in this test as well so that the new path is tested. When we remove the old path we should move these tests to `ApplicationApiTest`.
public void testJobStatusReporting() throws Exception { ContainerControllerTester tester = new ContainerControllerTester(container, responseFiles); tester.containerTester().updateSystemVersion(); long projectId = 1; Application app = tester.createApplication(); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("corp-us-east-1") .build(); Version vespaVersion = new Version("6.1"); notifyCompletion(app.id(), projectId, JobType.component, Optional.empty()); tester.deploy(app, applicationPackage, testZone, projectId); notifyCompletion(app.id(), projectId, JobType.systemTest, Optional.empty()); tester.containerTester().assertResponse(new Request("http: jsonReport(app.id(), JobType.productionUsEast3, projectId, 1L, Optional.empty()) .getBytes(StandardCharsets.UTF_8), Request.Method.POST), new File("unexpected-completion.json"), 400); JobStatus recordedStatus = tester.controller().applications().get(app.id()).get().deploymentJobs().jobStatus().get(JobType.component); assertNotNull("Status was recorded", recordedStatus); assertTrue(recordedStatus.isSuccess()); assertEquals(vespaVersion, recordedStatus.lastCompleted().get().version()); recordedStatus = tester.controller().applications().get(app.id()).get().deploymentJobs().jobStatus().get(JobType.productionApNortheast2); assertNull("Status of never-triggered jobs is empty", recordedStatus); Response response; response = container.handleRequest(new Request("http: assertTrue("Response contains system-test", response.getBodyAsString().contains(JobType.systemTest.jobName())); assertTrue("Response contains staging-test", response.getBodyAsString().contains(JobType.stagingTest.jobName())); assertEquals("Response contains only two items", 2, SlimeUtils.jsonToSlime(response.getBody()).get().entries()); response = container.handleRequest(new Request("http: assertTrue("Response contains system-test", response.getBodyAsString().contains(JobType.systemTest.jobName())); 
assertTrue("Response contains staging-test", response.getBodyAsString().contains(JobType.stagingTest.jobName())); assertEquals("Response contains only two items", 2, SlimeUtils.jsonToSlime(response.getBody()).get().entries()); Thread.sleep(50); assertResponse(new Request("http: 200, "[]"); }
tester.containerTester().assertResponse(new Request("http:
public void testJobStatusReporting() throws Exception { ContainerControllerTester tester = new ContainerControllerTester(container, responseFiles); tester.containerTester().updateSystemVersion(); long projectId = 1; Application app = tester.createApplication(); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("corp-us-east-1") .build(); Version vespaVersion = new Version("6.1"); notifyCompletion(app.id(), projectId, JobType.component, Optional.empty()); tester.deploy(app, applicationPackage, testZone, projectId); notifyCompletion(app.id(), projectId, JobType.systemTest, Optional.empty()); tester.containerTester().assertResponse(new Request("http: jsonReport(app.id(), JobType.productionUsEast3, projectId, 1L, Optional.empty()) .getBytes(StandardCharsets.UTF_8), Request.Method.POST), new File("unexpected-completion.json"), 400); JobStatus recordedStatus = tester.controller().applications().get(app.id()).get().deploymentJobs().jobStatus().get(JobType.component); assertNotNull("Status was recorded", recordedStatus); assertTrue(recordedStatus.isSuccess()); assertEquals(vespaVersion, recordedStatus.lastCompleted().get().version()); recordedStatus = tester.controller().applications().get(app.id()).get().deploymentJobs().jobStatus().get(JobType.productionApNortheast2); assertNull("Status of never-triggered jobs is empty", recordedStatus); Response response; response = container.handleRequest(new Request("http: assertTrue("Response contains system-test", response.getBodyAsString().contains(JobType.systemTest.jobName())); assertTrue("Response contains staging-test", response.getBodyAsString().contains(JobType.stagingTest.jobName())); assertEquals("Response contains only two items", 2, SlimeUtils.jsonToSlime(response.getBody()).get().entries()); response = container.handleRequest(new Request("http: assertTrue("Response contains system-test", response.getBodyAsString().contains(JobType.systemTest.jobName())); 
assertTrue("Response contains staging-test", response.getBodyAsString().contains(JobType.stagingTest.jobName())); assertEquals("Response contains only two items", 2, SlimeUtils.jsonToSlime(response.getBody()).get().entries()); Thread.sleep(50); assertResponse(new Request("http: 200, "[]"); }
class ScrewdriverApiTest extends ControllerContainerTest { private static final String responseFiles = "src/test/java/com/yahoo/vespa/hosted/controller/restapi/screwdriver/responses/"; private static final ZoneId testZone = ZoneId.from(Environment.test, RegionName.from("us-east-1")); private static final ZoneId stagingZone = ZoneId.from(Environment.staging, RegionName.from("us-east-3")); @Test public void testGetReleaseStatus() throws Exception { ContainerControllerTester tester = new ContainerControllerTester(container, responseFiles); tester.containerTester().assertResponse(new Request("http: "{\"error-code\":\"NOT_FOUND\",\"message\":\"Information about the current system version is not available at this time\"}", 404); tester.controller().updateVersionStatus(VersionStatus.compute(tester.controller())); tester.containerTester().assertResponse(new Request("http: new File("release-response.json"), 200); } @Test @Test public void testJobStatusReportingOutOfCapacity() throws Exception { ContainerControllerTester tester = new ContainerControllerTester(container, responseFiles); tester.containerTester().updateSystemVersion(); long projectId = 1; Application app = tester.createApplication(); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("corp-us-east-1") .build(); notifyCompletion(app.id(), projectId, JobType.component, Optional.empty()); tester.deploy(app, applicationPackage, testZone, projectId); notifyCompletion(app.id(), projectId, JobType.systemTest, Optional.empty()); tester.deploy(app, applicationPackage, stagingZone, projectId); notifyCompletion(app.id(), projectId, JobType.stagingTest, Optional.of(JobError.outOfCapacity)); JobStatus jobStatus = tester.controller().applications().get(app.id()) .get() .deploymentJobs() .jobStatus() .get(JobType.stagingTest); assertFalse(jobStatus.isSuccess()); assertEquals(JobError.outOfCapacity, jobStatus.jobError().get()); } @Test public void 
testTriggerJobForApplication() throws Exception { ContainerControllerTester tester = new ContainerControllerTester(container, responseFiles); BuildSystem buildSystem = tester.controller().applications().deploymentTrigger().buildSystem(); tester.containerTester().updateSystemVersion(); Application app = tester.createApplication(); tester.controller().applications().lockOrThrow(app.id(), application -> tester.controller().applications().store(application.withProjectId(1))); assertResponse(new Request("http: new byte[0], Request.Method.POST), 400, "{\"error-code\":\"BAD_REQUEST\",\"message\":\"foo.bar not found\"}"); assertResponse(new Request("http: app.id().tenant().value() + "/application/" + app.id().application().value(), "invalid".getBytes(StandardCharsets.UTF_8), Request.Method.POST), 400, "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Unknown job name 'invalid'\"}"); assertResponse(new Request("http: app.id().tenant().value() + "/application/" + app.id().application().value(), new byte[0], Request.Method.POST), 200, "{\"message\":\"Triggered component for tenant1.application1\"}"); assertFalse(buildSystem.jobs().isEmpty()); assertEquals(JobType.component.jobName(), buildSystem.jobs().get(0).jobName()); assertEquals(1L, buildSystem.jobs().get(0).projectId()); buildSystem.takeJobsToRun(); assertResponse(new Request("http: app.id().tenant().value() + "/application/" + app.id().application().value(), "staging-test".getBytes(StandardCharsets.UTF_8), Request.Method.POST), 200, "{\"message\":\"Triggered staging-test for tenant1.application1\"}"); assertFalse(buildSystem.jobs().isEmpty()); assertEquals(JobType.stagingTest.jobName(), buildSystem.jobs().get(0).jobName()); assertEquals(1L, buildSystem.jobs().get(0).projectId()); } private void notifyCompletion(ApplicationId app, long projectId, JobType jobType, Optional<JobError> error) throws IOException { assertResponse(new Request("http: jsonReport(app, jobType, projectId, 1L, error).getBytes(StandardCharsets.UTF_8), 
Request.Method.POST), 200, "ok"); } private static String jsonReport(ApplicationId applicationId, JobType jobType, long projectId, long buildNumber, Optional<JobError> jobError) { return "{\n" + " \"projectId\" : " + projectId + ",\n" + " \"jobName\" :\"" + jobType.jobName() + "\",\n" + " \"buildNumber\" : " + buildNumber + ",\n" + jobError.map(message -> " \"jobError\" : \"" + message + "\",\n").orElse("") + " \"tenant\" :\"" + applicationId.tenant().value() + "\",\n" + " \"application\" :\"" + applicationId.application().value() + "\",\n" + " \"instance\" :\"" + applicationId.instance().value() + "\"\n" + "}"; } }
class ScrewdriverApiTest extends ControllerContainerTest { private static final String responseFiles = "src/test/java/com/yahoo/vespa/hosted/controller/restapi/screwdriver/responses/"; private static final ZoneId testZone = ZoneId.from(Environment.test, RegionName.from("us-east-1")); private static final ZoneId stagingZone = ZoneId.from(Environment.staging, RegionName.from("us-east-3")); @Test public void testGetReleaseStatus() throws Exception { ContainerControllerTester tester = new ContainerControllerTester(container, responseFiles); tester.containerTester().assertResponse(new Request("http: "{\"error-code\":\"NOT_FOUND\",\"message\":\"Information about the current system version is not available at this time\"}", 404); tester.controller().updateVersionStatus(VersionStatus.compute(tester.controller())); tester.containerTester().assertResponse(new Request("http: new File("release-response.json"), 200); } @Test @Test public void testJobStatusReportingOutOfCapacity() throws Exception { ContainerControllerTester tester = new ContainerControllerTester(container, responseFiles); tester.containerTester().updateSystemVersion(); long projectId = 1; Application app = tester.createApplication(); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("corp-us-east-1") .build(); notifyCompletion(app.id(), projectId, JobType.component, Optional.empty()); tester.deploy(app, applicationPackage, testZone, projectId); notifyCompletion(app.id(), projectId, JobType.systemTest, Optional.empty()); tester.deploy(app, applicationPackage, stagingZone, projectId); notifyCompletion(app.id(), projectId, JobType.stagingTest, Optional.of(JobError.outOfCapacity)); JobStatus jobStatus = tester.controller().applications().get(app.id()) .get() .deploymentJobs() .jobStatus() .get(JobType.stagingTest); assertFalse(jobStatus.isSuccess()); assertEquals(JobError.outOfCapacity, jobStatus.jobError().get()); } @Test public void 
testTriggerJobForApplication() throws Exception { ContainerControllerTester tester = new ContainerControllerTester(container, responseFiles); BuildSystem buildSystem = tester.controller().applications().deploymentTrigger().buildSystem(); tester.containerTester().updateSystemVersion(); Application app = tester.createApplication(); tester.controller().applications().lockOrThrow(app.id(), application -> tester.controller().applications().store(application.withProjectId(1))); assertResponse(new Request("http: new byte[0], Request.Method.POST), 400, "{\"error-code\":\"BAD_REQUEST\",\"message\":\"foo.bar not found\"}"); assertResponse(new Request("http: app.id().tenant().value() + "/application/" + app.id().application().value(), "invalid".getBytes(StandardCharsets.UTF_8), Request.Method.POST), 400, "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Unknown job name 'invalid'\"}"); assertResponse(new Request("http: app.id().tenant().value() + "/application/" + app.id().application().value(), new byte[0], Request.Method.POST), 200, "{\"message\":\"Triggered component for tenant1.application1\"}"); assertFalse(buildSystem.jobs().isEmpty()); assertEquals(JobType.component.jobName(), buildSystem.jobs().get(0).jobName()); assertEquals(1L, buildSystem.jobs().get(0).projectId()); buildSystem.takeJobsToRun(); assertResponse(new Request("http: app.id().tenant().value() + "/application/" + app.id().application().value(), "staging-test".getBytes(StandardCharsets.UTF_8), Request.Method.POST), 200, "{\"message\":\"Triggered staging-test for tenant1.application1\"}"); assertFalse(buildSystem.jobs().isEmpty()); assertEquals(JobType.stagingTest.jobName(), buildSystem.jobs().get(0).jobName()); assertEquals(1L, buildSystem.jobs().get(0).projectId()); } private void notifyCompletion(ApplicationId app, long projectId, JobType jobType, Optional<JobError> error) throws IOException { assertResponse(new Request("http: jsonReport(app, jobType, projectId, 1L, error).getBytes(StandardCharsets.UTF_8), 
Request.Method.POST), 200, "ok"); } private static String jsonReport(ApplicationId applicationId, JobType jobType, long projectId, long buildNumber, Optional<JobError> jobError) { return "{\n" + " \"projectId\" : " + projectId + ",\n" + " \"jobName\" :\"" + jobType.jobName() + "\",\n" + " \"buildNumber\" : " + buildNumber + ",\n" + jobError.map(message -> " \"jobError\" : \"" + message + "\",\n").orElse("") + " \"tenant\" :\"" + applicationId.tenant().value() + "\",\n" + " \"application\" :\"" + applicationId.application().value() + "\",\n" + " \"instance\" :\"" + applicationId.instance().value() + "\"\n" + "}"; } }
Consider creating a helper function that takes the search definition string and builds the SearchBuilder to avoid duplication in the tests.
public void tensorFirstPhaseMustProduceDouble() throws Exception { try { RankProfileRegistry rankProfileRegistry = new RankProfileRegistry(); SearchBuilder searchBuilder = new SearchBuilder(rankProfileRegistry); searchBuilder.importString(joinLines( "search test {", " document test { ", " field a type tensor(x[],y[]) {", " indexing: attribute", " }", " }", " rank-profile my_rank_profile {", " first-phase {", " expression: attribute(a)", " }", " }", "}" )); searchBuilder.build(); fail("Expected exception"); } catch (IllegalArgumentException expected) { assertEquals("In search definition 'test', rank profile 'my_rank_profile': The first-phase expression must produce a double (a tensor with no dimensions), but produces tensor(x[],y[])", Exceptions.toMessageString(expected)); } }
RankProfileRegistry rankProfileRegistry = new RankProfileRegistry();
public void tensorFirstPhaseMustProduceDouble() throws Exception { try { SearchBuilder builder = new SearchBuilder(); builder.importString(joinLines( "search test {", " document test { ", " field a type tensor(x[],y[]) {", " indexing: attribute", " }", " }", " rank-profile my_rank_profile {", " first-phase {", " expression: attribute(a)", " }", " }", "}" )); builder.build(); fail("Expected exception"); } catch (IllegalArgumentException expected) { assertEquals("In search definition 'test', rank profile 'my_rank_profile': The first-phase expression must produce a double (a tensor with no dimensions), but produces tensor(x[],y[])", Exceptions.toMessageString(expected)); } }
class RankingExpressionTypeValidatorTestCase { @Test @Test public void tensorSecondPhaseMustProduceDouble() throws Exception { try { RankProfileRegistry rankProfileRegistry = new RankProfileRegistry(); SearchBuilder searchBuilder = new SearchBuilder(rankProfileRegistry); searchBuilder.importString(joinLines( "search test {", " document test { ", " field a type tensor(x[],y[]) {", " indexing: attribute", " }", " }", " rank-profile my_rank_profile {", " first-phase {", " expression: sum(attribute(a))", " }", " second-phase {", " expression: attribute(a)", " }", " }", "}" )); searchBuilder.build(); fail("Expected exception"); } catch (IllegalArgumentException expected) { assertEquals("In search definition 'test', rank profile 'my_rank_profile': The second-phase expression must produce a double (a tensor with no dimensions), but produces tensor(x[],y[])", Exceptions.toMessageString(expected)); } } @Test public void tensorConditionsMustHaveTypeCompatibleBranches() throws Exception { try { RankProfileRegistry rankProfileRegistry = new RankProfileRegistry(); SearchBuilder searchBuilder = new SearchBuilder(rankProfileRegistry); searchBuilder.importString(joinLines( "search test {", " document test { ", " field a type tensor(x[],y[]) {", " indexing: attribute", " }", " field b type tensor(z[10]) {", " indexing: attribute", " }", " }", " rank-profile my_rank_profile {", " first-phase {", " expression: sum(if(1>0, attribute(a), attribute(b)))", " }", " }", "}" )); searchBuilder.build(); fail("Expected exception"); } catch (IllegalArgumentException expected) { assertEquals("In search definition 'test', rank profile 'my_rank_profile': The first-phase expression is invalid: An if expression must produce compatible types in both alternatives, but the 'true' type is tensor(x[],y[]) while the 'false' type is tensor(z[10])", Exceptions.toMessageString(expected)); } } }
class RankingExpressionTypeValidatorTestCase { @Test @Test public void tensorSecondPhaseMustProduceDouble() throws Exception { try { SearchBuilder builder = new SearchBuilder(); builder.importString(joinLines( "search test {", " document test { ", " field a type tensor(x[],y[]) {", " indexing: attribute", " }", " }", " rank-profile my_rank_profile {", " first-phase {", " expression: sum(attribute(a))", " }", " second-phase {", " expression: attribute(a)", " }", " }", "}" )); builder.build(); fail("Expected exception"); } catch (IllegalArgumentException expected) { assertEquals("In search definition 'test', rank profile 'my_rank_profile': The second-phase expression must produce a double (a tensor with no dimensions), but produces tensor(x[],y[])", Exceptions.toMessageString(expected)); } } @Test public void tensorConditionsMustHaveTypeCompatibleBranches() throws Exception { try { SearchBuilder searchBuilder = new SearchBuilder(); searchBuilder.importString(joinLines( "search test {", " document test { ", " field a type tensor(x[],y[]) {", " indexing: attribute", " }", " field b type tensor(z[10]) {", " indexing: attribute", " }", " }", " rank-profile my_rank_profile {", " first-phase {", " expression: sum(if(1>0, attribute(a), attribute(b)))", " }", " }", "}" )); searchBuilder.build(); fail("Expected exception"); } catch (IllegalArgumentException expected) { assertEquals("In search definition 'test', rank profile 'my_rank_profile': The first-phase expression is invalid: An if expression must produce compatible types in both alternatives, but the 'true' type is tensor(x[],y[]) while the 'false' type is tensor(z[10])", Exceptions.toMessageString(expected)); } } @Test public void testMacroInvocationTypes() throws Exception { RankProfileRegistry rankProfileRegistry = new RankProfileRegistry(); SearchBuilder builder = new SearchBuilder(rankProfileRegistry); builder.importString(joinLines( "search test {", " document test { ", " field a type tensor(x[],y[]) {", " 
indexing: attribute", " }", " field b type tensor(z[10]) {", " indexing: attribute", " }", " }", " rank-profile my_rank_profile {", " macro macro1(attribute_to_use) {", " expression: attribute(attribute_to_use)", " }", " summary-features {", " macro1(a)", " macro1(b)", " }", " }", "}" )); builder.build(); RankProfile profile = builder.getRankProfileRegistry().getRankProfile(builder.getSearch(), "my_rank_profile"); assertEquals(TensorType.fromSpec("tensor(x[],y[])"), summaryFeatures(profile).get("macro1(a)").type(profile.typeContext(builder.getQueryProfileRegistry()))); assertEquals(TensorType.fromSpec("tensor(z[10])"), summaryFeatures(profile).get("macro1(b)").type(profile.typeContext(builder.getQueryProfileRegistry()))); } @Test public void testTensorMacroInvocationTypes_Nested() throws Exception { SearchBuilder builder = new SearchBuilder(); builder.importString(joinLines( "search test {", " document test { ", " field a type tensor(x[],y[]) {", " indexing: attribute", " }", " field b type tensor(z[10]) {", " indexing: attribute", " }", " }", " rank-profile my_rank_profile {", " macro return_a() {", " expression: return_first(attribute(a), attribute(b))", " }", " macro return_b() {", " expression: return_second(attribute(a), attribute(b))", " }", " macro return_first(e1, e2) {", " expression: e1", " }", " macro return_second(e1, e2) {", " expression: return_first(e2, e1)", " }", " summary-features {", " return_a", " return_b", " }", " }", "}" )); builder.build(); RankProfile profile = builder.getRankProfileRegistry().getRankProfile(builder.getSearch(), "my_rank_profile"); assertEquals(TensorType.fromSpec("tensor(x[],y[])"), summaryFeatures(profile).get("return_a").type(profile.typeContext(builder.getQueryProfileRegistry()))); assertEquals(TensorType.fromSpec("tensor(z[10])"), summaryFeatures(profile).get("return_b").type(profile.typeContext(builder.getQueryProfileRegistry()))); } private Map<String, ReferenceNode> summaryFeatures(RankProfile profile) { return 
profile.getSummaryFeatures().stream().collect(Collectors.toMap(f -> f.toString(), f -> f)); } }
Simplified a bit.
public void tensorFirstPhaseMustProduceDouble() throws Exception { try { RankProfileRegistry rankProfileRegistry = new RankProfileRegistry(); SearchBuilder searchBuilder = new SearchBuilder(rankProfileRegistry); searchBuilder.importString(joinLines( "search test {", " document test { ", " field a type tensor(x[],y[]) {", " indexing: attribute", " }", " }", " rank-profile my_rank_profile {", " first-phase {", " expression: attribute(a)", " }", " }", "}" )); searchBuilder.build(); fail("Expected exception"); } catch (IllegalArgumentException expected) { assertEquals("In search definition 'test', rank profile 'my_rank_profile': The first-phase expression must produce a double (a tensor with no dimensions), but produces tensor(x[],y[])", Exceptions.toMessageString(expected)); } }
RankProfileRegistry rankProfileRegistry = new RankProfileRegistry();
public void tensorFirstPhaseMustProduceDouble() throws Exception { try { SearchBuilder builder = new SearchBuilder(); builder.importString(joinLines( "search test {", " document test { ", " field a type tensor(x[],y[]) {", " indexing: attribute", " }", " }", " rank-profile my_rank_profile {", " first-phase {", " expression: attribute(a)", " }", " }", "}" )); builder.build(); fail("Expected exception"); } catch (IllegalArgumentException expected) { assertEquals("In search definition 'test', rank profile 'my_rank_profile': The first-phase expression must produce a double (a tensor with no dimensions), but produces tensor(x[],y[])", Exceptions.toMessageString(expected)); } }
class RankingExpressionTypeValidatorTestCase { @Test @Test public void tensorSecondPhaseMustProduceDouble() throws Exception { try { RankProfileRegistry rankProfileRegistry = new RankProfileRegistry(); SearchBuilder searchBuilder = new SearchBuilder(rankProfileRegistry); searchBuilder.importString(joinLines( "search test {", " document test { ", " field a type tensor(x[],y[]) {", " indexing: attribute", " }", " }", " rank-profile my_rank_profile {", " first-phase {", " expression: sum(attribute(a))", " }", " second-phase {", " expression: attribute(a)", " }", " }", "}" )); searchBuilder.build(); fail("Expected exception"); } catch (IllegalArgumentException expected) { assertEquals("In search definition 'test', rank profile 'my_rank_profile': The second-phase expression must produce a double (a tensor with no dimensions), but produces tensor(x[],y[])", Exceptions.toMessageString(expected)); } } @Test public void tensorConditionsMustHaveTypeCompatibleBranches() throws Exception { try { RankProfileRegistry rankProfileRegistry = new RankProfileRegistry(); SearchBuilder searchBuilder = new SearchBuilder(rankProfileRegistry); searchBuilder.importString(joinLines( "search test {", " document test { ", " field a type tensor(x[],y[]) {", " indexing: attribute", " }", " field b type tensor(z[10]) {", " indexing: attribute", " }", " }", " rank-profile my_rank_profile {", " first-phase {", " expression: sum(if(1>0, attribute(a), attribute(b)))", " }", " }", "}" )); searchBuilder.build(); fail("Expected exception"); } catch (IllegalArgumentException expected) { assertEquals("In search definition 'test', rank profile 'my_rank_profile': The first-phase expression is invalid: An if expression must produce compatible types in both alternatives, but the 'true' type is tensor(x[],y[]) while the 'false' type is tensor(z[10])", Exceptions.toMessageString(expected)); } } }
class RankingExpressionTypeValidatorTestCase { @Test @Test public void tensorSecondPhaseMustProduceDouble() throws Exception { try { SearchBuilder builder = new SearchBuilder(); builder.importString(joinLines( "search test {", " document test { ", " field a type tensor(x[],y[]) {", " indexing: attribute", " }", " }", " rank-profile my_rank_profile {", " first-phase {", " expression: sum(attribute(a))", " }", " second-phase {", " expression: attribute(a)", " }", " }", "}" )); builder.build(); fail("Expected exception"); } catch (IllegalArgumentException expected) { assertEquals("In search definition 'test', rank profile 'my_rank_profile': The second-phase expression must produce a double (a tensor with no dimensions), but produces tensor(x[],y[])", Exceptions.toMessageString(expected)); } } @Test public void tensorConditionsMustHaveTypeCompatibleBranches() throws Exception { try { SearchBuilder searchBuilder = new SearchBuilder(); searchBuilder.importString(joinLines( "search test {", " document test { ", " field a type tensor(x[],y[]) {", " indexing: attribute", " }", " field b type tensor(z[10]) {", " indexing: attribute", " }", " }", " rank-profile my_rank_profile {", " first-phase {", " expression: sum(if(1>0, attribute(a), attribute(b)))", " }", " }", "}" )); searchBuilder.build(); fail("Expected exception"); } catch (IllegalArgumentException expected) { assertEquals("In search definition 'test', rank profile 'my_rank_profile': The first-phase expression is invalid: An if expression must produce compatible types in both alternatives, but the 'true' type is tensor(x[],y[]) while the 'false' type is tensor(z[10])", Exceptions.toMessageString(expected)); } } @Test public void testMacroInvocationTypes() throws Exception { RankProfileRegistry rankProfileRegistry = new RankProfileRegistry(); SearchBuilder builder = new SearchBuilder(rankProfileRegistry); builder.importString(joinLines( "search test {", " document test { ", " field a type tensor(x[],y[]) {", " 
indexing: attribute", " }", " field b type tensor(z[10]) {", " indexing: attribute", " }", " }", " rank-profile my_rank_profile {", " macro macro1(attribute_to_use) {", " expression: attribute(attribute_to_use)", " }", " summary-features {", " macro1(a)", " macro1(b)", " }", " }", "}" )); builder.build(); RankProfile profile = builder.getRankProfileRegistry().getRankProfile(builder.getSearch(), "my_rank_profile"); assertEquals(TensorType.fromSpec("tensor(x[],y[])"), summaryFeatures(profile).get("macro1(a)").type(profile.typeContext(builder.getQueryProfileRegistry()))); assertEquals(TensorType.fromSpec("tensor(z[10])"), summaryFeatures(profile).get("macro1(b)").type(profile.typeContext(builder.getQueryProfileRegistry()))); } @Test public void testTensorMacroInvocationTypes_Nested() throws Exception { SearchBuilder builder = new SearchBuilder(); builder.importString(joinLines( "search test {", " document test { ", " field a type tensor(x[],y[]) {", " indexing: attribute", " }", " field b type tensor(z[10]) {", " indexing: attribute", " }", " }", " rank-profile my_rank_profile {", " macro return_a() {", " expression: return_first(attribute(a), attribute(b))", " }", " macro return_b() {", " expression: return_second(attribute(a), attribute(b))", " }", " macro return_first(e1, e2) {", " expression: e1", " }", " macro return_second(e1, e2) {", " expression: return_first(e2, e1)", " }", " summary-features {", " return_a", " return_b", " }", " }", "}" )); builder.build(); RankProfile profile = builder.getRankProfileRegistry().getRankProfile(builder.getSearch(), "my_rank_profile"); assertEquals(TensorType.fromSpec("tensor(x[],y[])"), summaryFeatures(profile).get("return_a").type(profile.typeContext(builder.getQueryProfileRegistry()))); assertEquals(TensorType.fromSpec("tensor(z[10])"), summaryFeatures(profile).get("return_b").type(profile.typeContext(builder.getQueryProfileRegistry()))); } private Map<String, ReferenceNode> summaryFeatures(RankProfile profile) { return 
profile.getSummaryFeatures().stream().collect(Collectors.toMap(f -> f.toString(), f -> f)); } }
Could consider `BucketSpaceStats.empty()` / `BucketSpaceStats.invalid()` utility factory function to make this more explicit?
public ContentNodeStats(StorageNode storageNode) { this.nodeIndex = storageNode.getIndex(); for (StorageNode.BucketSpaceStats stats : storageNode.getBucketSpacesStats()) { if (stats.valid()) { this.bucketSpaces.put(stats.getName(), new BucketSpaceStats(stats.getBucketStats().getTotal(), stats.getBucketStats().getPending())); } else { this.bucketSpaces.put(stats.getName(), new BucketSpaceStats()); } } }
this.bucketSpaces.put(stats.getName(), new BucketSpaceStats());
public ContentNodeStats(StorageNode storageNode) { this.nodeIndex = storageNode.getIndex(); for (StorageNode.BucketSpaceStats stats : storageNode.getBucketSpacesStats()) { if (stats.valid()) { this.bucketSpaces.put(stats.getName(), BucketSpaceStats.of(stats.getBucketStats().getTotal(), stats.getBucketStats().getPending())); } else { this.bucketSpaces.put(stats.getName(), BucketSpaceStats.empty()); } } }
class BucketSpaceStats { private long bucketsTotal; private long bucketsPending; public BucketSpaceStats() { this.bucketsTotal = 0; this.bucketsPending = 0; } public BucketSpaceStats(long bucketsTotal, long bucketsPending) { this.bucketsTotal = bucketsTotal; this.bucketsPending = bucketsPending; } public long getBucketsTotal() { return bucketsTotal; } public long getBucketsPending() { return bucketsPending; } public void merge(BucketSpaceStats rhs, int factor) { this.bucketsTotal += (factor * rhs.bucketsTotal); this.bucketsPending += (factor * rhs.bucketsPending); } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; BucketSpaceStats that = (BucketSpaceStats) o; return bucketsTotal == that.bucketsTotal && bucketsPending == that.bucketsPending; } @Override public int hashCode() { return Objects.hash(bucketsTotal, bucketsPending); } @Override public String toString() { return "{bucketsTotal=" + bucketsTotal + ", bucketsPending=" + bucketsPending + "}"; } }
class BucketSpaceStats { private long bucketsTotal; private long bucketsPending; private BucketSpaceStats() { this.bucketsTotal = 0; this.bucketsPending = 0; } private BucketSpaceStats(long bucketsTotal, long bucketsPending) { this.bucketsTotal = bucketsTotal; this.bucketsPending = bucketsPending; } public static BucketSpaceStats empty() { return new BucketSpaceStats(); } public static BucketSpaceStats of(long bucketsTotal, long bucketsPending) { return new BucketSpaceStats(bucketsTotal, bucketsPending); } public long getBucketsTotal() { return bucketsTotal; } public long getBucketsPending() { return bucketsPending; } public void merge(BucketSpaceStats rhs, int factor) { this.bucketsTotal += (factor * rhs.bucketsTotal); this.bucketsPending += (factor * rhs.bucketsPending); } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; BucketSpaceStats that = (BucketSpaceStats) o; return bucketsTotal == that.bucketsTotal && bucketsPending == that.bucketsPending; } @Override public int hashCode() { return Objects.hash(bucketsTotal, bucketsPending); } @Override public String toString() { return "{bucketsTotal=" + bucketsTotal + ", bucketsPending=" + bucketsPending + "}"; } }
Could consider a `BucketSpaceStats.of(total, pending)` factory function
public ContentNodeStats(StorageNode storageNode) { this.nodeIndex = storageNode.getIndex(); for (StorageNode.BucketSpaceStats stats : storageNode.getBucketSpacesStats()) { if (stats.valid()) { this.bucketSpaces.put(stats.getName(), new BucketSpaceStats(stats.getBucketStats().getTotal(), stats.getBucketStats().getPending())); } else { this.bucketSpaces.put(stats.getName(), new BucketSpaceStats()); } } }
stats.getBucketStats().getPending()));
public ContentNodeStats(StorageNode storageNode) { this.nodeIndex = storageNode.getIndex(); for (StorageNode.BucketSpaceStats stats : storageNode.getBucketSpacesStats()) { if (stats.valid()) { this.bucketSpaces.put(stats.getName(), BucketSpaceStats.of(stats.getBucketStats().getTotal(), stats.getBucketStats().getPending())); } else { this.bucketSpaces.put(stats.getName(), BucketSpaceStats.empty()); } } }
class BucketSpaceStats { private long bucketsTotal; private long bucketsPending; public BucketSpaceStats() { this.bucketsTotal = 0; this.bucketsPending = 0; } public BucketSpaceStats(long bucketsTotal, long bucketsPending) { this.bucketsTotal = bucketsTotal; this.bucketsPending = bucketsPending; } public long getBucketsTotal() { return bucketsTotal; } public long getBucketsPending() { return bucketsPending; } public void merge(BucketSpaceStats rhs, int factor) { this.bucketsTotal += (factor * rhs.bucketsTotal); this.bucketsPending += (factor * rhs.bucketsPending); } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; BucketSpaceStats that = (BucketSpaceStats) o; return bucketsTotal == that.bucketsTotal && bucketsPending == that.bucketsPending; } @Override public int hashCode() { return Objects.hash(bucketsTotal, bucketsPending); } @Override public String toString() { return "{bucketsTotal=" + bucketsTotal + ", bucketsPending=" + bucketsPending + "}"; } }
class BucketSpaceStats { private long bucketsTotal; private long bucketsPending; private BucketSpaceStats() { this.bucketsTotal = 0; this.bucketsPending = 0; } private BucketSpaceStats(long bucketsTotal, long bucketsPending) { this.bucketsTotal = bucketsTotal; this.bucketsPending = bucketsPending; } public static BucketSpaceStats empty() { return new BucketSpaceStats(); } public static BucketSpaceStats of(long bucketsTotal, long bucketsPending) { return new BucketSpaceStats(bucketsTotal, bucketsPending); } public long getBucketsTotal() { return bucketsTotal; } public long getBucketsPending() { return bucketsPending; } public void merge(BucketSpaceStats rhs, int factor) { this.bucketsTotal += (factor * rhs.bucketsTotal); this.bucketsPending += (factor * rhs.bucketsPending); } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; BucketSpaceStats that = (BucketSpaceStats) o; return bucketsTotal == that.bucketsTotal && bucketsPending == that.bucketsPending; } @Override public int hashCode() { return Objects.hash(bucketsTotal, bucketsPending); } @Override public String toString() { return "{bucketsTotal=" + bucketsTotal + ", bucketsPending=" + bucketsPending + "}"; } }
Could also have used something like `stats.stream().collect(Collectors.toMap(e -> e.getKey(), e-> new ContentNodeStats(e.getKey(), e.getValue())));` to avoid explicit temporary map, but this is fine too (and has better named lambda variable names).
public ContentClusterStats build() { Map<Integer, ContentNodeStats> nodeToStatsMap = new HashMap<>(); stats.forEach((nodeIndex, bucketSpaces) -> nodeToStatsMap.put(nodeIndex, new ContentNodeStats(nodeIndex, bucketSpaces))); return new ContentClusterStats(nodeToStatsMap); }
nodeToStatsMap.put(nodeIndex, new ContentNodeStats(nodeIndex, bucketSpaces)));
public ContentClusterStats build() { Map<Integer, ContentNodeStats> nodeToStatsMap = new HashMap<>(); stats.forEach((nodeIndex, bucketSpaces) -> nodeToStatsMap.put(nodeIndex, new ContentNodeStats(nodeIndex, bucketSpaces))); return new ContentClusterStats(nodeToStatsMap); }
class StatsBuilder { private final Map<Integer, Map<String, ContentNodeStats.BucketSpaceStats> > stats = new HashMap<>(); public StatsBuilder add(int nodeIndex, String bucketSpace, long bucketsTotal, long bucketsPending) { return add(nodeIndex, bucketSpace, new ContentNodeStats.BucketSpaceStats(bucketsTotal, bucketsPending)); } public StatsBuilder add(int nodeIndex, String bucketSpace) { return add(nodeIndex, bucketSpace, new ContentNodeStats.BucketSpaceStats()); } public StatsBuilder add(int nodeIndex, String bucketSpace, ContentNodeStats.BucketSpaceStats bucketSpaceStats) { Map<String, ContentNodeStats.BucketSpaceStats> contentNodeStats = stats.get(nodeIndex); if (contentNodeStats == null) { contentNodeStats = new HashMap<>(); stats.put(nodeIndex, contentNodeStats); } contentNodeStats.put(bucketSpace, bucketSpaceStats); return this; } public StatsBuilder add(int nodeIndex) { stats.put(nodeIndex, new HashMap<>()); return this; } }
class StatsBuilder { private final Map<Integer, Map<String, ContentNodeStats.BucketSpaceStats> > stats = new HashMap<>(); public StatsBuilder add(int nodeIndex, String bucketSpace, long bucketsTotal, long bucketsPending) { return add(nodeIndex, bucketSpace, ContentNodeStats.BucketSpaceStats.of(bucketsTotal, bucketsPending)); } public StatsBuilder add(int nodeIndex, String bucketSpace) { return add(nodeIndex, bucketSpace, ContentNodeStats.BucketSpaceStats.empty()); } public StatsBuilder add(int nodeIndex, String bucketSpace, ContentNodeStats.BucketSpaceStats bucketSpaceStats) { Map<String, ContentNodeStats.BucketSpaceStats> contentNodeStats = stats.get(nodeIndex); if (contentNodeStats == null) { contentNodeStats = new HashMap<>(); stats.put(nodeIndex, contentNodeStats); } contentNodeStats.put(bucketSpace, bucketSpaceStats); return this; } public StatsBuilder add(int nodeIndex) { stats.put(nodeIndex, new HashMap<>()); return this; } }
Is it technically more correct to say `&& factor > 0` here? Doesn't matter for the _current_ usages of the method, though.
private void merge(ContentNodeStats stats, int factor) { for (Map.Entry<String, BucketSpaceStats> entry : stats.bucketSpaces.entrySet()) { BucketSpaceStats statsToUpdate = bucketSpaces.get(entry.getKey()); if (statsToUpdate == null && factor == 1) { statsToUpdate = new BucketSpaceStats(); bucketSpaces.put(entry.getKey(), statsToUpdate); } if (statsToUpdate != null) { statsToUpdate.merge(entry.getValue(), factor); } } }
if (statsToUpdate == null && factor == 1) {
private void merge(ContentNodeStats stats, int factor) { for (Map.Entry<String, BucketSpaceStats> entry : stats.bucketSpaces.entrySet()) { BucketSpaceStats statsToUpdate = bucketSpaces.get(entry.getKey()); if (statsToUpdate == null && factor == 1) { statsToUpdate = new BucketSpaceStats(); bucketSpaces.put(entry.getKey(), statsToUpdate); } if (statsToUpdate != null) { statsToUpdate.merge(entry.getValue(), factor); } } }
class BucketSpaceStats { private long bucketsTotal; private long bucketsPending; public BucketSpaceStats() { this.bucketsTotal = 0; this.bucketsPending = 0; } public BucketSpaceStats(long bucketsTotal, long bucketsPending) { this.bucketsTotal = bucketsTotal; this.bucketsPending = bucketsPending; } public long getBucketsTotal() { return bucketsTotal; } public long getBucketsPending() { return bucketsPending; } public void merge(BucketSpaceStats rhs, int factor) { this.bucketsTotal += (factor * rhs.bucketsTotal); this.bucketsPending += (factor * rhs.bucketsPending); } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; BucketSpaceStats that = (BucketSpaceStats) o; return bucketsTotal == that.bucketsTotal && bucketsPending == that.bucketsPending; } @Override public int hashCode() { return Objects.hash(bucketsTotal, bucketsPending); } @Override public String toString() { return "{bucketsTotal=" + bucketsTotal + ", bucketsPending=" + bucketsPending + "}"; } }
class BucketSpaceStats { private long bucketsTotal; private long bucketsPending; private BucketSpaceStats() { this.bucketsTotal = 0; this.bucketsPending = 0; } private BucketSpaceStats(long bucketsTotal, long bucketsPending) { this.bucketsTotal = bucketsTotal; this.bucketsPending = bucketsPending; } public static BucketSpaceStats empty() { return new BucketSpaceStats(); } public static BucketSpaceStats of(long bucketsTotal, long bucketsPending) { return new BucketSpaceStats(bucketsTotal, bucketsPending); } public long getBucketsTotal() { return bucketsTotal; } public long getBucketsPending() { return bucketsPending; } public void merge(BucketSpaceStats rhs, int factor) { this.bucketsTotal += (factor * rhs.bucketsTotal); this.bucketsPending += (factor * rhs.bucketsPending); } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; BucketSpaceStats that = (BucketSpaceStats) o; return bucketsTotal == that.bucketsTotal && bucketsPending == that.bucketsPending; } @Override public int hashCode() { return Objects.hash(bucketsTotal, bucketsPending); } @Override public String toString() { return "{bucketsTotal=" + bucketsTotal + ", bucketsPending=" + bucketsPending + "}"; } }
Alternatively (because I want a blue bike shed!), `log.info(() -> String.format("Handshake did not work out %s: %s", endpoint, Exceptions.toMessageString(ser)));`
private ThreadState cycle(final ThreadState threadState) { switch(threadState) { case DISCONNECTED: try { if (! client.connect()) { log.log(Level.WARNING, "Connect returned null " + endpoint); drainFirstDocumentsInQueueIfOld(); return ThreadState.DISCONNECTED; } return ThreadState.CONNECTED; } catch (Throwable throwable1) { drainFirstDocumentsInQueueIfOld(); log.log(Level.INFO, "Connect did not work out " + endpoint, throwable1); executeProblemsCounter.incrementAndGet(); return ThreadState.DISCONNECTED; } case CONNECTED: try { client.handshake(); successfullHandshakes.getAndIncrement(); } catch (ServerResponseException ser) { executeProblemsCounter.incrementAndGet(); log.info("Handshake did not work out " + endpoint + ": " + Exceptions.toMessageString(ser)); drainFirstDocumentsInQueueIfOld(); return ThreadState.CONNECTED; } catch (Throwable throwable) { executeProblemsCounter.incrementAndGet(); log.info("Problem with Handshake " + endpoint + ": " + Exceptions.toMessageString(throwable)); drainFirstDocumentsInQueueIfOld(); client.close(); return ThreadState.DISCONNECTED; } return ThreadState.SESSION_SYNCED; case SESSION_SYNCED: try { ProcessResponse processResponse = pullAndProcessData(100); gatewayThrottler.handleCall(processResponse.transitiveErrorCount); } catch (ServerResponseException ser) { log.info("Problems while handing data over to gateway " + endpoint + ": " + Exceptions.toMessageString(ser)); return ThreadState.CONNECTED; } catch (Throwable e) { log.info("Problems while handing data over to gateway " + endpoint + ": " + Exceptions.toMessageString(e)); client.close(); return ThreadState.DISCONNECTED; } return ThreadState.SESSION_SYNCED; default: { log.severe("Should never get here."); client.close(); return ThreadState.DISCONNECTED; } } }
log.info("Handshake did not work out " + endpoint + ": " + Exceptions.toMessageString(ser));
private ThreadState cycle(final ThreadState threadState) { switch(threadState) { case DISCONNECTED: try { if (! client.connect()) { log.log(Level.WARNING, "Connect returned null " + endpoint); drainFirstDocumentsInQueueIfOld(); return ThreadState.DISCONNECTED; } return ThreadState.CONNECTED; } catch (Throwable throwable1) { drainFirstDocumentsInQueueIfOld(); log.log(Level.INFO, "Connect did not work out " + endpoint, throwable1); executeProblemsCounter.incrementAndGet(); return ThreadState.DISCONNECTED; } case CONNECTED: try { client.handshake(); successfullHandshakes.getAndIncrement(); } catch (ServerResponseException ser) { executeProblemsCounter.incrementAndGet(); log.info("Handshake did not work out " + endpoint + ": " + Exceptions.toMessageString(ser)); drainFirstDocumentsInQueueIfOld(); return ThreadState.CONNECTED; } catch (Throwable throwable) { executeProblemsCounter.incrementAndGet(); log.info("Problem with Handshake " + endpoint + ": " + Exceptions.toMessageString(throwable)); drainFirstDocumentsInQueueIfOld(); client.close(); return ThreadState.DISCONNECTED; } return ThreadState.SESSION_SYNCED; case SESSION_SYNCED: try { ProcessResponse processResponse = pullAndProcessData(100); gatewayThrottler.handleCall(processResponse.transitiveErrorCount); } catch (ServerResponseException ser) { log.info("Problems while handing data over to gateway " + endpoint + ": " + Exceptions.toMessageString(ser)); return ThreadState.CONNECTED; } catch (Throwable e) { log.info("Problems while handing data over to gateway " + endpoint + ": " + Exceptions.toMessageString(e)); client.close(); return ThreadState.DISCONNECTED; } return ThreadState.SESSION_SYNCED; default: { log.severe("Should never get here."); client.close(); return ThreadState.DISCONNECTED; } } }
class ProcessResponse { private final int transitiveErrorCount; private final int processResultsCount; ProcessResponse(int transitiveErrorCount, int processResultsCount) { this.transitiveErrorCount = transitiveErrorCount; this.processResultsCount = processResultsCount; } }
class ProcessResponse { private final int transitiveErrorCount; private final int processResultsCount; ProcessResponse(int transitiveErrorCount, int processResultsCount) { this.transitiveErrorCount = transitiveErrorCount; this.processResultsCount = processResultsCount; } }
The filter should distinguish 403 Forbidden (authorization failure) from 401 Unauthorized (authentication failure). Missing credentials => 401, invalid credentials => 403. See https://stackoverflow.com/a/6937030 :)
public void filter(DiscFilterRequest request, ResponseHandler handler) { Optional<X509Certificate> cert = certificateFrom(request); if (!cert.isPresent() || !authorizer.test(() -> commonName(cert.get()), request.getUri())) { responseWriter.accept(ErrorResponse.unauthorized( String.format("%s %s denied for %s: Missing or unauthorized certificate", request.getMethod(), request.getUri().getPath(), request.getRemoteAddr())), handler ); } }
if (!cert.isPresent() || !authorizer.test(() -> commonName(cert.get()), request.getUri())) {
public void filter(DiscFilterRequest request, ResponseHandler handler) { Optional<X509Certificate> cert = certificateFrom(request); if (cert.isPresent()) { if (!authorizer.test(() -> commonName(cert.get()), request.getUri())) { responseWriter.accept(ErrorResponse.forbidden( String.format("%s %s denied for %s: Invalid credentials", request.getMethod(), request.getUri().getPath(), request.getRemoteAddr())), handler ); } } else { responseWriter.accept(ErrorResponse.unauthorized( String.format("%s %s denied for %s: Missing credentials", request.getMethod(), request.getUri().getPath(), request.getRemoteAddr())), handler ); } }
class AuthorizationFilter implements SecurityRequestFilter { private static final Logger log = Logger.getLogger(AuthorizationFilter.class.getName()); private final BiPredicate<Principal, URI> authorizer; private final BiConsumer<ErrorResponse, ResponseHandler> responseWriter; @Inject public AuthorizationFilter(Zone zone, NodeRepository nodeRepository) { this(new Authorizer(zone.system(), nodeRepository), AuthorizationFilter::log); } AuthorizationFilter(Authorizer authorizer, BiConsumer<ErrorResponse, ResponseHandler> responseWriter) { this.authorizer = authorizer; this.responseWriter = responseWriter; } @Override /** Write error response */ static void write(ErrorResponse response, ResponseHandler handler) { try (FastContentWriter writer = ResponseDispatch.newInstance(response.getJdiscResponse()) .connectFastWriter(handler)) { ByteArrayOutputStream out = new ByteArrayOutputStream(); try { response.render(out); } catch (IOException e) { throw new UncheckedIOException(e); } writer.write(out.toByteArray()); } } /** Log error response without writing anything */ private static void log(ErrorResponse response, @SuppressWarnings("unused") ResponseHandler handler) { log.warning("Would reject unauthorized request: " + response.message()); } /** Read common name (CN) from certificate */ private static String commonName(X509Certificate certificate) { try { X500Name subject = new JcaX509CertificateHolder(certificate).getSubject(); RDN cn = subject.getRDNs(BCStyle.CN)[0]; return IETFUtils.valueToString(cn.getFirst().getValue()); } catch (CertificateEncodingException e) { throw new RuntimeException(e); } } /** Get client certificate from request */ private static Optional<X509Certificate> certificateFrom(DiscFilterRequest request) { Object x509cert = request.getAttribute(ServletRequest.JDISC_REQUEST_X509CERT); return Optional.ofNullable(x509cert) .filter(X509Certificate[].class::isInstance) .map(X509Certificate[].class::cast) .filter(certs -> certs.length > 0) .map(certs -> 
certs[0]); } }
class AuthorizationFilter implements SecurityRequestFilter { private static final Logger log = Logger.getLogger(AuthorizationFilter.class.getName()); private final BiPredicate<Principal, URI> authorizer; private final BiConsumer<ErrorResponse, ResponseHandler> responseWriter; @Inject public AuthorizationFilter(Zone zone, NodeRepository nodeRepository) { this(new Authorizer(zone.system(), nodeRepository), AuthorizationFilter::log); } AuthorizationFilter(BiPredicate<Principal, URI> authorizer, BiConsumer<ErrorResponse, ResponseHandler> responseWriter) { this.authorizer = authorizer; this.responseWriter = responseWriter; } @Override /** Write error response */ static void write(ErrorResponse response, ResponseHandler handler) { try (FastContentWriter writer = ResponseDispatch.newInstance(response.getJdiscResponse()) .connectFastWriter(handler)) { ByteArrayOutputStream out = new ByteArrayOutputStream(); try { response.render(out); } catch (IOException e) { throw new UncheckedIOException(e); } writer.write(out.toByteArray()); } } /** Log error response without writing anything */ private static void log(ErrorResponse response, @SuppressWarnings("unused") ResponseHandler handler) { log.warning("Would reject request: " + response.getStatus() + " - " + response.message()); } /** Read common name (CN) from certificate */ private static String commonName(X509Certificate certificate) { try { X500Name subject = new JcaX509CertificateHolder(certificate).getSubject(); RDN cn = subject.getRDNs(BCStyle.CN)[0]; return IETFUtils.valueToString(cn.getFirst().getValue()); } catch (CertificateEncodingException e) { throw new RuntimeException(e); } } /** Get client certificate from request */ private static Optional<X509Certificate> certificateFrom(DiscFilterRequest request) { Object x509cert = request.getAttribute(ServletRequest.JDISC_REQUEST_X509CERT); return Optional.ofNullable(x509cert) .filter(X509Certificate[].class::isInstance) .map(X509Certificate[].class::cast) .filter(certs -> 
certs.length > 0) .map(certs -> certs[0]); } }
This is the same as `application.withChange(application.change().with(applicationVersion))`, so I think you can replace both the if and the else with just that single line.
public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { ApplicationVersion applicationVersion = applicationVersionFrom(report); application = application.withJobCompletion(report, applicationVersion, clock.instant(), controller); application = application.withProjectId(report.projectId()); if (report.success()) { if (report.jobType() == JobType.component) { if (acceptNewApplicationVersionNow(application)) { if (application.change().platform().isPresent()) { application = application.withChange(Change.of(application.change().platform().get(), applicationVersion)); } else { application = application.withChange(Change.of(applicationVersion)); } } else { applications().store(application.withOutstandingChange(Change.of(applicationVersion))); return; } } else if (deploymentComplete(application)) { application = application.withChange(Change.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); }
} else {
public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { ApplicationVersion applicationVersion = applicationVersionFrom(report); application = application.withJobCompletion(report, applicationVersion, clock.instant(), controller); application = application.withProjectId(report.projectId()); if (report.success()) { if (report.jobType() == JobType.component) { if (acceptNewApplicationVersionNow(application)) { if (application.change().platform().isPresent()) { application = application.withChange(Change.of(application.change().platform().get(), applicationVersion)); } else { application = application.withChange(Change.of(applicationVersion)); } } else { applications().store(application.withOutstandingChange(Change.of(applicationVersion))); return; } } else if (deploymentComplete(application)) { application = application.withChange(Change.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ /** Returns whether all production zones listed in deployment spec has this change (or a newer version, if upgrade) */ private boolean deploymentComplete(LockedApplication application) { if ( ! application.change().isPresent()) return true; Change change = application.change(); for (JobType job : order.jobsFrom(application.deploymentSpec())) { if ( ! job.isProduction()) continue; Optional<ZoneId> zone = job.zone(this.controller.system()); if ( ! 
zone.isPresent()) continue; Deployment deployment = application.deployments().get(zone.get()); if (deployment == null) return false; if (change.platform().isPresent()) { if (change.platform().get().isAfter(deployment.version())) return false; } if (change.application().isPresent()) { if ( ! change.application().get().equals(deployment.applicationVersion())) return false; } } return true; } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.change().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.change().platform().isPresent()) { Version target = application.change().platform().get(); if (systemTestStatus == null || ! systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! 
systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (componentStatus != null && changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a application version and/or Vespa version * which is newer (different) than the one last completed successfully in next */ private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.change().isPresent()) return false; if (next == null) return true; if (application.change().platform().isPresent()) { Version targetVersion = application.change().platform().get(); if (next.type().isTest()) { if ( ! 
lastSuccessfulIs(targetVersion, previous.type(), application)) return false; if (lastSuccessfulIs(targetVersion, next.type(), application)) return false; } else if (next.type().isProduction()) { if ( ! lastSuccessfulIs(targetVersion, JobType.stagingTest, application)) return false; if (previous.type().isProduction() && ! alreadyDeployed(targetVersion, application, previous.type())) return false; if (alreadyDeployed(targetVersion, application, next.type())) return false; } else throw new IllegalStateException("Unclassified type of next job: " + next); return true; } else { if ( ! previous.lastSuccess().isPresent()) return false; if ( ! next.lastSuccess().isPresent()) return true; return previous.lastSuccess().get().applicationVersion() != ApplicationVersion.unknown && ! previous.lastSuccess().get().applicationVersion().equals(next.lastSuccess().get().applicationVersion()); } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.change().isPresent() && ! 
application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.change() + " is already in progress"); application = application.withChange(change); if (change.application().isPresent()) application = application.withOutstandingChange(Change.empty()); application = trigger(JobType.systemTest, application, false, change.toString()); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { applications().lockOrThrow(applicationId, application -> { buildSystem.removeJobs(application.id()); applications().store(application.withChange(Change.empty())); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! 
jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.change())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.change().isPresent() ? "deploying " + application.change() : "restarted deployment", reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.change(), clock.instant(), application.deployVersionFor(jobType, controller), application.deployApplicationVersionFor(jobType, controller, false) .orElse(ApplicationVersion.unknown), reason); } /** Create application version from job report */ private ApplicationVersion applicationVersionFrom(JobReport report) { return report.sourceRevision().map(sr -> ApplicationVersion.from(sr, report.buildNumber())) .orElse(ApplicationVersion.unknown); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.change().isPresent() && application.change().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (application.change().platform().isPresent() && jobType.isProduction() && alreadyDeployed((application.change().platform().get()), application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return ! 
JobList.from(application) .production() .running(jobTimeoutLimit()) .isEmpty(); } /** * Returns whether the currently deployed version in the zone for the given production job is newer * than the given version, in which case we should avoid an unsupported downgrade, or if it is the * same version, and was successfully deployed, in which case it is unnecessary to redeploy it. */ private boolean alreadyDeployed(Version version, Application application, JobType job) { if ( ! job.isProduction()) throw new IllegalArgumentException(job + " is not a production job!"); return lastSuccessfulIs(version, job, application) || job.zone(controller.system()) .map(zone -> application.deployments().get(zone)) .map(deployment -> deployment.version().isAfter(version)) .orElse(false); } private boolean acceptNewApplicationVersionNow(LockedApplication application) { if ( ! application.change().isPresent()) return true; if (application.change().application().isPresent()) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Version version, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulRun = status.lastSuccess(); if ( ! lastSuccessfulRun.isPresent()) return false; return lastSuccessfulRun.get().version().equals(version); } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ /** Returns whether all production zones listed in deployment spec has this change (or a newer version, if upgrade) */ private boolean deploymentComplete(LockedApplication application) { if ( ! application.change().isPresent()) return true; Change change = application.change(); for (JobType job : order.jobsFrom(application.deploymentSpec())) { if ( ! job.isProduction()) continue; Optional<ZoneId> zone = job.zone(this.controller.system()); if ( ! 
zone.isPresent()) continue; Deployment deployment = application.deployments().get(zone.get()); if (deployment == null) return false; if (change.platform().isPresent()) { if (change.platform().get().isAfter(deployment.version())) return false; } if (change.application().isPresent()) { if ( ! change.application().get().equals(deployment.applicationVersion())) return false; } } return true; } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.change().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.change().platform().isPresent()) { Version target = application.change().platform().get(); if (systemTestStatus == null || ! systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! 
systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (componentStatus != null && changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a application version and/or Vespa version * which is newer (different) than the one last completed successfully in next */ private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.change().isPresent()) return false; if (next == null) return true; if (application.change().platform().isPresent()) { Version targetVersion = application.change().platform().get(); if (next.type().isTest()) { if ( ! 
lastSuccessfulIs(targetVersion, previous.type(), application)) return false; if (lastSuccessfulIs(targetVersion, next.type(), application)) return false; } else if (next.type().isProduction()) { if ( ! lastSuccessfulIs(targetVersion, JobType.stagingTest, application)) return false; if (previous.type().isProduction() && ! alreadyDeployed(targetVersion, application, previous.type())) return false; if (alreadyDeployed(targetVersion, application, next.type())) return false; } else throw new IllegalStateException("Unclassified type of next job: " + next); return true; } else { if ( ! previous.lastSuccess().isPresent()) return false; if ( ! next.lastSuccess().isPresent()) return true; return previous.lastSuccess().get().applicationVersion() != ApplicationVersion.unknown && ! previous.lastSuccess().get().applicationVersion().equals(next.lastSuccess().get().applicationVersion()); } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.change().isPresent() && ! 
application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.change() + " is already in progress"); application = application.withChange(change); if (change.application().isPresent()) application = application.withOutstandingChange(Change.empty()); application = trigger(JobType.systemTest, application, false, change.toString()); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { applications().lockOrThrow(applicationId, application -> { buildSystem.removeJobs(application.id()); applications().store(application.withChange(Change.empty())); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! 
jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.change())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.change().isPresent() ? "deploying " + application.change() : "restarted deployment", reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.change(), clock.instant(), application.deployVersionFor(jobType, controller), application.deployApplicationVersionFor(jobType, controller, false) .orElse(ApplicationVersion.unknown), reason); } /** Create application version from job report */ private ApplicationVersion applicationVersionFrom(JobReport report) { return report.sourceRevision().map(sr -> ApplicationVersion.from(sr, report.buildNumber())) .orElse(ApplicationVersion.unknown); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.change().isPresent() && application.change().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (application.change().platform().isPresent() && jobType.isProduction() && alreadyDeployed((application.change().platform().get()), application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return ! 
JobList.from(application) .production() .running(jobTimeoutLimit()) .isEmpty(); } /** * Returns whether the currently deployed version in the zone for the given production job is newer * than the given version, in which case we should avoid an unsupported downgrade, or if it is the * same version, and was successfully deployed, in which case it is unnecessary to redeploy it. */ private boolean alreadyDeployed(Version version, Application application, JobType job) { if ( ! job.isProduction()) throw new IllegalArgumentException(job + " is not a production job!"); return lastSuccessfulIs(version, job, application) || job.zone(controller.system()) .map(zone -> application.deployments().get(zone)) .map(deployment -> deployment.version().isAfter(version)) .orElse(false); } private boolean acceptNewApplicationVersionNow(LockedApplication application) { if ( ! application.change().isPresent()) return true; if (application.change().application().isPresent()) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Version version, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulRun = status.lastSuccess(); if ( ! lastSuccessfulRun.isPresent()) return false; return lastSuccessfulRun.get().version().equals(version); } }
Thanks, I'll fix it.
public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { ApplicationVersion applicationVersion = applicationVersionFrom(report); application = application.withJobCompletion(report, applicationVersion, clock.instant(), controller); application = application.withProjectId(report.projectId()); if (report.success()) { if (report.jobType() == JobType.component) { if (acceptNewApplicationVersionNow(application)) { if (application.change().platform().isPresent()) { application = application.withChange(Change.of(application.change().platform().get(), applicationVersion)); } else { application = application.withChange(Change.of(applicationVersion)); } } else { applications().store(application.withOutstandingChange(Change.of(applicationVersion))); return; } } else if (deploymentComplete(application)) { application = application.withChange(Change.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); }
} else {
public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { ApplicationVersion applicationVersion = applicationVersionFrom(report); application = application.withJobCompletion(report, applicationVersion, clock.instant(), controller); application = application.withProjectId(report.projectId()); if (report.success()) { if (report.jobType() == JobType.component) { if (acceptNewApplicationVersionNow(application)) { if (application.change().platform().isPresent()) { application = application.withChange(Change.of(application.change().platform().get(), applicationVersion)); } else { application = application.withChange(Change.of(applicationVersion)); } } else { applications().store(application.withOutstandingChange(Change.of(applicationVersion))); return; } } else if (deploymentComplete(application)) { application = application.withChange(Change.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ /** Returns whether all production zones listed in deployment spec has this change (or a newer version, if upgrade) */ private boolean deploymentComplete(LockedApplication application) { if ( ! application.change().isPresent()) return true; Change change = application.change(); for (JobType job : order.jobsFrom(application.deploymentSpec())) { if ( ! job.isProduction()) continue; Optional<ZoneId> zone = job.zone(this.controller.system()); if ( ! 
zone.isPresent()) continue; Deployment deployment = application.deployments().get(zone.get()); if (deployment == null) return false; if (change.platform().isPresent()) { if (change.platform().get().isAfter(deployment.version())) return false; } if (change.application().isPresent()) { if ( ! change.application().get().equals(deployment.applicationVersion())) return false; } } return true; } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.change().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.change().platform().isPresent()) { Version target = application.change().platform().get(); if (systemTestStatus == null || ! systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! 
systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (componentStatus != null && changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a application version and/or Vespa version * which is newer (different) than the one last completed successfully in next */ private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.change().isPresent()) return false; if (next == null) return true; if (application.change().platform().isPresent()) { Version targetVersion = application.change().platform().get(); if (next.type().isTest()) { if ( ! 
lastSuccessfulIs(targetVersion, previous.type(), application)) return false; if (lastSuccessfulIs(targetVersion, next.type(), application)) return false; } else if (next.type().isProduction()) { if ( ! lastSuccessfulIs(targetVersion, JobType.stagingTest, application)) return false; if (previous.type().isProduction() && ! alreadyDeployed(targetVersion, application, previous.type())) return false; if (alreadyDeployed(targetVersion, application, next.type())) return false; } else throw new IllegalStateException("Unclassified type of next job: " + next); return true; } else { if ( ! previous.lastSuccess().isPresent()) return false; if ( ! next.lastSuccess().isPresent()) return true; return previous.lastSuccess().get().applicationVersion() != ApplicationVersion.unknown && ! previous.lastSuccess().get().applicationVersion().equals(next.lastSuccess().get().applicationVersion()); } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.change().isPresent() && ! 
application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.change() + " is already in progress"); application = application.withChange(change); if (change.application().isPresent()) application = application.withOutstandingChange(Change.empty()); application = trigger(JobType.systemTest, application, false, change.toString()); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { applications().lockOrThrow(applicationId, application -> { buildSystem.removeJobs(application.id()); applications().store(application.withChange(Change.empty())); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! 
jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.change())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.change().isPresent() ? "deploying " + application.change() : "restarted deployment", reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.change(), clock.instant(), application.deployVersionFor(jobType, controller), application.deployApplicationVersionFor(jobType, controller, false) .orElse(ApplicationVersion.unknown), reason); } /** Create application version from job report */ private ApplicationVersion applicationVersionFrom(JobReport report) { return report.sourceRevision().map(sr -> ApplicationVersion.from(sr, report.buildNumber())) .orElse(ApplicationVersion.unknown); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.change().isPresent() && application.change().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (application.change().platform().isPresent() && jobType.isProduction() && alreadyDeployed((application.change().platform().get()), application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return ! 
JobList.from(application) .production() .running(jobTimeoutLimit()) .isEmpty(); } /** * Returns whether the currently deployed version in the zone for the given production job is newer * than the given version, in which case we should avoid an unsupported downgrade, or if it is the * same version, and was successfully deployed, in which case it is unnecessary to redeploy it. */ private boolean alreadyDeployed(Version version, Application application, JobType job) { if ( ! job.isProduction()) throw new IllegalArgumentException(job + " is not a production job!"); return lastSuccessfulIs(version, job, application) || job.zone(controller.system()) .map(zone -> application.deployments().get(zone)) .map(deployment -> deployment.version().isAfter(version)) .orElse(false); } private boolean acceptNewApplicationVersionNow(LockedApplication application) { if ( ! application.change().isPresent()) return true; if (application.change().application().isPresent()) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Version version, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulRun = status.lastSuccess(); if ( ! lastSuccessfulRun.isPresent()) return false; return lastSuccessfulRun.get().version().equals(version); } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ /** Returns whether all production zones listed in deployment spec has this change (or a newer version, if upgrade) */ private boolean deploymentComplete(LockedApplication application) { if ( ! application.change().isPresent()) return true; Change change = application.change(); for (JobType job : order.jobsFrom(application.deploymentSpec())) { if ( ! job.isProduction()) continue; Optional<ZoneId> zone = job.zone(this.controller.system()); if ( ! 
zone.isPresent()) continue; Deployment deployment = application.deployments().get(zone.get()); if (deployment == null) return false; if (change.platform().isPresent()) { if (change.platform().get().isAfter(deployment.version())) return false; } if (change.application().isPresent()) { if ( ! change.application().get().equals(deployment.applicationVersion())) return false; } } return true; } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.change().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.change().platform().isPresent()) { Version target = application.change().platform().get(); if (systemTestStatus == null || ! systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! 
systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (componentStatus != null && changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a application version and/or Vespa version * which is newer (different) than the one last completed successfully in next */ private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.change().isPresent()) return false; if (next == null) return true; if (application.change().platform().isPresent()) { Version targetVersion = application.change().platform().get(); if (next.type().isTest()) { if ( ! 
lastSuccessfulIs(targetVersion, previous.type(), application)) return false; if (lastSuccessfulIs(targetVersion, next.type(), application)) return false; } else if (next.type().isProduction()) { if ( ! lastSuccessfulIs(targetVersion, JobType.stagingTest, application)) return false; if (previous.type().isProduction() && ! alreadyDeployed(targetVersion, application, previous.type())) return false; if (alreadyDeployed(targetVersion, application, next.type())) return false; } else throw new IllegalStateException("Unclassified type of next job: " + next); return true; } else { if ( ! previous.lastSuccess().isPresent()) return false; if ( ! next.lastSuccess().isPresent()) return true; return previous.lastSuccess().get().applicationVersion() != ApplicationVersion.unknown && ! previous.lastSuccess().get().applicationVersion().equals(next.lastSuccess().get().applicationVersion()); } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.change().isPresent() && ! 
application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.change() + " is already in progress"); application = application.withChange(change); if (change.application().isPresent()) application = application.withOutstandingChange(Change.empty()); application = trigger(JobType.systemTest, application, false, change.toString()); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { applications().lockOrThrow(applicationId, application -> { buildSystem.removeJobs(application.id()); applications().store(application.withChange(Change.empty())); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! 
jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.change())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.change().isPresent() ? "deploying " + application.change() : "restarted deployment", reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.change(), clock.instant(), application.deployVersionFor(jobType, controller), application.deployApplicationVersionFor(jobType, controller, false) .orElse(ApplicationVersion.unknown), reason); } /** Create application version from job report */ private ApplicationVersion applicationVersionFrom(JobReport report) { return report.sourceRevision().map(sr -> ApplicationVersion.from(sr, report.buildNumber())) .orElse(ApplicationVersion.unknown); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.change().isPresent() && application.change().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (application.change().platform().isPresent() && jobType.isProduction() && alreadyDeployed((application.change().platform().get()), application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return ! 
JobList.from(application) .production() .running(jobTimeoutLimit()) .isEmpty(); } /** * Returns whether the currently deployed version in the zone for the given production job is newer * than the given version, in which case we should avoid an unsupported downgrade, or if it is the * same version, and was successfully deployed, in which case it is unnecessary to redeploy it. */ private boolean alreadyDeployed(Version version, Application application, JobType job) { if ( ! job.isProduction()) throw new IllegalArgumentException(job + " is not a production job!"); return lastSuccessfulIs(version, job, application) || job.zone(controller.system()) .map(zone -> application.deployments().get(zone)) .map(deployment -> deployment.version().isAfter(version)) .orElse(false); } private boolean acceptNewApplicationVersionNow(LockedApplication application) { if ( ! application.change().isPresent()) return true; if (application.change().application().isPresent()) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Version version, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulRun = status.lastSuccess(); if ( ! lastSuccessfulRun.isPresent()) return false; return lastSuccessfulRun.get().version().equals(version); } }
System.out.println
private ExpressionNode transformConstantReference(ReferenceNode node, RankProfileTransformContext context) { Value value = context.constants().get(node.getName()); if (value == null || value.type().rank() == 0) { return node; } TensorValue tensorValue = (TensorValue)value; String featureName = CONSTANT + "(" + node.getName() + ")"; String tensorType = tensorValue.asTensor().type().toString(); context.rankPropertiesOutput().put(featureName + ".value", tensorValue.toString()); context.rankPropertiesOutput().put(featureName + ".type", tensorType); System.out.println("==== turning " + node + " into " + new ReferenceNode(CONSTANT, Arrays.asList(new NameNode(node.getName())), null)); return new ReferenceNode(CONSTANT, Arrays.asList(new NameNode(node.getName())), null); }
System.out.println("==== turning " + node + " into " + new ReferenceNode(CONSTANT, Arrays.asList(new NameNode(node.getName())), null));
private ExpressionNode transformConstantReference(ReferenceNode node, RankProfileTransformContext context) { Value value = context.constants().get(node.getName()); if (value == null || value.type().rank() == 0) { return node; } TensorValue tensorValue = (TensorValue)value; String featureName = CONSTANT + "(" + node.getName() + ")"; String tensorType = tensorValue.asTensor().type().toString(); context.rankPropertiesOutput().put(featureName + ".value", tensorValue.toString()); context.rankPropertiesOutput().put(featureName + ".type", tensorType); return new ReferenceNode(CONSTANT, Arrays.asList(new NameNode(node.getName())), null); }
class ConstantTensorTransformer extends ExpressionTransformer<RankProfileTransformContext> { public static final String CONSTANT = "constant"; @Override public ExpressionNode transform(ExpressionNode node, RankProfileTransformContext context) { if (node instanceof ReferenceNode) { return transformFeature((ReferenceNode) node, context); } else if (node instanceof CompositeNode) { return transformChildren((CompositeNode) node, context); } else { return node; } } private ExpressionNode transformFeature(ReferenceNode node, RankProfileTransformContext context) { if ( ! node.getArguments().isEmpty() && ! FeatureNames.isSimpleFeature(node.reference())) { return transformArguments(node, context); } else { return transformConstantReference(node, context); } } private ExpressionNode transformArguments(ReferenceNode node, RankProfileTransformContext context) { List<ExpressionNode> arguments = node.getArguments().expressions(); List<ExpressionNode> transformedArguments = new ArrayList<>(arguments.size()); for (ExpressionNode argument : arguments) { transformedArguments.add(transform(argument, context)); } return node.setArguments(transformedArguments); } }
class ConstantTensorTransformer extends ExpressionTransformer<RankProfileTransformContext> { public static final String CONSTANT = "constant"; @Override public ExpressionNode transform(ExpressionNode node, RankProfileTransformContext context) { if (node instanceof ReferenceNode) { return transformFeature((ReferenceNode) node, context); } else if (node instanceof CompositeNode) { return transformChildren((CompositeNode) node, context); } else { return node; } } private ExpressionNode transformFeature(ReferenceNode node, RankProfileTransformContext context) { if ( ! node.getArguments().isEmpty() && ! FeatureNames.isSimpleFeature(node.reference())) { return transformArguments(node, context); } else { return transformConstantReference(node, context); } } private ExpressionNode transformArguments(ReferenceNode node, RankProfileTransformContext context) { List<ExpressionNode> arguments = node.getArguments().expressions(); List<ExpressionNode> transformedArguments = new ArrayList<>(arguments.size()); for (ExpressionNode argument : arguments) { transformedArguments.add(transform(argument, context)); } return node.setArguments(transformedArguments); } }
I don't see what is being logged here. The BuildJob has no toString so probably not much... I think we should include: tenant, application, screwdriverId and job (or zone/region)
protected void maintain() { controller().applications().deploymentTrigger().deploymentQueue().takeJobsToRun() .forEach(buildJob -> executor.execute(() -> { log.log(Level.INFO, "Attempting to trigger " + buildJob + " in Screwdriver."); for (int i = 0; i < triggeringRetries; i++) if (buildService.trigger(buildJob)) return; log.log(Level.WARNING, "Exhausted all " + triggeringRetries + " retries for " + buildJob + " without success."); })); }
log.log(Level.INFO, "Attempting to trigger " + buildJob + " in Screwdriver.");
protected void maintain() { controller().applications().deploymentTrigger().deploymentQueue().takeJobsToRun() .forEach(buildJob -> executor.execute(() -> { log.log(Level.INFO, "Attempting to trigger " + buildJob + " in Screwdriver."); for (int i = 0; i < triggeringRetries; i++) if (buildService.trigger(buildJob)) return; log.log(Level.WARNING, "Exhausted all " + triggeringRetries + " retries for " + buildJob + " without success."); })); }
class DeploymentJobExecutor extends Maintainer { private static final Logger log = Logger.getLogger(DeploymentJobExecutor.class.getName()); private static final int triggeringRetries = 5; private final BuildService buildService; private final Executor executor; public DeploymentJobExecutor(Controller controller, Duration triggeringInterval, JobControl jobControl, BuildService buildService) { this(controller, triggeringInterval, jobControl, buildService, Executors.newFixedThreadPool(20)); } DeploymentJobExecutor(Controller controller, Duration triggeringInterval, JobControl jobControl, BuildService buildService, Executor executor) { super(controller, triggeringInterval, jobControl); this.buildService = buildService; this.executor = executor; } @Override }
class DeploymentJobExecutor extends Maintainer { private static final Logger log = Logger.getLogger(DeploymentJobExecutor.class.getName()); private static final int triggeringRetries = 5; private final BuildService buildService; private final Executor executor; public DeploymentJobExecutor(Controller controller, Duration triggeringInterval, JobControl jobControl, BuildService buildService) { this(controller, triggeringInterval, jobControl, buildService, Executors.newFixedThreadPool(20)); } DeploymentJobExecutor(Controller controller, Duration triggeringInterval, JobControl jobControl, BuildService buildService, Executor executor) { super(controller, triggeringInterval, jobControl); this.buildService = buildService; this.executor = executor; } @Override }
There is a `toString`, but there is a second `BuildJob` class now, which you probably looked at.
protected void maintain() { controller().applications().deploymentTrigger().deploymentQueue().takeJobsToRun() .forEach(buildJob -> executor.execute(() -> { log.log(Level.INFO, "Attempting to trigger " + buildJob + " in Screwdriver."); for (int i = 0; i < triggeringRetries; i++) if (buildService.trigger(buildJob)) return; log.log(Level.WARNING, "Exhausted all " + triggeringRetries + " retries for " + buildJob + " without success."); })); }
log.log(Level.INFO, "Attempting to trigger " + buildJob + " in Screwdriver.");
protected void maintain() { controller().applications().deploymentTrigger().deploymentQueue().takeJobsToRun() .forEach(buildJob -> executor.execute(() -> { log.log(Level.INFO, "Attempting to trigger " + buildJob + " in Screwdriver."); for (int i = 0; i < triggeringRetries; i++) if (buildService.trigger(buildJob)) return; log.log(Level.WARNING, "Exhausted all " + triggeringRetries + " retries for " + buildJob + " without success."); })); }
class DeploymentJobExecutor extends Maintainer { private static final Logger log = Logger.getLogger(DeploymentJobExecutor.class.getName()); private static final int triggeringRetries = 5; private final BuildService buildService; private final Executor executor; public DeploymentJobExecutor(Controller controller, Duration triggeringInterval, JobControl jobControl, BuildService buildService) { this(controller, triggeringInterval, jobControl, buildService, Executors.newFixedThreadPool(20)); } DeploymentJobExecutor(Controller controller, Duration triggeringInterval, JobControl jobControl, BuildService buildService, Executor executor) { super(controller, triggeringInterval, jobControl); this.buildService = buildService; this.executor = executor; } @Override }
class DeploymentJobExecutor extends Maintainer { private static final Logger log = Logger.getLogger(DeploymentJobExecutor.class.getName()); private static final int triggeringRetries = 5; private final BuildService buildService; private final Executor executor; public DeploymentJobExecutor(Controller controller, Duration triggeringInterval, JobControl jobControl, BuildService buildService) { this(controller, triggeringInterval, jobControl, buildService, Executors.newFixedThreadPool(20)); } DeploymentJobExecutor(Controller controller, Duration triggeringInterval, JobControl jobControl, BuildService buildService, Executor executor) { super(controller, triggeringInterval, jobControl); this.buildService = buildService; this.executor = executor; } @Override }
I'm rewriting this in any case now, so will print more information. Currently, only project ID and job type are available, and those are included in the `toString`./
protected void maintain() { controller().applications().deploymentTrigger().deploymentQueue().takeJobsToRun() .forEach(buildJob -> executor.execute(() -> { log.log(Level.INFO, "Attempting to trigger " + buildJob + " in Screwdriver."); for (int i = 0; i < triggeringRetries; i++) if (buildService.trigger(buildJob)) return; log.log(Level.WARNING, "Exhausted all " + triggeringRetries + " retries for " + buildJob + " without success."); })); }
log.log(Level.INFO, "Attempting to trigger " + buildJob + " in Screwdriver.");
protected void maintain() { controller().applications().deploymentTrigger().deploymentQueue().takeJobsToRun() .forEach(buildJob -> executor.execute(() -> { log.log(Level.INFO, "Attempting to trigger " + buildJob + " in Screwdriver."); for (int i = 0; i < triggeringRetries; i++) if (buildService.trigger(buildJob)) return; log.log(Level.WARNING, "Exhausted all " + triggeringRetries + " retries for " + buildJob + " without success."); })); }
class DeploymentJobExecutor extends Maintainer { private static final Logger log = Logger.getLogger(DeploymentJobExecutor.class.getName()); private static final int triggeringRetries = 5; private final BuildService buildService; private final Executor executor; public DeploymentJobExecutor(Controller controller, Duration triggeringInterval, JobControl jobControl, BuildService buildService) { this(controller, triggeringInterval, jobControl, buildService, Executors.newFixedThreadPool(20)); } DeploymentJobExecutor(Controller controller, Duration triggeringInterval, JobControl jobControl, BuildService buildService, Executor executor) { super(controller, triggeringInterval, jobControl); this.buildService = buildService; this.executor = executor; } @Override }
class DeploymentJobExecutor extends Maintainer { private static final Logger log = Logger.getLogger(DeploymentJobExecutor.class.getName()); private static final int triggeringRetries = 5; private final BuildService buildService; private final Executor executor; public DeploymentJobExecutor(Controller controller, Duration triggeringInterval, JobControl jobControl, BuildService buildService) { this(controller, triggeringInterval, jobControl, buildService, Executors.newFixedThreadPool(20)); } DeploymentJobExecutor(Controller controller, Duration triggeringInterval, JobControl jobControl, BuildService buildService, Executor executor) { super(controller, triggeringInterval, jobControl); this.buildService = buildService; this.executor = executor; } @Override }
OK, but this only includes project and name
protected void maintain() { controller().applications().deploymentTrigger().deploymentQueue().takeJobsToRun() .forEach(buildJob -> executor.execute(() -> { log.log(Level.INFO, "Attempting to trigger " + buildJob + " in Screwdriver."); for (int i = 0; i < triggeringRetries; i++) if (buildService.trigger(buildJob)) return; log.log(Level.WARNING, "Exhausted all " + triggeringRetries + " retries for " + buildJob + " without success."); })); }
log.log(Level.INFO, "Attempting to trigger " + buildJob + " in Screwdriver.");
protected void maintain() { controller().applications().deploymentTrigger().deploymentQueue().takeJobsToRun() .forEach(buildJob -> executor.execute(() -> { log.log(Level.INFO, "Attempting to trigger " + buildJob + " in Screwdriver."); for (int i = 0; i < triggeringRetries; i++) if (buildService.trigger(buildJob)) return; log.log(Level.WARNING, "Exhausted all " + triggeringRetries + " retries for " + buildJob + " without success."); })); }
class DeploymentJobExecutor extends Maintainer { private static final Logger log = Logger.getLogger(DeploymentJobExecutor.class.getName()); private static final int triggeringRetries = 5; private final BuildService buildService; private final Executor executor; public DeploymentJobExecutor(Controller controller, Duration triggeringInterval, JobControl jobControl, BuildService buildService) { this(controller, triggeringInterval, jobControl, buildService, Executors.newFixedThreadPool(20)); } DeploymentJobExecutor(Controller controller, Duration triggeringInterval, JobControl jobControl, BuildService buildService, Executor executor) { super(controller, triggeringInterval, jobControl); this.buildService = buildService; this.executor = executor; } @Override }
class DeploymentJobExecutor extends Maintainer { private static final Logger log = Logger.getLogger(DeploymentJobExecutor.class.getName()); private static final int triggeringRetries = 5; private final BuildService buildService; private final Executor executor; public DeploymentJobExecutor(Controller controller, Duration triggeringInterval, JobControl jobControl, BuildService buildService) { this(controller, triggeringInterval, jobControl, buildService, Executors.newFixedThreadPool(20)); } DeploymentJobExecutor(Controller controller, Duration triggeringInterval, JobControl jobControl, BuildService buildService, Executor executor) { super(controller, triggeringInterval, jobControl); this.buildService = buildService; this.executor = executor; } @Override }
`String.valueOf(bucketsPending)` feels cleaner and more obvious
private static void addBucketsPending(HtmlTable.Row row, ContentNodeStats.BucketSpaceStats bucketSpaceStats) { if (bucketSpaceStats != null) { long bucketsPending = bucketSpaceStats.getBucketsPending(); String cellValue = "" + bucketsPending; if (!bucketSpaceStats.valid()) { cellValue += "?"; } row.addCell(new HtmlTable.Cell(cellValue)); if (bucketsPending > 0 || !bucketSpaceStats.valid()) { row.getLastCell().addProperties(WARNING_PROPERTY); } } else { row.addCell(new HtmlTable.Cell("-").addProperties(CENTERED_PROPERTY)); } }
String cellValue = "" + bucketsPending;
private static void addBucketsPending(HtmlTable.Row row, ContentNodeStats.BucketSpaceStats bucketSpaceStats) { if (bucketSpaceStats != null) { long bucketsPending = bucketSpaceStats.getBucketsPending(); String cellValue = String.valueOf(bucketsPending); if (!bucketSpaceStats.valid()) { cellValue += "?"; } row.addCell(new HtmlTable.Cell(cellValue)); if (bucketsPending > 0 || !bucketSpaceStats.valid()) { row.getLastCell().addProperties(WARNING_PROPERTY); } } else { row.addCell(new HtmlTable.Cell("-").addProperties(CENTERED_PROPERTY)); } }
class Table { private final HtmlTable table = new HtmlTable(); private final HtmlTable.CellProperties headerProperties; private final StringBuilder contentBuilder = new StringBuilder(); private final static String TAG_NOT_SET = "not set"; private final static HtmlTable.CellProperties WARNING_PROPERTY = new HtmlTable.CellProperties().setBackgroundColor(0xffffc0); private final static HtmlTable.CellProperties ERROR_PROPERTY = new HtmlTable.CellProperties().setBackgroundColor(0xffc0c0); private final static HtmlTable.CellProperties CENTERED_PROPERTY = new HtmlTable.CellProperties().align(HtmlTable.Orientation.CENTER); Table(final String clusterName, final int slobrokGenerationCount) { table.getTableProperties().align(HtmlTable.Orientation.RIGHT).setBackgroundColor(0xc0ffc0); table.getColProperties(0).align(HtmlTable.Orientation.CENTER).setBackgroundColor(0xffffff); table.getColProperties(1).align(HtmlTable.Orientation.LEFT); table.getColProperties(2).align(HtmlTable.Orientation.LEFT); table.getColProperties(3).align(HtmlTable.Orientation.LEFT); table.getColProperties(7).align(HtmlTable.Orientation.LEFT); table.getColProperties(14).align(HtmlTable.Orientation.LEFT); for (int i = 4; i < 15; ++i) table.getColProperties(i).allowLineBreaks(false); headerProperties = new HtmlTable.CellProperties() .setBackgroundColor(0xffffff) .align(HtmlTable.Orientation.CENTER); contentBuilder.append("<h2>State of content cluster '") .append(clusterName) .append("'.</h2>\n") .append("<p>Based on information retrieved from slobrok at generation ") .append(slobrokGenerationCount).append(".</p>\n"); } public void addTable(final StringBuilder destination, final long stableStateTimePeriode) { destination.append(contentBuilder); destination.append(table.toString()) .append("<p>") .append("<p>"); addFooter(destination, stableStateTimePeriode); } public void renderNodes( final TreeMap<Integer, NodeInfo> storageNodeInfos, final TreeMap<Integer, NodeInfo> distributorNodeInfos, final Timer timer, 
final ClusterState state, final ClusterStatsAggregator statsAggregator, final int maxPrematureCrashes, final EventLog eventLog, final String pathPrefix, final String name) { final String dominantVtag = findDominantVtag( storageNodeInfos, distributorNodeInfos); renderNodesOneType(storageNodeInfos, NodeType.STORAGE, timer, state, statsAggregator, maxPrematureCrashes, eventLog, pathPrefix, dominantVtag, name); renderNodesOneType(distributorNodeInfos, NodeType.DISTRIBUTOR, timer, state, statsAggregator, maxPrematureCrashes, eventLog, pathPrefix, dominantVtag, name); } private String findDominantVtag( final Map<Integer, NodeInfo> storageNodeInfos, final Map<Integer, NodeInfo> distributorNodeInfos) { final List<NodeInfo> nodeInfos = new ArrayList<>(); nodeInfos.addAll(storageNodeInfos.values()); nodeInfos.addAll(distributorNodeInfos.values()); final Map<String, Integer> versionTagToCount = new HashMap<>(); int maxCount = -1; String dominantVtag = null; for (NodeInfo nodeInfo : nodeInfos) { final String buildTag = nodeInfo.getVtag(); Integer count = versionTagToCount.get(buildTag); count = count == null ? 1 : count + 1; versionTagToCount.put(buildTag, count); if (count > maxCount) { maxCount = count; dominantVtag = buildTag; } } return dominantVtag == null ? TAG_NOT_SET : dominantVtag; } private void addTableHeader(final String name, final NodeType nodeType) { table.addRow(new HtmlTable.Row().addCell( new HtmlTable.Cell("Group " + name) .addProperties(new HtmlTable.CellProperties() .setColSpan(0) .setBackgroundColor(0xccccff) .align(HtmlTable.Orientation.LEFT)))); table.addRow(new HtmlTable.Row() .setHeaderRow() .addProperties(headerProperties) .addProperties(new HtmlTable.CellProperties().setRowSpan(2)) .addCell(new HtmlTable.Cell(nodeType == NodeType.DISTRIBUTOR ? 
"Distributor" : "Storage")) .addCell(new HtmlTable.Cell("Node states") .addProperties(new HtmlTable.CellProperties().setColSpan(3).setRowSpan(1))) .addCell(new HtmlTable.Cell("Build")) .addCell(new HtmlTable.Cell("FC<sup>1)</sup>")) .addCell(new HtmlTable.Cell("OCT<sup>2)</sup>")) .addCell(new HtmlTable.Cell("SPT<sup>3)</sup>")) .addCell(new HtmlTable.Cell("SSV<sup>4)</sup>")) .addCell(new HtmlTable.Cell("PC<sup>5)</sup>")) .addCell(new HtmlTable.Cell("ELW<sup>6)</sup>")) .addCell(new HtmlTable.Cell("Buckets pending") .addProperties(new HtmlTable.CellProperties().setColSpan(2).setRowSpan(1))) .addCell(new HtmlTable.Cell("Start Time")) .addCell(new HtmlTable.Cell("RPC Address"))); table.addRow(new HtmlTable.Row().setHeaderRow().addProperties(headerProperties) .addCell(new HtmlTable.Cell("Reported")) .addCell(new HtmlTable.Cell("Wanted")) .addCell(new HtmlTable.Cell("System")) .addCell(new HtmlTable.Cell(FixedBucketSpaces.defaultSpace())) .addCell(new HtmlTable.Cell(FixedBucketSpaces.globalSpace()))); } private void renderNodesOneType( final TreeMap<Integer, NodeInfo> nodeInfos, final NodeType nodeType, final Timer timer, final ClusterState state, final ClusterStatsAggregator statsAggregator, final int maxPrematureCrashes, final EventLog eventLog, final String pathPrefix, final String dominantVtag, final String name) { final long currentTime = timer.getCurrentTimeInMillis(); addTableHeader(name, nodeType); for (final NodeInfo nodeInfo : nodeInfos.values()) { HtmlTable.Row row = new HtmlTable.Row(); row.addCell(new HtmlTable.Cell("<a href=\"" + pathPrefix + "/node=" + nodeInfo.getNode() + "\">" + nodeInfo.getNodeIndex() + "</a>")); NodeState reportedState = nodeInfo.getReportedState().clone().setStartTimestamp(0); row.addCell(new HtmlTable.Cell(HtmlTable.escape(reportedState.toString(true)))); if (!nodeInfo.getReportedState().getState().equals(State.UP)) { row.getLastCell().addProperties(WARNING_PROPERTY); } if (nodeInfo.getWantedState() == null || 
nodeInfo.getWantedState().getState().equals(State.UP)) { row.addCell(new HtmlTable.Cell("-").addProperties(CENTERED_PROPERTY)); } else { row.addCell(new HtmlTable.Cell(HtmlTable.escape(nodeInfo.getWantedState().toString(true)))); if (nodeInfo.getWantedState().toString(true).indexOf("Disabled by fleet controller") != -1) { row.getLastCell().addProperties(ERROR_PROPERTY); } else { row.getLastCell().addProperties(WARNING_PROPERTY); } } NodeState ns = state.getNodeState(nodeInfo.getNode()).clone().setDescription("").setMinUsedBits(16); if (state.getClusterState().oneOf("uir")) { row.addCell(new HtmlTable.Cell(HtmlTable.escape(ns.toString(true)))); if (ns.getState().equals(State.DOWN)) { row.getLastCell().addProperties(ERROR_PROPERTY); } else if (ns.getState().oneOf("mi")) { row.getLastCell().addProperties(WARNING_PROPERTY); } } else { row.addCell(new HtmlTable.Cell("Cluster " + state.getClusterState().name().toLowerCase()).addProperties(ERROR_PROPERTY)); } final String buildTagText = nodeInfo.getVtag() != null ? nodeInfo.getVtag() : TAG_NOT_SET; row.addCell(new HtmlTable.Cell(buildTagText)); if (! dominantVtag.equals(nodeInfo.getVtag())) { row.getLastCell().addProperties(WARNING_PROPERTY); } row.addCell(new HtmlTable.Cell("" + nodeInfo.getConnectionAttemptCount())); long timeSinceContact = nodeInfo.getTimeOfFirstFailingConnectionAttempt() == 0 ? 
0 : currentTime - nodeInfo.getTimeOfFirstFailingConnectionAttempt(); if (timeSinceContact > 60 * 1000) { row.getLastCell().addProperties(ERROR_PROPERTY); } else if (nodeInfo.getConnectionAttemptCount() > 0) { row.getLastCell().addProperties(WARNING_PROPERTY); } row.addCell(new HtmlTable.Cell((timeSinceContact / 1000) + " s")); if (timeSinceContact > 60 * 1000) { row.getLastCell().addProperties(ERROR_PROPERTY); } else if (nodeInfo.getConnectionAttemptCount() > 0) { row.getLastCell().addProperties(WARNING_PROPERTY); } if (nodeInfo.getLatestNodeStateRequestTime() == null) { row.addCell(new HtmlTable.Cell("-").addProperties(CENTERED_PROPERTY)); } else { row.addCell(new HtmlTable.Cell(HtmlTable.escape(RealTimer.printDuration( currentTime - nodeInfo.getLatestNodeStateRequestTime())))); } row.addCell(new HtmlTable.Cell("" + nodeInfo.getSystemStateVersionAcknowledged())); if (nodeInfo.getSystemStateVersionAcknowledged() < state.getVersion() - 2) { row.getLastCell().addProperties(ERROR_PROPERTY); } else if (nodeInfo.getSystemStateVersionAcknowledged() < state.getVersion()) { row.getLastCell().addProperties(WARNING_PROPERTY); } row.addCell(new HtmlTable.Cell("" + nodeInfo.getPrematureCrashCount())); if (nodeInfo.getPrematureCrashCount() >= maxPrematureCrashes) { row.getLastCell().addProperties(ERROR_PROPERTY); } else if (nodeInfo.getPrematureCrashCount() > 0) { row.getLastCell().addProperties(WARNING_PROPERTY); } int nodeEvents = eventLog.getNodeEventsSince(nodeInfo.getNode(), currentTime - eventLog.getRecentTimePeriod()); row.addCell(new HtmlTable.Cell("" + nodeEvents)); if (nodeEvents > 20) { row.getLastCell().addProperties(ERROR_PROPERTY); } else if (nodeEvents > 3) { row.getLastCell().addProperties(WARNING_PROPERTY); } if (nodeType.equals(NodeType.STORAGE)) { addBucketsPending(row, getStatsForContentNode(statsAggregator, nodeInfo, FixedBucketSpaces.defaultSpace())); addBucketsPending(row, getStatsForContentNode(statsAggregator, nodeInfo, 
FixedBucketSpaces.globalSpace())); } else { addBucketsPending(row, getStatsForDistributorNode(statsAggregator, nodeInfo, FixedBucketSpaces.defaultSpace())); addBucketsPending(row, getStatsForDistributorNode(statsAggregator, nodeInfo, FixedBucketSpaces.globalSpace())); } if (nodeInfo.getStartTimestamp() == 0) { row.addCell(new HtmlTable.Cell("-").addProperties(ERROR_PROPERTY).addProperties(CENTERED_PROPERTY)); } else { String startTime = RealTimer.printDateNoMilliSeconds( 1000 * nodeInfo.getStartTimestamp(), utcTimeZone); row.addCell(new HtmlTable.Cell(HtmlTable.escape(startTime))); } if (nodeInfo.getRpcAddress() == null) { row.addCell(new HtmlTable.Cell("-").addProperties(ERROR_PROPERTY)); } else { row.addCell(new HtmlTable.Cell(HtmlTable.escape(nodeInfo.getRpcAddress()))); if (nodeInfo.isRpcAddressOutdated()) { row.getLastCell().addProperties(WARNING_PROPERTY); } } table.addRow(row); } } private static ContentNodeStats.BucketSpaceStats getStatsForContentNode(ClusterStatsAggregator statsAggregator, NodeInfo nodeInfo, String bucketSpace) { ContentNodeStats nodeStats = statsAggregator.getAggregatedStats().getContentNode(nodeInfo.getNodeIndex()); if (nodeStats != null) { return nodeStats.getBucketSpace(bucketSpace); } return null; } private static ContentNodeStats.BucketSpaceStats getStatsForDistributorNode(ClusterStatsAggregator statsAggregator, NodeInfo nodeInfo, String bucketSpace) { ContentNodeStats nodeStats = statsAggregator.getAggregatedStatsForDistributor(nodeInfo.getNodeIndex()); return nodeStats.getBucketSpace(bucketSpace); } private void addFooter(final StringBuilder contentBuilder, final long stableStateTimePeriode) { contentBuilder.append("<font size=\"-1\">\n") .append("1) FC - Failed connections - We have tried to connect to the nodes this many times " + "without being able to contact it.<br>\n") .append("2) OCT - Out of contact time - Time in seconds we have failed to contact the node.<br>\n") .append("3) SPT - State pending time - Time the current 
getNodeState request has been " + "pending.<br>\n") .append("4) SSV - System state version - The latest system state version the node has " + "acknowledged.<br>\n") .append("5) PC - Premature crashes - Number of times node has crashed since last time it had " + "been stable in up or down state for more than " + RealTimer.printDuration(stableStateTimePeriode) + ".<br>\n") .append("6) ELW - Events last week - The number of events that has occured on this node the " + "last week. (Or shorter period if a week haven't passed since restart or more than " + "max events to keep in node event log have happened during last week.)<br>\n") .append("</font>\n"); } }
class Table { private final HtmlTable table = new HtmlTable(); private final HtmlTable.CellProperties headerProperties; private final StringBuilder contentBuilder = new StringBuilder(); private final static String TAG_NOT_SET = "not set"; private final static HtmlTable.CellProperties WARNING_PROPERTY = new HtmlTable.CellProperties().setBackgroundColor(0xffffc0); private final static HtmlTable.CellProperties ERROR_PROPERTY = new HtmlTable.CellProperties().setBackgroundColor(0xffc0c0); private final static HtmlTable.CellProperties CENTERED_PROPERTY = new HtmlTable.CellProperties().align(HtmlTable.Orientation.CENTER); Table(final String clusterName, final int slobrokGenerationCount) { table.getTableProperties().align(HtmlTable.Orientation.RIGHT).setBackgroundColor(0xc0ffc0); table.getColProperties(0).align(HtmlTable.Orientation.CENTER).setBackgroundColor(0xffffff); table.getColProperties(1).align(HtmlTable.Orientation.LEFT); table.getColProperties(2).align(HtmlTable.Orientation.LEFT); table.getColProperties(3).align(HtmlTable.Orientation.LEFT); table.getColProperties(7).align(HtmlTable.Orientation.LEFT); table.getColProperties(14).align(HtmlTable.Orientation.LEFT); for (int i = 4; i < 15; ++i) table.getColProperties(i).allowLineBreaks(false); headerProperties = new HtmlTable.CellProperties() .setBackgroundColor(0xffffff) .align(HtmlTable.Orientation.CENTER); contentBuilder.append("<h2>State of content cluster '") .append(clusterName) .append("'.</h2>\n") .append("<p>Based on information retrieved from slobrok at generation ") .append(slobrokGenerationCount).append(".</p>\n"); } public void addTable(final StringBuilder destination, final long stableStateTimePeriode) { destination.append(contentBuilder); destination.append(table.toString()) .append("<p>") .append("<p>"); addFooter(destination, stableStateTimePeriode); } public void renderNodes( final TreeMap<Integer, NodeInfo> storageNodeInfos, final TreeMap<Integer, NodeInfo> distributorNodeInfos, final Timer timer, 
final ClusterState state, final ClusterStatsAggregator statsAggregator, final int maxPrematureCrashes, final EventLog eventLog, final String pathPrefix, final String name) { final String dominantVtag = findDominantVtag( storageNodeInfos, distributorNodeInfos); renderNodesOneType(storageNodeInfos, NodeType.STORAGE, timer, state, statsAggregator, maxPrematureCrashes, eventLog, pathPrefix, dominantVtag, name); renderNodesOneType(distributorNodeInfos, NodeType.DISTRIBUTOR, timer, state, statsAggregator, maxPrematureCrashes, eventLog, pathPrefix, dominantVtag, name); } private String findDominantVtag( final Map<Integer, NodeInfo> storageNodeInfos, final Map<Integer, NodeInfo> distributorNodeInfos) { final List<NodeInfo> nodeInfos = new ArrayList<>(); nodeInfos.addAll(storageNodeInfos.values()); nodeInfos.addAll(distributorNodeInfos.values()); final Map<String, Integer> versionTagToCount = new HashMap<>(); int maxCount = -1; String dominantVtag = null; for (NodeInfo nodeInfo : nodeInfos) { final String buildTag = nodeInfo.getVtag(); Integer count = versionTagToCount.get(buildTag); count = count == null ? 1 : count + 1; versionTagToCount.put(buildTag, count); if (count > maxCount) { maxCount = count; dominantVtag = buildTag; } } return dominantVtag == null ? TAG_NOT_SET : dominantVtag; } private void addTableHeader(final String name, final NodeType nodeType) { table.addRow(new HtmlTable.Row().addCell( new HtmlTable.Cell("Group " + name) .addProperties(new HtmlTable.CellProperties() .setColSpan(0) .setBackgroundColor(0xccccff) .align(HtmlTable.Orientation.LEFT)))); table.addRow(new HtmlTable.Row() .setHeaderRow() .addProperties(headerProperties) .addProperties(new HtmlTable.CellProperties().setRowSpan(2)) .addCell(new HtmlTable.Cell(nodeType == NodeType.DISTRIBUTOR ? 
"Distributor" : "Storage")) .addCell(new HtmlTable.Cell("Node states") .addProperties(new HtmlTable.CellProperties().setColSpan(3).setRowSpan(1))) .addCell(new HtmlTable.Cell("Build")) .addCell(new HtmlTable.Cell("FC<sup>1)</sup>")) .addCell(new HtmlTable.Cell("OCT<sup>2)</sup>")) .addCell(new HtmlTable.Cell("SPT<sup>3)</sup>")) .addCell(new HtmlTable.Cell("SSV<sup>4)</sup>")) .addCell(new HtmlTable.Cell("PC<sup>5)</sup>")) .addCell(new HtmlTable.Cell("ELW<sup>6)</sup>")) .addCell(new HtmlTable.Cell("Buckets pending") .addProperties(new HtmlTable.CellProperties().setColSpan(2).setRowSpan(1))) .addCell(new HtmlTable.Cell("Start Time")) .addCell(new HtmlTable.Cell("RPC Address"))); table.addRow(new HtmlTable.Row().setHeaderRow().addProperties(headerProperties) .addCell(new HtmlTable.Cell("Reported")) .addCell(new HtmlTable.Cell("Wanted")) .addCell(new HtmlTable.Cell("System")) .addCell(new HtmlTable.Cell(FixedBucketSpaces.defaultSpace())) .addCell(new HtmlTable.Cell(FixedBucketSpaces.globalSpace()))); } private void renderNodesOneType( final TreeMap<Integer, NodeInfo> nodeInfos, final NodeType nodeType, final Timer timer, final ClusterState state, final ClusterStatsAggregator statsAggregator, final int maxPrematureCrashes, final EventLog eventLog, final String pathPrefix, final String dominantVtag, final String name) { final long currentTime = timer.getCurrentTimeInMillis(); addTableHeader(name, nodeType); for (final NodeInfo nodeInfo : nodeInfos.values()) { HtmlTable.Row row = new HtmlTable.Row(); row.addCell(new HtmlTable.Cell("<a href=\"" + pathPrefix + "/node=" + nodeInfo.getNode() + "\">" + nodeInfo.getNodeIndex() + "</a>")); NodeState reportedState = nodeInfo.getReportedState().clone().setStartTimestamp(0); row.addCell(new HtmlTable.Cell(HtmlTable.escape(reportedState.toString(true)))); if (!nodeInfo.getReportedState().getState().equals(State.UP)) { row.getLastCell().addProperties(WARNING_PROPERTY); } if (nodeInfo.getWantedState() == null || 
nodeInfo.getWantedState().getState().equals(State.UP)) { row.addCell(new HtmlTable.Cell("-").addProperties(CENTERED_PROPERTY)); } else { row.addCell(new HtmlTable.Cell(HtmlTable.escape(nodeInfo.getWantedState().toString(true)))); if (nodeInfo.getWantedState().toString(true).indexOf("Disabled by fleet controller") != -1) { row.getLastCell().addProperties(ERROR_PROPERTY); } else { row.getLastCell().addProperties(WARNING_PROPERTY); } } NodeState ns = state.getNodeState(nodeInfo.getNode()).clone().setDescription("").setMinUsedBits(16); if (state.getClusterState().oneOf("uir")) { row.addCell(new HtmlTable.Cell(HtmlTable.escape(ns.toString(true)))); if (ns.getState().equals(State.DOWN)) { row.getLastCell().addProperties(ERROR_PROPERTY); } else if (ns.getState().oneOf("mi")) { row.getLastCell().addProperties(WARNING_PROPERTY); } } else { row.addCell(new HtmlTable.Cell("Cluster " + state.getClusterState().name().toLowerCase()).addProperties(ERROR_PROPERTY)); } final String buildTagText = nodeInfo.getVtag() != null ? nodeInfo.getVtag() : TAG_NOT_SET; row.addCell(new HtmlTable.Cell(buildTagText)); if (! dominantVtag.equals(nodeInfo.getVtag())) { row.getLastCell().addProperties(WARNING_PROPERTY); } row.addCell(new HtmlTable.Cell("" + nodeInfo.getConnectionAttemptCount())); long timeSinceContact = nodeInfo.getTimeOfFirstFailingConnectionAttempt() == 0 ? 
0 : currentTime - nodeInfo.getTimeOfFirstFailingConnectionAttempt(); if (timeSinceContact > 60 * 1000) { row.getLastCell().addProperties(ERROR_PROPERTY); } else if (nodeInfo.getConnectionAttemptCount() > 0) { row.getLastCell().addProperties(WARNING_PROPERTY); } row.addCell(new HtmlTable.Cell((timeSinceContact / 1000) + " s")); if (timeSinceContact > 60 * 1000) { row.getLastCell().addProperties(ERROR_PROPERTY); } else if (nodeInfo.getConnectionAttemptCount() > 0) { row.getLastCell().addProperties(WARNING_PROPERTY); } if (nodeInfo.getLatestNodeStateRequestTime() == null) { row.addCell(new HtmlTable.Cell("-").addProperties(CENTERED_PROPERTY)); } else { row.addCell(new HtmlTable.Cell(HtmlTable.escape(RealTimer.printDuration( currentTime - nodeInfo.getLatestNodeStateRequestTime())))); } row.addCell(new HtmlTable.Cell("" + nodeInfo.getSystemStateVersionAcknowledged())); if (nodeInfo.getSystemStateVersionAcknowledged() < state.getVersion() - 2) { row.getLastCell().addProperties(ERROR_PROPERTY); } else if (nodeInfo.getSystemStateVersionAcknowledged() < state.getVersion()) { row.getLastCell().addProperties(WARNING_PROPERTY); } row.addCell(new HtmlTable.Cell("" + nodeInfo.getPrematureCrashCount())); if (nodeInfo.getPrematureCrashCount() >= maxPrematureCrashes) { row.getLastCell().addProperties(ERROR_PROPERTY); } else if (nodeInfo.getPrematureCrashCount() > 0) { row.getLastCell().addProperties(WARNING_PROPERTY); } int nodeEvents = eventLog.getNodeEventsSince(nodeInfo.getNode(), currentTime - eventLog.getRecentTimePeriod()); row.addCell(new HtmlTable.Cell("" + nodeEvents)); if (nodeEvents > 20) { row.getLastCell().addProperties(ERROR_PROPERTY); } else if (nodeEvents > 3) { row.getLastCell().addProperties(WARNING_PROPERTY); } if (nodeType.equals(NodeType.STORAGE)) { addBucketsPending(row, getStatsForContentNode(statsAggregator, nodeInfo, FixedBucketSpaces.defaultSpace())); addBucketsPending(row, getStatsForContentNode(statsAggregator, nodeInfo, 
FixedBucketSpaces.globalSpace())); } else { addBucketsPending(row, getStatsForDistributorNode(statsAggregator, nodeInfo, FixedBucketSpaces.defaultSpace())); addBucketsPending(row, getStatsForDistributorNode(statsAggregator, nodeInfo, FixedBucketSpaces.globalSpace())); } if (nodeInfo.getStartTimestamp() == 0) { row.addCell(new HtmlTable.Cell("-").addProperties(ERROR_PROPERTY).addProperties(CENTERED_PROPERTY)); } else { String startTime = RealTimer.printDateNoMilliSeconds( 1000 * nodeInfo.getStartTimestamp(), utcTimeZone); row.addCell(new HtmlTable.Cell(HtmlTable.escape(startTime))); } if (nodeInfo.getRpcAddress() == null) { row.addCell(new HtmlTable.Cell("-").addProperties(ERROR_PROPERTY)); } else { row.addCell(new HtmlTable.Cell(HtmlTable.escape(nodeInfo.getRpcAddress()))); if (nodeInfo.isRpcAddressOutdated()) { row.getLastCell().addProperties(WARNING_PROPERTY); } } table.addRow(row); } } private static ContentNodeStats.BucketSpaceStats getStatsForContentNode(ClusterStatsAggregator statsAggregator, NodeInfo nodeInfo, String bucketSpace) { ContentNodeStats nodeStats = statsAggregator.getAggregatedStats().getContentNode(nodeInfo.getNodeIndex()); if (nodeStats != null) { return nodeStats.getBucketSpace(bucketSpace); } return null; } private static ContentNodeStats.BucketSpaceStats getStatsForDistributorNode(ClusterStatsAggregator statsAggregator, NodeInfo nodeInfo, String bucketSpace) { ContentNodeStats nodeStats = statsAggregator.getAggregatedStatsForDistributor(nodeInfo.getNodeIndex()); return nodeStats.getBucketSpace(bucketSpace); } private void addFooter(final StringBuilder contentBuilder, final long stableStateTimePeriode) { contentBuilder.append("<font size=\"-1\">\n") .append("1) FC - Failed connections - We have tried to connect to the nodes this many times " + "without being able to contact it.<br>\n") .append("2) OCT - Out of contact time - Time in seconds we have failed to contact the node.<br>\n") .append("3) SPT - State pending time - Time the current 
getNodeState request has been " + "pending.<br>\n") .append("4) SSV - System state version - The latest system state version the node has " + "acknowledged.<br>\n") .append("5) PC - Premature crashes - Number of times node has crashed since last time it had " + "been stable in up or down state for more than " + RealTimer.printDuration(stableStateTimePeriode) + ".<br>\n") .append("6) ELW - Events last week - The number of events that has occured on this node the " + "last week. (Or shorter period if a week haven't passed since restart or more than " + "max events to keep in node event log have happened during last week.)<br>\n") .append("</font>\n"); } }
Should you not still catch Throwable ? Exception, NoClassDefFoundError and Error does not cover the whole world
public void run() { log.info("Starting deconstruction of " + component); try { component.deconstruct(); log.info("Finished deconstructing " + component); } catch (Exception | NoClassDefFoundError e) { log.log(WARNING, "Exception thrown when deconstructing " + component, e); } catch (Error e) { try { Thread.sleep(random() * 1000 * 60 * 10); } catch (InterruptedException exception) { log.log(WARNING, "Randomized wait before dying disrupted. Dying now."); } com.yahoo.protect.Process.logAndDie("Error when deconstructing " + component, e); } }
log.info("Finished deconstructing " + component);
public void run() { log.info("Starting deconstruction of " + component); try { component.deconstruct(); log.info("Finished deconstructing " + component); } catch (Exception | NoClassDefFoundError e) { log.log(WARNING, "Exception thrown when deconstructing " + component, e); } catch (Error e) { try { long randomSleepSeconds = random() * 60 * 10; log.log(LogLevel.FATAL, "Error when deconstructing " + component + ". Will sleep for " + randomSleepSeconds + " seconds then restart", e); Thread.sleep(randomSleepSeconds * 1000); } catch (InterruptedException exception) { log.log(WARNING, "Randomized wait before dying disrupted. Dying now."); } com.yahoo.protect.Process.logAndDie("Shutting down due to error when deconstructing " + component); } catch (Throwable e) { log.log(WARNING, "Non-error not exception throwable thrown when deconstructing " + component, e); } }
class DestructComponentTask implements Runnable { private final AbstractComponent component; DestructComponentTask(AbstractComponent component) { this.component = component; } /** Returns a random value which will be different across identical containers invoking this at the same time */ private long random() { return new SecureRandom().nextLong(); } }
class DestructComponentTask implements Runnable { private final AbstractComponent component; DestructComponentTask(AbstractComponent component) { this.component = component; } /** Returns a random value which will be different across identical containers invoking this at the same time */ private long random() { return new SecureRandom().nextLong(); } }
Add shutting down or something to the log message. "Shutting down due to error when deconstruction .. "
public void run() { log.info("Starting deconstruction of " + component); try { component.deconstruct(); log.info("Finished deconstructing " + component); } catch (Exception | NoClassDefFoundError e) { log.log(WARNING, "Exception thrown when deconstructing " + component, e); } catch (Error e) { try { Thread.sleep(random() * 1000 * 60 * 10); } catch (InterruptedException exception) { log.log(WARNING, "Randomized wait before dying disrupted. Dying now."); } com.yahoo.protect.Process.logAndDie("Error when deconstructing " + component, e); } }
com.yahoo.protect.Process.logAndDie("Error when deconstructing " + component, e);
public void run() { log.info("Starting deconstruction of " + component); try { component.deconstruct(); log.info("Finished deconstructing " + component); } catch (Exception | NoClassDefFoundError e) { log.log(WARNING, "Exception thrown when deconstructing " + component, e); } catch (Error e) { try { long randomSleepSeconds = random() * 60 * 10; log.log(LogLevel.FATAL, "Error when deconstructing " + component + ". Will sleep for " + randomSleepSeconds + " seconds then restart", e); Thread.sleep(randomSleepSeconds * 1000); } catch (InterruptedException exception) { log.log(WARNING, "Randomized wait before dying disrupted. Dying now."); } com.yahoo.protect.Process.logAndDie("Shutting down due to error when deconstructing " + component); } catch (Throwable e) { log.log(WARNING, "Non-error not exception throwable thrown when deconstructing " + component, e); } }
class DestructComponentTask implements Runnable { private final AbstractComponent component; DestructComponentTask(AbstractComponent component) { this.component = component; } /** Returns a random value which will be different across identical containers invoking this at the same time */ private long random() { return new SecureRandom().nextLong(); } }
class DestructComponentTask implements Runnable { private final AbstractComponent component; DestructComponentTask(AbstractComponent component) { this.component = component; } /** Returns a random value which will be different across identical containers invoking this at the same time */ private long random() { return new SecureRandom().nextLong(); } }
Exception and Error does cover the whole world. .... unless we live in an unimaginably evil one. Ok, I guess that's worth hedging against.
public void run() { log.info("Starting deconstruction of " + component); try { component.deconstruct(); log.info("Finished deconstructing " + component); } catch (Exception | NoClassDefFoundError e) { log.log(WARNING, "Exception thrown when deconstructing " + component, e); } catch (Error e) { try { Thread.sleep(random() * 1000 * 60 * 10); } catch (InterruptedException exception) { log.log(WARNING, "Randomized wait before dying disrupted. Dying now."); } com.yahoo.protect.Process.logAndDie("Error when deconstructing " + component, e); } }
log.info("Finished deconstructing " + component);
public void run() { log.info("Starting deconstruction of " + component); try { component.deconstruct(); log.info("Finished deconstructing " + component); } catch (Exception | NoClassDefFoundError e) { log.log(WARNING, "Exception thrown when deconstructing " + component, e); } catch (Error e) { try { long randomSleepSeconds = random() * 60 * 10; log.log(LogLevel.FATAL, "Error when deconstructing " + component + ". Will sleep for " + randomSleepSeconds + " seconds then restart", e); Thread.sleep(randomSleepSeconds * 1000); } catch (InterruptedException exception) { log.log(WARNING, "Randomized wait before dying disrupted. Dying now."); } com.yahoo.protect.Process.logAndDie("Shutting down due to error when deconstructing " + component); } catch (Throwable e) { log.log(WARNING, "Non-error not exception throwable thrown when deconstructing " + component, e); } }
class DestructComponentTask implements Runnable { private final AbstractComponent component; DestructComponentTask(AbstractComponent component) { this.component = component; } /** Returns a random value which will be different across identical containers invoking this at the same time */ private long random() { return new SecureRandom().nextLong(); } }
class DestructComponentTask implements Runnable { private final AbstractComponent component; DestructComponentTask(AbstractComponent component) { this.component = component; } /** Returns a random value which will be different across identical containers invoking this at the same time */ private long random() { return new SecureRandom().nextLong(); } }
Done.
public void run() { log.info("Starting deconstruction of " + component); try { component.deconstruct(); log.info("Finished deconstructing " + component); } catch (Exception | NoClassDefFoundError e) { log.log(WARNING, "Exception thrown when deconstructing " + component, e); } catch (Error e) { try { Thread.sleep(random() * 1000 * 60 * 10); } catch (InterruptedException exception) { log.log(WARNING, "Randomized wait before dying disrupted. Dying now."); } com.yahoo.protect.Process.logAndDie("Error when deconstructing " + component, e); } }
com.yahoo.protect.Process.logAndDie("Error when deconstructing " + component, e);
public void run() { log.info("Starting deconstruction of " + component); try { component.deconstruct(); log.info("Finished deconstructing " + component); } catch (Exception | NoClassDefFoundError e) { log.log(WARNING, "Exception thrown when deconstructing " + component, e); } catch (Error e) { try { long randomSleepSeconds = random() * 60 * 10; log.log(LogLevel.FATAL, "Error when deconstructing " + component + ". Will sleep for " + randomSleepSeconds + " seconds then restart", e); Thread.sleep(randomSleepSeconds * 1000); } catch (InterruptedException exception) { log.log(WARNING, "Randomized wait before dying disrupted. Dying now."); } com.yahoo.protect.Process.logAndDie("Shutting down due to error when deconstructing " + component); } catch (Throwable e) { log.log(WARNING, "Non-error not exception throwable thrown when deconstructing " + component, e); } }
class DestructComponentTask implements Runnable { private final AbstractComponent component; DestructComponentTask(AbstractComponent component) { this.component = component; } /** Returns a random value which will be different across identical containers invoking this at the same time */ private long random() { return new SecureRandom().nextLong(); } }
class DestructComponentTask implements Runnable { private final AbstractComponent component; DestructComponentTask(AbstractComponent component) { this.component = component; } /** Returns a random value which will be different across identical containers invoking this at the same time */ private long random() { return new SecureRandom().nextLong(); } }
I suggest to log both before and after sleeping. Before e.g.: "Will shut down within 10 minutes due to an error thrown when deconstructing ..."
public void run() { log.info("Starting deconstruction of " + component); try { component.deconstruct(); log.info("Finished deconstructing " + component); } catch (Exception | NoClassDefFoundError e) { log.log(WARNING, "Exception thrown when deconstructing " + component, e); } catch (Error e) { try { Thread.sleep(random() * 1000 * 60 * 10); } catch (InterruptedException exception) { log.log(WARNING, "Randomized wait before dying disrupted. Dying now."); } com.yahoo.protect.Process.logAndDie("Shutting down due to error when deconstructing " + component, e); } catch (Throwable e) { log.log(WARNING, "Non-error not exception throwable thrown when deconstructing " + component, e); } }
Thread.sleep(random() * 1000 * 60 * 10);
public void run() { log.info("Starting deconstruction of " + component); try { component.deconstruct(); log.info("Finished deconstructing " + component); } catch (Exception | NoClassDefFoundError e) { log.log(WARNING, "Exception thrown when deconstructing " + component, e); } catch (Error e) { try { long randomSleepSeconds = random() * 60 * 10; log.log(LogLevel.FATAL, "Error when deconstructing " + component + ". Will sleep for " + randomSleepSeconds + " seconds then restart", e); Thread.sleep(randomSleepSeconds * 1000); } catch (InterruptedException exception) { log.log(WARNING, "Randomized wait before dying disrupted. Dying now."); } com.yahoo.protect.Process.logAndDie("Shutting down due to error when deconstructing " + component); } catch (Throwable e) { log.log(WARNING, "Non-error not exception throwable thrown when deconstructing " + component, e); } }
class DestructComponentTask implements Runnable { private final AbstractComponent component; DestructComponentTask(AbstractComponent component) { this.component = component; } /** Returns a random value which will be different across identical containers invoking this at the same time */ private long random() { return new SecureRandom().nextLong(); } }
class DestructComponentTask implements Runnable { private final AbstractComponent component; DestructComponentTask(AbstractComponent component) { this.component = component; } /** Returns a random value which will be different across identical containers invoking this at the same time */ private long random() { return new SecureRandom().nextLong(); } }
Ok, done. In this module we do have LogLevel available so this allowed me to log this as FATAL as well.
public void run() { log.info("Starting deconstruction of " + component); try { component.deconstruct(); log.info("Finished deconstructing " + component); } catch (Exception | NoClassDefFoundError e) { log.log(WARNING, "Exception thrown when deconstructing " + component, e); } catch (Error e) { try { Thread.sleep(random() * 1000 * 60 * 10); } catch (InterruptedException exception) { log.log(WARNING, "Randomized wait before dying disrupted. Dying now."); } com.yahoo.protect.Process.logAndDie("Shutting down due to error when deconstructing " + component, e); } catch (Throwable e) { log.log(WARNING, "Non-error not exception throwable thrown when deconstructing " + component, e); } }
Thread.sleep(random() * 1000 * 60 * 10);
public void run() { log.info("Starting deconstruction of " + component); try { component.deconstruct(); log.info("Finished deconstructing " + component); } catch (Exception | NoClassDefFoundError e) { log.log(WARNING, "Exception thrown when deconstructing " + component, e); } catch (Error e) { try { long randomSleepSeconds = random() * 60 * 10; log.log(LogLevel.FATAL, "Error when deconstructing " + component + ". Will sleep for " + randomSleepSeconds + " seconds then restart", e); Thread.sleep(randomSleepSeconds * 1000); } catch (InterruptedException exception) { log.log(WARNING, "Randomized wait before dying disrupted. Dying now."); } com.yahoo.protect.Process.logAndDie("Shutting down due to error when deconstructing " + component); } catch (Throwable e) { log.log(WARNING, "Non-error not exception throwable thrown when deconstructing " + component, e); } }
class DestructComponentTask implements Runnable { private final AbstractComponent component; DestructComponentTask(AbstractComponent component) { this.component = component; } /** Returns a random value which will be different across identical containers invoking this at the same time */ private long random() { return new SecureRandom().nextLong(); } }
class DestructComponentTask implements Runnable { private final AbstractComponent component; DestructComponentTask(AbstractComponent component) { this.component = component; } /** Returns a random value which will be different across identical containers invoking this at the same time */ private long random() { return new SecureRandom().nextLong(); } }
Have you remembered to push it? I can't see the change. Github is very slow right now though.
public void run() { log.info("Starting deconstruction of " + component); try { component.deconstruct(); log.info("Finished deconstructing " + component); } catch (Exception | NoClassDefFoundError e) { log.log(WARNING, "Exception thrown when deconstructing " + component, e); } catch (Error e) { try { Thread.sleep(random() * 1000 * 60 * 10); } catch (InterruptedException exception) { log.log(WARNING, "Randomized wait before dying disrupted. Dying now."); } com.yahoo.protect.Process.logAndDie("Shutting down due to error when deconstructing " + component, e); } catch (Throwable e) { log.log(WARNING, "Non-error not exception throwable thrown when deconstructing " + component, e); } }
Thread.sleep(random() * 1000 * 60 * 10);
public void run() { log.info("Starting deconstruction of " + component); try { component.deconstruct(); log.info("Finished deconstructing " + component); } catch (Exception | NoClassDefFoundError e) { log.log(WARNING, "Exception thrown when deconstructing " + component, e); } catch (Error e) { try { long randomSleepSeconds = random() * 60 * 10; log.log(LogLevel.FATAL, "Error when deconstructing " + component + ". Will sleep for " + randomSleepSeconds + " seconds then restart", e); Thread.sleep(randomSleepSeconds * 1000); } catch (InterruptedException exception) { log.log(WARNING, "Randomized wait before dying disrupted. Dying now."); } com.yahoo.protect.Process.logAndDie("Shutting down due to error when deconstructing " + component); } catch (Throwable e) { log.log(WARNING, "Non-error not exception throwable thrown when deconstructing " + component, e); } }
class DestructComponentTask implements Runnable { private final AbstractComponent component; DestructComponentTask(AbstractComponent component) { this.component = component; } /** Returns a random value which will be different across identical containers invoking this at the same time */ private long random() { return new SecureRandom().nextLong(); } }
class DestructComponentTask implements Runnable { private final AbstractComponent component; DestructComponentTask(AbstractComponent component) { this.component = component; } /** Returns a random value which will be different across identical containers invoking this at the same time */ private long random() { return new SecureRandom().nextLong(); } }
Nitpicking, but you forgot to put a '.' after the component. `(". Will sleep...)`
public void run() { log.info("Starting deconstruction of " + component); try { component.deconstruct(); log.info("Finished deconstructing " + component); } catch (Exception | NoClassDefFoundError e) { log.log(WARNING, "Exception thrown when deconstructing " + component, e); } catch (Error e) { try { long randomSleepSeconds = random() * 60 * 10; log.log(LogLevel.FATAL, "Error when deconstructing " + component + " Will sleep for " + randomSleepSeconds + " seconds then restart", e); Thread.sleep(randomSleepSeconds * 1000); } catch (InterruptedException exception) { log.log(WARNING, "Randomized wait before dying disrupted. Dying now."); } com.yahoo.protect.Process.logAndDie("Shutting down due to error when deconstructing " + component); } catch (Throwable e) { log.log(WARNING, "Non-error not exception throwable thrown when deconstructing " + component, e); } }
log.log(LogLevel.FATAL, "Error when deconstructing " + component + " Will sleep for " +
public void run() { log.info("Starting deconstruction of " + component); try { component.deconstruct(); log.info("Finished deconstructing " + component); } catch (Exception | NoClassDefFoundError e) { log.log(WARNING, "Exception thrown when deconstructing " + component, e); } catch (Error e) { try { long randomSleepSeconds = random() * 60 * 10; log.log(LogLevel.FATAL, "Error when deconstructing " + component + ". Will sleep for " + randomSleepSeconds + " seconds then restart", e); Thread.sleep(randomSleepSeconds * 1000); } catch (InterruptedException exception) { log.log(WARNING, "Randomized wait before dying disrupted. Dying now."); } com.yahoo.protect.Process.logAndDie("Shutting down due to error when deconstructing " + component); } catch (Throwable e) { log.log(WARNING, "Non-error not exception throwable thrown when deconstructing " + component, e); } }
class DestructComponentTask implements Runnable { private final AbstractComponent component; DestructComponentTask(AbstractComponent component) { this.component = component; } /** Returns a random value which will be different across identical containers invoking this at the same time */ private long random() { return new SecureRandom().nextLong(); } }
class DestructComponentTask implements Runnable { private final AbstractComponent component; DestructComponentTask(AbstractComponent component) { this.component = component; } /** Returns a random value which will be different across identical containers invoking this at the same time */ private long random() { return new SecureRandom().nextLong(); } }
This is a resource leak, as the component might not have freed it's resources. However, including NoClassDef error doesn't make it that much worse. But it's something to look for if the container runs out of memory.
public void run() { log.info("Starting deconstruction of " + component); try { component.deconstruct(); log.info("Finished deconstructing " + component); } catch (Exception | NoClassDefFoundError e) { log.log(WARNING, "Exception thrown when deconstructing " + component, e); } catch (Error e) { try { long randomSleepSeconds = random() * 60 * 10; log.log(LogLevel.FATAL, "Error when deconstructing " + component + " Will sleep for " + randomSleepSeconds + " seconds then restart", e); Thread.sleep(randomSleepSeconds * 1000); } catch (InterruptedException exception) { log.log(WARNING, "Randomized wait before dying disrupted. Dying now."); } com.yahoo.protect.Process.logAndDie("Shutting down due to error when deconstructing " + component); } catch (Throwable e) { log.log(WARNING, "Non-error not exception throwable thrown when deconstructing " + component, e); } }
catch (Exception | NoClassDefFoundError e) {
public void run() { log.info("Starting deconstruction of " + component); try { component.deconstruct(); log.info("Finished deconstructing " + component); } catch (Exception | NoClassDefFoundError e) { log.log(WARNING, "Exception thrown when deconstructing " + component, e); } catch (Error e) { try { long randomSleepSeconds = random() * 60 * 10; log.log(LogLevel.FATAL, "Error when deconstructing " + component + ". Will sleep for " + randomSleepSeconds + " seconds then restart", e); Thread.sleep(randomSleepSeconds * 1000); } catch (InterruptedException exception) { log.log(WARNING, "Randomized wait before dying disrupted. Dying now."); } com.yahoo.protect.Process.logAndDie("Shutting down due to error when deconstructing " + component); } catch (Throwable e) { log.log(WARNING, "Non-error not exception throwable thrown when deconstructing " + component, e); } }
class DestructComponentTask implements Runnable { private final AbstractComponent component; DestructComponentTask(AbstractComponent component) { this.component = component; } /** Returns a random value which will be different across identical containers invoking this at the same time */ private long random() { return new SecureRandom().nextLong(); } }
class DestructComponentTask implements Runnable { private final AbstractComponent component; DestructComponentTask(AbstractComponent component) { this.component = component; } /** Returns a random value which will be different across identical containers invoking this at the same time */ private long random() { return new SecureRandom().nextLong(); } }
Fixed.
public void run() { log.info("Starting deconstruction of " + component); try { component.deconstruct(); log.info("Finished deconstructing " + component); } catch (Exception | NoClassDefFoundError e) { log.log(WARNING, "Exception thrown when deconstructing " + component, e); } catch (Error e) { try { long randomSleepSeconds = random() * 60 * 10; log.log(LogLevel.FATAL, "Error when deconstructing " + component + " Will sleep for " + randomSleepSeconds + " seconds then restart", e); Thread.sleep(randomSleepSeconds * 1000); } catch (InterruptedException exception) { log.log(WARNING, "Randomized wait before dying disrupted. Dying now."); } com.yahoo.protect.Process.logAndDie("Shutting down due to error when deconstructing " + component); } catch (Throwable e) { log.log(WARNING, "Non-error not exception throwable thrown when deconstructing " + component, e); } }
log.log(LogLevel.FATAL, "Error when deconstructing " + component + " Will sleep for " +
public void run() { log.info("Starting deconstruction of " + component); try { component.deconstruct(); log.info("Finished deconstructing " + component); } catch (Exception | NoClassDefFoundError e) { log.log(WARNING, "Exception thrown when deconstructing " + component, e); } catch (Error e) { try { long randomSleepSeconds = random() * 60 * 10; log.log(LogLevel.FATAL, "Error when deconstructing " + component + ". Will sleep for " + randomSleepSeconds + " seconds then restart", e); Thread.sleep(randomSleepSeconds * 1000); } catch (InterruptedException exception) { log.log(WARNING, "Randomized wait before dying disrupted. Dying now."); } com.yahoo.protect.Process.logAndDie("Shutting down due to error when deconstructing " + component); } catch (Throwable e) { log.log(WARNING, "Non-error not exception throwable thrown when deconstructing " + component, e); } }
class DestructComponentTask implements Runnable { private final AbstractComponent component; DestructComponentTask(AbstractComponent component) { this.component = component; } /** Returns a random value which will be different across identical containers invoking this at the same time */ private long random() { return new SecureRandom().nextLong(); } }
class DestructComponentTask implements Runnable { private final AbstractComponent component; DestructComponentTask(AbstractComponent component) { this.component = component; } /** Returns a random value which will be different across identical containers invoking this at the same time */ private long random() { return new SecureRandom().nextLong(); } }
Agree on both points.
public void run() { log.info("Starting deconstruction of " + component); try { component.deconstruct(); log.info("Finished deconstructing " + component); } catch (Exception | NoClassDefFoundError e) { log.log(WARNING, "Exception thrown when deconstructing " + component, e); } catch (Error e) { try { long randomSleepSeconds = random() * 60 * 10; log.log(LogLevel.FATAL, "Error when deconstructing " + component + " Will sleep for " + randomSleepSeconds + " seconds then restart", e); Thread.sleep(randomSleepSeconds * 1000); } catch (InterruptedException exception) { log.log(WARNING, "Randomized wait before dying disrupted. Dying now."); } com.yahoo.protect.Process.logAndDie("Shutting down due to error when deconstructing " + component); } catch (Throwable e) { log.log(WARNING, "Non-error not exception throwable thrown when deconstructing " + component, e); } }
catch (Exception | NoClassDefFoundError e) {
public void run() { log.info("Starting deconstruction of " + component); try { component.deconstruct(); log.info("Finished deconstructing " + component); } catch (Exception | NoClassDefFoundError e) { log.log(WARNING, "Exception thrown when deconstructing " + component, e); } catch (Error e) { try { long randomSleepSeconds = random() * 60 * 10; log.log(LogLevel.FATAL, "Error when deconstructing " + component + ". Will sleep for " + randomSleepSeconds + " seconds then restart", e); Thread.sleep(randomSleepSeconds * 1000); } catch (InterruptedException exception) { log.log(WARNING, "Randomized wait before dying disrupted. Dying now."); } com.yahoo.protect.Process.logAndDie("Shutting down due to error when deconstructing " + component); } catch (Throwable e) { log.log(WARNING, "Non-error not exception throwable thrown when deconstructing " + component, e); } }
class DestructComponentTask implements Runnable { private final AbstractComponent component; DestructComponentTask(AbstractComponent component) { this.component = component; } /** Returns a random value which will be different across identical containers invoking this at the same time */ private long random() { return new SecureRandom().nextLong(); } }
class DestructComponentTask implements Runnable { private final AbstractComponent component; DestructComponentTask(AbstractComponent component) { this.component = component; } /** Returns a random value which will be different across identical containers invoking this at the same time */ private long random() { return new SecureRandom().nextLong(); } }
What should happen if reading the keystore fails? Currently this will fail starting the config server. Intentional?
private static Optional<KeyStore> tryReadKeystoreFile(File certificateFile, Duration updatePeriod) { try { if (!certificateFile.exists()) return Optional.empty(); KeyStore keyStore = KeyStore.getInstance("JKS"); try (InputStream in = new BufferedInputStream(new FileInputStream(certificateFile))) { keyStore.load(in, new char[0]); } Instant minimumExpiration = Instant.now().plus(updatePeriod).plus(EXPIRATION_MARGIN); boolean isExpired = getKeyStoreExpiry(keyStore).isBefore(minimumExpiration); if (isExpired) return Optional.empty(); return Optional.of(keyStore); } catch (IOException | GeneralSecurityException e) { throw new RuntimeException(e); } }
throw new RuntimeException(e);
private static Optional<KeyStore> tryReadKeystoreFile(File certificateFile, Duration updatePeriod) { try { if (!certificateFile.exists()) return Optional.empty(); KeyStore keyStore = KeyStore.getInstance("JKS"); try (InputStream in = new BufferedInputStream(new FileInputStream(certificateFile))) { keyStore.load(in, new char[0]); } Instant minimumExpiration = Instant.now().plus(updatePeriod).plus(EXPIRATION_MARGIN); boolean isExpired = getCertificateExpiry(keyStore).isBefore(minimumExpiration); if (isExpired) return Optional.empty(); return Optional.of(keyStore); } catch (IOException | GeneralSecurityException e) { log.log(LogLevel.ERROR, "Failed to read keystore from disk: " + e.getMessage(), e); return Optional.empty(); } }
class AthenzSslKeyStoreConfigurator extends AbstractComponent implements SslKeyStoreConfigurator { private static final Logger log = Logger.getLogger(AthenzSslKeyStoreConfigurator.class.getName()); private static final String CERTIFICATE_ALIAS = "athenz"; private static final String CERTIFICATE_PASSWORD = "athenz"; private static final Duration EXPIRATION_MARGIN = Duration.ofHours(6); private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor(); private final AthenzCertificateClient certificateClient; private final KeyProvider keyProvider; private final AthenzProviderServiceConfig.Zones zoneConfig; private final Duration updatePeriod; private final Path keystoreCachePath; private volatile KeyStore currentKeyStore; @Inject public AthenzSslKeyStoreConfigurator(KeyProvider keyProvider, AthenzProviderServiceConfig config, Zone zone, ConfigserverConfig configserverConfig) { AthenzProviderServiceConfig.Zones zoneConfig = getZoneConfig(config, zone); Path keystoreCachePath = createKeystoreCachePath(configserverConfig); AthenzCertificateClient certificateClient = new AthenzCertificateClient(config, zoneConfig); Duration updatePeriod = Duration.ofDays(config.updatePeriodDays()); this.certificateClient = certificateClient; this.keyProvider = keyProvider; this.zoneConfig = zoneConfig; this.currentKeyStore = initializeKeystore(keyProvider, certificateClient, zoneConfig, keystoreCachePath, updatePeriod); this.updatePeriod = updatePeriod; this.keystoreCachePath = keystoreCachePath; } private static KeyStore initializeKeystore(KeyProvider keyProvider, AthenzCertificateClient certificateClient, AthenzProviderServiceConfig.Zones zoneConfig, Path keystoreCachePath, Duration updatePeriod) { return tryReadKeystoreFile(keystoreCachePath.toFile(), updatePeriod) .orElseGet(() -> downloadCertificate(keyProvider, certificateClient, zoneConfig, keystoreCachePath)); } private static Path createKeystoreCachePath(ConfigserverConfig configserverConfig) { 
return Paths.get( Defaults.getDefaults().underVespaHome(configserverConfig.configServerDBDir()), "server-x509-athenz-cert.jks"); } @Override public void configure(SslKeyStoreContext sslKeyStoreContext) { sslKeyStoreContext.updateKeyStore(currentKeyStore, CERTIFICATE_PASSWORD); scheduler.scheduleAtFixedRate(new AthenzCertificateUpdater(sslKeyStoreContext), updatePeriod.toDays()/*initial delay*/, updatePeriod.toDays(), TimeUnit.DAYS); } @Override public void deconstruct() { try { scheduler.shutdownNow(); scheduler.awaitTermination(30, TimeUnit.SECONDS); } catch (InterruptedException e) { throw new RuntimeException("Failed to shutdown Athenz certificate updater on time", e); } } Instant getKeyStoreExpiry() throws KeyStoreException { return getKeyStoreExpiry(currentKeyStore); } private static Instant getKeyStoreExpiry(KeyStore keyStore) throws KeyStoreException { X509Certificate certificate = (X509Certificate) keyStore.getCertificate(CERTIFICATE_ALIAS); return certificate.getNotAfter().toInstant(); } private static KeyStore downloadCertificate(KeyProvider keyProvider, AthenzCertificateClient certificateClient, AthenzProviderServiceConfig.Zones zoneConfig, Path keystoreCachePath) { try { PrivateKey privateKey = keyProvider.getPrivateKey(zoneConfig.secretVersion()); X509Certificate certificate = certificateClient.updateCertificate(privateKey); Instant expirationTime = certificate.getNotAfter().toInstant(); Duration expiry = Duration.between(certificate.getNotBefore().toInstant(), expirationTime); log.log(LogLevel.INFO, String.format("Got Athenz x509 certificate with expiry %s (expires %s)", expiry, expirationTime)); KeyStore keyStore = KeyStore.getInstance("JKS"); keyStore.load(null); keyStore.setKeyEntry( CERTIFICATE_ALIAS, privateKey, CERTIFICATE_PASSWORD.toCharArray(), new Certificate[]{certificate}); writeKeystore(keyStore, keystoreCachePath); return keyStore; } catch (IOException | GeneralSecurityException e) { throw new RuntimeException(e); } } private static void 
writeKeystore(KeyStore keyStore, Path keystoreCachePath) { try (OutputStream out = new BufferedOutputStream(new FileOutputStream(keystoreCachePath.toFile()))) { keyStore.store(out, new char[0]); } catch (IOException | GeneralSecurityException e) { throw new RuntimeException(e); } } private class AthenzCertificateUpdater implements Runnable { private final SslKeyStoreContext sslKeyStoreContext; AthenzCertificateUpdater(SslKeyStoreContext sslKeyStoreContext) { this.sslKeyStoreContext = sslKeyStoreContext; } @Override public void run() { try { log.log(LogLevel.INFO, "Updating Athenz certificate from ZTS"); currentKeyStore = downloadCertificate(keyProvider, certificateClient, zoneConfig, keystoreCachePath); sslKeyStoreContext.updateKeyStore(currentKeyStore, CERTIFICATE_PASSWORD); log.log(LogLevel.INFO, "Athenz certificate reload successfully completed"); } catch (Throwable e) { log.log(LogLevel.ERROR, "Failed to update certificate from ZTS: " + e.getMessage(), e); } } } }
class AthenzSslKeyStoreConfigurator extends AbstractComponent implements SslKeyStoreConfigurator { private static final Logger log = Logger.getLogger(AthenzSslKeyStoreConfigurator.class.getName()); private static final String CERTIFICATE_ALIAS = "athenz"; private static final String CERTIFICATE_PASSWORD = "athenz"; private static final Duration EXPIRATION_MARGIN = Duration.ofHours(6); private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor(); private final AthenzCertificateClient certificateClient; private final KeyProvider keyProvider; private final AthenzProviderServiceConfig.Zones zoneConfig; private final Duration updatePeriod; private final Path keystoreCachePath; private volatile KeyStore currentKeyStore; @Inject public AthenzSslKeyStoreConfigurator(KeyProvider keyProvider, AthenzProviderServiceConfig config, Zone zone, ConfigserverConfig configserverConfig) { AthenzProviderServiceConfig.Zones zoneConfig = getZoneConfig(config, zone); Path keystoreCachePath = createKeystoreCachePath(configserverConfig); AthenzCertificateClient certificateClient = new AthenzCertificateClient(config, zoneConfig); Duration updatePeriod = Duration.ofDays(config.updatePeriodDays()); this.certificateClient = certificateClient; this.keyProvider = keyProvider; this.zoneConfig = zoneConfig; this.currentKeyStore = initializeKeystore(keyProvider, certificateClient, zoneConfig, keystoreCachePath, updatePeriod); this.updatePeriod = updatePeriod; this.keystoreCachePath = keystoreCachePath; } private static KeyStore initializeKeystore(KeyProvider keyProvider, AthenzCertificateClient certificateClient, AthenzProviderServiceConfig.Zones zoneConfig, Path keystoreCachePath, Duration updatePeriod) { return tryReadKeystoreFile(keystoreCachePath.toFile(), updatePeriod) .orElseGet(() -> downloadCertificate(keyProvider, certificateClient, zoneConfig, keystoreCachePath)); } private static Path createKeystoreCachePath(ConfigserverConfig configserverConfig) { 
return Paths.get( Defaults.getDefaults().underVespaHome(configserverConfig.configServerDBDir()), "server-x509-athenz-cert.jks"); } @Override public void configure(SslKeyStoreContext sslKeyStoreContext) { sslKeyStoreContext.updateKeyStore(currentKeyStore, CERTIFICATE_PASSWORD); scheduler.scheduleAtFixedRate(new AthenzCertificateUpdater(sslKeyStoreContext), updatePeriod.toDays()/*initial delay*/, updatePeriod.toDays(), TimeUnit.DAYS); } @Override public void deconstruct() { try { scheduler.shutdownNow(); scheduler.awaitTermination(30, TimeUnit.SECONDS); } catch (InterruptedException e) { throw new RuntimeException("Failed to shutdown Athenz certificate updater on time", e); } } Instant getCertificateExpiry() throws KeyStoreException { return getCertificateExpiry(currentKeyStore); } private static Instant getCertificateExpiry(KeyStore keyStore) throws KeyStoreException { X509Certificate certificate = (X509Certificate) keyStore.getCertificate(CERTIFICATE_ALIAS); return certificate.getNotAfter().toInstant(); } private static KeyStore downloadCertificate(KeyProvider keyProvider, AthenzCertificateClient certificateClient, AthenzProviderServiceConfig.Zones zoneConfig, Path keystoreCachePath) { try { PrivateKey privateKey = keyProvider.getPrivateKey(zoneConfig.secretVersion()); X509Certificate certificate = certificateClient.updateCertificate(privateKey); Instant expirationTime = certificate.getNotAfter().toInstant(); Duration expiry = Duration.between(certificate.getNotBefore().toInstant(), expirationTime); log.log(LogLevel.INFO, String.format("Got Athenz x509 certificate with expiry %s (expires %s)", expiry, expirationTime)); KeyStore keyStore = KeyStore.getInstance("JKS"); keyStore.load(null); keyStore.setKeyEntry( CERTIFICATE_ALIAS, privateKey, CERTIFICATE_PASSWORD.toCharArray(), new Certificate[]{certificate}); tryWriteKeystore(keyStore, keystoreCachePath); return keyStore; } catch (IOException | GeneralSecurityException e) { throw new RuntimeException(e); } } private 
static void tryWriteKeystore(KeyStore keyStore, Path keystoreCachePath) { try (OutputStream out = new BufferedOutputStream(new FileOutputStream(keystoreCachePath.toFile()))) { keyStore.store(out, new char[0]); } catch (IOException | GeneralSecurityException e) { log.log(LogLevel.ERROR, "Failed to write keystore to disk: " + e.getMessage(), e); } } private class AthenzCertificateUpdater implements Runnable { private final SslKeyStoreContext sslKeyStoreContext; AthenzCertificateUpdater(SslKeyStoreContext sslKeyStoreContext) { this.sslKeyStoreContext = sslKeyStoreContext; } @Override public void run() { try { log.log(LogLevel.INFO, "Updating Athenz certificate from ZTS"); currentKeyStore = downloadCertificate(keyProvider, certificateClient, zoneConfig, keystoreCachePath); sslKeyStoreContext.updateKeyStore(currentKeyStore, CERTIFICATE_PASSWORD); log.log(LogLevel.INFO, "Athenz certificate reload successfully completed"); } catch (Throwable e) { log.log(LogLevel.ERROR, "Failed to update certificate from ZTS: " + e.getMessage(), e); } } } }
Currently yes. I am open to change this semantic.
private static Optional<KeyStore> tryReadKeystoreFile(File certificateFile, Duration updatePeriod) { try { if (!certificateFile.exists()) return Optional.empty(); KeyStore keyStore = KeyStore.getInstance("JKS"); try (InputStream in = new BufferedInputStream(new FileInputStream(certificateFile))) { keyStore.load(in, new char[0]); } Instant minimumExpiration = Instant.now().plus(updatePeriod).plus(EXPIRATION_MARGIN); boolean isExpired = getKeyStoreExpiry(keyStore).isBefore(minimumExpiration); if (isExpired) return Optional.empty(); return Optional.of(keyStore); } catch (IOException | GeneralSecurityException e) { throw new RuntimeException(e); } }
throw new RuntimeException(e);
private static Optional<KeyStore> tryReadKeystoreFile(File certificateFile, Duration updatePeriod) { try { if (!certificateFile.exists()) return Optional.empty(); KeyStore keyStore = KeyStore.getInstance("JKS"); try (InputStream in = new BufferedInputStream(new FileInputStream(certificateFile))) { keyStore.load(in, new char[0]); } Instant minimumExpiration = Instant.now().plus(updatePeriod).plus(EXPIRATION_MARGIN); boolean isExpired = getCertificateExpiry(keyStore).isBefore(minimumExpiration); if (isExpired) return Optional.empty(); return Optional.of(keyStore); } catch (IOException | GeneralSecurityException e) { log.log(LogLevel.ERROR, "Failed to read keystore from disk: " + e.getMessage(), e); return Optional.empty(); } }
class AthenzSslKeyStoreConfigurator extends AbstractComponent implements SslKeyStoreConfigurator { private static final Logger log = Logger.getLogger(AthenzSslKeyStoreConfigurator.class.getName()); private static final String CERTIFICATE_ALIAS = "athenz"; private static final String CERTIFICATE_PASSWORD = "athenz"; private static final Duration EXPIRATION_MARGIN = Duration.ofHours(6); private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor(); private final AthenzCertificateClient certificateClient; private final KeyProvider keyProvider; private final AthenzProviderServiceConfig.Zones zoneConfig; private final Duration updatePeriod; private final Path keystoreCachePath; private volatile KeyStore currentKeyStore; @Inject public AthenzSslKeyStoreConfigurator(KeyProvider keyProvider, AthenzProviderServiceConfig config, Zone zone, ConfigserverConfig configserverConfig) { AthenzProviderServiceConfig.Zones zoneConfig = getZoneConfig(config, zone); Path keystoreCachePath = createKeystoreCachePath(configserverConfig); AthenzCertificateClient certificateClient = new AthenzCertificateClient(config, zoneConfig); Duration updatePeriod = Duration.ofDays(config.updatePeriodDays()); this.certificateClient = certificateClient; this.keyProvider = keyProvider; this.zoneConfig = zoneConfig; this.currentKeyStore = initializeKeystore(keyProvider, certificateClient, zoneConfig, keystoreCachePath, updatePeriod); this.updatePeriod = updatePeriod; this.keystoreCachePath = keystoreCachePath; } private static KeyStore initializeKeystore(KeyProvider keyProvider, AthenzCertificateClient certificateClient, AthenzProviderServiceConfig.Zones zoneConfig, Path keystoreCachePath, Duration updatePeriod) { return tryReadKeystoreFile(keystoreCachePath.toFile(), updatePeriod) .orElseGet(() -> downloadCertificate(keyProvider, certificateClient, zoneConfig, keystoreCachePath)); } private static Path createKeystoreCachePath(ConfigserverConfig configserverConfig) { 
return Paths.get( Defaults.getDefaults().underVespaHome(configserverConfig.configServerDBDir()), "server-x509-athenz-cert.jks"); } @Override public void configure(SslKeyStoreContext sslKeyStoreContext) { sslKeyStoreContext.updateKeyStore(currentKeyStore, CERTIFICATE_PASSWORD); scheduler.scheduleAtFixedRate(new AthenzCertificateUpdater(sslKeyStoreContext), updatePeriod.toDays()/*initial delay*/, updatePeriod.toDays(), TimeUnit.DAYS); } @Override public void deconstruct() { try { scheduler.shutdownNow(); scheduler.awaitTermination(30, TimeUnit.SECONDS); } catch (InterruptedException e) { throw new RuntimeException("Failed to shutdown Athenz certificate updater on time", e); } } Instant getKeyStoreExpiry() throws KeyStoreException { return getKeyStoreExpiry(currentKeyStore); } private static Instant getKeyStoreExpiry(KeyStore keyStore) throws KeyStoreException { X509Certificate certificate = (X509Certificate) keyStore.getCertificate(CERTIFICATE_ALIAS); return certificate.getNotAfter().toInstant(); } private static KeyStore downloadCertificate(KeyProvider keyProvider, AthenzCertificateClient certificateClient, AthenzProviderServiceConfig.Zones zoneConfig, Path keystoreCachePath) { try { PrivateKey privateKey = keyProvider.getPrivateKey(zoneConfig.secretVersion()); X509Certificate certificate = certificateClient.updateCertificate(privateKey); Instant expirationTime = certificate.getNotAfter().toInstant(); Duration expiry = Duration.between(certificate.getNotBefore().toInstant(), expirationTime); log.log(LogLevel.INFO, String.format("Got Athenz x509 certificate with expiry %s (expires %s)", expiry, expirationTime)); KeyStore keyStore = KeyStore.getInstance("JKS"); keyStore.load(null); keyStore.setKeyEntry( CERTIFICATE_ALIAS, privateKey, CERTIFICATE_PASSWORD.toCharArray(), new Certificate[]{certificate}); writeKeystore(keyStore, keystoreCachePath); return keyStore; } catch (IOException | GeneralSecurityException e) { throw new RuntimeException(e); } } private static void 
writeKeystore(KeyStore keyStore, Path keystoreCachePath) { try (OutputStream out = new BufferedOutputStream(new FileOutputStream(keystoreCachePath.toFile()))) { keyStore.store(out, new char[0]); } catch (IOException | GeneralSecurityException e) { throw new RuntimeException(e); } } private class AthenzCertificateUpdater implements Runnable { private final SslKeyStoreContext sslKeyStoreContext; AthenzCertificateUpdater(SslKeyStoreContext sslKeyStoreContext) { this.sslKeyStoreContext = sslKeyStoreContext; } @Override public void run() { try { log.log(LogLevel.INFO, "Updating Athenz certificate from ZTS"); currentKeyStore = downloadCertificate(keyProvider, certificateClient, zoneConfig, keystoreCachePath); sslKeyStoreContext.updateKeyStore(currentKeyStore, CERTIFICATE_PASSWORD); log.log(LogLevel.INFO, "Athenz certificate reload successfully completed"); } catch (Throwable e) { log.log(LogLevel.ERROR, "Failed to update certificate from ZTS: " + e.getMessage(), e); } } } }
class AthenzSslKeyStoreConfigurator extends AbstractComponent implements SslKeyStoreConfigurator { private static final Logger log = Logger.getLogger(AthenzSslKeyStoreConfigurator.class.getName()); private static final String CERTIFICATE_ALIAS = "athenz"; private static final String CERTIFICATE_PASSWORD = "athenz"; private static final Duration EXPIRATION_MARGIN = Duration.ofHours(6); private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor(); private final AthenzCertificateClient certificateClient; private final KeyProvider keyProvider; private final AthenzProviderServiceConfig.Zones zoneConfig; private final Duration updatePeriod; private final Path keystoreCachePath; private volatile KeyStore currentKeyStore; @Inject public AthenzSslKeyStoreConfigurator(KeyProvider keyProvider, AthenzProviderServiceConfig config, Zone zone, ConfigserverConfig configserverConfig) { AthenzProviderServiceConfig.Zones zoneConfig = getZoneConfig(config, zone); Path keystoreCachePath = createKeystoreCachePath(configserverConfig); AthenzCertificateClient certificateClient = new AthenzCertificateClient(config, zoneConfig); Duration updatePeriod = Duration.ofDays(config.updatePeriodDays()); this.certificateClient = certificateClient; this.keyProvider = keyProvider; this.zoneConfig = zoneConfig; this.currentKeyStore = initializeKeystore(keyProvider, certificateClient, zoneConfig, keystoreCachePath, updatePeriod); this.updatePeriod = updatePeriod; this.keystoreCachePath = keystoreCachePath; } private static KeyStore initializeKeystore(KeyProvider keyProvider, AthenzCertificateClient certificateClient, AthenzProviderServiceConfig.Zones zoneConfig, Path keystoreCachePath, Duration updatePeriod) { return tryReadKeystoreFile(keystoreCachePath.toFile(), updatePeriod) .orElseGet(() -> downloadCertificate(keyProvider, certificateClient, zoneConfig, keystoreCachePath)); } private static Path createKeystoreCachePath(ConfigserverConfig configserverConfig) { 
return Paths.get( Defaults.getDefaults().underVespaHome(configserverConfig.configServerDBDir()), "server-x509-athenz-cert.jks"); } @Override public void configure(SslKeyStoreContext sslKeyStoreContext) { sslKeyStoreContext.updateKeyStore(currentKeyStore, CERTIFICATE_PASSWORD); scheduler.scheduleAtFixedRate(new AthenzCertificateUpdater(sslKeyStoreContext), updatePeriod.toDays()/*initial delay*/, updatePeriod.toDays(), TimeUnit.DAYS); } @Override public void deconstruct() { try { scheduler.shutdownNow(); scheduler.awaitTermination(30, TimeUnit.SECONDS); } catch (InterruptedException e) { throw new RuntimeException("Failed to shutdown Athenz certificate updater on time", e); } } Instant getCertificateExpiry() throws KeyStoreException { return getCertificateExpiry(currentKeyStore); } private static Instant getCertificateExpiry(KeyStore keyStore) throws KeyStoreException { X509Certificate certificate = (X509Certificate) keyStore.getCertificate(CERTIFICATE_ALIAS); return certificate.getNotAfter().toInstant(); } private static KeyStore downloadCertificate(KeyProvider keyProvider, AthenzCertificateClient certificateClient, AthenzProviderServiceConfig.Zones zoneConfig, Path keystoreCachePath) { try { PrivateKey privateKey = keyProvider.getPrivateKey(zoneConfig.secretVersion()); X509Certificate certificate = certificateClient.updateCertificate(privateKey); Instant expirationTime = certificate.getNotAfter().toInstant(); Duration expiry = Duration.between(certificate.getNotBefore().toInstant(), expirationTime); log.log(LogLevel.INFO, String.format("Got Athenz x509 certificate with expiry %s (expires %s)", expiry, expirationTime)); KeyStore keyStore = KeyStore.getInstance("JKS"); keyStore.load(null); keyStore.setKeyEntry( CERTIFICATE_ALIAS, privateKey, CERTIFICATE_PASSWORD.toCharArray(), new Certificate[]{certificate}); tryWriteKeystore(keyStore, keystoreCachePath); return keyStore; } catch (IOException | GeneralSecurityException e) { throw new RuntimeException(e); } } private 
static void tryWriteKeystore(KeyStore keyStore, Path keystoreCachePath) { try (OutputStream out = new BufferedOutputStream(new FileOutputStream(keystoreCachePath.toFile()))) { keyStore.store(out, new char[0]); } catch (IOException | GeneralSecurityException e) { log.log(LogLevel.ERROR, "Failed to write keystore to disk: " + e.getMessage(), e); } } private class AthenzCertificateUpdater implements Runnable { private final SslKeyStoreContext sslKeyStoreContext; AthenzCertificateUpdater(SslKeyStoreContext sslKeyStoreContext) { this.sslKeyStoreContext = sslKeyStoreContext; } @Override public void run() { try { log.log(LogLevel.INFO, "Updating Athenz certificate from ZTS"); currentKeyStore = downloadCertificate(keyProvider, certificateClient, zoneConfig, keystoreCachePath); sslKeyStoreContext.updateKeyStore(currentKeyStore, CERTIFICATE_PASSWORD); log.log(LogLevel.INFO, "Athenz certificate reload successfully completed"); } catch (Throwable e) { log.log(LogLevel.ERROR, "Failed to update certificate from ZTS: " + e.getMessage(), e); } } } }
The magic number 5 should be extracted into a named constant.
// Registers this instance with Athenz, then schedules the periodic certificate
// refresh and metrics-reporting background tasks.
// Throws AthenzIdentityProviderException if the initial registration fails.
private void registerInstance() {
    try {
        credentials = athenzCredentialsService.registerInstance();
        // Named instead of a bare magic number; refresh frequently so a failed
        // attempt can be retried well before the certificate expires.
        final long refreshPeriodMinutes = 5;
        scheduler.scheduleAtFixedRate(this::refreshCertificate, 0, refreshPeriodMinutes, TimeUnit.MINUTES);
        scheduler.scheduleAtFixedRate(this::reportMetrics, UPDATE_PERIOD.toMinutes(), UPDATE_PERIOD.toMinutes(), TimeUnit.MINUTES);
    } catch (Throwable t) {
        throw new AthenzIdentityProviderException("Could not retrieve Athenz credentials", t);
    }
}
scheduler.scheduleAtFixedRate(this::refreshCertificate, 0, 5, TimeUnit.MINUTES);
// Registers this instance with Athenz, then schedules the periodic certificate
// refresh and metrics-reporting background tasks.
// Throws AthenzIdentityProviderException if the initial registration fails.
private void registerInstance() {
    try {
        credentials = athenzCredentialsService.registerInstance();
        // NOTE(review): the 5-minute refresh period is a magic number — extract to a named constant.
        scheduler.scheduleAtFixedRate(this::refreshCertificate, 0, 5, TimeUnit.MINUTES);
        scheduler.scheduleAtFixedRate(this::reportMetrics, UPDATE_PERIOD.toMinutes(), UPDATE_PERIOD.toMinutes(), TimeUnit.MINUTES);
    } catch (Throwable t) {
        throw new AthenzIdentityProviderException("Could not retrieve Athenz credentials", t);
    }
}
class AthenzIdentityProviderImpl extends AbstractComponent implements AthenzIdentityProvider { private static final Logger log = Logger.getLogger(AthenzIdentityProviderImpl.class.getName()); static final Duration UPDATE_PERIOD = Duration.ofDays(1); static final Duration AWAIT_TERMINTATION_TIMEOUT = Duration.ofSeconds(90); public static final String CERTIFICATE_EXPIRY_METRIC_NAME = "athenz-tenant-cert.expiry.seconds"; private volatile AthenzCredentials credentials; private final Metric metric; private final AthenzCredentialsService athenzCredentialsService; private final ScheduledExecutorService scheduler; private final Clock clock; private final String domain; private final String service; @Inject public AthenzIdentityProviderImpl(IdentityConfig config, Metric metric) { this(config, metric, new AthenzCredentialsService(config, new IdentityDocumentService(config.loadBalancerAddress()), new AthenzService(), Clock.systemUTC()), new ScheduledThreadPoolExecutor(1), Clock.systemUTC()); } AthenzIdentityProviderImpl(IdentityConfig config, Metric metric, AthenzCredentialsService athenzCredentialsService, ScheduledExecutorService scheduler, Clock clock) { this.metric = metric; this.athenzCredentialsService = athenzCredentialsService; this.scheduler = scheduler; this.clock = clock; this.domain = config.domain(); this.service = config.service(); registerInstance(); } @Override public String getDomain() { return domain; } @Override public String getService() { return service; } @Override public SSLContext getIdentitySslContext() { return new AthenzSslContextBuilder() .withIdentityCertificate(new AthenzIdentityCertificate( credentials.getCertificate(), credentials.getKeyPair().getPrivate())) .withTrustStore(new File(Defaults.getDefaults().underVespaHome("share/ssl/certs/yahoo_certificate_bundle.jks")), "JKS") .build(); } @Override public void deconstruct() { try { scheduler.shutdownNow(); scheduler.awaitTermination(AWAIT_TERMINTATION_TIMEOUT.getSeconds(), TimeUnit.SECONDS); } 
catch (InterruptedException e) { throw new RuntimeException(e); } } private boolean isExpired(AthenzCredentials credentials) { return clock.instant().isAfter(getExpirationTime(credentials)); } private static Instant getExpirationTime(AthenzCredentials credentials) { return credentials.getCertificate().getNotAfter().toInstant(); } void refreshCertificate() { try { AthenzCredentials newCredentials = isExpired(credentials) ? athenzCredentialsService.registerInstance() : athenzCredentialsService.updateCredentials(credentials); credentials = newCredentials; } catch (Throwable t) { log.log(LogLevel.WARNING, "Failed to update credentials: " + t.getMessage(), t); } } void reportMetrics() { try { Instant expirationTime = getExpirationTime(credentials); Duration remainingLifetime = Duration.between(clock.instant(), expirationTime); metric.set(CERTIFICATE_EXPIRY_METRIC_NAME, remainingLifetime.getSeconds(), null); } catch (Throwable t) { log.log(LogLevel.WARNING, "Failed to update metrics: " + t.getMessage(), t); } } }
class AthenzIdentityProviderImpl extends AbstractComponent implements AthenzIdentityProvider { private static final Logger log = Logger.getLogger(AthenzIdentityProviderImpl.class.getName()); static final Duration UPDATE_PERIOD = Duration.ofDays(1); static final Duration AWAIT_TERMINTATION_TIMEOUT = Duration.ofSeconds(90); public static final String CERTIFICATE_EXPIRY_METRIC_NAME = "athenz-tenant-cert.expiry.seconds"; private volatile AthenzCredentials credentials; private final Metric metric; private final AthenzCredentialsService athenzCredentialsService; private final ScheduledExecutorService scheduler; private final Clock clock; private final String domain; private final String service; @Inject public AthenzIdentityProviderImpl(IdentityConfig config, Metric metric) { this(config, metric, new AthenzCredentialsService(config, new IdentityDocumentService(config.loadBalancerAddress()), new AthenzService(), Clock.systemUTC()), new ScheduledThreadPoolExecutor(1), Clock.systemUTC()); } AthenzIdentityProviderImpl(IdentityConfig config, Metric metric, AthenzCredentialsService athenzCredentialsService, ScheduledExecutorService scheduler, Clock clock) { this.metric = metric; this.athenzCredentialsService = athenzCredentialsService; this.scheduler = scheduler; this.clock = clock; this.domain = config.domain(); this.service = config.service(); registerInstance(); } @Override public String getDomain() { return domain; } @Override public String getService() { return service; } @Override public SSLContext getIdentitySslContext() { return new AthenzSslContextBuilder() .withIdentityCertificate(new AthenzIdentityCertificate( credentials.getCertificate(), credentials.getKeyPair().getPrivate())) .withTrustStore(new File(Defaults.getDefaults().underVespaHome("share/ssl/certs/yahoo_certificate_bundle.jks")), "JKS") .build(); } @Override public void deconstruct() { try { scheduler.shutdownNow(); scheduler.awaitTermination(AWAIT_TERMINTATION_TIMEOUT.getSeconds(), TimeUnit.SECONDS); } 
catch (InterruptedException e) { throw new RuntimeException(e); } } private boolean isExpired(AthenzCredentials credentials) { return clock.instant().isAfter(getExpirationTime(credentials)); } private static Instant getExpirationTime(AthenzCredentials credentials) { return credentials.getCertificate().getNotAfter().toInstant(); } void refreshCertificate() { try { AthenzCredentials newCredentials = isExpired(credentials) ? athenzCredentialsService.registerInstance() : athenzCredentialsService.updateCredentials(credentials); credentials = newCredentials; } catch (Throwable t) { log.log(LogLevel.WARNING, "Failed to update credentials: " + t.getMessage(), t); } } void reportMetrics() { try { Instant expirationTime = getExpirationTime(credentials); Duration remainingLifetime = Duration.between(clock.instant(), expirationTime); metric.set(CERTIFICATE_EXPIRY_METRIC_NAME, remainingLifetime.getSeconds(), null); } catch (Throwable t) { log.log(LogLevel.WARNING, "Failed to update metrics: " + t.getMessage(), t); } } }
👍
// Returns whether the given principal may access the node identified by hostname:
// a node may access itself, and a parent host may access its children.
private boolean canAccess(String hostname, Principal principal) {
    // Reject hostnames consisting solely of '.' characters (this also rejects the
    // empty string, since allMatch on an empty stream is true) — presumably a guard
    // against degenerate/path-like hostnames; TODO confirm intent.
    if (hostname.chars().allMatch(c -> c == '.')) {
        return false;
    }
    if (principal.getName().equals(hostname)) {
        return true;
    }
    // Fall back to parent-host access: look the node up and compare its parent hostname.
    return nodeRepository.getNode(hostname)
            .flatMap(Node::parentHostname)
            .map(parentHostname -> principal.getName().equals(parentHostname))
            .orElse(false);
}
if (hostname.chars().allMatch(c -> c == '.')) {
// Returns whether the given principal may access the node identified by hostname:
// a node may access itself, and a parent host may access its children.
private boolean canAccess(String hostname, Principal principal) {
    // A hostname made up only of '.' characters (including the empty string,
    // as allMatch on an empty stream is true) is never accessible.
    boolean degenerateHostname = hostname.chars().allMatch(c -> c == '.');
    if (degenerateHostname) return false;

    String principalName = principal.getName();
    if (principalName.equals(hostname)) return true; // a node may always access itself

    // Otherwise, grant access only if the principal is the node's parent host.
    return nodeRepository.getNode(hostname)
                         .flatMap(Node::parentHostname)
                         .map(parent -> principalName.equals(parent))
                         .orElse(false);
}
class Authorizer implements BiPredicate<Principal, URI> { private final SystemName system; private final NodeRepository nodeRepository; public Authorizer(SystemName system, NodeRepository nodeRepository) { this.system = system; this.nodeRepository = nodeRepository; } /** Returns whether principal is authorized to access given URI */ @Override public boolean test(Principal principal, URI uri) { if (principal.getName().equals(trustedService())) { return true; } if (canAccess(hostnamesFrom(uri), principal)) { return true; } return false; } /** Returns whether principal can access node identified by hostname */ /** Returns whether principal can access all nodes identified by given hostnames */ private boolean canAccess(List<String> hostnames, Principal principal) { return !hostnames.isEmpty() && hostnames.stream().allMatch(hostname -> canAccess(hostname, principal)); } /** Trusted service name for this system */ private String trustedService() { if (system != SystemName.main) { return "vespa.vespa." + system.name() + ".hosting"; } return "vespa.vespa.hosting"; } /** Returns hostnames contained in query parameters of given URI */ private static List<String> hostnamesFromQuery(URI uri) { return URLEncodedUtils.parse(uri, StandardCharsets.UTF_8.name()) .stream() .filter(pair -> "hostname".equals(pair.getName()) || "parentHost".equals(pair.getName())) .map(NameValuePair::getValue) .filter(hostname -> !hostname.isEmpty()) .collect(Collectors.toList()); } /** Returns hostnames from a URI if any, e.g. 
/nodes/v2/node/node1.fqdn */ private static List<String> hostnamesFrom(URI uri) { if (isChildOf("/nodes/v2/acl/", uri.getPath()) || isChildOf("/nodes/v2/node/", uri.getPath()) || isChildOf("/nodes/v2/state/", uri.getPath())) { return Collections.singletonList(lastChildOf(uri.getPath())); } if (isChildOf("/orchestrator/v1/hosts/", uri.getPath())) { return firstChildOf("/orchestrator/v1/hosts/", uri.getPath()) .map(Collections::singletonList) .orElseGet(Collections::emptyList); } if (isChildOf("/orchestrator/v1/suspensions/hosts/", uri.getPath())) { List<String> hostnames = new ArrayList<>(); hostnames.add(lastChildOf(uri.getPath())); hostnames.addAll(hostnamesFromQuery(uri)); return hostnames; } if (isChildOf("/nodes/v2/command/", uri.getPath()) || "/nodes/v2/node/".equals(uri.getPath())) { return hostnamesFromQuery(uri); } return Collections.emptyList(); } /** Returns whether child is a sub-path of parent */ private static boolean isChildOf(String parent, String child) { return child.startsWith(parent) && child.length() > parent.length(); } /** Returns the first component of path relative to root */ private static Optional<String> firstChildOf(String root, String path) { if (!isChildOf(root, path)) { return Optional.empty(); } path = path.substring(root.length(), path.length()); int firstSeparator = path.indexOf('/'); if (firstSeparator == -1) { return Optional.of(path); } return Optional.of(path.substring(0, firstSeparator)); } /** Returns the last component of the given path */ private static String lastChildOf(String path) { if (path.endsWith("/")) { path = path.substring(0, path.length() - 1); } int lastSeparator = path.lastIndexOf("/"); if (lastSeparator == - 1) { return path; } return path.substring(lastSeparator + 1, path.length()); } }
class Authorizer implements BiPredicate<Principal, URI> { private final SystemName system; private final NodeRepository nodeRepository; public Authorizer(SystemName system, NodeRepository nodeRepository) { this.system = system; this.nodeRepository = nodeRepository; } /** Returns whether principal is authorized to access given URI */ @Override public boolean test(Principal principal, URI uri) { if (principal.getName().equals(trustedService())) { return true; } if (canAccess(hostnamesFrom(uri), principal)) { return true; } return false; } /** Returns whether principal can access node identified by hostname */ /** Returns whether principal can access all nodes identified by given hostnames */ private boolean canAccess(List<String> hostnames, Principal principal) { return !hostnames.isEmpty() && hostnames.stream().allMatch(hostname -> canAccess(hostname, principal)); } /** Trusted service name for this system */ private String trustedService() { if (system != SystemName.main) { return "vespa.vespa." + system.name() + ".hosting"; } return "vespa.vespa.hosting"; } /** Returns hostnames contained in query parameters of given URI */ private static List<String> hostnamesFromQuery(URI uri) { return URLEncodedUtils.parse(uri, StandardCharsets.UTF_8.name()) .stream() .filter(pair -> "hostname".equals(pair.getName()) || "parentHost".equals(pair.getName())) .map(NameValuePair::getValue) .filter(hostname -> !hostname.isEmpty()) .collect(Collectors.toList()); } /** Returns hostnames from a URI if any, e.g. 
/nodes/v2/node/node1.fqdn */ private static List<String> hostnamesFrom(URI uri) { if (isChildOf("/nodes/v2/acl/", uri.getPath()) || isChildOf("/nodes/v2/node/", uri.getPath()) || isChildOf("/nodes/v2/state/", uri.getPath())) { return Collections.singletonList(lastChildOf(uri.getPath())); } if (isChildOf("/orchestrator/v1/hosts/", uri.getPath())) { return firstChildOf("/orchestrator/v1/hosts/", uri.getPath()) .map(Collections::singletonList) .orElseGet(Collections::emptyList); } if (isChildOf("/orchestrator/v1/suspensions/hosts/", uri.getPath())) { List<String> hostnames = new ArrayList<>(); hostnames.add(lastChildOf(uri.getPath())); hostnames.addAll(hostnamesFromQuery(uri)); return hostnames; } if (isChildOf("/nodes/v2/command/", uri.getPath()) || "/nodes/v2/node/".equals(uri.getPath())) { return hostnamesFromQuery(uri); } return Collections.emptyList(); } /** Returns whether child is a sub-path of parent */ private static boolean isChildOf(String parent, String child) { return child.startsWith(parent) && child.length() > parent.length(); } /** Returns the first component of path relative to root */ private static Optional<String> firstChildOf(String root, String path) { if (!isChildOf(root, path)) { return Optional.empty(); } path = path.substring(root.length(), path.length()); int firstSeparator = path.indexOf('/'); if (firstSeparator == -1) { return Optional.of(path); } return Optional.of(path.substring(0, firstSeparator)); } /** Returns the last component of the given path */ private static String lastChildOf(String path) { if (path.endsWith("/")) { path = path.substring(0, path.length() - 1); } int lastSeparator = path.lastIndexOf("/"); if (lastSeparator == - 1) { return path; } return path.substring(lastSeparator + 1, path.length()); } }
Isn't this the same problem we had last time? Why not always write the flavor name; then, in the next serialization version, it can be moved inside the `if`?
// Serializes a flavor: configured flavors are stored by name only, while ad-hoc
// flavors are stored as an explicit resources object (vcpu/memory/disk).
private void toSlime(Flavor flavor, Cursor object) {
    if (flavor.isConfigured()) {
        object.setString(flavorKey, flavor.name());
    } else {
        NodeResources resources = flavor.resources();
        Cursor resourcesObject = object.setObject(resourcesKey);
        resourcesObject.setDouble(vcpuKey, resources.vcpu());
        resourcesObject.setDouble(memoryKey, resources.memoryGb());
        resourcesObject.setDouble(diskKey, resources.diskGb());
    }
}
object.setString(flavorKey, flavor.name());
// Serializes a flavor: configured flavors are stored by name only, while ad-hoc
// flavors are stored as an explicit resources object (vcpu/memory/disk).
private void toSlime(Flavor flavor, Cursor object) {
    if ( ! flavor.isConfigured()) {
        Cursor resourcesObject = object.setObject(resourcesKey);
        NodeResources resources = flavor.resources();
        resourcesObject.setDouble(vcpuKey, resources.vcpu());
        resourcesObject.setDouble(memoryKey, resources.memoryGb());
        resourcesObject.setDouble(diskKey, resources.diskGb());
    }
    else {
        object.setString(flavorKey, flavor.name());
    }
}
class AllocatedHosts { private static final String mappingKey = "mapping"; private static final String hostSpecKey = "hostSpec"; private static final String hostSpecHostNameKey = "hostName"; private static final String aliasesKey = "aliases"; private static final String hostSpecMembershipKey = "membership"; private static final String flavorKey = "flavor"; private static final String resourcesKey = "resources"; private static final String vcpuKey = "vcpu"; private static final String memoryKey = "memory"; private static final String diskKey = "disk"; /** Wanted version */ private static final String hostSpecVespaVersionKey = "vespaVersion"; /** Current version */ private static final String hostSpecCurrentVespaVersionKey = "currentVespaVersion"; private static final String hostSpecNetworkPortsKey = "ports"; private final ImmutableSet<HostSpec> hosts; AllocatedHosts(Set<HostSpec> hosts) { this.hosts = ImmutableSet.copyOf(hosts); } public static AllocatedHosts withHosts(Set<HostSpec> hosts) { return new AllocatedHosts(hosts); } private void toSlime(Cursor cursor) { Cursor array = cursor.setArray(mappingKey); for (HostSpec host : hosts) toSlime(host, array.addObject().setObject(hostSpecKey)); } private void toSlime(HostSpec host, Cursor cursor) { cursor.setString(hostSpecHostNameKey, host.hostname()); aliasesToSlime(host, cursor); host.membership().ifPresent(membership -> { cursor.setString(hostSpecMembershipKey, membership.stringValue()); cursor.setString(hostSpecVespaVersionKey, membership.cluster().vespaVersion().toFullString()); }); host.flavor().ifPresent(flavor -> toSlime(flavor, cursor)); host.version().ifPresent(version -> cursor.setString(hostSpecCurrentVespaVersionKey, version.toFullString())); host.networkPorts().ifPresent(ports -> NetworkPortsSerializer.toSlime(ports, cursor.setArray(hostSpecNetworkPortsKey))); } private void aliasesToSlime(HostSpec spec, Cursor cursor) { if (spec.aliases().isEmpty()) return; Cursor aliases = cursor.setArray(aliasesKey); 
for (String alias : spec.aliases()) aliases.addString(alias); } /** Returns the hosts of this allocation */ public Set<HostSpec> getHosts() { return hosts; } private static AllocatedHosts fromSlime(Inspector inspector, Optional<NodeFlavors> nodeFlavors) { Inspector array = inspector.field(mappingKey); Set<HostSpec> hosts = new LinkedHashSet<>(); array.traverse((ArrayTraverser)(i, host) -> hosts.add(hostFromSlime(host.field(hostSpecKey), nodeFlavors))); return new AllocatedHosts(hosts); } static HostSpec hostFromSlime(Inspector object, Optional<NodeFlavors> nodeFlavors) { Optional<ClusterMembership> membership = object.field(hostSpecMembershipKey).valid() ? Optional.of(membershipFromSlime(object)) : Optional.empty(); Optional<Flavor> flavor = flavorFromSlime(object, nodeFlavors); Optional<com.yahoo.component.Version> version = optionalString(object.field(hostSpecCurrentVespaVersionKey)).map(com.yahoo.component.Version::new); Optional<NetworkPorts> networkPorts = NetworkPortsSerializer.fromSlime(object.field(hostSpecNetworkPortsKey)); return new HostSpec(object.field(hostSpecHostNameKey).asString(), aliasesFromSlime(object), flavor, membership, version, networkPorts); } private static List<String> aliasesFromSlime(Inspector object) { if ( ! 
object.field(aliasesKey).valid()) return Collections.emptyList(); List<String> aliases = new ArrayList<>(); object.field(aliasesKey).traverse((ArrayTraverser)(index, alias) -> aliases.add(alias.asString())); return aliases; } private static Optional<Flavor> flavorFromSlime(Inspector object, Optional<NodeFlavors> nodeFlavors) { if (object.field(flavorKey).valid() && nodeFlavors.isPresent() && nodeFlavors.get().exists(object.field(flavorKey).asString())) { return nodeFlavors.get().getFlavor(object.field(flavorKey).asString()); } else if (object.field(resourcesKey).valid()) { Inspector resources = object.field(resourcesKey); return Optional.of(new Flavor(new NodeResources(resources.field(vcpuKey).asDouble(), resources.field(memoryKey).asDouble(), resources.field(diskKey).asDouble()))); } else { return Optional.empty(); } } private static ClusterMembership membershipFromSlime(Inspector object) { return ClusterMembership.from(object.field(hostSpecMembershipKey).asString(), com.yahoo.component.Version.fromString(object.field(hostSpecVespaVersionKey).asString())); } private static Optional<String> optionalString(Inspector inspector) { if ( ! inspector.valid()) return Optional.empty(); return Optional.of(inspector.asString()); } public byte[] toJson() throws IOException { Slime slime = new Slime(); toSlime(slime.setObject()); return SlimeUtils.toJsonBytes(slime); } public static AllocatedHosts fromJson(byte[] json, Optional<NodeFlavors> nodeFlavors) { return fromSlime(SlimeUtils.jsonToSlime(json).get(), nodeFlavors); } @Override public boolean equals(Object other) { if (other == this) return true; if ( ! (other instanceof AllocatedHosts)) return false; return ((AllocatedHosts) other).hosts.equals(this.hosts); } @Override public int hashCode() { return hosts.hashCode(); } @Override public String toString() { return hosts.toString(); } }
/**
 * The set of hosts allocated to an application, with (de)serialization to and from
 * Slime/JSON. Instances are immutable; the host set is defensively copied on construction.
 */
class AllocatedHosts {

    // Slime field names of the serialized form. Changing any of these breaks wire/disk compatibility.
    private static final String mappingKey = "mapping";
    private static final String hostSpecKey = "hostSpec";
    private static final String hostSpecHostNameKey = "hostName";
    private static final String aliasesKey = "aliases";
    private static final String hostSpecMembershipKey = "membership";
    private static final String flavorKey = "flavor";
    private static final String resourcesKey = "resources";
    private static final String vcpuKey = "vcpu";
    private static final String memoryKey = "memory";
    private static final String diskKey = "disk";
    /** Wanted version */
    private static final String hostSpecVespaVersionKey = "vespaVersion";
    /** Current version */
    private static final String hostSpecCurrentVespaVersionKey = "currentVespaVersion";
    private static final String hostSpecNetworkPortsKey = "ports";

    private final ImmutableSet<HostSpec> hosts;

    AllocatedHosts(Set<HostSpec> hosts) {
        this.hosts = ImmutableSet.copyOf(hosts);
    }

    /** Creates an allocation of the given hosts. */
    public static AllocatedHosts withHosts(Set<HostSpec> hosts) {
        return new AllocatedHosts(hosts);
    }

    /** Serializes all hosts as an array under {@code mappingKey} in the given cursor. */
    private void toSlime(Cursor cursor) {
        Cursor array = cursor.setArray(mappingKey);
        for (HostSpec host : hosts)
            toSlime(host, array.addObject().setObject(hostSpecKey));
    }

    /** Serializes a single host spec. Optional parts are only written when present. */
    private void toSlime(HostSpec host, Cursor cursor) {
        cursor.setString(hostSpecHostNameKey, host.hostname());
        aliasesToSlime(host, cursor);
        host.membership().ifPresent(membership -> {
            cursor.setString(hostSpecMembershipKey, membership.stringValue());
            cursor.setString(hostSpecVespaVersionKey, membership.cluster().vespaVersion().toFullString());
        });
        // NOTE(review): toSlime(Flavor, Cursor) is not visible in this chunk — presumably defined nearby; confirm
        host.flavor().ifPresent(flavor -> toSlime(flavor, cursor));
        host.version().ifPresent(version -> cursor.setString(hostSpecCurrentVespaVersionKey, version.toFullString()));
        host.networkPorts().ifPresent(ports -> NetworkPortsSerializer.toSlime(ports, cursor.setArray(hostSpecNetworkPortsKey)));
    }

    /** Writes the alias array; omitted entirely when the host has no aliases. */
    private void aliasesToSlime(HostSpec spec, Cursor cursor) {
        if (spec.aliases().isEmpty()) return;
        Cursor aliases = cursor.setArray(aliasesKey);
        for (String alias : spec.aliases())
            aliases.addString(alias);
    }

    /** Returns the hosts of this allocation */
    public Set<HostSpec> getHosts() { return hosts; }

    /** Deserializes an allocation; insertion order of the serialized array is preserved. */
    private static AllocatedHosts fromSlime(Inspector inspector, Optional<NodeFlavors> nodeFlavors) {
        Inspector array = inspector.field(mappingKey);
        Set<HostSpec> hosts = new LinkedHashSet<>(); // LinkedHashSet keeps serialized order
        array.traverse((ArrayTraverser)(i, host) -> hosts.add(hostFromSlime(host.field(hostSpecKey), nodeFlavors)));
        return new AllocatedHosts(hosts);
    }

    /** Deserializes one host spec; absent optional fields become Optional.empty(). */
    static HostSpec hostFromSlime(Inspector object, Optional<NodeFlavors> nodeFlavors) {
        Optional<ClusterMembership> membership =
                object.field(hostSpecMembershipKey).valid() ? Optional.of(membershipFromSlime(object)) : Optional.empty();
        Optional<Flavor> flavor = flavorFromSlime(object, nodeFlavors);
        Optional<com.yahoo.component.Version> version =
                optionalString(object.field(hostSpecCurrentVespaVersionKey)).map(com.yahoo.component.Version::new);
        Optional<NetworkPorts> networkPorts = NetworkPortsSerializer.fromSlime(object.field(hostSpecNetworkPortsKey));
        return new HostSpec(object.field(hostSpecHostNameKey).asString(),
                            aliasesFromSlime(object), flavor, membership, version, networkPorts);
    }

    /** Reads the alias array; a missing field yields an empty list. */
    private static List<String> aliasesFromSlime(Inspector object) {
        if ( ! object.field(aliasesKey).valid()) return Collections.emptyList();
        List<String> aliases = new ArrayList<>();
        object.field(aliasesKey).traverse((ArrayTraverser)(index, alias) -> aliases.add(alias.asString()));
        return aliases;
    }

    /**
     * Resolves a flavor: prefers a named flavor known to the given NodeFlavors,
     * falls back to explicit resources, otherwise empty.
     */
    private static Optional<Flavor> flavorFromSlime(Inspector object, Optional<NodeFlavors> nodeFlavors) {
        if (object.field(flavorKey).valid() && nodeFlavors.isPresent() && nodeFlavors.get().exists(object.field(flavorKey).asString())) {
            return nodeFlavors.get().getFlavor(object.field(flavorKey).asString());
        } else if (object.field(resourcesKey).valid()) {
            Inspector resources = object.field(resourcesKey);
            return Optional.of(new Flavor(new NodeResources(resources.field(vcpuKey).asDouble(),
                                                            resources.field(memoryKey).asDouble(),
                                                            resources.field(diskKey).asDouble())));
        } else {
            return Optional.empty();
        }
    }

    /** Reconstructs membership from the membership string and the wanted Vespa version. */
    private static ClusterMembership membershipFromSlime(Inspector object) {
        return ClusterMembership.from(object.field(hostSpecMembershipKey).asString(),
                                      com.yahoo.component.Version.fromString(object.field(hostSpecVespaVersionKey).asString()));
    }

    /** Returns the string value of the inspector, or empty when the field is not valid. */
    private static Optional<String> optionalString(Inspector inspector) {
        if ( ! inspector.valid()) return Optional.empty();
        return Optional.of(inspector.asString());
    }

    /** Serializes this allocation to JSON bytes. */
    public byte[] toJson() throws IOException {
        Slime slime = new Slime();
        toSlime(slime.setObject());
        return SlimeUtils.toJsonBytes(slime);
    }

    /** Deserializes an allocation from JSON bytes produced by {@link #toJson()}. */
    public static AllocatedHosts fromJson(byte[] json, Optional<NodeFlavors> nodeFlavors) {
        return fromSlime(SlimeUtils.jsonToSlime(json).get(), nodeFlavors);
    }

    @Override
    public boolean equals(Object other) {
        if (other == this) return true;
        if ( ! (other instanceof AllocatedHosts)) return false;
        return ((AllocatedHosts) other).hosts.equals(this.hosts);
    }

    @Override
    public int hashCode() { return hosts.hashCode(); }

    @Override
    public String toString() { return hosts.toString(); }

}
Missing a word there — "wait", I suppose.
public static void logAndDie(String message, Throwable thrown, boolean dumpThreads) { boolean shutDownInProgress = alreadyShuttingDown.getAndSet(true); try { if (thrown != null) { log.log(Level.SEVERE, message, thrown); } else { log.log(Level.SEVERE, message); } log.log(Level.INFO, "About to shut down."); if (dumpThreads) { dumpThreads(); } } finally { if ( ! shutDownInProgress ) { try { Runtime.getRuntime().halt(1); } catch (Throwable t) { log.log(Level.SEVERE, "Runtime.halt rejected. Throwing an error."); throw new ShutdownError("Shutdown requested, but failed to shut down"); } } else { log.log(Level.WARNING, "Shutdown already in progress. Will just till we die then."); } } }
log.log(Level.WARNING, "Shutdown already in progress. Will just till we die then.");
public static void logAndDie(String message, Throwable thrown, boolean dumpThreads) { boolean shutDownInProgress = alreadyShuttingDown.getAndSet(true); try { if (thrown != null) { log.log(Level.SEVERE, message, thrown); } else { log.log(Level.SEVERE, message); } log.log(Level.INFO, "About to shut down."); if (dumpThreads) { dumpThreads(); } } finally { if ( ! shutDownInProgress ) { try { Runtime.getRuntime().halt(1); } catch (Throwable t) { log.log(Level.SEVERE, "Runtime.halt rejected. Throwing an error."); throw new ShutdownError("Shutdown requested, but failed to shut down"); } } else { log.log(Level.WARNING, "Shutdown already in progress. Will just let death come upon us normally."); } } }
/**
 * Utility for logging a fatal condition and forcibly terminating the JVM,
 * optionally dumping all thread stacks first. All members are static.
 */
class Process {

    // Ensures only the first logAndDie caller attempts halt(1); later callers just log.
    private static final AtomicBoolean alreadyShuttingDown = new AtomicBoolean(false);
    // Ensures at most one thread dump runs at a time.
    private static final AtomicBoolean busyDumpingThreads = new AtomicBoolean(false);
    private static final Logger log = Logger.getLogger(Process.class.getName());

    /** Die with a message, without dumping thread state */
    public static void logAndDie(String message) { logAndDie(message, null); }

    /** Die with a message, optionally dumping thread state */
    public static void logAndDie(String message, boolean dumpThreads) { logAndDie(message, null, dumpThreads); }

    /** Die with a message containing an exception, without dumping thread state */
    public static void logAndDie(String message, Throwable thrown) { logAndDie(message, thrown, false); }

    /**
     * Log message as severe error, then forcibly exit runtime, without running
     * exit handlers or otherwise waiting for cleanup.
     *
     * @param message message to log before exit
     * @param thrown the throwable that caused the application to exit.
     * @param dumpThreads if true the stack trace of all threads is dumped to the
     *                    log with level info before shutting down
     */
    // Fix: this overload was documented above and called by the 1- and 2-arg
    // overloads, but was missing from the class, so it could not compile.
    public static void logAndDie(String message, Throwable thrown, boolean dumpThreads) {
        boolean shutDownInProgress = alreadyShuttingDown.getAndSet(true);
        try {
            if (thrown != null) {
                log.log(Level.SEVERE, message, thrown);
            } else {
                log.log(Level.SEVERE, message);
            }
            log.log(Level.INFO, "About to shut down.");
            if (dumpThreads) {
                dumpThreads();
            }
        } finally {
            if ( ! shutDownInProgress ) {
                try {
                    Runtime.getRuntime().halt(1);
                } catch (Throwable t) {
                    log.log(Level.SEVERE, "Runtime.halt rejected. Throwing an error.");
                    throw new ShutdownError("Shutdown requested, but failed to shut down");
                }
            } else {
                log.log(Level.WARNING, "Shutdown already in progress. Will just wait till we die then.");
            }
        }
    }

    /** Logs a full stack trace of every live thread, unless a dump is already in progress. */
    public static void dumpThreads() {
        boolean alreadyDumpingThreads = busyDumpingThreads.getAndSet(true);
        if ( ! alreadyDumpingThreads ) {
            try {
                log.log(Level.INFO, "Commencing full thread dump for diagnosis.");
                Map<Thread, StackTraceElement[]> allStackTraces = Thread.getAllStackTraces();
                for (Map.Entry<Thread, StackTraceElement[]> e : allStackTraces.entrySet()) {
                    Thread t = e.getKey();
                    StackTraceElement[] stack = e.getValue();
                    StringBuilder forOneThread = new StringBuilder();
                    forOneThread.append("Stack for thread: ").append(t.getName()).append(": ");
                    for (StackTraceElement s : stack) {
                        forOneThread.append('\n').append(s.toString());
                    }
                    log.log(Level.INFO, forOneThread.toString());
                }
                log.log(Level.INFO, "End of diagnostic thread dump.");
            } catch (Exception e) {
                // Dumping is best effort, but don't swallow the failure silently.
                log.log(Level.WARNING, "Failed to dump thread state", e);
            } finally {
                busyDumpingThreads.set(false); // always release the guard
            }
        } else {
            log.log(Level.WARNING, "Thread dump already in progress. Skipping it.");
        }
    }

    @SuppressWarnings("serial")
    public static class ShutdownError extends Error {
        public ShutdownError(String message) {
            super(message);
        }
    }

}
/**
 * Utility for logging a fatal condition and forcibly terminating the JVM,
 * optionally dumping all thread stacks first. All members are static.
 */
class Process {

    // Ensures only the first logAndDie caller attempts halt(1); later callers just log.
    private static final AtomicBoolean alreadyShuttingDown = new AtomicBoolean(false);
    // Ensures at most one thread dump runs at a time.
    private static final AtomicBoolean busyDumpingThreads = new AtomicBoolean(false);
    private static final Logger log = Logger.getLogger(Process.class.getName());

    /** Die with a message, without dumping thread state */
    public static void logAndDie(String message) { logAndDie(message, null); }

    /** Die with a message, optionally dumping thread state */
    public static void logAndDie(String message, boolean dumpThreads) { logAndDie(message, null, dumpThreads); }

    /** Die with a message containing an exception, without dumping thread state */
    public static void logAndDie(String message, Throwable thrown) { logAndDie(message, thrown, false); }

    /**
     * Log message as severe error, then forcibly exit runtime, without running
     * exit handlers or otherwise waiting for cleanup.
     *
     * @param message message to log before exit
     * @param thrown the throwable that caused the application to exit.
     * @param dumpThreads if true the stack trace of all threads is dumped to the
     *                    log with level info before shutting down
     */
    // Fix: this overload was documented above and called by the 1- and 2-arg
    // overloads, but was missing from the class, so it could not compile.
    public static void logAndDie(String message, Throwable thrown, boolean dumpThreads) {
        boolean shutDownInProgress = alreadyShuttingDown.getAndSet(true);
        try {
            if (thrown != null) {
                log.log(Level.SEVERE, message, thrown);
            } else {
                log.log(Level.SEVERE, message);
            }
            log.log(Level.INFO, "About to shut down.");
            if (dumpThreads) {
                dumpThreads();
            }
        } finally {
            if ( ! shutDownInProgress ) {
                try {
                    Runtime.getRuntime().halt(1);
                } catch (Throwable t) {
                    log.log(Level.SEVERE, "Runtime.halt rejected. Throwing an error.");
                    throw new ShutdownError("Shutdown requested, but failed to shut down");
                }
            } else {
                log.log(Level.WARNING, "Shutdown already in progress. Will just let death come upon us normally.");
            }
        }
    }

    /** Logs a full stack trace of every live thread, unless a dump is already in progress. */
    public static void dumpThreads() {
        boolean alreadyDumpingThreads = busyDumpingThreads.getAndSet(true);
        if ( ! alreadyDumpingThreads ) {
            try {
                log.log(Level.INFO, "Commencing full thread dump for diagnosis.");
                Map<Thread, StackTraceElement[]> allStackTraces = Thread.getAllStackTraces();
                for (Map.Entry<Thread, StackTraceElement[]> e : allStackTraces.entrySet()) {
                    Thread t = e.getKey();
                    StackTraceElement[] stack = e.getValue();
                    StringBuilder forOneThread = new StringBuilder();
                    forOneThread.append("Stack for thread: ").append(t.getName()).append(": ");
                    for (StackTraceElement s : stack) {
                        forOneThread.append('\n').append(s.toString());
                    }
                    log.log(Level.INFO, forOneThread.toString());
                }
                log.log(Level.INFO, "End of diagnostic thread dump.");
            } catch (Exception e) {
                // Dumping is best effort, but don't swallow the failure silently.
                log.log(Level.WARNING, "Failed to dump thread state", e);
            } finally {
                busyDumpingThreads.set(false); // always release the guard
            }
        } else {
            log.log(Level.WARNING, "Thread dump already in progress. Skipping it.");
        }
    }

    @SuppressWarnings("serial")
    public static class ShutdownError extends Error {
        public ShutdownError(String message) {
            super(message);
        }
    }

}
Consider adding a factory function `NodeEvent.forBucketSpace` to avoid overloading confusion
private static NodeEvent createNodeEvent(NodeInfo nodeInfo, String description, PerStateParams params) { if (params.bucketSpace.isPresent()) { return new NodeEvent(nodeInfo, params.bucketSpace.get(), description, NodeEvent.Type.CURRENT, params.currentTime); } else { return new NodeEvent(nodeInfo, description, NodeEvent.Type.CURRENT, params.currentTime); } }
return new NodeEvent(nodeInfo, params.bucketSpace.get(), description, NodeEvent.Type.CURRENT, params.currentTime);
private static NodeEvent createNodeEvent(NodeInfo nodeInfo, String description, PerStateParams params) { if (params.bucketSpace.isPresent()) { return NodeEvent.forBucketSpace(nodeInfo, params.bucketSpace.get(), description, NodeEvent.Type.CURRENT, params.currentTime); } else { return NodeEvent.forBaseline(nodeInfo, description, NodeEvent.Type.CURRENT, params.currentTime); } }
class PerStateParams { final ContentCluster cluster; final Optional<String> bucketSpace; final AnnotatedClusterState fromState; final AnnotatedClusterState toState; final long currentTime; PerStateParams(ContentCluster cluster, Optional<String> bucketSpace, AnnotatedClusterState fromState, AnnotatedClusterState toState, long currentTime) { this.cluster = cluster; this.bucketSpace = bucketSpace; this.fromState = fromState; this.toState = toState; this.currentTime = currentTime; } }
class PerStateParams { final ContentCluster cluster; final Optional<String> bucketSpace; final AnnotatedClusterState fromState; final AnnotatedClusterState toState; final long currentTime; PerStateParams(ContentCluster cluster, Optional<String> bucketSpace, AnnotatedClusterState fromState, AnnotatedClusterState toState, long currentTime) { this.cluster = cluster; this.bucketSpace = bucketSpace; this.fromState = fromState; this.toState = toState; this.currentTime = currentTime; } }
Consider adding a factory function `NodeEvent.forBaseline` to avoid overloading confusion
private static NodeEvent createNodeEvent(NodeInfo nodeInfo, String description, PerStateParams params) { if (params.bucketSpace.isPresent()) { return new NodeEvent(nodeInfo, params.bucketSpace.get(), description, NodeEvent.Type.CURRENT, params.currentTime); } else { return new NodeEvent(nodeInfo, description, NodeEvent.Type.CURRENT, params.currentTime); } }
return new NodeEvent(nodeInfo, description, NodeEvent.Type.CURRENT, params.currentTime);
private static NodeEvent createNodeEvent(NodeInfo nodeInfo, String description, PerStateParams params) { if (params.bucketSpace.isPresent()) { return NodeEvent.forBucketSpace(nodeInfo, params.bucketSpace.get(), description, NodeEvent.Type.CURRENT, params.currentTime); } else { return NodeEvent.forBaseline(nodeInfo, description, NodeEvent.Type.CURRENT, params.currentTime); } }
class PerStateParams { final ContentCluster cluster; final Optional<String> bucketSpace; final AnnotatedClusterState fromState; final AnnotatedClusterState toState; final long currentTime; PerStateParams(ContentCluster cluster, Optional<String> bucketSpace, AnnotatedClusterState fromState, AnnotatedClusterState toState, long currentTime) { this.cluster = cluster; this.bucketSpace = bucketSpace; this.fromState = fromState; this.toState = toState; this.currentTime = currentTime; } }
class PerStateParams { final ContentCluster cluster; final Optional<String> bucketSpace; final AnnotatedClusterState fromState; final AnnotatedClusterState toState; final long currentTime; PerStateParams(ContentCluster cluster, Optional<String> bucketSpace, AnnotatedClusterState fromState, AnnotatedClusterState toState, long currentTime) { this.cluster = cluster; this.bucketSpace = bucketSpace; this.fromState = fromState; this.toState = toState; this.currentTime = currentTime; } }
Bit unsure if we have any cases where we'd want `similarTo` rather than `equals` here, but let's keep it like this until proven otherwise. Derived states should generally be either exactly equal to the baseline state or with a subset of nodes in another state, so shouldn't be a need for an explicit similarity check.
private static boolean shouldConsiderDerivedStates(Params params, AnnotatedClusterState fromDerivedState, AnnotatedClusterState toDerivedState) { return (!fromDerivedState.getClusterState().equals(params.fromState.getBaselineClusterState())) || (!toDerivedState.getClusterState().equals(params.toState.getBaselineClusterState())); }
(!toDerivedState.getClusterState().equals(params.toState.getBaselineClusterState()));
private static boolean shouldConsiderDerivedStates(Params params, AnnotatedClusterState fromDerivedState, AnnotatedClusterState toDerivedState) { return (!fromDerivedState.getClusterState().equals(params.fromState.getBaselineClusterState())) || (!toDerivedState.getClusterState().equals(params.toState.getBaselineClusterState())); }
class PerStateParams { final ContentCluster cluster; final Optional<String> bucketSpace; final AnnotatedClusterState fromState; final AnnotatedClusterState toState; final long currentTime; PerStateParams(ContentCluster cluster, Optional<String> bucketSpace, AnnotatedClusterState fromState, AnnotatedClusterState toState, long currentTime) { this.cluster = cluster; this.bucketSpace = bucketSpace; this.fromState = fromState; this.toState = toState; this.currentTime = currentTime; } }
class PerStateParams { final ContentCluster cluster; final Optional<String> bucketSpace; final AnnotatedClusterState fromState; final AnnotatedClusterState toState; final long currentTime; PerStateParams(ContentCluster cluster, Optional<String> bucketSpace, AnnotatedClusterState fromState, AnnotatedClusterState toState, long currentTime) { this.cluster = cluster; this.bucketSpace = bucketSpace; this.fromState = fromState; this.toState = toState; this.currentTime = currentTime; } }
Can remove this TODO now 🙂👌
private boolean recomputeClusterStateIfRequired() { boolean stateWasChanged = false; if (mustRecomputeCandidateClusterState()) { stateChangeHandler.unsetStateChangedFlag(); final AnnotatedClusterState candidate = computeCurrentAnnotatedState(); final ClusterStateBundle candidateBundle = ClusterStateBundle.builder(candidate) .bucketSpaces(configuredBucketSpaces) .stateDeriver(createBucketSpaceStateDeriver()) .deriveAndBuild(); stateVersionTracker.updateLatestCandidateStateBundle(candidateBundle); if (stateVersionTracker.candidateChangedEnoughFromCurrentToWarrantPublish() || stateVersionTracker.hasReceivedNewVersionFromZooKeeper()) { final long timeNowMs = timer.getCurrentTimeInMillis(); final ClusterStateBundle before = stateVersionTracker.getVersionedClusterStateBundle(); stateVersionTracker.promoteCandidateToVersionedState(timeNowMs); emitEventsForAlteredStateEdges(before, stateVersionTracker.getVersionedClusterStateBundle(), timeNowMs); handleNewSystemState(stateVersionTracker.getVersionedClusterStateBundle()); stateWasChanged = true; } } /* * This works transparently for tasks that end up changing the current cluster state (i.e. * requiring a new state to be published) and for those whose changes are no-ops (because * the changes they request are already part of the current state). In the former case the * tasks will depend on the version that was generated based upon them. In the latter case * the tasks will depend on the version that is already published (or in the process of * being published). */ scheduleVersionDependentTasksForFutureCompletion(stateVersionTracker.getCurrentVersion()); return stateWasChanged; }
private boolean recomputeClusterStateIfRequired() { boolean stateWasChanged = false; if (mustRecomputeCandidateClusterState()) { stateChangeHandler.unsetStateChangedFlag(); final AnnotatedClusterState candidate = computeCurrentAnnotatedState(); final ClusterStateBundle candidateBundle = ClusterStateBundle.builder(candidate) .bucketSpaces(configuredBucketSpaces) .stateDeriver(createBucketSpaceStateDeriver()) .deriveAndBuild(); stateVersionTracker.updateLatestCandidateStateBundle(candidateBundle); if (stateVersionTracker.candidateChangedEnoughFromCurrentToWarrantPublish() || stateVersionTracker.hasReceivedNewVersionFromZooKeeper()) { final long timeNowMs = timer.getCurrentTimeInMillis(); final ClusterStateBundle before = stateVersionTracker.getVersionedClusterStateBundle(); stateVersionTracker.promoteCandidateToVersionedState(timeNowMs); emitEventsForAlteredStateEdges(before, stateVersionTracker.getVersionedClusterStateBundle(), timeNowMs); handleNewSystemState(stateVersionTracker.getVersionedClusterStateBundle()); stateWasChanged = true; } } /* * This works transparently for tasks that end up changing the current cluster state (i.e. * requiring a new state to be published) and for those whose changes are no-ops (because * the changes they request are already part of the current state). In the former case the * tasks will depend on the version that was generated based upon them. In the latter case * the tasks will depend on the version that is already published (or in the process of * being published). */ scheduleVersionDependentTasksForFutureCompletion(stateVersionTracker.getCurrentVersion()); return stateWasChanged; }
class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAddedOrRemovedListener, SystemStateListener, Runnable, RemoteClusterControllerTaskScheduler { private static Logger log = Logger.getLogger(FleetController.class.getName()); private final Timer timer; private final Object monitor; private final EventLog eventLog; private final NodeLookup nodeLookup; private final ContentCluster cluster; private final Communicator communicator; private final NodeStateGatherer stateGatherer; private final StateChangeHandler stateChangeHandler; private final SystemStateBroadcaster systemStateBroadcaster; private final StateVersionTracker stateVersionTracker; private final StatusPageServerInterface statusPageServer; private final RpcServer rpcServer; private final DatabaseHandler database; private final MasterElectionHandler masterElectionHandler; private Thread runner = null; private AtomicBoolean running = new AtomicBoolean(true); private FleetControllerOptions options; private FleetControllerOptions nextOptions; private final List<SystemStateListener> systemStateListeners = new LinkedList<>(); private boolean processingCycle = false; private boolean wantedStateChanged = false; private long cycleCount = 0; private long nextStateSendTime = 0; private Long controllerThreadId = null; private boolean waitingForCycle = false; private StatusPageServer.PatternRequestRouter statusRequestRouter = new StatusPageServer.PatternRequestRouter(); private final List<ClusterStateBundle> newStates = new ArrayList<>(); private long configGeneration = -1; private long nextConfigGeneration = -1; private Queue<RemoteClusterControllerTask> remoteTasks = new LinkedList<>(); private final MetricUpdater metricUpdater; private boolean isMaster = false; private boolean isStateGatherer = false; private long firstAllowedStateBroadcast = Long.MAX_VALUE; private long tickStartTime = Long.MAX_VALUE; private List<RemoteClusterControllerTask> tasksPendingStateRecompute = new ArrayList<>(); private 
Queue<VersionDependentTaskCompletion> taskCompletionQueue = new ArrayDeque<>(); private Set<String> configuredBucketSpaces = Collections.emptySet(); private final RunDataExtractor dataExtractor = new RunDataExtractor() { @Override public com.yahoo.vdslib.state.ClusterState getLatestClusterState() { return stateVersionTracker.getVersionedClusterState(); } @Override public FleetControllerOptions getOptions() { return options; } @Override public long getConfigGeneration() { return configGeneration; } @Override public ContentCluster getCluster() { return cluster; } }; public FleetController(Timer timer, EventLog eventLog, ContentCluster cluster, NodeStateGatherer nodeStateGatherer, Communicator communicator, StatusPageServerInterface statusPage, RpcServer server, NodeLookup nodeLookup, DatabaseHandler database, StateChangeHandler stateChangeHandler, SystemStateBroadcaster systemStateBroadcaster, MasterElectionHandler masterElectionHandler, MetricUpdater metricUpdater, FleetControllerOptions options) throws Exception { log.info("Starting up cluster controller " + options.fleetControllerIndex + " for cluster " + cluster.getName()); this.timer = timer; this.monitor = timer; this.eventLog = eventLog; this.options = options; this.nodeLookup = nodeLookup; this.cluster = cluster; this.communicator = communicator; this.database = database; this.stateGatherer = nodeStateGatherer; this.stateChangeHandler = stateChangeHandler; this.systemStateBroadcaster = systemStateBroadcaster; this.stateVersionTracker = new StateVersionTracker(); this.metricUpdater = metricUpdater; this.statusPageServer = statusPage; this.rpcServer = server; this.masterElectionHandler = masterElectionHandler; this.statusRequestRouter.addHandler( "^/node=([a-z]+)\\.(\\d+)$", new LegacyNodePageRequestHandler(timer, eventLog, cluster)); this.statusRequestRouter.addHandler( "^/state.*", new NodeHealthRequestHandler(dataExtractor)); this.statusRequestRouter.addHandler( "^/clusterstate", new 
ClusterStateRequestHandler(stateVersionTracker)); this.statusRequestRouter.addHandler( "^/$", new LegacyIndexPageRequestHandler( timer, options.showLocalSystemStatesInEventLog, cluster, masterElectionHandler, stateVersionTracker, eventLog, timer.getCurrentTimeInMillis(), dataExtractor)); propagateOptions(); } public static FleetController createForContainer(FleetControllerOptions options, StatusPageServerInterface statusPageServer, MetricReporter metricReporter) throws Exception { Timer timer = new RealTimer(); return create(options, timer, statusPageServer, null, metricReporter); } public static FleetController createForStandAlone(FleetControllerOptions options) throws Exception { Timer timer = new RealTimer(); RpcServer rpcServer = new RpcServer(timer, timer, options.clusterName, options.fleetControllerIndex, options.slobrokBackOffPolicy); StatusPageServer statusPageServer = new StatusPageServer(timer, timer, options.httpPort); return create(options, timer, statusPageServer, rpcServer, new NoMetricReporter()); } private static FleetController create(FleetControllerOptions options, Timer timer, StatusPageServerInterface statusPageServer, RpcServer rpcServer, MetricReporter metricReporter) throws Exception { MetricUpdater metricUpdater = new MetricUpdater(metricReporter, options.fleetControllerIndex); EventLog log = new EventLog(timer, metricUpdater); ContentCluster cluster = new ContentCluster( options.clusterName, options.nodes, options.storageDistribution, options.minStorageNodesUp, options.minRatioOfStorageNodesUp); NodeStateGatherer stateGatherer = new NodeStateGatherer(timer, timer, log); Communicator communicator = new RPCCommunicator( RPCCommunicator.createRealSupervisor(), timer, options.fleetControllerIndex, options.nodeStateRequestTimeoutMS, options.nodeStateRequestTimeoutEarliestPercentage, options.nodeStateRequestTimeoutLatestPercentage, options.nodeStateRequestRoundTripTimeMaxSeconds); DatabaseHandler database = new DatabaseHandler(timer, 
options.zooKeeperServerAddress, options.fleetControllerIndex, timer); NodeLookup lookUp = new SlobrokClient(timer); StateChangeHandler stateGenerator = new StateChangeHandler(timer, log, metricUpdater); SystemStateBroadcaster stateBroadcaster = new SystemStateBroadcaster(timer, timer); MasterElectionHandler masterElectionHandler = new MasterElectionHandler(options.fleetControllerIndex, options.fleetControllerCount, timer, timer); FleetController controller = new FleetController( timer, log, cluster, stateGatherer, communicator, statusPageServer, rpcServer, lookUp, database, stateGenerator, stateBroadcaster, masterElectionHandler, metricUpdater, options); controller.start(); return controller; } public void start() { runner = new Thread(this); runner.start(); } public Object getMonitor() { return monitor; } public boolean isRunning() { return running.get(); } public boolean isMaster() { synchronized (monitor) { return masterElectionHandler.isMaster(); } } public ClusterState getClusterState() { synchronized (monitor) { return systemStateBroadcaster.getClusterState(); } } public void schedule(RemoteClusterControllerTask task) { synchronized (monitor) { log.fine("Scheduled remote task " + task.getClass().getName() + " for execution"); remoteTasks.add(task); } } /** Used for unit testing. 
*/ public void addSystemStateListener(SystemStateListener listener) { synchronized (systemStateListeners) { systemStateListeners.add(listener); com.yahoo.vdslib.state.ClusterState state = getSystemState(); if (state == null) throw new NullPointerException("Cluster state should never be null at this point"); listener.handleNewSystemState(ClusterStateBundle.ofBaselineOnly(AnnotatedClusterState.withoutAnnotations(state))); } } public FleetControllerOptions getOptions() { synchronized(monitor) { return options.clone(); } } public NodeState getReportedNodeState(Node n) { synchronized(monitor) { NodeInfo node = cluster.getNodeInfo(n); if (node == null) { throw new IllegalStateException("Did not find node " + n + " in cluster " + cluster); } return node.getReportedState(); } } public NodeState getWantedNodeState(Node n) { synchronized(monitor) { return cluster.getNodeInfo(n).getWantedState(); } } public com.yahoo.vdslib.state.ClusterState getSystemState() { synchronized(monitor) { return stateVersionTracker.getVersionedClusterState(); } } public int getHttpPort() { return statusPageServer.getPort(); } public int getRpcPort() { return rpcServer.getPort(); } public void shutdown() throws InterruptedException, java.io.IOException { if (runner != null && isRunning()) { log.log(LogLevel.INFO, "Joining event thread."); running.set(false); synchronized(monitor) { monitor.notifyAll(); } runner.join(); } log.log(LogLevel.INFO, "Fleetcontroller done shutting down event thread."); controllerThreadId = Thread.currentThread().getId(); database.shutdown(this); if (statusPageServer != null) { statusPageServer.shutdown(); } if (rpcServer != null) { rpcServer.shutdown(); } communicator.shutdown(); nodeLookup.shutdown(); } public void updateOptions(FleetControllerOptions options, long configGeneration) { synchronized(monitor) { assert(this.options.fleetControllerIndex == options.fleetControllerIndex); log.log(LogLevel.INFO, "Fleetcontroller " + options.fleetControllerIndex + " has new 
options"); nextOptions = options.clone(); nextConfigGeneration = configGeneration; monitor.notifyAll(); } } private void verifyInControllerThread() { if (controllerThreadId != null && controllerThreadId != Thread.currentThread().getId()) { throw new IllegalStateException("Function called from non-controller thread. Shouldn't happen."); } } private ClusterState latestCandidateClusterState() { return stateVersionTracker.getLatestCandidateState().getClusterState(); } @Override public void handleNewNodeState(NodeInfo node, NodeState newState) { verifyInControllerThread(); stateChangeHandler.handleNewReportedNodeState(latestCandidateClusterState(), node, newState, this); } @Override public void handleNewWantedNodeState(NodeInfo node, NodeState newState) { verifyInControllerThread(); wantedStateChanged = true; stateChangeHandler.proposeNewNodeState(stateVersionTracker.getVersionedClusterState(), node, newState); } @Override public void handleUpdatedHostInfo(NodeInfo nodeInfo, HostInfo newHostInfo) { verifyInControllerThread(); stateVersionTracker.handleUpdatedHostInfo(nodeInfo, newHostInfo); } @Override public void handleNewNode(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleNewNode(node); } @Override public void handleMissingNode(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleMissingNode(stateVersionTracker.getVersionedClusterState(), node, this); } @Override public void handleNewRpcAddress(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleNewRpcAddress(node); } @Override public void handleReturnedRpcAddress(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleReturnedRpcAddress(node); } @Override public void handleNewSystemState(ClusterStateBundle stateBundle) { verifyInControllerThread(); ClusterState baselineState = stateBundle.getBaselineClusterState(); newStates.add(stateBundle); metricUpdater.updateClusterStateMetrics(cluster, baselineState); 
systemStateBroadcaster.handleNewClusterStates(stateBundle);
        if (masterElectionHandler.isMaster()) {
            storeClusterStateVersionToZooKeeper(baselineState);
        }
    }

    // Persists the newest published state version so a later master can resume version numbering.
    private void storeClusterStateVersionToZooKeeper(ClusterState state) {
        try {
            database.saveLatestSystemStateVersion(databaseContext, state.getVersion());
        } catch (InterruptedException e) {
            // Interrupt here should only happen during shutdown; surface it as a runtime failure.
            throw new RuntimeException("ZooKeeper write interrupted", e);
        }
    }

    /**
     * This function gives data of the current state in master election.
     * The keys in the given map are indexes of fleet controllers.
     * The values are what fleetcontroller that fleetcontroller wants to
     * become master.
     *
     * If more than half the fleetcontrollers want a node to be master and
     * that node also wants itself as master, that node is the single master.
     * If this condition is not met, there is currently no master.
     */
    public void handleFleetData(Map<Integer, Integer> data) {
        verifyInControllerThread();
        log.log(LogLevel.SPAM, "Sending fleet data event on to master election handler");
        metricUpdater.updateMasterElectionMetrics(data);
        masterElectionHandler.handleFleetData(data);
    }

    /**
     * Called when we can no longer contact database.
     */
    public void lostDatabaseConnection() {
        verifyInControllerThread();
        masterElectionHandler.lostDatabaseConnection();
    }

    // Fails both tasks awaiting state recomputation and tasks awaiting version ACKs.
    // Used when mastership is lost or the controller shuts down.
    private void failAllVersionDependentTasks() {
        tasksPendingStateRecompute.forEach(task -> {
            task.handleFailure(RemoteClusterControllerTask.FailureCondition.LEADERSHIP_LOST);
            task.notifyCompleted();
        });
        tasksPendingStateRecompute.clear();
        taskCompletionQueue.forEach(task -> {
            task.getTask().handleFailure(RemoteClusterControllerTask.FailureCondition.LEADERSHIP_LOST);
            task.getTask().notifyCompleted();
        });
        taskCompletionQueue.clear();
    }

    /** Called when all distributors have acked newest cluster state version. */
    public void handleAllDistributorsInSync(DatabaseHandler database, DatabaseHandler.Context context) throws InterruptedException {
        Set<ConfiguredNode> nodes = new HashSet<>(cluster.clusterInfo().getConfiguredNodes().values());
        ClusterState currentState = stateVersionTracker.getVersionedClusterState();
        log.fine(() -> String.format("All distributors have ACKed cluster state version %d", currentState.getVersion()));
        stateChangeHandler.handleAllDistributorsInSync(currentState, nodes, database, context);
    }

    // True iff the new node set differs from the currently configured set, either in
    // membership or in the retired flag of any node.
    private boolean changesConfiguredNodeSet(Collection<ConfiguredNode> newNodes) {
        if (newNodes.size() != cluster.getConfiguredNodes().size()) return true;
        if (! cluster.getConfiguredNodes().values().containsAll(newNodes)) return true;
        for (ConfiguredNode node : newNodes) {
            if (node.retired() != cluster.getConfiguredNodes().get(node.index()).retired()) {
                return true;
            }
        }
        return false;
    }

    /** This is called when the options field has been set to a new set of options */
    private void propagateOptions() throws java.io.IOException, ListenFailedException {
        verifyInControllerThread();

        if (changesConfiguredNodeSet(options.nodes)) {
            // Force slobrok re-registration check when the configured node set changed.
            cluster.setSlobrokGenerationCount(0);
        }

        if (options.enableMultipleBucketSpaces) {
            configuredBucketSpaces = Collections.unmodifiableSet(
                    Stream.of(FixedBucketSpaces.defaultSpace(), FixedBucketSpaces.globalSpace())
                          .collect(Collectors.toSet()));
        } else {
            configuredBucketSpaces = Collections.emptySet();
        }

        communicator.propagateOptions(options);

        if (nodeLookup instanceof SlobrokClient) {
            ((SlobrokClient) nodeLookup).setSlobrokConnectionSpecs(options.slobrokConnectionSpecs);
        }
        eventLog.setMaxSize(options.eventLogMaxSize, options.eventNodeLogMaxSize);
        cluster.setPollingFrequency(options.statePollingFrequency);
        cluster.setDistribution(options.storageDistribution);
        cluster.setNodes(options.nodes);
        cluster.setMinRatioOfStorageNodesUp(options.minRatioOfStorageNodesUp);
        cluster.setMinStorageNodesUp(options.minStorageNodesUp);
database.setZooKeeperAddress(options.zooKeeperServerAddress);
        database.setZooKeeperSessionTimeout(options.zooKeeperSessionTimeout);
        stateGatherer.setMaxSlobrokDisconnectGracePeriod(options.maxSlobrokDisconnectGracePeriod);
        stateGatherer.setNodeStateRequestTimeout(options.nodeStateRequestTimeoutMS);

        stateChangeHandler.reconfigureFromOptions(options);
        stateChangeHandler.setStateChangedFlag();

        masterElectionHandler.setFleetControllerCount(options.fleetControllerCount);
        masterElectionHandler.setMasterZooKeeperCooldownPeriod(options.masterZooKeeperCooldownPeriod);

        if (rpcServer != null) {
            rpcServer.setMasterElectionHandler(masterElectionHandler);
            try {
                rpcServer.setSlobrokConnectionSpecs(options.slobrokConnectionSpecs, options.rpcPort);
            } catch (ListenFailedException e) {
                // Best-effort: failing to rebind is tolerated since the service layout may have changed.
                log.log(LogLevel.WARNING, "Failed to bind RPC server to port " + options.rpcPort +". This may be natural if cluster has altered the services running on this node: " + e.getMessage());
            } catch (Exception e) {
                log.log(LogLevel.WARNING, "Failed to initialize RPC server socket: " + e.getMessage());
            }
        }

        if (statusPageServer != null) {
            try {
                statusPageServer.setPort(options.httpPort);
            } catch (Exception e) {
                // Best-effort, same rationale as for the RPC port above.
                log.log(LogLevel.WARNING, "Failed to initialize status server socket. This may be natural if cluster has altered the services running on this node: " + e.getMessage());
            }
        }

        long currentTime = timer.getCurrentTimeInMillis();
        // Never push the next broadcast further out because of a reconfiguration.
        nextStateSendTime = Math.min(currentTime + options.minTimeBetweenNewSystemStates, nextStateSendTime);
        configGeneration = nextConfigGeneration;
        nextConfigGeneration = -1;
    }

    // Resolves and runs the status page handler for the request. Unknown paths yield 404;
    // handler failures yield 500 with the stack trace hidden in the page footer.
    public StatusPageResponse fetchStatusPage(StatusPageServer.HttpRequest httpRequest) {
        verifyInControllerThread();
        StatusPageResponse.ResponseCode responseCode;
        String message;
        String hiddenMessage = "";
        try {
            StatusPageServer.RequestHandler handler = statusRequestRouter.resolveHandler(httpRequest);
            if (handler == null) {
                throw new FileNotFoundException("No handler found for request: " + httpRequest.getPath());
            }
            return handler.handle(httpRequest);
        } catch (FileNotFoundException e) {
            responseCode = StatusPageResponse.ResponseCode.NOT_FOUND;
            message = e.getMessage();
        } catch (Exception e) {
            responseCode = StatusPageResponse.ResponseCode.INTERNAL_SERVER_ERROR;
            message = "Internal Server Error";
            hiddenMessage = ExceptionUtils.getStackTrace(e);
            log.log(LogLevel.DEBUG, "Unknown exception thrown for request " + httpRequest.getRequest() + ": " + hiddenMessage);
        }

        // Fall-through: build an error page for the failure captured above.
        TimeZone tz = TimeZone.getTimeZone("UTC");
        long currentTime = timer.getCurrentTimeInMillis();
        StatusPageResponse response = new StatusPageResponse();
        StringBuilder content = new StringBuilder();
        response.setContentType("text/html");
        response.setResponseCode(responseCode);
        content.append("<!-- Answer to request " + httpRequest.getRequest() + " -->\n");
        content.append("<p>UTC time when creating this page: ").append(RealTimer.printDateNoMilliSeconds(currentTime, tz)).append("</p>");
        response.writeHtmlHeader(content, message);
        response.writeHtmlFooter(content, hiddenMessage);
        response.writeContent(content.toString());
        return response;
    }

    // One iteration of the controller event loop. Runs every sub-task in a fixed order
    // under the monitor lock; "didWork" decides whether to sleep before the next tick.
    public void tick() throws Exception {
        synchronized (monitor) {
            boolean didWork;
            didWork = database.doNextZooKeeperTask(databaseContext);
            didWork |=
updateMasterElectionState();
            didWork |= handleLeadershipEdgeTransitions();
            stateChangeHandler.setMaster(isMaster);

            if ( ! isRunning()) { return; }
            didWork |= stateGatherer.processResponses(this);
            if ( ! isRunning()) { return; }
            if (masterElectionHandler.isAmongNthFirst(options.stateGatherCount)) {
                didWork |= resyncLocallyCachedState();
            } else {
                stepDownAsStateGatherer();
            }
            if ( ! isRunning()) { return; }
            didWork |= systemStateBroadcaster.processResponses();
            if ( ! isRunning()) { return; }
            if (masterElectionHandler.isMaster()) {
                didWork |= broadcastClusterStateToEligibleNodes();
                systemStateBroadcaster.checkIfClusterStateIsAckedByAllDistributors(database, databaseContext, this);
            }
            if ( ! isRunning()) { return; }
            didWork |= processAnyPendingStatusPageRequest();
            if ( ! isRunning()) { return; }
            if (rpcServer != null) {
                didWork |= rpcServer.handleRpcRequests(cluster, consolidatedClusterState(), this, this);
            }
            if ( ! isRunning()) { return; }
            didWork |= processNextQueuedRemoteTask();
            didWork |= completeSatisfiedVersionDependentTasks();

            processingCycle = false;
            ++cycleCount;
            long tickStopTime = timer.getCurrentTimeInMillis();
            if (tickStopTime >= tickStartTime) {
                metricUpdater.addTickTime(tickStopTime - tickStartTime, didWork);
            }
            // Only sleep when the tick did no work and nobody is blocked in waitForCompleteCycle().
            if ( ! didWork && ! waitingForCycle) {
                monitor.wait(options.cycleWaitTime);
            }
            if ( ! isRunning()) { return; }
            tickStartTime = timer.getCurrentTimeInMillis();
            processingCycle = true;
            if (nextOptions != null) {
                switchToNewConfig();
            }
        }
        // Listener callbacks are made outside the monitor lock on purpose.
        if (isRunning()) {
            propagateNewStatesToListeners();
        }
    }

    // Polls master election state. Interrupts are rethrown (with cause preserved) so shutdown
    // propagates; any other failure is logged and treated as "no work done".
    private boolean updateMasterElectionState() throws InterruptedException {
        try {
            return masterElectionHandler.watchMasterElection(database, databaseContext);
        } catch (InterruptedException e) {
            throw (InterruptedException) new InterruptedException("Interrupted").initCause(e);
        } catch (Exception e) {
            log.log(LogLevel.WARNING, "Failed to watch master election: " + e.toString());
        }
        return false;
    }

    // Clears gathered node states when this node falls out of the state-gatherer candidate set.
    private void stepDownAsStateGatherer() {
        if (isStateGatherer) {
            cluster.clearStates();
            eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer a node state gatherer.", timer.getCurrentTimeInMillis()));
        }
        isStateGatherer = false;
    }

    // Installs nextOptions as the active options and propagates them to all sub-components.
    private void switchToNewConfig() {
        options = nextOptions;
        nextOptions = null;
        try {
            propagateOptions();
        } catch (Exception e) {
            log.log(LogLevel.ERROR, "Failed to handle new fleet controller config", e);
        }
    }

    // Answers at most one queued status page request per tick; returns whether one was handled.
    private boolean processAnyPendingStatusPageRequest() {
        if (statusPageServer != null) {
            StatusPageServer.HttpRequest statusRequest = statusPageServer.getCurrentHttpRequest();
            if (statusRequest != null) {
                statusPageServer.answerCurrentStatusRequest(fetchStatusPage(statusRequest));
                return true;
            }
        }
        return false;
    }

    // Broadcasts the newest state, rate-limited by nextStateSendTime and held back until
    // either the grace period has expired or all nodes have reported their state.
    private boolean broadcastClusterStateToEligibleNodes() {
        boolean sentAny = false;
        long currentTime = timer.getCurrentTimeInMillis();
        if ((currentTime >= firstAllowedStateBroadcast || cluster.allStatesReported())
            && currentTime >= nextStateSendTime)
        {
            if (currentTime < firstAllowedStateBroadcast) {
                log.log(LogLevel.DEBUG, "Not set to broadcast states just yet, but as we have gotten info from all nodes we can do so safely.");
                firstAllowedStateBroadcast = currentTime;
            }
            sentAny = systemStateBroadcaster.broadcastNewState(databaseContext, communicator);
            if (sentAny) {
                nextStateSendTime = currentTime +
options.minTimeBetweenNewSystemStates;
            }
        }
        return sentAny;
    }

    // Delivers queued state bundles to registered listeners, then clears the queue.
    private void propagateNewStatesToListeners() {
        if ( ! newStates.isEmpty()) {
            synchronized (systemStateListeners) {
                for (ClusterStateBundle stateBundle : newStates) {
                    for (SystemStateListener listener : systemStateListeners) {
                        listener.handleNewSystemState(stateBundle);
                    }
                }
                newStates.clear();
            }
        }
    }

    // Processes at most one queued remote (RPC-originated) task per tick. Tasks that depend
    // on a cluster state version ACK are parked until the next state recomputation.
    private boolean processNextQueuedRemoteTask() {
        if ( ! remoteTasks.isEmpty()) {
            final RemoteClusterControllerTask.Context context = createRemoteTaskProcessingContext();
            final RemoteClusterControllerTask task = remoteTasks.poll();
            log.finest(() -> String.format("Processing remote task of type '%s'", task.getClass().getName()));
            task.doRemoteFleetControllerTask(context);
            if (taskMayBeCompletedImmediately(task)) {
                log.finest(() -> String.format("Done processing remote task of type '%s'", task.getClass().getName()));
                task.notifyCompleted();
            } else {
                log.finest(() -> String.format("Remote task of type '%s' queued until state recomputation", task.getClass().getName()));
                tasksPendingStateRecompute.add(task);
            }
            return true;
        }
        return false;
    }

    // A task may complete immediately unless it needs a version ACK, did not fail,
    // and we are actually the master (only the master publishes states to ACK).
    private boolean taskMayBeCompletedImmediately(RemoteClusterControllerTask task) {
        return (!task.hasVersionAckDependency() || task.isFailed() || !masterElectionHandler.isMaster());
    }

    private RemoteClusterControllerTask.Context createRemoteTaskProcessingContext() {
        final RemoteClusterControllerTask.Context context = new RemoteClusterControllerTask.Context();
        context.cluster = cluster;
        context.currentState = consolidatedClusterState();
        context.masterInfo = masterElectionHandler;
        context.nodeStateOrHostInfoChangeHandler = this;
        context.nodeAddedOrRemovedListener = this;
        return context;
    }

    // Completes deferred tasks in FIFO order: a task completes successfully once its minimum
    // state version is ACKed by all distributors, or fails once its deadline has passed.
    private boolean completeSatisfiedVersionDependentTasks() {
        int publishedVersion = systemStateBroadcaster.lastClusterStateVersionInSync();
        long queueSizeBefore = taskCompletionQueue.size();
        final long now = timer.getCurrentTimeInMillis();
        while (!taskCompletionQueue.isEmpty()) {
            VersionDependentTaskCompletion taskCompletion = taskCompletionQueue.peek();
            if (publishedVersion >= taskCompletion.getMinimumVersion()) {
                log.fine(() -> String.format("Deferred task of type '%s' has minimum version %d, published is %d; completing",
                        taskCompletion.getTask().getClass().getName(), taskCompletion.getMinimumVersion(), publishedVersion));
                taskCompletion.getTask().notifyCompleted();
                taskCompletionQueue.remove();
            } else if (taskCompletion.getDeadlineTimePointMs() <= now) {
                log.fine(() -> String.format("Deferred task of type '%s' has exceeded wait deadline; completing with failure",
                        taskCompletion.getTask().getClass().getName()));
                taskCompletion.getTask().handleFailure(RemoteClusterControllerTask.FailureCondition.DEADLINE_EXCEEDED);
                taskCompletion.getTask().notifyCompleted();
                taskCompletionQueue.remove();
            } else {
                // Queue head not yet satisfiable; later entries have versions >= head's, so stop.
                break;
            }
        }
        return (taskCompletionQueue.size() != queueSizeBefore);
    }

    /**
     * A "consolidated" cluster state is guaranteed to have up-to-date information on which nodes are
     * up or down even when the whole cluster is down. The regular, published cluster state is not
     * normally updated to reflect node events when the cluster is down.
     */
    ClusterState consolidatedClusterState() {
        final ClusterState publishedState = stateVersionTracker.getVersionedClusterState();
        if (publishedState.getClusterState() == State.UP) {
            return publishedState;
        }
        // Cluster is down: overlay the latest candidate's node info on the published version number.
        final ClusterState current = stateVersionTracker.getLatestCandidateState().getClusterState().clone();
        current.setVersion(publishedState.getVersion());
        return current;
    }

    /*
    System test observations:
      - a node that stops normally (U -> S) then goes down erroneously triggers premature crash handling
      - long time before content node state convergence (though this seems to be the case for legacy impl as well)
     */
    // Refreshes locally cached cluster/node state from slobrok, node messages and timers.
    // Periodically (every 100th cycle) re-reads wanted states when not master (continues below).
    private boolean resyncLocallyCachedState() throws InterruptedException {
        boolean didWork = false;
        if ( !
isMaster && cycleCount % 100 == 0) {
            didWork = database.loadWantedStates(databaseContext);
            didWork |= database.loadStartTimestamps(cluster);
        }
        didWork |= nodeLookup.updateCluster(cluster, this);
        didWork |= stateGatherer.sendMessages(cluster, communicator, this);
        didWork |= stateChangeHandler.watchTimers(cluster, consolidatedClusterState(), this);
        didWork |= recomputeClusterStateIfRequired();
        if ( ! isStateGatherer) {
            if ( ! isMaster) {
                eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became node state gatherer as we are fleetcontroller master candidate.", timer.getCurrentTimeInMillis()));
                // Update versions to use so lazily initialized version trackers don't fall behind the master.
                stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion());
            }
        }
        isStateGatherer = true;
        return didWork;
    }

    private ClusterStateDeriver createBucketSpaceStateDeriver() {
        return new MaintenanceWhenPendingGlobalMerges(stateVersionTracker.createMergePendingChecker());
    }

    /**
     * Move tasks that are dependent on the most recently generated state being published into
     * a completion queue with a dependency on the provided version argument. Once that version
     * has been ACKed by all distributors in the system, those tasks will be marked as completed.
     */
    private void scheduleVersionDependentTasksForFutureCompletion(int completeAtVersion) {
        final long deadlineTimePointMs = timer.getCurrentTimeInMillis() + options.getMaxDeferredTaskVersionWaitTime().toMillis();
        for (RemoteClusterControllerTask task : tasksPendingStateRecompute) {
            log.finest(() -> String.format("Adding task of type '%s' to be completed at version %d",
                    task.getClass().getName(), completeAtVersion));
            taskCompletionQueue.add(new VersionDependentTaskCompletion(completeAtVersion, task, deadlineTimePointMs));
        }
        tasksPendingStateRecompute.clear();
    }

    private AnnotatedClusterState computeCurrentAnnotatedState() {
        ClusterStateGenerator.Params params = ClusterStateGenerator.Params.fromOptions(options);
        params.currentTimeInMilllis(timer.getCurrentTimeInMillis())
              .cluster(cluster)
              .lowestObservedDistributionBitCount(stateVersionTracker.getLowestObservedDistributionBits());
        return ClusterStateGenerator.generatedStateFrom(params);
    }

    // Emits per-node/per-cluster events describing the delta between two state bundles.
    private void emitEventsForAlteredStateEdges(final ClusterStateBundle fromState,
                                               final ClusterStateBundle toState,
                                               final long timeNowMs) {
        final List<Event> deltaEvents = EventDiffCalculator.computeEventDiff(
                EventDiffCalculator.params()
                        .cluster(cluster)
                        .fromState(fromState)
                        .toState(toState)
                        .currentTimeMs(timeNowMs));
        for (Event event : deltaEvents) {
            eventLog.add(event, isMaster);
        }
        emitStateAppliedEvents(timeNowMs, fromState.getBaselineClusterState(), toState.getBaselineClusterState());
    }

    private void emitStateAppliedEvents(long timeNowMs, ClusterState fromClusterState, ClusterState toClusterState) {
        eventLog.add(new ClusterEvent(
                ClusterEvent.Type.SYSTEMSTATE,
                "New cluster state version " + toClusterState.getVersion() + ". Change from last: " + fromClusterState.getTextualDifference(toClusterState),
                timeNowMs), isMaster);

        if (toClusterState.getDistributionBitCount() != fromClusterState.getDistributionBitCount()) {
            eventLog.add(new ClusterEvent(
                    ClusterEvent.Type.SYSTEMSTATE,
                    "Altering distribution bits in system from " + fromClusterState.getDistributionBitCount() +
                            " to " + toClusterState.getDistributionBitCount(),
                    timeNowMs), isMaster);
        }
    }

    private boolean mustRecomputeCandidateClusterState() {
        return stateChangeHandler.stateMayHaveChanged()
                || stateVersionTracker.hasReceivedNewVersionFromZooKeeper()
                || stateVersionTracker.bucketSpaceMergeCompletionStateHasChanged();
    }

    // Detects gained/lost mastership edges and (re)loads or fails state accordingly.
    // Also flushes pending wanted-state changes to ZooKeeper while master.
    private boolean handleLeadershipEdgeTransitions() throws InterruptedException {
        boolean didWork = false;
        if (masterElectionHandler.isMaster()) {
            if ( ! isMaster) {
                // Edge: just became master. Sync version and persisted state from ZooKeeper.
                metricUpdater.becameMaster();
                stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion());
                didWork = database.loadStartTimestamps(cluster);
                didWork |= database.loadWantedStates(databaseContext);
                eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became fleetcontroller master. Bumped version to "
                        + stateVersionTracker.getCurrentVersion() + " to be in line.", timer.getCurrentTimeInMillis()));
                long currentTime = timer.getCurrentTimeInMillis();
                firstAllowedStateBroadcast = currentTime + options.minTimeBeforeFirstSystemStateBroadcast;
                log.log(LogLevel.DEBUG, "At time " + currentTime + " we set first system state broadcast time to be "
                        + options.minTimeBeforeFirstSystemStateBroadcast + " ms after at time " + firstAllowedStateBroadcast + ".");
            }
            isMaster = true;
            if (wantedStateChanged) {
                database.saveWantedStates(databaseContext);
                wantedStateChanged = false;
            }
        } else {
            if (isMaster) {
                // Edge: just lost mastership. Stop broadcasting and fail deferred tasks.
                eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer fleetcontroller master.", timer.getCurrentTimeInMillis()));
                firstAllowedStateBroadcast = Long.MAX_VALUE;
                metricUpdater.noLongerMaster();
                failAllVersionDependentTasks();
            }
            wantedStateChanged = false;
            isMaster = false;
        }
        return didWork;
    }

    // Controller event loop entry point. Registers the thread id used by
    // verifyInControllerThread() and ticks until stopped.
    public void run() {
        controllerThreadId = Thread.currentThread().getId();
        try {
            processingCycle = true;
            while (isRunning()) {
                tick();
            }
        } catch (InterruptedException e) {
            // Interrupt is the normal shutdown signal for this thread.
            log.log(LogLevel.DEBUG, "Event thread stopped by interrupt exception: " + e);
        } catch (Throwable t) {
            // NOTE(review): printStackTrace in addition to the log call is redundant; the process
            // exits deliberately here since the controller cannot recover from an unknown failure.
            t.printStackTrace();
            log.log(LogLevel.ERROR, "Fatal error killed fleet controller", t);
            synchronized (monitor) { running.set(false); }
            System.exit(1);
        } finally {
            running.set(false);
            failAllVersionDependentTasks();
            synchronized (monitor) { monitor.notifyAll(); }
        }
    }

    // Adapter exposing controller internals to the database layer.
    public DatabaseHandler.Context databaseContext = new DatabaseHandler.Context() {
        @Override
        public ContentCluster getCluster() { return cluster; }
        @Override
        public FleetController getFleetController() { return FleetController.this; }
        @Override
        public NodeAddedOrRemovedListener getNodeAddedOrRemovedListener() { return FleetController.this; }
        @Override
        public NodeStateOrHostInfoChangeHandler getNodeStateUpdateListener() { return FleetController.this; }
    };

    // Blocks until at least one full tick cycle has completed after this call (continues below).
    public void waitForCompleteCycle(long timeoutMS) {
        long
endTime = System.currentTimeMillis() + timeoutMS;
        synchronized (monitor) {
            // If a cycle is in progress we need the next full cycle after it, hence +2.
            long wantedCycle = cycleCount + (processingCycle ? 2 : 1);
            waitingForCycle = true;
            try {
                while (cycleCount < wantedCycle) {
                    if (System.currentTimeMillis() > endTime) throw new IllegalStateException("Timed out waiting for cycle to complete. Not completed after " + timeoutMS + " ms.");
                    if ( !isRunning() ) throw new IllegalStateException("Fleetcontroller not running. Will never complete cycles");
                    // NOTE(review): interrupt is deliberately ignored here; loop re-checks timeout/running.
                    try { monitor.wait(100); } catch (InterruptedException e) {}
                }
            } finally {
                waitingForCycle = false;
            }
        }
    }

    /**
     * This function might not be 100% threadsafe, as in theory cluster can be changing while accessed.
     * But it is only used in unit tests that should not trigger any thread issues. Don't want to add locks that reduce
     * live performance to remove a non-problem.
     */
    public void waitForNodesHavingSystemStateVersionEqualToOrAbove(int version, int nodeCount, int timeout) throws InterruptedException {
        long maxTime = System.currentTimeMillis() + timeout;
        synchronized (monitor) {
            while (true) {
                int ackedNodes = 0;
                for (NodeInfo node : cluster.getNodeInfo()) {
                    if (node.getSystemStateVersionAcknowledged() >= version) {
                        ++ackedNodes;
                    }
                }
                if (ackedNodes >= nodeCount) {
                    log.log(LogLevel.INFO, ackedNodes + " nodes now have acked system state " + version + " or higher.");
                    return;
                }
                long remainingTime = maxTime - System.currentTimeMillis();
                if (remainingTime <= 0) {
                    throw new IllegalStateException("Did not get " + nodeCount + " nodes to system state " + version + " within timeout of " + timeout + " milliseconds.");
                }
                monitor.wait(10);
            }
        }
    }

    // Test helper: waits until the given number of distributors and storage nodes have
    // current (non-outdated) slobrok registrations, or throws after the timeout.
    public void waitForNodesInSlobrok(int distNodeCount, int storNodeCount, int timeoutMillis) throws InterruptedException {
        long maxTime = System.currentTimeMillis() + timeoutMillis;
        synchronized (monitor) {
            while (true) {
                int distCount = 0, storCount = 0;
                for (NodeInfo info : cluster.getNodeInfo()) {
                    if (!info.isRpcAddressOutdated()) {
                        if (info.isDistributor()) ++distCount;
                        else ++storCount;
                    }
                }
                if (distCount == distNodeCount && storCount == storNodeCount) return;

                long remainingTime = maxTime - System.currentTimeMillis();
                if (remainingTime <= 0) {
                    throw new IllegalStateException("Did not get all " + distNodeCount + " distributors and " + storNodeCount
                            + " storage nodes registered in slobrok within timeout of " + timeoutMillis + " ms. (Got "
                            + distCount + " distributors and " + storCount + " storage nodes)");
                }
                monitor.wait(10);
            }
        }
    }

    public boolean hasZookeeperConnection() { return !database.isClosed(); }

    // Test utility
    public int getSlobrokMirrorUpdates() { return ((SlobrokClient)nodeLookup).getMirror().updates(); }

    public ContentCluster getCluster() { return cluster; }

    public List<NodeEvent> getNodeEvents(Node n) { return eventLog.getNodeEvents(n); }

    public EventLog getEventLog() {
        return eventLog;
    }
}
class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAddedOrRemovedListener, SystemStateListener, Runnable, RemoteClusterControllerTaskScheduler { private static Logger log = Logger.getLogger(FleetController.class.getName()); private final Timer timer; private final Object monitor; private final EventLog eventLog; private final NodeLookup nodeLookup; private final ContentCluster cluster; private final Communicator communicator; private final NodeStateGatherer stateGatherer; private final StateChangeHandler stateChangeHandler; private final SystemStateBroadcaster systemStateBroadcaster; private final StateVersionTracker stateVersionTracker; private final StatusPageServerInterface statusPageServer; private final RpcServer rpcServer; private final DatabaseHandler database; private final MasterElectionHandler masterElectionHandler; private Thread runner = null; private AtomicBoolean running = new AtomicBoolean(true); private FleetControllerOptions options; private FleetControllerOptions nextOptions; private final List<SystemStateListener> systemStateListeners = new LinkedList<>(); private boolean processingCycle = false; private boolean wantedStateChanged = false; private long cycleCount = 0; private long nextStateSendTime = 0; private Long controllerThreadId = null; private boolean waitingForCycle = false; private StatusPageServer.PatternRequestRouter statusRequestRouter = new StatusPageServer.PatternRequestRouter(); private final List<ClusterStateBundle> newStates = new ArrayList<>(); private long configGeneration = -1; private long nextConfigGeneration = -1; private Queue<RemoteClusterControllerTask> remoteTasks = new LinkedList<>(); private final MetricUpdater metricUpdater; private boolean isMaster = false; private boolean isStateGatherer = false; private long firstAllowedStateBroadcast = Long.MAX_VALUE; private long tickStartTime = Long.MAX_VALUE; private List<RemoteClusterControllerTask> tasksPendingStateRecompute = new ArrayList<>(); private 
Queue<VersionDependentTaskCompletion> taskCompletionQueue = new ArrayDeque<>(); private Set<String> configuredBucketSpaces = Collections.emptySet(); private final RunDataExtractor dataExtractor = new RunDataExtractor() { @Override public com.yahoo.vdslib.state.ClusterState getLatestClusterState() { return stateVersionTracker.getVersionedClusterState(); } @Override public FleetControllerOptions getOptions() { return options; } @Override public long getConfigGeneration() { return configGeneration; } @Override public ContentCluster getCluster() { return cluster; } }; public FleetController(Timer timer, EventLog eventLog, ContentCluster cluster, NodeStateGatherer nodeStateGatherer, Communicator communicator, StatusPageServerInterface statusPage, RpcServer server, NodeLookup nodeLookup, DatabaseHandler database, StateChangeHandler stateChangeHandler, SystemStateBroadcaster systemStateBroadcaster, MasterElectionHandler masterElectionHandler, MetricUpdater metricUpdater, FleetControllerOptions options) throws Exception { log.info("Starting up cluster controller " + options.fleetControllerIndex + " for cluster " + cluster.getName()); this.timer = timer; this.monitor = timer; this.eventLog = eventLog; this.options = options; this.nodeLookup = nodeLookup; this.cluster = cluster; this.communicator = communicator; this.database = database; this.stateGatherer = nodeStateGatherer; this.stateChangeHandler = stateChangeHandler; this.systemStateBroadcaster = systemStateBroadcaster; this.stateVersionTracker = new StateVersionTracker(); this.metricUpdater = metricUpdater; this.statusPageServer = statusPage; this.rpcServer = server; this.masterElectionHandler = masterElectionHandler; this.statusRequestRouter.addHandler( "^/node=([a-z]+)\\.(\\d+)$", new LegacyNodePageRequestHandler(timer, eventLog, cluster)); this.statusRequestRouter.addHandler( "^/state.*", new NodeHealthRequestHandler(dataExtractor)); this.statusRequestRouter.addHandler( "^/clusterstate", new 
ClusterStateRequestHandler(stateVersionTracker)); this.statusRequestRouter.addHandler( "^/$", new LegacyIndexPageRequestHandler( timer, options.showLocalSystemStatesInEventLog, cluster, masterElectionHandler, stateVersionTracker, eventLog, timer.getCurrentTimeInMillis(), dataExtractor)); propagateOptions(); } public static FleetController createForContainer(FleetControllerOptions options, StatusPageServerInterface statusPageServer, MetricReporter metricReporter) throws Exception { Timer timer = new RealTimer(); return create(options, timer, statusPageServer, null, metricReporter); } public static FleetController createForStandAlone(FleetControllerOptions options) throws Exception { Timer timer = new RealTimer(); RpcServer rpcServer = new RpcServer(timer, timer, options.clusterName, options.fleetControllerIndex, options.slobrokBackOffPolicy); StatusPageServer statusPageServer = new StatusPageServer(timer, timer, options.httpPort); return create(options, timer, statusPageServer, rpcServer, new NoMetricReporter()); } private static FleetController create(FleetControllerOptions options, Timer timer, StatusPageServerInterface statusPageServer, RpcServer rpcServer, MetricReporter metricReporter) throws Exception { MetricUpdater metricUpdater = new MetricUpdater(metricReporter, options.fleetControllerIndex); EventLog log = new EventLog(timer, metricUpdater); ContentCluster cluster = new ContentCluster( options.clusterName, options.nodes, options.storageDistribution, options.minStorageNodesUp, options.minRatioOfStorageNodesUp); NodeStateGatherer stateGatherer = new NodeStateGatherer(timer, timer, log); Communicator communicator = new RPCCommunicator( RPCCommunicator.createRealSupervisor(), timer, options.fleetControllerIndex, options.nodeStateRequestTimeoutMS, options.nodeStateRequestTimeoutEarliestPercentage, options.nodeStateRequestTimeoutLatestPercentage, options.nodeStateRequestRoundTripTimeMaxSeconds); DatabaseHandler database = new DatabaseHandler(timer, 
options.zooKeeperServerAddress, options.fleetControllerIndex, timer); NodeLookup lookUp = new SlobrokClient(timer); StateChangeHandler stateGenerator = new StateChangeHandler(timer, log, metricUpdater); SystemStateBroadcaster stateBroadcaster = new SystemStateBroadcaster(timer, timer); MasterElectionHandler masterElectionHandler = new MasterElectionHandler(options.fleetControllerIndex, options.fleetControllerCount, timer, timer); FleetController controller = new FleetController( timer, log, cluster, stateGatherer, communicator, statusPageServer, rpcServer, lookUp, database, stateGenerator, stateBroadcaster, masterElectionHandler, metricUpdater, options); controller.start(); return controller; } public void start() { runner = new Thread(this); runner.start(); } public Object getMonitor() { return monitor; } public boolean isRunning() { return running.get(); } public boolean isMaster() { synchronized (monitor) { return masterElectionHandler.isMaster(); } } public ClusterState getClusterState() { synchronized (monitor) { return systemStateBroadcaster.getClusterState(); } } public void schedule(RemoteClusterControllerTask task) { synchronized (monitor) { log.fine("Scheduled remote task " + task.getClass().getName() + " for execution"); remoteTasks.add(task); } } /** Used for unit testing. 
*/ public void addSystemStateListener(SystemStateListener listener) { synchronized (systemStateListeners) { systemStateListeners.add(listener); com.yahoo.vdslib.state.ClusterState state = getSystemState(); if (state == null) throw new NullPointerException("Cluster state should never be null at this point"); listener.handleNewSystemState(ClusterStateBundle.ofBaselineOnly(AnnotatedClusterState.withoutAnnotations(state))); } } public FleetControllerOptions getOptions() { synchronized(monitor) { return options.clone(); } } public NodeState getReportedNodeState(Node n) { synchronized(monitor) { NodeInfo node = cluster.getNodeInfo(n); if (node == null) { throw new IllegalStateException("Did not find node " + n + " in cluster " + cluster); } return node.getReportedState(); } } public NodeState getWantedNodeState(Node n) { synchronized(monitor) { return cluster.getNodeInfo(n).getWantedState(); } } public com.yahoo.vdslib.state.ClusterState getSystemState() { synchronized(monitor) { return stateVersionTracker.getVersionedClusterState(); } } public int getHttpPort() { return statusPageServer.getPort(); } public int getRpcPort() { return rpcServer.getPort(); } public void shutdown() throws InterruptedException, java.io.IOException { if (runner != null && isRunning()) { log.log(LogLevel.INFO, "Joining event thread."); running.set(false); synchronized(monitor) { monitor.notifyAll(); } runner.join(); } log.log(LogLevel.INFO, "Fleetcontroller done shutting down event thread."); controllerThreadId = Thread.currentThread().getId(); database.shutdown(this); if (statusPageServer != null) { statusPageServer.shutdown(); } if (rpcServer != null) { rpcServer.shutdown(); } communicator.shutdown(); nodeLookup.shutdown(); } public void updateOptions(FleetControllerOptions options, long configGeneration) { synchronized(monitor) { assert(this.options.fleetControllerIndex == options.fleetControllerIndex); log.log(LogLevel.INFO, "Fleetcontroller " + options.fleetControllerIndex + " has new 
options"); nextOptions = options.clone(); nextConfigGeneration = configGeneration; monitor.notifyAll(); } } private void verifyInControllerThread() { if (controllerThreadId != null && controllerThreadId != Thread.currentThread().getId()) { throw new IllegalStateException("Function called from non-controller thread. Shouldn't happen."); } } private ClusterState latestCandidateClusterState() { return stateVersionTracker.getLatestCandidateState().getClusterState(); } @Override public void handleNewNodeState(NodeInfo node, NodeState newState) { verifyInControllerThread(); stateChangeHandler.handleNewReportedNodeState(latestCandidateClusterState(), node, newState, this); } @Override public void handleNewWantedNodeState(NodeInfo node, NodeState newState) { verifyInControllerThread(); wantedStateChanged = true; stateChangeHandler.proposeNewNodeState(stateVersionTracker.getVersionedClusterState(), node, newState); } @Override public void handleUpdatedHostInfo(NodeInfo nodeInfo, HostInfo newHostInfo) { verifyInControllerThread(); stateVersionTracker.handleUpdatedHostInfo(nodeInfo, newHostInfo); } @Override public void handleNewNode(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleNewNode(node); } @Override public void handleMissingNode(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleMissingNode(stateVersionTracker.getVersionedClusterState(), node, this); } @Override public void handleNewRpcAddress(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleNewRpcAddress(node); } @Override public void handleReturnedRpcAddress(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleReturnedRpcAddress(node); } @Override public void handleNewSystemState(ClusterStateBundle stateBundle) { verifyInControllerThread(); ClusterState baselineState = stateBundle.getBaselineClusterState(); newStates.add(stateBundle); metricUpdater.updateClusterStateMetrics(cluster, baselineState); 
systemStateBroadcaster.handleNewClusterStates(stateBundle); if (masterElectionHandler.isMaster()) { storeClusterStateVersionToZooKeeper(baselineState); } } private void storeClusterStateVersionToZooKeeper(ClusterState state) { try { database.saveLatestSystemStateVersion(databaseContext, state.getVersion()); } catch (InterruptedException e) { throw new RuntimeException("ZooKeeper write interrupted", e); } } /** * This function gives data of the current state in master election. * The keys in the given map are indexes of fleet controllers. * The values are what fleetcontroller that fleetcontroller wants to * become master. * * If more than half the fleetcontrollers want a node to be master and * that node also wants itself as master, that node is the single master. * If this condition is not met, there is currently no master. */ public void handleFleetData(Map<Integer, Integer> data) { verifyInControllerThread(); log.log(LogLevel.SPAM, "Sending fleet data event on to master election handler"); metricUpdater.updateMasterElectionMetrics(data); masterElectionHandler.handleFleetData(data); } /** * Called when we can no longer contact database. */ public void lostDatabaseConnection() { verifyInControllerThread(); masterElectionHandler.lostDatabaseConnection(); } private void failAllVersionDependentTasks() { tasksPendingStateRecompute.forEach(task -> { task.handleFailure(RemoteClusterControllerTask.FailureCondition.LEADERSHIP_LOST); task.notifyCompleted(); }); tasksPendingStateRecompute.clear(); taskCompletionQueue.forEach(task -> { task.getTask().handleFailure(RemoteClusterControllerTask.FailureCondition.LEADERSHIP_LOST); task.getTask().notifyCompleted(); }); taskCompletionQueue.clear(); } /** Called when all distributors have acked newest cluster state version. 
*/ public void handleAllDistributorsInSync(DatabaseHandler database, DatabaseHandler.Context context) throws InterruptedException { Set<ConfiguredNode> nodes = new HashSet<>(cluster.clusterInfo().getConfiguredNodes().values()); ClusterState currentState = stateVersionTracker.getVersionedClusterState(); log.fine(() -> String.format("All distributors have ACKed cluster state version %d", currentState.getVersion())); stateChangeHandler.handleAllDistributorsInSync(currentState, nodes, database, context); } private boolean changesConfiguredNodeSet(Collection<ConfiguredNode> newNodes) { if (newNodes.size() != cluster.getConfiguredNodes().size()) return true; if (! cluster.getConfiguredNodes().values().containsAll(newNodes)) return true; for (ConfiguredNode node : newNodes) { if (node.retired() != cluster.getConfiguredNodes().get(node.index()).retired()) { return true; } } return false; } /** This is called when the options field has been set to a new set of options */ private void propagateOptions() throws java.io.IOException, ListenFailedException { verifyInControllerThread(); if (changesConfiguredNodeSet(options.nodes)) { cluster.setSlobrokGenerationCount(0); } if (options.enableMultipleBucketSpaces) { configuredBucketSpaces = Collections.unmodifiableSet( Stream.of(FixedBucketSpaces.defaultSpace(), FixedBucketSpaces.globalSpace()) .collect(Collectors.toSet())); } else { configuredBucketSpaces = Collections.emptySet(); } communicator.propagateOptions(options); if (nodeLookup instanceof SlobrokClient) { ((SlobrokClient) nodeLookup).setSlobrokConnectionSpecs(options.slobrokConnectionSpecs); } eventLog.setMaxSize(options.eventLogMaxSize, options.eventNodeLogMaxSize); cluster.setPollingFrequency(options.statePollingFrequency); cluster.setDistribution(options.storageDistribution); cluster.setNodes(options.nodes); cluster.setMinRatioOfStorageNodesUp(options.minRatioOfStorageNodesUp); cluster.setMinStorageNodesUp(options.minStorageNodesUp); 
database.setZooKeeperAddress(options.zooKeeperServerAddress); database.setZooKeeperSessionTimeout(options.zooKeeperSessionTimeout); stateGatherer.setMaxSlobrokDisconnectGracePeriod(options.maxSlobrokDisconnectGracePeriod); stateGatherer.setNodeStateRequestTimeout(options.nodeStateRequestTimeoutMS); stateChangeHandler.reconfigureFromOptions(options); stateChangeHandler.setStateChangedFlag(); masterElectionHandler.setFleetControllerCount(options.fleetControllerCount); masterElectionHandler.setMasterZooKeeperCooldownPeriod(options.masterZooKeeperCooldownPeriod); if (rpcServer != null) { rpcServer.setMasterElectionHandler(masterElectionHandler); try{ rpcServer.setSlobrokConnectionSpecs(options.slobrokConnectionSpecs, options.rpcPort); } catch (ListenFailedException e) { log.log(LogLevel.WARNING, "Failed to bind RPC server to port " + options.rpcPort +". This may be natural if cluster has altered the services running on this node: " + e.getMessage()); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed to initialize RPC server socket: " + e.getMessage()); } } if (statusPageServer != null) { try{ statusPageServer.setPort(options.httpPort); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed to initialize status server socket. 
This may be natural if cluster has altered the services running on this node: " + e.getMessage()); } } long currentTime = timer.getCurrentTimeInMillis(); nextStateSendTime = Math.min(currentTime + options.minTimeBetweenNewSystemStates, nextStateSendTime); configGeneration = nextConfigGeneration; nextConfigGeneration = -1; } public StatusPageResponse fetchStatusPage(StatusPageServer.HttpRequest httpRequest) { verifyInControllerThread(); StatusPageResponse.ResponseCode responseCode; String message; String hiddenMessage = ""; try { StatusPageServer.RequestHandler handler = statusRequestRouter.resolveHandler(httpRequest); if (handler == null) { throw new FileNotFoundException("No handler found for request: " + httpRequest.getPath()); } return handler.handle(httpRequest); } catch (FileNotFoundException e) { responseCode = StatusPageResponse.ResponseCode.NOT_FOUND; message = e.getMessage(); } catch (Exception e) { responseCode = StatusPageResponse.ResponseCode.INTERNAL_SERVER_ERROR; message = "Internal Server Error"; hiddenMessage = ExceptionUtils.getStackTrace(e); log.log(LogLevel.DEBUG, "Unknown exception thrown for request " + httpRequest.getRequest() + ": " + hiddenMessage); } TimeZone tz = TimeZone.getTimeZone("UTC"); long currentTime = timer.getCurrentTimeInMillis(); StatusPageResponse response = new StatusPageResponse(); StringBuilder content = new StringBuilder(); response.setContentType("text/html"); response.setResponseCode(responseCode); content.append("<!-- Answer to request " + httpRequest.getRequest() + " -->\n"); content.append("<p>UTC time when creating this page: ").append(RealTimer.printDateNoMilliSeconds(currentTime, tz)).append("</p>"); response.writeHtmlHeader(content, message); response.writeHtmlFooter(content, hiddenMessage); response.writeContent(content.toString()); return response; } public void tick() throws Exception { synchronized (monitor) { boolean didWork; didWork = database.doNextZooKeeperTask(databaseContext); didWork |= 
updateMasterElectionState(); didWork |= handleLeadershipEdgeTransitions(); stateChangeHandler.setMaster(isMaster); if ( ! isRunning()) { return; } didWork |= stateGatherer.processResponses(this); if ( ! isRunning()) { return; } if (masterElectionHandler.isAmongNthFirst(options.stateGatherCount)) { didWork |= resyncLocallyCachedState(); } else { stepDownAsStateGatherer(); } if ( ! isRunning()) { return; } didWork |= systemStateBroadcaster.processResponses(); if ( ! isRunning()) { return; } if (masterElectionHandler.isMaster()) { didWork |= broadcastClusterStateToEligibleNodes(); systemStateBroadcaster.checkIfClusterStateIsAckedByAllDistributors(database, databaseContext, this); } if ( ! isRunning()) { return; } didWork |= processAnyPendingStatusPageRequest(); if ( ! isRunning()) { return; } if (rpcServer != null) { didWork |= rpcServer.handleRpcRequests(cluster, consolidatedClusterState(), this, this); } if ( ! isRunning()) { return; } didWork |= processNextQueuedRemoteTask(); didWork |= completeSatisfiedVersionDependentTasks(); processingCycle = false; ++cycleCount; long tickStopTime = timer.getCurrentTimeInMillis(); if (tickStopTime >= tickStartTime) { metricUpdater.addTickTime(tickStopTime - tickStartTime, didWork); } if ( ! didWork && ! waitingForCycle) { monitor.wait(options.cycleWaitTime); } if ( ! 
isRunning()) { return; } tickStartTime = timer.getCurrentTimeInMillis(); processingCycle = true; if (nextOptions != null) { switchToNewConfig(); } } if (isRunning()) { propagateNewStatesToListeners(); } } private boolean updateMasterElectionState() throws InterruptedException { try { return masterElectionHandler.watchMasterElection(database, databaseContext); } catch (InterruptedException e) { throw (InterruptedException) new InterruptedException("Interrupted").initCause(e); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed to watch master election: " + e.toString()); } return false; } private void stepDownAsStateGatherer() { if (isStateGatherer) { cluster.clearStates(); eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer a node state gatherer.", timer.getCurrentTimeInMillis())); } isStateGatherer = false; } private void switchToNewConfig() { options = nextOptions; nextOptions = null; try { propagateOptions(); } catch (Exception e) { log.log(LogLevel.ERROR, "Failed to handle new fleet controller config", e); } } private boolean processAnyPendingStatusPageRequest() { if (statusPageServer != null) { StatusPageServer.HttpRequest statusRequest = statusPageServer.getCurrentHttpRequest(); if (statusRequest != null) { statusPageServer.answerCurrentStatusRequest(fetchStatusPage(statusRequest)); return true; } } return false; } private boolean broadcastClusterStateToEligibleNodes() { boolean sentAny = false; long currentTime = timer.getCurrentTimeInMillis(); if ((currentTime >= firstAllowedStateBroadcast || cluster.allStatesReported()) && currentTime >= nextStateSendTime) { if (currentTime < firstAllowedStateBroadcast) { log.log(LogLevel.DEBUG, "Not set to broadcast states just yet, but as we have gotten info from all nodes we can do so safely."); firstAllowedStateBroadcast = currentTime; } sentAny = systemStateBroadcaster.broadcastNewState(databaseContext, communicator); if (sentAny) { nextStateSendTime = currentTime + 
options.minTimeBetweenNewSystemStates; } } return sentAny; } private void propagateNewStatesToListeners() { if ( ! newStates.isEmpty()) { synchronized (systemStateListeners) { for (ClusterStateBundle stateBundle : newStates) { for(SystemStateListener listener : systemStateListeners) { listener.handleNewSystemState(stateBundle); } } newStates.clear(); } } } private boolean processNextQueuedRemoteTask() { if ( ! remoteTasks.isEmpty()) { final RemoteClusterControllerTask.Context context = createRemoteTaskProcessingContext(); final RemoteClusterControllerTask task = remoteTasks.poll(); log.finest(() -> String.format("Processing remote task of type '%s'", task.getClass().getName())); task.doRemoteFleetControllerTask(context); if (taskMayBeCompletedImmediately(task)) { log.finest(() -> String.format("Done processing remote task of type '%s'", task.getClass().getName())); task.notifyCompleted(); } else { log.finest(() -> String.format("Remote task of type '%s' queued until state recomputation", task.getClass().getName())); tasksPendingStateRecompute.add(task); } return true; } return false; } private boolean taskMayBeCompletedImmediately(RemoteClusterControllerTask task) { return (!task.hasVersionAckDependency() || task.isFailed() || !masterElectionHandler.isMaster()); } private RemoteClusterControllerTask.Context createRemoteTaskProcessingContext() { final RemoteClusterControllerTask.Context context = new RemoteClusterControllerTask.Context(); context.cluster = cluster; context.currentState = consolidatedClusterState(); context.masterInfo = masterElectionHandler; context.nodeStateOrHostInfoChangeHandler = this; context.nodeAddedOrRemovedListener = this; return context; } private boolean completeSatisfiedVersionDependentTasks() { int publishedVersion = systemStateBroadcaster.lastClusterStateVersionInSync(); long queueSizeBefore = taskCompletionQueue.size(); final long now = timer.getCurrentTimeInMillis(); while (!taskCompletionQueue.isEmpty()) { 
VersionDependentTaskCompletion taskCompletion = taskCompletionQueue.peek(); if (publishedVersion >= taskCompletion.getMinimumVersion()) { log.fine(() -> String.format("Deferred task of type '%s' has minimum version %d, published is %d; completing", taskCompletion.getTask().getClass().getName(), taskCompletion.getMinimumVersion(), publishedVersion)); taskCompletion.getTask().notifyCompleted(); taskCompletionQueue.remove(); } else if (taskCompletion.getDeadlineTimePointMs() <= now) { log.fine(() -> String.format("Deferred task of type '%s' has exceeded wait deadline; completing with failure", taskCompletion.getTask().getClass().getName())); taskCompletion.getTask().handleFailure(RemoteClusterControllerTask.FailureCondition.DEADLINE_EXCEEDED); taskCompletion.getTask().notifyCompleted(); taskCompletionQueue.remove(); } else { break; } } return (taskCompletionQueue.size() != queueSizeBefore); } /** * A "consolidated" cluster state is guaranteed to have up-to-date information on which nodes are * up or down even when the whole cluster is down. The regular, published cluster state is not * normally updated to reflect node events when the cluster is down. */ ClusterState consolidatedClusterState() { final ClusterState publishedState = stateVersionTracker.getVersionedClusterState(); if (publishedState.getClusterState() == State.UP) { return publishedState; } final ClusterState current = stateVersionTracker.getLatestCandidateState().getClusterState().clone(); current.setVersion(publishedState.getVersion()); return current; } /* System test observations: - a node that stops normally (U -> S) then goes down erroneously triggers premature crash handling - long time before content node state convergence (though this seems to be the case for legacy impl as well) */ private boolean resyncLocallyCachedState() throws InterruptedException { boolean didWork = false; if ( ! 
isMaster && cycleCount % 100 == 0) { didWork = database.loadWantedStates(databaseContext); didWork |= database.loadStartTimestamps(cluster); } didWork |= nodeLookup.updateCluster(cluster, this); didWork |= stateGatherer.sendMessages(cluster, communicator, this); didWork |= stateChangeHandler.watchTimers(cluster, consolidatedClusterState(), this); didWork |= recomputeClusterStateIfRequired(); if ( ! isStateGatherer) { if ( ! isMaster) { eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became node state gatherer as we are fleetcontroller master candidate.", timer.getCurrentTimeInMillis())); stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion()); } } isStateGatherer = true; return didWork; } private ClusterStateDeriver createBucketSpaceStateDeriver() { return new MaintenanceWhenPendingGlobalMerges(stateVersionTracker.createMergePendingChecker()); } /** * Move tasks that are dependent on the most recently generated state being published into * a completion queue with a dependency on the provided version argument. Once that version * has been ACKed by all distributors in the system, those tasks will be marked as completed. 
*/ private void scheduleVersionDependentTasksForFutureCompletion(int completeAtVersion) { final long deadlineTimePointMs = timer.getCurrentTimeInMillis() + options.getMaxDeferredTaskVersionWaitTime().toMillis(); for (RemoteClusterControllerTask task : tasksPendingStateRecompute) { log.finest(() -> String.format("Adding task of type '%s' to be completed at version %d", task.getClass().getName(), completeAtVersion)); taskCompletionQueue.add(new VersionDependentTaskCompletion(completeAtVersion, task, deadlineTimePointMs)); } tasksPendingStateRecompute.clear(); } private AnnotatedClusterState computeCurrentAnnotatedState() { ClusterStateGenerator.Params params = ClusterStateGenerator.Params.fromOptions(options); params.currentTimeInMilllis(timer.getCurrentTimeInMillis()) .cluster(cluster) .lowestObservedDistributionBitCount(stateVersionTracker.getLowestObservedDistributionBits()); return ClusterStateGenerator.generatedStateFrom(params); } private void emitEventsForAlteredStateEdges(final ClusterStateBundle fromState, final ClusterStateBundle toState, final long timeNowMs) { final List<Event> deltaEvents = EventDiffCalculator.computeEventDiff( EventDiffCalculator.params() .cluster(cluster) .fromState(fromState) .toState(toState) .currentTimeMs(timeNowMs)); for (Event event : deltaEvents) { eventLog.add(event, isMaster); } emitStateAppliedEvents(timeNowMs, fromState.getBaselineClusterState(), toState.getBaselineClusterState()); } private void emitStateAppliedEvents(long timeNowMs, ClusterState fromClusterState, ClusterState toClusterState) { eventLog.add(new ClusterEvent( ClusterEvent.Type.SYSTEMSTATE, "New cluster state version " + toClusterState.getVersion() + ". 
Change from last: " + fromClusterState.getTextualDifference(toClusterState), timeNowMs), isMaster); if (toClusterState.getDistributionBitCount() != fromClusterState.getDistributionBitCount()) { eventLog.add(new ClusterEvent( ClusterEvent.Type.SYSTEMSTATE, "Altering distribution bits in system from " + fromClusterState.getDistributionBitCount() + " to " + toClusterState.getDistributionBitCount(), timeNowMs), isMaster); } } private boolean mustRecomputeCandidateClusterState() { return stateChangeHandler.stateMayHaveChanged() || stateVersionTracker.hasReceivedNewVersionFromZooKeeper() || stateVersionTracker.bucketSpaceMergeCompletionStateHasChanged(); } private boolean handleLeadershipEdgeTransitions() throws InterruptedException { boolean didWork = false; if (masterElectionHandler.isMaster()) { if ( ! isMaster) { metricUpdater.becameMaster(); stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion()); didWork = database.loadStartTimestamps(cluster); didWork |= database.loadWantedStates(databaseContext); eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became fleetcontroller master. 
Bumped version to " + stateVersionTracker.getCurrentVersion() + " to be in line.", timer.getCurrentTimeInMillis())); long currentTime = timer.getCurrentTimeInMillis(); firstAllowedStateBroadcast = currentTime + options.minTimeBeforeFirstSystemStateBroadcast; log.log(LogLevel.DEBUG, "At time " + currentTime + " we set first system state broadcast time to be " + options.minTimeBeforeFirstSystemStateBroadcast + " ms after at time " + firstAllowedStateBroadcast + "."); } isMaster = true; if (wantedStateChanged) { database.saveWantedStates(databaseContext); wantedStateChanged = false; } } else { if (isMaster) { eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer fleetcontroller master.", timer.getCurrentTimeInMillis())); firstAllowedStateBroadcast = Long.MAX_VALUE; metricUpdater.noLongerMaster(); failAllVersionDependentTasks(); } wantedStateChanged = false; isMaster = false; } return didWork; } public void run() { controllerThreadId = Thread.currentThread().getId(); try { processingCycle = true; while( isRunning() ) { tick(); } } catch (InterruptedException e) { log.log(LogLevel.DEBUG, "Event thread stopped by interrupt exception: " + e); } catch (Throwable t) { t.printStackTrace(); log.log(LogLevel.ERROR, "Fatal error killed fleet controller", t); synchronized (monitor) { running.set(false); } System.exit(1); } finally { running.set(false); failAllVersionDependentTasks(); synchronized (monitor) { monitor.notifyAll(); } } } public DatabaseHandler.Context databaseContext = new DatabaseHandler.Context() { @Override public ContentCluster getCluster() { return cluster; } @Override public FleetController getFleetController() { return FleetController.this; } @Override public NodeAddedOrRemovedListener getNodeAddedOrRemovedListener() { return FleetController.this; } @Override public NodeStateOrHostInfoChangeHandler getNodeStateUpdateListener() { return FleetController.this; } }; public void waitForCompleteCycle(long timeoutMS) { long 
endTime = System.currentTimeMillis() + timeoutMS; synchronized (monitor) { long wantedCycle = cycleCount + (processingCycle ? 2 : 1); waitingForCycle = true; try{ while (cycleCount < wantedCycle) { if (System.currentTimeMillis() > endTime) throw new IllegalStateException("Timed out waiting for cycle to complete. Not completed after " + timeoutMS + " ms."); if ( !isRunning() ) throw new IllegalStateException("Fleetcontroller not running. Will never complete cycles"); try{ monitor.wait(100); } catch (InterruptedException e) {} } } finally { waitingForCycle = false; } } } /** * This function might not be 100% threadsafe, as in theory cluster can be changing while accessed. * But it is only used in unit tests that should not trigger any thread issues. Don't want to add locks that reduce * live performance to remove a non-problem. */ public void waitForNodesHavingSystemStateVersionEqualToOrAbove(int version, int nodeCount, int timeout) throws InterruptedException { long maxTime = System.currentTimeMillis() + timeout; synchronized (monitor) { while (true) { int ackedNodes = 0; for (NodeInfo node : cluster.getNodeInfo()) { if (node.getSystemStateVersionAcknowledged() >= version) { ++ackedNodes; } } if (ackedNodes >= nodeCount) { log.log(LogLevel.INFO, ackedNodes + " nodes now have acked system state " + version + " or higher."); return; } long remainingTime = maxTime - System.currentTimeMillis(); if (remainingTime <= 0) { throw new IllegalStateException("Did not get " + nodeCount + " nodes to system state " + version + " within timeout of " + timeout + " milliseconds."); } monitor.wait(10); } } } public void waitForNodesInSlobrok(int distNodeCount, int storNodeCount, int timeoutMillis) throws InterruptedException { long maxTime = System.currentTimeMillis() + timeoutMillis; synchronized (monitor) { while (true) { int distCount = 0, storCount = 0; for (NodeInfo info : cluster.getNodeInfo()) { if (!info.isRpcAddressOutdated()) { if (info.isDistributor()) ++distCount; else 
++storCount; } } if (distCount == distNodeCount && storCount == storNodeCount) return; long remainingTime = maxTime - System.currentTimeMillis(); if (remainingTime <= 0) { throw new IllegalStateException("Did not get all " + distNodeCount + " distributors and " + storNodeCount + " storage nodes registered in slobrok within timeout of " + timeoutMillis + " ms. (Got " + distCount + " distributors and " + storCount + " storage nodes)"); } monitor.wait(10); } } } public boolean hasZookeeperConnection() { return !database.isClosed(); } public int getSlobrokMirrorUpdates() { return ((SlobrokClient)nodeLookup).getMirror().updates(); } public ContentCluster getCluster() { return cluster; } public List<NodeEvent> getNodeEvents(Node n) { return eventLog.getNodeEvents(n); } public EventLog getEventLog() { return eventLog; } }
Would `nodeEventForBaseline` be a more descriptive name here? Bucket space being null seems more of an implementation detail.
public void both_baseline_and_derived_bucket_space_state_events_are_emitted() { EventFixture f = EventFixture.createForNodes(3) .clusterStateBefore("distributor:3 storage:3") .derivedClusterStateBefore("default", "distributor:3 storage:3") .clusterStateAfter("distributor:3 storage:3 .0.s:m") .derivedClusterStateAfter("default", "distributor:3 storage:3 .1.s:m"); List<Event> events = f.computeEventDiff(); assertThat(events.size(), equalTo(2)); assertThat(events, hasItem(allOf( eventForNode(storageNode(0)), nodeEventForNullBucketSpace(), nodeEventWithDescription("Altered node state in cluster state from 'U' to 'M'")))); assertThat(events, hasItem(allOf( eventForNode(storageNode(1)), nodeEventForBucketSpace("default"), nodeEventWithDescription("Altered node state in cluster state from 'U' to 'M'")))); }
nodeEventForNullBucketSpace(),
public void both_baseline_and_derived_bucket_space_state_events_are_emitted() { EventFixture f = EventFixture.createForNodes(3) .clusterStateBefore("distributor:3 storage:3") .derivedClusterStateBefore("default", "distributor:3 storage:3") .clusterStateAfter("distributor:3 storage:3 .0.s:m") .derivedClusterStateAfter("default", "distributor:3 storage:3 .1.s:m"); List<Event> events = f.computeEventDiff(); assertThat(events.size(), equalTo(2)); assertThat(events, hasItem(allOf( eventForNode(storageNode(0)), nodeEventForBaseline(), nodeEventWithDescription("Altered node state in cluster state from 'U' to 'M'")))); assertThat(events, hasItem(allOf( eventForNode(storageNode(1)), nodeEventForBucketSpace("default"), nodeEventWithDescription("Altered node state in cluster state from 'U' to 'M'")))); }
class EventFixture { final ClusterFixture clusterFixture; AnnotatedClusterState.Builder baselineBefore = new AnnotatedClusterState.Builder(); AnnotatedClusterState.Builder baselineAfter = new AnnotatedClusterState.Builder(); Map<String, AnnotatedClusterState.Builder> derivedBefore = new HashMap<>(); Map<String, AnnotatedClusterState.Builder> derivedAfter = new HashMap<>(); long currentTimeMs = 0; EventFixture(int nodeCount) { this.clusterFixture = ClusterFixture.forFlatCluster(nodeCount); } EventFixture clusterStateBefore(String stateStr) { baselineBefore.clusterState(stateStr); return this; } EventFixture clusterStateAfter(String stateStr) { baselineAfter.clusterState(stateStr); return this; } EventFixture storageNodeReasonBefore(int nodeIndex, NodeStateReason reason) { baselineBefore.storageNodeReason(nodeIndex, reason); return this; } EventFixture storageNodeReasonAfter(int nodeIndex, NodeStateReason reason) { baselineAfter.storageNodeReason(nodeIndex, reason); return this; } EventFixture clusterReasonBefore(ClusterStateReason reason) { baselineBefore.clusterReason(reason); return this; } EventFixture clusterReasonAfter(ClusterStateReason reason) { baselineAfter.clusterReason(reason); return this; } EventFixture currentTimeMs(long timeMs) { this.currentTimeMs = timeMs; return this; } EventFixture derivedClusterStateBefore(String bucketSpace, String stateStr) { getBuilder(derivedBefore, bucketSpace).clusterState(stateStr); return this; } EventFixture derivedClusterStateAfter(String bucketSpace, String stateStr) { getBuilder(derivedAfter, bucketSpace).clusterState(stateStr); return this; } EventFixture derivedStorageNodeReasonBefore(String bucketSpace, int nodeIndex, NodeStateReason reason) { getBuilder(derivedBefore, bucketSpace).storageNodeReason(nodeIndex, reason); return this; } EventFixture derivedStorageNodeReasonAfter(String bucketSpace, int nodeIndex, NodeStateReason reason) { getBuilder(derivedAfter, bucketSpace).storageNodeReason(nodeIndex, reason); 
return this; } private static AnnotatedClusterState.Builder getBuilder(Map<String, AnnotatedClusterState.Builder> derivedStates, String bucketSpace) { AnnotatedClusterState.Builder result = derivedStates.get(bucketSpace); if (result == null) { result = new AnnotatedClusterState.Builder(); derivedStates.put(bucketSpace, result); } return result; } List<Event> computeEventDiff() { return EventDiffCalculator.computeEventDiff( EventDiffCalculator.params() .cluster(clusterFixture.cluster()) .fromState(ClusterStateBundle.of(baselineBefore.build(), toDerivedStates(derivedBefore))) .toState(ClusterStateBundle.of(baselineAfter.build(), toDerivedStates(derivedAfter))) .currentTimeMs(currentTimeMs)); } private static Map<String, AnnotatedClusterState> toDerivedStates(Map<String, AnnotatedClusterState.Builder> derivedBuilders) { return derivedBuilders.entrySet().stream() .collect(Collectors.toMap(entry -> entry.getKey(), entry -> entry.getValue().build())); } static EventFixture createForNodes(int nodeCount) { return new EventFixture(nodeCount); } }
class EventFixture { final ClusterFixture clusterFixture; AnnotatedClusterState.Builder baselineBefore = new AnnotatedClusterState.Builder(); AnnotatedClusterState.Builder baselineAfter = new AnnotatedClusterState.Builder(); Map<String, AnnotatedClusterState.Builder> derivedBefore = new HashMap<>(); Map<String, AnnotatedClusterState.Builder> derivedAfter = new HashMap<>(); long currentTimeMs = 0; EventFixture(int nodeCount) { this.clusterFixture = ClusterFixture.forFlatCluster(nodeCount); } EventFixture clusterStateBefore(String stateStr) { baselineBefore.clusterState(stateStr); return this; } EventFixture clusterStateAfter(String stateStr) { baselineAfter.clusterState(stateStr); return this; } EventFixture storageNodeReasonBefore(int nodeIndex, NodeStateReason reason) { baselineBefore.storageNodeReason(nodeIndex, reason); return this; } EventFixture storageNodeReasonAfter(int nodeIndex, NodeStateReason reason) { baselineAfter.storageNodeReason(nodeIndex, reason); return this; } EventFixture clusterReasonBefore(ClusterStateReason reason) { baselineBefore.clusterReason(reason); return this; } EventFixture clusterReasonAfter(ClusterStateReason reason) { baselineAfter.clusterReason(reason); return this; } EventFixture currentTimeMs(long timeMs) { this.currentTimeMs = timeMs; return this; } EventFixture derivedClusterStateBefore(String bucketSpace, String stateStr) { getBuilder(derivedBefore, bucketSpace).clusterState(stateStr); return this; } EventFixture derivedClusterStateAfter(String bucketSpace, String stateStr) { getBuilder(derivedAfter, bucketSpace).clusterState(stateStr); return this; } EventFixture derivedStorageNodeReasonBefore(String bucketSpace, int nodeIndex, NodeStateReason reason) { getBuilder(derivedBefore, bucketSpace).storageNodeReason(nodeIndex, reason); return this; } EventFixture derivedStorageNodeReasonAfter(String bucketSpace, int nodeIndex, NodeStateReason reason) { getBuilder(derivedAfter, bucketSpace).storageNodeReason(nodeIndex, reason); 
return this; } private static AnnotatedClusterState.Builder getBuilder(Map<String, AnnotatedClusterState.Builder> derivedStates, String bucketSpace) { return derivedStates.computeIfAbsent(bucketSpace, key -> new AnnotatedClusterState.Builder()); } List<Event> computeEventDiff() { return EventDiffCalculator.computeEventDiff( EventDiffCalculator.params() .cluster(clusterFixture.cluster()) .fromState(ClusterStateBundle.of(baselineBefore.build(), toDerivedStates(derivedBefore))) .toState(ClusterStateBundle.of(baselineAfter.build(), toDerivedStates(derivedAfter))) .currentTimeMs(currentTimeMs)); } private static Map<String, AnnotatedClusterState> toDerivedStates(Map<String, AnnotatedClusterState.Builder> derivedBuilders) { return derivedBuilders.entrySet().stream() .collect(Collectors.toMap(entry -> entry.getKey(), entry -> entry.getValue().build())); } static EventFixture createForNodes(int nodeCount) { return new EventFixture(nodeCount); } }
hostNames is supposed to contain parentHostName already (if applicable)
public void suspend(String parentHostName, List<String> hostNames) { final BatchOperationResult batchOperationResult; try { String params = String.join("&hostname=", hostNames); String url = String.format("%s/%s?hostname=%s", ORCHESTRATOR_PATH_PREFIX_HOST_SUSPENSION_API, parentHostName, params); batchOperationResult = configServerApi.put(url, Optional.empty(), BatchOperationResult.class); } catch (HttpException e) { throw new OrchestratorException("Failed to batch suspend for " + parentHostName + ": " + e.toString()); } catch (Exception e) { throw new RuntimeException("Got error on batch suspend for " + parentHostName + ", with nodes " + hostNames, e); } batchOperationResult.getFailureReason().ifPresent(reason -> { throw new OrchestratorException(reason); }); }
String url = String.format("%s/%s?hostname=%s", ORCHESTRATOR_PATH_PREFIX_HOST_SUSPENSION_API,
public void suspend(String parentHostName, List<String> hostNames) { final BatchOperationResult batchOperationResult; try { String params = String.join("&hostname=", hostNames); String url = String.format("%s/%s?hostname=%s", ORCHESTRATOR_PATH_PREFIX_HOST_SUSPENSION_API, parentHostName, params); batchOperationResult = configServerApi.put(url, Optional.empty(), BatchOperationResult.class); } catch (HttpException e) { throw new OrchestratorException("Failed to batch suspend for " + parentHostName + ": " + e.toString()); } catch (Exception e) { throw new RuntimeException("Got error on batch suspend for " + parentHostName + ", with nodes " + hostNames, e); } batchOperationResult.getFailureReason().ifPresent(reason -> { throw new OrchestratorException(reason); }); }
class OrchestratorImpl implements Orchestrator { private static final String ORCHESTRATOR_PATH_PREFIX = "/orchestrator"; static final String ORCHESTRATOR_PATH_PREFIX_HOST_API = ORCHESTRATOR_PATH_PREFIX + HostApi.PATH_PREFIX; static final String ORCHESTRATOR_PATH_PREFIX_HOST_SUSPENSION_API = ORCHESTRATOR_PATH_PREFIX + HostSuspensionApi.PATH_PREFIX; private final ConfigServerApi configServerApi; public OrchestratorImpl(ConfigServerApi configServerApi) { this.configServerApi = configServerApi; } @Override public void suspend(final String hostName) { UpdateHostResponse response; try { response = configServerApi.put(getSuspendPath(hostName), Optional.empty(), /* body */ UpdateHostResponse.class); } catch (HttpException.NotFoundException n) { throw new OrchestratorNotFoundException("Failed to suspend " + hostName + ", host not found"); } catch (HttpException e) { throw new OrchestratorException("Failed to suspend " + hostName + ": " + e.toString()); } catch (Exception e) { throw new RuntimeException("Got error on suspend", e); } Optional.ofNullable(response.reason()).ifPresent(reason -> { throw new OrchestratorException(reason.message()); }); } @Override @Override public void resume(final String hostName) { UpdateHostResponse response; try { String path = getSuspendPath(hostName); response = configServerApi.delete(path, UpdateHostResponse.class); } catch (HttpException.NotFoundException n) { throw new OrchestratorNotFoundException("Failed to resume " + hostName + ", host not found"); } catch (HttpException e) { throw new OrchestratorException("Failed to suspend " + hostName + ": " + e.toString()); } catch (Exception e) { throw new RuntimeException("Got error on resume", e); } Optional.ofNullable(response.reason()).ifPresent(reason -> { throw new OrchestratorException(reason.message()); }); } private String getSuspendPath(String hostName) { return ORCHESTRATOR_PATH_PREFIX_HOST_API + "/" + hostName + "/suspended"; } }
class OrchestratorImpl implements Orchestrator { private static final String ORCHESTRATOR_PATH_PREFIX = "/orchestrator"; static final String ORCHESTRATOR_PATH_PREFIX_HOST_API = ORCHESTRATOR_PATH_PREFIX + HostApi.PATH_PREFIX; static final String ORCHESTRATOR_PATH_PREFIX_HOST_SUSPENSION_API = ORCHESTRATOR_PATH_PREFIX + HostSuspensionApi.PATH_PREFIX; private final ConfigServerApi configServerApi; public OrchestratorImpl(ConfigServerApi configServerApi) { this.configServerApi = configServerApi; } @Override public void suspend(final String hostName) { UpdateHostResponse response; try { response = configServerApi.put(getSuspendPath(hostName), Optional.empty(), /* body */ UpdateHostResponse.class); } catch (HttpException.NotFoundException n) { throw new OrchestratorNotFoundException("Failed to suspend " + hostName + ", host not found"); } catch (HttpException e) { throw new OrchestratorException("Failed to suspend " + hostName + ": " + e.toString()); } catch (Exception e) { throw new RuntimeException("Got error on suspend", e); } Optional.ofNullable(response.reason()).ifPresent(reason -> { throw new OrchestratorException(reason.message()); }); } @Override @Override public void resume(final String hostName) { UpdateHostResponse response; try { String path = getSuspendPath(hostName); response = configServerApi.delete(path, UpdateHostResponse.class); } catch (HttpException.NotFoundException n) { throw new OrchestratorNotFoundException("Failed to resume " + hostName + ", host not found"); } catch (HttpException e) { throw new OrchestratorException("Failed to suspend " + hostName + ": " + e.toString()); } catch (Exception e) { throw new RuntimeException("Got error on resume", e); } Optional.ofNullable(response.reason()).ifPresent(reason -> { throw new OrchestratorException(reason.message()); }); } private String getSuspendPath(String hostName) { return ORCHESTRATOR_PATH_PREFIX_HOST_API + "/" + hostName + "/suspended"; } }
What? Note that this only changes the REST API, not the data being passed.
public void suspend(String parentHostName, List<String> hostNames) { final BatchOperationResult batchOperationResult; try { String params = String.join("&hostname=", hostNames); String url = String.format("%s/%s?hostname=%s", ORCHESTRATOR_PATH_PREFIX_HOST_SUSPENSION_API, parentHostName, params); batchOperationResult = configServerApi.put(url, Optional.empty(), BatchOperationResult.class); } catch (HttpException e) { throw new OrchestratorException("Failed to batch suspend for " + parentHostName + ": " + e.toString()); } catch (Exception e) { throw new RuntimeException("Got error on batch suspend for " + parentHostName + ", with nodes " + hostNames, e); } batchOperationResult.getFailureReason().ifPresent(reason -> { throw new OrchestratorException(reason); }); }
String url = String.format("%s/%s?hostname=%s", ORCHESTRATOR_PATH_PREFIX_HOST_SUSPENSION_API,
public void suspend(String parentHostName, List<String> hostNames) { final BatchOperationResult batchOperationResult; try { String params = String.join("&hostname=", hostNames); String url = String.format("%s/%s?hostname=%s", ORCHESTRATOR_PATH_PREFIX_HOST_SUSPENSION_API, parentHostName, params); batchOperationResult = configServerApi.put(url, Optional.empty(), BatchOperationResult.class); } catch (HttpException e) { throw new OrchestratorException("Failed to batch suspend for " + parentHostName + ": " + e.toString()); } catch (Exception e) { throw new RuntimeException("Got error on batch suspend for " + parentHostName + ", with nodes " + hostNames, e); } batchOperationResult.getFailureReason().ifPresent(reason -> { throw new OrchestratorException(reason); }); }
class OrchestratorImpl implements Orchestrator { private static final String ORCHESTRATOR_PATH_PREFIX = "/orchestrator"; static final String ORCHESTRATOR_PATH_PREFIX_HOST_API = ORCHESTRATOR_PATH_PREFIX + HostApi.PATH_PREFIX; static final String ORCHESTRATOR_PATH_PREFIX_HOST_SUSPENSION_API = ORCHESTRATOR_PATH_PREFIX + HostSuspensionApi.PATH_PREFIX; private final ConfigServerApi configServerApi; public OrchestratorImpl(ConfigServerApi configServerApi) { this.configServerApi = configServerApi; } @Override public void suspend(final String hostName) { UpdateHostResponse response; try { response = configServerApi.put(getSuspendPath(hostName), Optional.empty(), /* body */ UpdateHostResponse.class); } catch (HttpException.NotFoundException n) { throw new OrchestratorNotFoundException("Failed to suspend " + hostName + ", host not found"); } catch (HttpException e) { throw new OrchestratorException("Failed to suspend " + hostName + ": " + e.toString()); } catch (Exception e) { throw new RuntimeException("Got error on suspend", e); } Optional.ofNullable(response.reason()).ifPresent(reason -> { throw new OrchestratorException(reason.message()); }); } @Override @Override public void resume(final String hostName) { UpdateHostResponse response; try { String path = getSuspendPath(hostName); response = configServerApi.delete(path, UpdateHostResponse.class); } catch (HttpException.NotFoundException n) { throw new OrchestratorNotFoundException("Failed to resume " + hostName + ", host not found"); } catch (HttpException e) { throw new OrchestratorException("Failed to suspend " + hostName + ": " + e.toString()); } catch (Exception e) { throw new RuntimeException("Got error on resume", e); } Optional.ofNullable(response.reason()).ifPresent(reason -> { throw new OrchestratorException(reason.message()); }); } private String getSuspendPath(String hostName) { return ORCHESTRATOR_PATH_PREFIX_HOST_API + "/" + hostName + "/suspended"; } }
class OrchestratorImpl implements Orchestrator { private static final String ORCHESTRATOR_PATH_PREFIX = "/orchestrator"; static final String ORCHESTRATOR_PATH_PREFIX_HOST_API = ORCHESTRATOR_PATH_PREFIX + HostApi.PATH_PREFIX; static final String ORCHESTRATOR_PATH_PREFIX_HOST_SUSPENSION_API = ORCHESTRATOR_PATH_PREFIX + HostSuspensionApi.PATH_PREFIX; private final ConfigServerApi configServerApi; public OrchestratorImpl(ConfigServerApi configServerApi) { this.configServerApi = configServerApi; } @Override public void suspend(final String hostName) { UpdateHostResponse response; try { response = configServerApi.put(getSuspendPath(hostName), Optional.empty(), /* body */ UpdateHostResponse.class); } catch (HttpException.NotFoundException n) { throw new OrchestratorNotFoundException("Failed to suspend " + hostName + ", host not found"); } catch (HttpException e) { throw new OrchestratorException("Failed to suspend " + hostName + ": " + e.toString()); } catch (Exception e) { throw new RuntimeException("Got error on suspend", e); } Optional.ofNullable(response.reason()).ifPresent(reason -> { throw new OrchestratorException(reason.message()); }); } @Override @Override public void resume(final String hostName) { UpdateHostResponse response; try { String path = getSuspendPath(hostName); response = configServerApi.delete(path, UpdateHostResponse.class); } catch (HttpException.NotFoundException n) { throw new OrchestratorNotFoundException("Failed to resume " + hostName + ", host not found"); } catch (HttpException e) { throw new OrchestratorException("Failed to suspend " + hostName + ": " + e.toString()); } catch (Exception e) { throw new RuntimeException("Got error on resume", e); } Optional.ofNullable(response.reason()).ifPresent(reason -> { throw new OrchestratorException(reason.message()); }); } private String getSuspendPath(String hostName) { return ORCHESTRATOR_PATH_PREFIX_HOST_API + "/" + hostName + "/suspended"; } }
Nm, I was confused by both ?hostname= and &hostname=
public void suspend(String parentHostName, List<String> hostNames) { final BatchOperationResult batchOperationResult; try { String params = String.join("&hostname=", hostNames); String url = String.format("%s/%s?hostname=%s", ORCHESTRATOR_PATH_PREFIX_HOST_SUSPENSION_API, parentHostName, params); batchOperationResult = configServerApi.put(url, Optional.empty(), BatchOperationResult.class); } catch (HttpException e) { throw new OrchestratorException("Failed to batch suspend for " + parentHostName + ": " + e.toString()); } catch (Exception e) { throw new RuntimeException("Got error on batch suspend for " + parentHostName + ", with nodes " + hostNames, e); } batchOperationResult.getFailureReason().ifPresent(reason -> { throw new OrchestratorException(reason); }); }
String url = String.format("%s/%s?hostname=%s", ORCHESTRATOR_PATH_PREFIX_HOST_SUSPENSION_API,
public void suspend(String parentHostName, List<String> hostNames) { final BatchOperationResult batchOperationResult; try { String params = String.join("&hostname=", hostNames); String url = String.format("%s/%s?hostname=%s", ORCHESTRATOR_PATH_PREFIX_HOST_SUSPENSION_API, parentHostName, params); batchOperationResult = configServerApi.put(url, Optional.empty(), BatchOperationResult.class); } catch (HttpException e) { throw new OrchestratorException("Failed to batch suspend for " + parentHostName + ": " + e.toString()); } catch (Exception e) { throw new RuntimeException("Got error on batch suspend for " + parentHostName + ", with nodes " + hostNames, e); } batchOperationResult.getFailureReason().ifPresent(reason -> { throw new OrchestratorException(reason); }); }
class OrchestratorImpl implements Orchestrator { private static final String ORCHESTRATOR_PATH_PREFIX = "/orchestrator"; static final String ORCHESTRATOR_PATH_PREFIX_HOST_API = ORCHESTRATOR_PATH_PREFIX + HostApi.PATH_PREFIX; static final String ORCHESTRATOR_PATH_PREFIX_HOST_SUSPENSION_API = ORCHESTRATOR_PATH_PREFIX + HostSuspensionApi.PATH_PREFIX; private final ConfigServerApi configServerApi; public OrchestratorImpl(ConfigServerApi configServerApi) { this.configServerApi = configServerApi; } @Override public void suspend(final String hostName) { UpdateHostResponse response; try { response = configServerApi.put(getSuspendPath(hostName), Optional.empty(), /* body */ UpdateHostResponse.class); } catch (HttpException.NotFoundException n) { throw new OrchestratorNotFoundException("Failed to suspend " + hostName + ", host not found"); } catch (HttpException e) { throw new OrchestratorException("Failed to suspend " + hostName + ": " + e.toString()); } catch (Exception e) { throw new RuntimeException("Got error on suspend", e); } Optional.ofNullable(response.reason()).ifPresent(reason -> { throw new OrchestratorException(reason.message()); }); } @Override @Override public void resume(final String hostName) { UpdateHostResponse response; try { String path = getSuspendPath(hostName); response = configServerApi.delete(path, UpdateHostResponse.class); } catch (HttpException.NotFoundException n) { throw new OrchestratorNotFoundException("Failed to resume " + hostName + ", host not found"); } catch (HttpException e) { throw new OrchestratorException("Failed to suspend " + hostName + ": " + e.toString()); } catch (Exception e) { throw new RuntimeException("Got error on resume", e); } Optional.ofNullable(response.reason()).ifPresent(reason -> { throw new OrchestratorException(reason.message()); }); } private String getSuspendPath(String hostName) { return ORCHESTRATOR_PATH_PREFIX_HOST_API + "/" + hostName + "/suspended"; } }
class OrchestratorImpl implements Orchestrator { private static final String ORCHESTRATOR_PATH_PREFIX = "/orchestrator"; static final String ORCHESTRATOR_PATH_PREFIX_HOST_API = ORCHESTRATOR_PATH_PREFIX + HostApi.PATH_PREFIX; static final String ORCHESTRATOR_PATH_PREFIX_HOST_SUSPENSION_API = ORCHESTRATOR_PATH_PREFIX + HostSuspensionApi.PATH_PREFIX; private final ConfigServerApi configServerApi; public OrchestratorImpl(ConfigServerApi configServerApi) { this.configServerApi = configServerApi; } @Override public void suspend(final String hostName) { UpdateHostResponse response; try { response = configServerApi.put(getSuspendPath(hostName), Optional.empty(), /* body */ UpdateHostResponse.class); } catch (HttpException.NotFoundException n) { throw new OrchestratorNotFoundException("Failed to suspend " + hostName + ", host not found"); } catch (HttpException e) { throw new OrchestratorException("Failed to suspend " + hostName + ": " + e.toString()); } catch (Exception e) { throw new RuntimeException("Got error on suspend", e); } Optional.ofNullable(response.reason()).ifPresent(reason -> { throw new OrchestratorException(reason.message()); }); } @Override @Override public void resume(final String hostName) { UpdateHostResponse response; try { String path = getSuspendPath(hostName); response = configServerApi.delete(path, UpdateHostResponse.class); } catch (HttpException.NotFoundException n) { throw new OrchestratorNotFoundException("Failed to resume " + hostName + ", host not found"); } catch (HttpException e) { throw new OrchestratorException("Failed to suspend " + hostName + ": " + e.toString()); } catch (Exception e) { throw new RuntimeException("Got error on resume", e); } Optional.ofNullable(response.reason()).ifPresent(reason -> { throw new OrchestratorException(reason.message()); }); } private String getSuspendPath(String hostName) { return ORCHESTRATOR_PATH_PREFIX_HOST_API + "/" + hostName + "/suspended"; } }
Could you verify that the stored macro is created and have the expected expression here?
public void testImportingFromStoredExpressionsWithSmallConstants() throws IOException { final String expression = "join(reduce(join(join(join(constant(\"dnn_hidden2_Const\"), tf_macro_dnn_hidden2_add, f(a,b)(a * b)), tf_macro_dnn_hidden2_add, f(a,b)(max(a,b))), constant(\"dnn_outputs_weights_read\"), f(a,b)(a * b)), sum, d2), constant(\"dnn_outputs_bias_read\"), f(a,b)(a + b))"; StoringApplicationPackage application = new StoringApplicationPackage(applicationDir); RankProfileSearchFixture search = fixtureWith("tensor(d0[1],d1[784])(0.0)", "tensorflow('mnist/saved')", null, null, "input", application); search.assertFirstPhaseExpression(expression, "my_profile"); assertSmallConstant("dnn_hidden2_Const", TensorType.fromSpec("tensor(d0[1])"), search); Path storedApplicationDirectory = applicationDir.getParentPath().append("copy"); try { storedApplicationDirectory.toFile().mkdirs(); IOUtils.copyDirectory(applicationDir.append(ApplicationPackage.MODELS_GENERATED_DIR).toFile(), storedApplicationDirectory.append(ApplicationPackage.MODELS_GENERATED_DIR).toFile()); StoringApplicationPackage storedApplication = new StoringApplicationPackage(storedApplicationDirectory); RankProfileSearchFixture searchFromStored = fixtureWith("tensor(d0[2],d1[784])(0.0)", "tensorflow('mnist/saved')", null, null, "input", storedApplication); searchFromStored.assertFirstPhaseExpression(expression, "my_profile"); assertSmallConstant("dnn_hidden2_Const", TensorType.fromSpec("tensor(d0[1])"), search); } finally { IOUtils.recursiveDeleteDir(storedApplicationDirectory.toFile()); } }
}
public void testImportingFromStoredExpressionsWithSmallConstants() throws IOException { final String expression = "join(reduce(join(join(join(constant(\"dnn_hidden2_Const\"), tf_macro_dnn_hidden2_add, f(a,b)(a * b)), tf_macro_dnn_hidden2_add, f(a,b)(max(a,b))), constant(\"dnn_outputs_weights_read\"), f(a,b)(a * b)), sum, d2), constant(\"dnn_outputs_bias_read\"), f(a,b)(a + b))"; final String macroExpression1 = "join(reduce(join(rename(input, (d0, d1), (d0, d4)), constant(\"dnn_hidden1_weights_read\"), f(a,b)(a * b)), sum, d4), constant(\"dnn_hidden1_bias_read\"), f(a,b)(a + b))"; final String macroExpression2 = "join(reduce(join(join(join(0.009999999776482582, tf_macro_dnn_hidden1_add, f(a,b)(a * b)), tf_macro_dnn_hidden1_add, f(a,b)(max(a,b))), constant(\"dnn_hidden2_weights_read\"), f(a,b)(a * b)), sum, d3), constant(\"dnn_hidden2_bias_read\"), f(a,b)(a + b))"; StoringApplicationPackage application = new StoringApplicationPackage(applicationDir); RankProfileSearchFixture search = fixtureWith("tensor(d0[1],d1[784])(0.0)", "tensorflow('mnist/saved')", null, null, "input", application); search.assertFirstPhaseExpression(expression, "my_profile"); assertSmallConstant("dnn_hidden2_Const", TensorType.fromSpec("tensor(d0[1])"), search); search.assertMacro(macroExpression1, "tf_macro_dnn_hidden1_add", "my_profile"); search.assertMacro(macroExpression2, "tf_macro_dnn_hidden2_add", "my_profile"); Path storedApplicationDirectory = applicationDir.getParentPath().append("copy"); try { storedApplicationDirectory.toFile().mkdirs(); IOUtils.copyDirectory(applicationDir.append(ApplicationPackage.MODELS_GENERATED_DIR).toFile(), storedApplicationDirectory.append(ApplicationPackage.MODELS_GENERATED_DIR).toFile()); StoringApplicationPackage storedApplication = new StoringApplicationPackage(storedApplicationDirectory); RankProfileSearchFixture searchFromStored = fixtureWith("tensor(d0[1],d1[784])(0.0)", "tensorflow('mnist/saved')", null, null, "input", storedApplication); 
searchFromStored.assertFirstPhaseExpression(expression, "my_profile"); assertSmallConstant("dnn_hidden2_Const", TensorType.fromSpec("tensor(d0[1])"), search); searchFromStored.assertMacro(macroExpression1, "tf_macro_dnn_hidden1_add", "my_profile"); searchFromStored.assertMacro(macroExpression2, "tf_macro_dnn_hidden2_add", "my_profile"); } finally { IOUtils.recursiveDeleteDir(storedApplicationDirectory.toFile()); } }
class RankingExpressionWithTensorFlowTestCase { private final Path applicationDir = Path.fromString("src/test/integration/tensorflow/"); private final String vespaExpression = "join(reduce(join(rename(Placeholder, (d0, d1), (d0, d2)), constant(\"layer_Variable_read\"), f(a,b)(a * b)), sum, d2), constant(\"layer_Variable_1_read\"), f(a,b)(a + b))"; @After public void removeGeneratedConstantTensorFiles() { IOUtils.recursiveDeleteDir(applicationDir.append(ApplicationPackage.MODELS_GENERATED_DIR).toFile()); } @Test public void testTensorFlowReference() { RankProfileSearchFixture search = fixtureWith("tensor(d0[2],d1[784])(0.0)", "tensorflow('mnist_softmax/saved')"); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); assertLargeConstant("layer_Variable_1_read", search, Optional.of(10L)); assertLargeConstant("layer_Variable_read", search, Optional.of(7840L)); } @Test public void testTensorFlowReferenceWithConstantFeature() { RankProfileSearchFixture search = fixtureWith("constant(mytensor)", "tensorflow('mnist_softmax/saved')", "constant mytensor { file: ignored\ntype: tensor(d0[7],d1[784]) }", null); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); assertLargeConstant("layer_Variable_1_read", search, Optional.of(10L)); assertLargeConstant("layer_Variable_read", search, Optional.of(7840L)); } @Test public void testTensorFlowReferenceWithQueryFeature() { String queryProfile = "<query-profile id='default' type='root'/>"; String queryProfileType = "<query-profile-type id='root'>" + " <field name='query(mytensor)' type='tensor(d0[3],d1[784])'/>" + "</query-profile-type>"; StoringApplicationPackage application = new StoringApplicationPackage(applicationDir, queryProfile, queryProfileType); RankProfileSearchFixture search = fixtureWith("query(mytensor)", "tensorflow('mnist_softmax/saved')", null, null, "Placeholder", application); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); assertLargeConstant("layer_Variable_1_read", 
search, Optional.of(10L)); assertLargeConstant("layer_Variable_read", search, Optional.of(7840L)); } @Test public void testTensorFlowReferenceWithDocumentFeature() { StoringApplicationPackage application = new StoringApplicationPackage(applicationDir); RankProfileSearchFixture search = fixtureWith("attribute(mytensor)", "tensorflow('mnist_softmax/saved')", null, "field mytensor type tensor(d0[],d1[784]) { indexing: attribute }", "Placeholder", application); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); assertLargeConstant("layer_Variable_1_read", search, Optional.of(10L)); assertLargeConstant("layer_Variable_read", search, Optional.of(7840L)); } @Test public void testTensorFlowReferenceWithFeatureCombination() { String queryProfile = "<query-profile id='default' type='root'/>"; String queryProfileType = "<query-profile-type id='root'>" + " <field name='query(mytensor)' type='tensor(d0[3],d1[784],d2[10])'/>" + "</query-profile-type>"; StoringApplicationPackage application = new StoringApplicationPackage(applicationDir, queryProfile, queryProfileType); RankProfileSearchFixture search = fixtureWith("sum(query(mytensor) * attribute(mytensor) * constant(mytensor),d2)", "tensorflow('mnist_softmax/saved')", "constant mytensor { file: ignored\ntype: tensor(d0[7],d1[784]) }", "field mytensor type tensor(d0[],d1[784]) { indexing: attribute }", "Placeholder", application); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); assertLargeConstant("layer_Variable_1_read", search, Optional.of(10L)); assertLargeConstant("layer_Variable_read", search, Optional.of(7840L)); } @Test public void testNestedTensorFlowReference() { RankProfileSearchFixture search = fixtureWith("tensor(d0[2],d1[784])(0.0)", "5 + sum(tensorflow('mnist_softmax/saved'))"); search.assertFirstPhaseExpression("5 + reduce(" + vespaExpression + ", sum)", "my_profile"); assertLargeConstant("layer_Variable_1_read", search, Optional.of(10L)); assertLargeConstant("layer_Variable_read", 
search, Optional.of(7840L)); } @Test public void testTensorFlowReferenceSpecifyingSignature() { RankProfileSearchFixture search = fixtureWith("tensor(d0[2],d1[784])(0.0)", "tensorflow('mnist_softmax/saved', 'serving_default')"); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); } @Test public void testTensorFlowReferenceSpecifyingSignatureAndOutput() { RankProfileSearchFixture search = fixtureWith("tensor(d0[2],d1[784])(0.0)", "tensorflow('mnist_softmax/saved', 'serving_default', 'y')"); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); } @Test public void testTensorFlowReferenceMissingMacro() throws ParseException { try { RankProfileSearchFixture search = new RankProfileSearchFixture( new StoringApplicationPackage(applicationDir), new QueryProfileRegistry(), " rank-profile my_profile {\n" + " first-phase {\n" + " expression: tensorflow('mnist_softmax/saved')" + " }\n" + " }"); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); fail("Expecting exception"); } catch (IllegalArgumentException expected) { assertEquals("Rank profile 'my_profile' is invalid: Could not use tensorflow model from " + "tensorflow('mnist_softmax/saved'): " + "Model refers Placeholder 'Placeholder' of type tensor(d0[],d1[784]) but this macro is " + "not present in rank profile 'my_profile'", Exceptions.toMessageString(expected)); } } @Test public void testTensorFlowReferenceWithWrongMacroType() { try { RankProfileSearchFixture search = fixtureWith("tensor(d0[2],d5[10])(0.0)", "tensorflow('mnist_softmax/saved')"); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); fail("Expecting exception"); } catch (IllegalArgumentException expected) { assertEquals("Rank profile 'my_profile' is invalid: Could not use tensorflow model from " + "tensorflow('mnist_softmax/saved'): " + "Model refers Placeholder 'Placeholder' of type tensor(d0[],d1[784]) which must be produced " + "by a macro in the rank profile, but this macro produces type 
tensor(d0[2],d5[10])", Exceptions.toMessageString(expected)); } } @Test public void testTensorFlowReferenceSpecifyingNonExistingSignature() { try { RankProfileSearchFixture search = fixtureWith("tensor(d0[2],d1[784])(0.0)", "tensorflow('mnist_softmax/saved', 'serving_defaultz')"); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); fail("Expecting exception"); } catch (IllegalArgumentException expected) { assertEquals("Rank profile 'my_profile' is invalid: Could not use tensorflow model from " + "tensorflow('mnist_softmax/saved','serving_defaultz'): " + "Model does not have the specified signature 'serving_defaultz'", Exceptions.toMessageString(expected)); } } @Test public void testTensorFlowReferenceSpecifyingNonExistingOutput() { try { RankProfileSearchFixture search = fixtureWith("tensor(d0[2],d1[784])(0.0)", "tensorflow('mnist_softmax/saved', 'serving_default', 'x')"); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); fail("Expecting exception"); } catch (IllegalArgumentException expected) { assertEquals("Rank profile 'my_profile' is invalid: Could not use tensorflow model from " + "tensorflow('mnist_softmax/saved','serving_default','x'): " + "Model does not have the specified output 'x'", Exceptions.toMessageString(expected)); } } @Test public void testImportingFromStoredExpressions() throws IOException { RankProfileSearchFixture search = fixtureWith("tensor(d0[2],d1[784])(0.0)", "tensorflow('mnist_softmax/saved')"); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); assertLargeConstant("layer_Variable_1_read", search, Optional.of(10L)); assertLargeConstant("layer_Variable_read", search, Optional.of(7840L)); Path storedApplicationDirectory = applicationDir.getParentPath().append("copy"); try { storedApplicationDirectory.toFile().mkdirs(); IOUtils.copyDirectory(applicationDir.append(ApplicationPackage.MODELS_GENERATED_DIR).toFile(), storedApplicationDirectory.append(ApplicationPackage.MODELS_GENERATED_DIR).toFile()); 
StoringApplicationPackage storedApplication = new StoringApplicationPackage(storedApplicationDirectory); RankProfileSearchFixture searchFromStored = fixtureWith("tensor(d0[2],d1[784])(0.0)", "tensorflow('mnist_softmax/saved')", null, null, "Placeholder", storedApplication); searchFromStored.assertFirstPhaseExpression(vespaExpression, "my_profile"); assertLargeConstant("layer_Variable_1_read", searchFromStored, Optional.empty()); assertLargeConstant("layer_Variable_read", searchFromStored, Optional.empty()); } finally { IOUtils.recursiveDeleteDir(storedApplicationDirectory.toFile()); } } @Test public void testTensorFlowReduceBatchDimension() { final String expression = "join(join(reduce(join(reduce(rename(Placeholder, (d0, d1), (d0, d2)), sum, d0), constant(\"layer_Variable_read\"), f(a,b)(a * b)), sum, d2), constant(\"layer_Variable_1_read\"), f(a,b)(a + b)), tensor(d0[1])(1.0), f(a,b)(a * b))"; RankProfileSearchFixture search = fixtureWith("tensor(d0[1],d1[784])(0.0)", "tensorflow('mnist_softmax/saved')"); search.assertFirstPhaseExpression(expression, "my_profile"); assertLargeConstant("layer_Variable_1_read", search, Optional.of(10L)); assertLargeConstant("layer_Variable_read", search, Optional.of(7840L)); } @Test @Test public void testMacroGeneration() { final String expression = "join(reduce(join(join(join(constant(\"dnn_hidden2_Const\"), tf_macro_dnn_hidden2_add, f(a,b)(a * b)), tf_macro_dnn_hidden2_add, f(a,b)(max(a,b))), constant(\"dnn_outputs_weights_read\"), f(a,b)(a * b)), sum, d2), constant(\"dnn_outputs_bias_read\"), f(a,b)(a + b))"; final String macroExpression1 = "join(reduce(join(rename(input, (d0, d1), (d0, d4)), constant(\"dnn_hidden1_weights_read\"), f(a,b)(a * b)), sum, d4), constant(\"dnn_hidden1_bias_read\"), f(a,b)(a + b))"; final String macroExpression2 = "join(reduce(join(join(join(0.009999999776482582, tf_macro_dnn_hidden1_add, f(a,b)(a * b)), tf_macro_dnn_hidden1_add, f(a,b)(max(a,b))), constant(\"dnn_hidden2_weights_read\"), f(a,b)(a * 
b)), sum, d3), constant(\"dnn_hidden2_bias_read\"), f(a,b)(a + b))"; RankProfileSearchFixture search = fixtureWith("tensor(d0[1],d1[784])(0.0)", "tensorflow('mnist/saved')"); search.assertFirstPhaseExpression(expression, "my_profile"); search.assertMacro(macroExpression1, "tf_macro_dnn_hidden1_add", "my_profile"); search.assertMacro(macroExpression2, "tf_macro_dnn_hidden2_add", "my_profile"); } private void assertSmallConstant(String name, TensorType type, RankProfileSearchFixture search) { Value value = search.rankProfile("my_profile").getConstants().get(name); assertNotNull(value); assertEquals(type, value.type()); } /** * Verifies that the constant with the given name exists, and - only if an expected size is given - * that the content of the constant is available and has the expected size. */ private void assertLargeConstant(String name, RankProfileSearchFixture search, Optional<Long> expectedSize) { try { Path constantApplicationPackagePath = Path.fromString("models.generated/mnist_softmax/saved/constants").append(name + ".tbf"); RankingConstant rankingConstant = search.search().getRankingConstants().get(name); assertEquals(name, rankingConstant.getName()); assertTrue(rankingConstant.getFileName().endsWith(constantApplicationPackagePath.toString())); if (expectedSize.isPresent()) { Path constantPath = applicationDir.append(constantApplicationPackagePath); assertTrue("Constant file '" + constantPath + "' has been written", constantPath.toFile().exists()); Tensor deserializedConstant = TypedBinaryFormat.decode(Optional.empty(), GrowableByteBuffer.wrap(IOUtils.readFileBytes(constantPath.toFile()))); assertEquals(expectedSize.get().longValue(), deserializedConstant.size()); } } catch (IOException e) { throw new UncheckedIOException(e); } } private RankProfileSearchFixture fixtureWith(String placeholderExpression, String firstPhaseExpression) { return fixtureWith(placeholderExpression, firstPhaseExpression, null, null, "Placeholder", new 
StoringApplicationPackage(applicationDir)); } private RankProfileSearchFixture fixtureWith(String placeholderExpression, String firstPhaseExpression, String constant, String field) { return fixtureWith(placeholderExpression, firstPhaseExpression, constant, field, "Placeholder", new StoringApplicationPackage(applicationDir)); } private RankProfileSearchFixture fixtureWith(String macroExpression, String firstPhaseExpression, String constant, String field, String macroName, StoringApplicationPackage application) { try { return new RankProfileSearchFixture( application, application.getQueryProfiles(), " rank-profile my_profile {\n" + " macro " + macroName + "() {\n" + " expression: " + macroExpression + " }\n" + " first-phase {\n" + " expression: " + firstPhaseExpression + " }\n" + " }", constant, field); } catch (ParseException e) { throw new IllegalArgumentException(e); } } private static class StoringApplicationPackage extends MockApplicationPackage { private final File root; StoringApplicationPackage(Path applicationPackageWritableRoot) { this(applicationPackageWritableRoot, null, null); } StoringApplicationPackage(Path applicationPackageWritableRoot, String queryProfile, String queryProfileType) { super(null, null, Collections.emptyList(), null, null, null, false, queryProfile, queryProfileType); this.root = new File(applicationPackageWritableRoot.toString()); } @Override public File getFileReference(Path path) { return Path.fromString(root.toString()).append(path).toFile(); } @Override public ApplicationFile getFile(Path file) { return new StoringApplicationPackageFile(file, Path.fromString(root.toString())); } } private static class StoringApplicationPackageFile extends ApplicationFile { /** The path to the application package root */ private final Path root; /** The File pointing to the actual file represented by this */ private final File file; StoringApplicationPackageFile(Path filePath, Path applicationPackagePath) { super(filePath); this.root = 
applicationPackagePath; file = applicationPackagePath.append(filePath).toFile(); } @Override public boolean isDirectory() { return file.isDirectory(); } @Override public boolean exists() { return file.exists(); } @Override public Reader createReader() throws FileNotFoundException { try { if ( ! exists()) throw new FileNotFoundException("File '" + file + "' does not exist"); return IOUtils.createReader(file, "UTF-8"); } catch (IOException e) { throw new UncheckedIOException(e); } } @Override public InputStream createInputStream() throws FileNotFoundException { try { if ( ! exists()) throw new FileNotFoundException("File '" + file + "' does not exist"); return new BufferedInputStream(new FileInputStream(file)); } catch (IOException e) { throw new UncheckedIOException(e); } } @Override public ApplicationFile createDirectory() { file.mkdirs(); return this; } @Override public ApplicationFile writeFile(Reader input) { try { IOUtils.writeFile(file, IOUtils.readAll(input), false); return this; } catch (IOException e) { throw new UncheckedIOException(e); } } @Override public ApplicationFile appendFile(String value) { try { IOUtils.writeFile(file, value, true); return this; } catch (IOException e) { throw new UncheckedIOException(e); } } @Override public List<ApplicationFile> listFiles(PathFilter filter) { if ( ! 
isDirectory()) return Collections.emptyList(); return Arrays.stream(file.listFiles()).filter(f -> filter.accept(Path.fromString(f.toString()))) .map(f -> new StoringApplicationPackageFile(asApplicationRelativePath(f), root)) .collect(Collectors.toList()); } @Override public ApplicationFile delete() { file.delete(); return this; } @Override public MetaData getMetaData() { throw new UnsupportedOperationException(); } @Override public int compareTo(ApplicationFile other) { return this.getPath().getName().compareTo((other).getPath().getName()); } /** Strips the application package root path prefix from the path of the given file */ private Path asApplicationRelativePath(File file) { Path path = Path.fromString(file.toString()); Iterator<String> pathIterator = path.iterator(); for (Iterator<String> rootIterator = root.iterator(); rootIterator.hasNext(); ) { String rootElement = rootIterator.next(); String pathElement = pathIterator.next(); if ( ! rootElement.equals(pathElement)) throw new RuntimeException("Assumption broken"); } Path relative = Path.fromString(""); while (pathIterator.hasNext()) relative = relative.append(pathIterator.next()); return relative; } } }
class RankingExpressionWithTensorFlowTestCase { private final Path applicationDir = Path.fromString("src/test/integration/tensorflow/"); private final String vespaExpression = "join(reduce(join(rename(Placeholder, (d0, d1), (d0, d2)), constant(\"layer_Variable_read\"), f(a,b)(a * b)), sum, d2), constant(\"layer_Variable_1_read\"), f(a,b)(a + b))"; @After public void removeGeneratedConstantTensorFiles() { IOUtils.recursiveDeleteDir(applicationDir.append(ApplicationPackage.MODELS_GENERATED_DIR).toFile()); } @Test public void testTensorFlowReference() { RankProfileSearchFixture search = fixtureWith("tensor(d0[2],d1[784])(0.0)", "tensorflow('mnist_softmax/saved')"); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); assertLargeConstant("layer_Variable_1_read", search, Optional.of(10L)); assertLargeConstant("layer_Variable_read", search, Optional.of(7840L)); } @Test public void testTensorFlowReferenceWithConstantFeature() { RankProfileSearchFixture search = fixtureWith("constant(mytensor)", "tensorflow('mnist_softmax/saved')", "constant mytensor { file: ignored\ntype: tensor(d0[7],d1[784]) }", null); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); assertLargeConstant("layer_Variable_1_read", search, Optional.of(10L)); assertLargeConstant("layer_Variable_read", search, Optional.of(7840L)); } @Test public void testTensorFlowReferenceWithQueryFeature() { String queryProfile = "<query-profile id='default' type='root'/>"; String queryProfileType = "<query-profile-type id='root'>" + " <field name='query(mytensor)' type='tensor(d0[3],d1[784])'/>" + "</query-profile-type>"; StoringApplicationPackage application = new StoringApplicationPackage(applicationDir, queryProfile, queryProfileType); RankProfileSearchFixture search = fixtureWith("query(mytensor)", "tensorflow('mnist_softmax/saved')", null, null, "Placeholder", application); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); assertLargeConstant("layer_Variable_1_read", 
search, Optional.of(10L)); assertLargeConstant("layer_Variable_read", search, Optional.of(7840L)); } @Test public void testTensorFlowReferenceWithDocumentFeature() { StoringApplicationPackage application = new StoringApplicationPackage(applicationDir); RankProfileSearchFixture search = fixtureWith("attribute(mytensor)", "tensorflow('mnist_softmax/saved')", null, "field mytensor type tensor(d0[],d1[784]) { indexing: attribute }", "Placeholder", application); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); assertLargeConstant("layer_Variable_1_read", search, Optional.of(10L)); assertLargeConstant("layer_Variable_read", search, Optional.of(7840L)); } @Test public void testTensorFlowReferenceWithFeatureCombination() { String queryProfile = "<query-profile id='default' type='root'/>"; String queryProfileType = "<query-profile-type id='root'>" + " <field name='query(mytensor)' type='tensor(d0[3],d1[784],d2[10])'/>" + "</query-profile-type>"; StoringApplicationPackage application = new StoringApplicationPackage(applicationDir, queryProfile, queryProfileType); RankProfileSearchFixture search = fixtureWith("sum(query(mytensor) * attribute(mytensor) * constant(mytensor),d2)", "tensorflow('mnist_softmax/saved')", "constant mytensor { file: ignored\ntype: tensor(d0[7],d1[784]) }", "field mytensor type tensor(d0[],d1[784]) { indexing: attribute }", "Placeholder", application); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); assertLargeConstant("layer_Variable_1_read", search, Optional.of(10L)); assertLargeConstant("layer_Variable_read", search, Optional.of(7840L)); } @Test public void testNestedTensorFlowReference() { RankProfileSearchFixture search = fixtureWith("tensor(d0[2],d1[784])(0.0)", "5 + sum(tensorflow('mnist_softmax/saved'))"); search.assertFirstPhaseExpression("5 + reduce(" + vespaExpression + ", sum)", "my_profile"); assertLargeConstant("layer_Variable_1_read", search, Optional.of(10L)); assertLargeConstant("layer_Variable_read", 
search, Optional.of(7840L)); } @Test public void testTensorFlowReferenceSpecifyingSignature() { RankProfileSearchFixture search = fixtureWith("tensor(d0[2],d1[784])(0.0)", "tensorflow('mnist_softmax/saved', 'serving_default')"); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); } @Test public void testTensorFlowReferenceSpecifyingSignatureAndOutput() { RankProfileSearchFixture search = fixtureWith("tensor(d0[2],d1[784])(0.0)", "tensorflow('mnist_softmax/saved', 'serving_default', 'y')"); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); } @Test public void testTensorFlowReferenceMissingMacro() throws ParseException { try { RankProfileSearchFixture search = new RankProfileSearchFixture( new StoringApplicationPackage(applicationDir), new QueryProfileRegistry(), " rank-profile my_profile {\n" + " first-phase {\n" + " expression: tensorflow('mnist_softmax/saved')" + " }\n" + " }"); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); fail("Expecting exception"); } catch (IllegalArgumentException expected) { assertEquals("Rank profile 'my_profile' is invalid: Could not use tensorflow model from " + "tensorflow('mnist_softmax/saved'): " + "Model refers Placeholder 'Placeholder' of type tensor(d0[],d1[784]) but this macro is " + "not present in rank profile 'my_profile'", Exceptions.toMessageString(expected)); } } @Test public void testTensorFlowReferenceWithWrongMacroType() { try { RankProfileSearchFixture search = fixtureWith("tensor(d0[2],d5[10])(0.0)", "tensorflow('mnist_softmax/saved')"); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); fail("Expecting exception"); } catch (IllegalArgumentException expected) { assertEquals("Rank profile 'my_profile' is invalid: Could not use tensorflow model from " + "tensorflow('mnist_softmax/saved'): " + "Model refers Placeholder 'Placeholder' of type tensor(d0[],d1[784]) which must be produced " + "by a macro in the rank profile, but this macro produces type 
tensor(d0[2],d5[10])", Exceptions.toMessageString(expected)); } } @Test public void testTensorFlowReferenceSpecifyingNonExistingSignature() { try { RankProfileSearchFixture search = fixtureWith("tensor(d0[2],d1[784])(0.0)", "tensorflow('mnist_softmax/saved', 'serving_defaultz')"); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); fail("Expecting exception"); } catch (IllegalArgumentException expected) { assertEquals("Rank profile 'my_profile' is invalid: Could not use tensorflow model from " + "tensorflow('mnist_softmax/saved','serving_defaultz'): " + "Model does not have the specified signature 'serving_defaultz'", Exceptions.toMessageString(expected)); } } @Test public void testTensorFlowReferenceSpecifyingNonExistingOutput() { try { RankProfileSearchFixture search = fixtureWith("tensor(d0[2],d1[784])(0.0)", "tensorflow('mnist_softmax/saved', 'serving_default', 'x')"); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); fail("Expecting exception"); } catch (IllegalArgumentException expected) { assertEquals("Rank profile 'my_profile' is invalid: Could not use tensorflow model from " + "tensorflow('mnist_softmax/saved','serving_default','x'): " + "Model does not have the specified output 'x'", Exceptions.toMessageString(expected)); } } @Test public void testImportingFromStoredExpressions() throws IOException { RankProfileSearchFixture search = fixtureWith("tensor(d0[2],d1[784])(0.0)", "tensorflow('mnist_softmax/saved')"); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); assertLargeConstant("layer_Variable_1_read", search, Optional.of(10L)); assertLargeConstant("layer_Variable_read", search, Optional.of(7840L)); Path storedApplicationDirectory = applicationDir.getParentPath().append("copy"); try { storedApplicationDirectory.toFile().mkdirs(); IOUtils.copyDirectory(applicationDir.append(ApplicationPackage.MODELS_GENERATED_DIR).toFile(), storedApplicationDirectory.append(ApplicationPackage.MODELS_GENERATED_DIR).toFile()); 
StoringApplicationPackage storedApplication = new StoringApplicationPackage(storedApplicationDirectory); RankProfileSearchFixture searchFromStored = fixtureWith("tensor(d0[2],d1[784])(0.0)", "tensorflow('mnist_softmax/saved')", null, null, "Placeholder", storedApplication); searchFromStored.assertFirstPhaseExpression(vespaExpression, "my_profile"); assertLargeConstant("layer_Variable_1_read", searchFromStored, Optional.empty()); assertLargeConstant("layer_Variable_read", searchFromStored, Optional.empty()); } finally { IOUtils.recursiveDeleteDir(storedApplicationDirectory.toFile()); } } @Test public void testTensorFlowReduceBatchDimension() { final String expression = "join(join(reduce(join(reduce(rename(Placeholder, (d0, d1), (d0, d2)), sum, d0), constant(\"layer_Variable_read\"), f(a,b)(a * b)), sum, d2), constant(\"layer_Variable_1_read\"), f(a,b)(a + b)), tensor(d0[1])(1.0), f(a,b)(a * b))"; RankProfileSearchFixture search = fixtureWith("tensor(d0[1],d1[784])(0.0)", "tensorflow('mnist_softmax/saved')"); search.assertFirstPhaseExpression(expression, "my_profile"); assertLargeConstant("layer_Variable_1_read", search, Optional.of(10L)); assertLargeConstant("layer_Variable_read", search, Optional.of(7840L)); } @Test public void testMacroGeneration() { final String expression = "join(reduce(join(join(join(constant(\"dnn_hidden2_Const\"), tf_macro_dnn_hidden2_add, f(a,b)(a * b)), tf_macro_dnn_hidden2_add, f(a,b)(max(a,b))), constant(\"dnn_outputs_weights_read\"), f(a,b)(a * b)), sum, d2), constant(\"dnn_outputs_bias_read\"), f(a,b)(a + b))"; final String macroExpression1 = "join(reduce(join(rename(input, (d0, d1), (d0, d4)), constant(\"dnn_hidden1_weights_read\"), f(a,b)(a * b)), sum, d4), constant(\"dnn_hidden1_bias_read\"), f(a,b)(a + b))"; final String macroExpression2 = "join(reduce(join(join(join(0.009999999776482582, tf_macro_dnn_hidden1_add, f(a,b)(a * b)), tf_macro_dnn_hidden1_add, f(a,b)(max(a,b))), constant(\"dnn_hidden2_weights_read\"), f(a,b)(a * b)), 
sum, d3), constant(\"dnn_hidden2_bias_read\"), f(a,b)(a + b))"; RankProfileSearchFixture search = fixtureWith("tensor(d0[1],d1[784])(0.0)", "tensorflow('mnist/saved')"); search.assertFirstPhaseExpression(expression, "my_profile"); search.assertMacro(macroExpression1, "tf_macro_dnn_hidden1_add", "my_profile"); search.assertMacro(macroExpression2, "tf_macro_dnn_hidden2_add", "my_profile"); } @Test private void assertSmallConstant(String name, TensorType type, RankProfileSearchFixture search) { Value value = search.rankProfile("my_profile").getConstants().get(name); assertNotNull(value); assertEquals(type, value.type()); } /** * Verifies that the constant with the given name exists, and - only if an expected size is given - * that the content of the constant is available and has the expected size. */ private void assertLargeConstant(String name, RankProfileSearchFixture search, Optional<Long> expectedSize) { try { Path constantApplicationPackagePath = Path.fromString("models.generated/mnist_softmax/saved/constants").append(name + ".tbf"); RankingConstant rankingConstant = search.search().getRankingConstants().get(name); assertEquals(name, rankingConstant.getName()); assertTrue(rankingConstant.getFileName().endsWith(constantApplicationPackagePath.toString())); if (expectedSize.isPresent()) { Path constantPath = applicationDir.append(constantApplicationPackagePath); assertTrue("Constant file '" + constantPath + "' has been written", constantPath.toFile().exists()); Tensor deserializedConstant = TypedBinaryFormat.decode(Optional.empty(), GrowableByteBuffer.wrap(IOUtils.readFileBytes(constantPath.toFile()))); assertEquals(expectedSize.get().longValue(), deserializedConstant.size()); } } catch (IOException e) { throw new UncheckedIOException(e); } } private RankProfileSearchFixture fixtureWith(String placeholderExpression, String firstPhaseExpression) { return fixtureWith(placeholderExpression, firstPhaseExpression, null, null, "Placeholder", new 
StoringApplicationPackage(applicationDir)); } private RankProfileSearchFixture fixtureWith(String placeholderExpression, String firstPhaseExpression, String constant, String field) { return fixtureWith(placeholderExpression, firstPhaseExpression, constant, field, "Placeholder", new StoringApplicationPackage(applicationDir)); } private RankProfileSearchFixture fixtureWith(String macroExpression, String firstPhaseExpression, String constant, String field, String macroName, StoringApplicationPackage application) { try { return new RankProfileSearchFixture( application, application.getQueryProfiles(), " rank-profile my_profile {\n" + " macro " + macroName + "() {\n" + " expression: " + macroExpression + " }\n" + " first-phase {\n" + " expression: " + firstPhaseExpression + " }\n" + " }", constant, field); } catch (ParseException e) { throw new IllegalArgumentException(e); } } private static class StoringApplicationPackage extends MockApplicationPackage { private final File root; StoringApplicationPackage(Path applicationPackageWritableRoot) { this(applicationPackageWritableRoot, null, null); } StoringApplicationPackage(Path applicationPackageWritableRoot, String queryProfile, String queryProfileType) { super(null, null, Collections.emptyList(), null, null, null, false, queryProfile, queryProfileType); this.root = new File(applicationPackageWritableRoot.toString()); } @Override public File getFileReference(Path path) { return Path.fromString(root.toString()).append(path).toFile(); } @Override public ApplicationFile getFile(Path file) { return new StoringApplicationPackageFile(file, Path.fromString(root.toString())); } } private static class StoringApplicationPackageFile extends ApplicationFile { /** The path to the application package root */ private final Path root; /** The File pointing to the actual file represented by this */ private final File file; StoringApplicationPackageFile(Path filePath, Path applicationPackagePath) { super(filePath); this.root = 
applicationPackagePath; file = applicationPackagePath.append(filePath).toFile(); } @Override public boolean isDirectory() { return file.isDirectory(); } @Override public boolean exists() { return file.exists(); } @Override public Reader createReader() throws FileNotFoundException { try { if ( ! exists()) throw new FileNotFoundException("File '" + file + "' does not exist"); return IOUtils.createReader(file, "UTF-8"); } catch (IOException e) { throw new UncheckedIOException(e); } } @Override public InputStream createInputStream() throws FileNotFoundException { try { if ( ! exists()) throw new FileNotFoundException("File '" + file + "' does not exist"); return new BufferedInputStream(new FileInputStream(file)); } catch (IOException e) { throw new UncheckedIOException(e); } } @Override public ApplicationFile createDirectory() { file.mkdirs(); return this; } @Override public ApplicationFile writeFile(Reader input) { try { IOUtils.writeFile(file, IOUtils.readAll(input), false); return this; } catch (IOException e) { throw new UncheckedIOException(e); } } @Override public ApplicationFile appendFile(String value) { try { IOUtils.writeFile(file, value, true); return this; } catch (IOException e) { throw new UncheckedIOException(e); } } @Override public List<ApplicationFile> listFiles(PathFilter filter) { if ( ! 
isDirectory()) return Collections.emptyList(); return Arrays.stream(file.listFiles()).filter(f -> filter.accept(Path.fromString(f.toString()))) .map(f -> new StoringApplicationPackageFile(asApplicationRelativePath(f), root)) .collect(Collectors.toList()); } @Override public ApplicationFile delete() { file.delete(); return this; } @Override public MetaData getMetaData() { throw new UnsupportedOperationException(); } @Override public int compareTo(ApplicationFile other) { return this.getPath().getName().compareTo((other).getPath().getName()); } /** Strips the application package root path prefix from the path of the given file */ private Path asApplicationRelativePath(File file) { Path path = Path.fromString(file.toString()); Iterator<String> pathIterator = path.iterator(); for (Iterator<String> rootIterator = root.iterator(); rootIterator.hasNext(); ) { String rootElement = rootIterator.next(); String pathElement = pathIterator.next(); if ( ! rootElement.equals(pathElement)) throw new RuntimeException("Assumption broken"); } Path relative = Path.fromString(""); while (pathIterator.hasNext()) relative = relative.append(pathIterator.next()); return relative; } } }
Yes, but in the re-loaded model. In a real system the model is built from file, then later rebuilt from ZooKeeper, at which point the original TensorFlow model is no longer available. Only the "importFromStored" tests recreate this situation, so I think one of them needs to verify that the macros are present and correct after re-loading from ZooKeeper data.
/**
 * Verifies that generated small constants survive a "deploy from stored state" round trip:
 * the generated model files are copied to a fresh application directory (simulating a rebuild
 * from ZooKeeper, where the original TensorFlow model is unavailable), and the profile built
 * from the copy must produce the same first-phase expression and small constants.
 */
public void testImportingFromStoredExpressionsWithSmallConstants() throws IOException {
    final String expression = "join(reduce(join(join(join(constant(\"dnn_hidden2_Const\"), tf_macro_dnn_hidden2_add, f(a,b)(a * b)), tf_macro_dnn_hidden2_add, f(a,b)(max(a,b))), constant(\"dnn_outputs_weights_read\"), f(a,b)(a * b)), sum, d2), constant(\"dnn_outputs_bias_read\"), f(a,b)(a + b))";
    StoringApplicationPackage application = new StoringApplicationPackage(applicationDir);
    RankProfileSearchFixture search = fixtureWith("tensor(d0[1],d1[784])(0.0)",
                                                  "tensorflow('mnist/saved')",
                                                  null,
                                                  null,
                                                  "input",
                                                  application);
    search.assertFirstPhaseExpression(expression, "my_profile");
    assertSmallConstant("dnn_hidden2_Const", TensorType.fromSpec("tensor(d0[1])"), search);

    // Copy only the generated model files, then import from the copy: this mimics
    // reloading from ZooKeeper where the TensorFlow model itself is gone.
    Path storedApplicationDirectory = applicationDir.getParentPath().append("copy");
    try {
        storedApplicationDirectory.toFile().mkdirs();
        IOUtils.copyDirectory(applicationDir.append(ApplicationPackage.MODELS_GENERATED_DIR).toFile(),
                              storedApplicationDirectory.append(ApplicationPackage.MODELS_GENERATED_DIR).toFile());
        StoringApplicationPackage storedApplication = new StoringApplicationPackage(storedApplicationDirectory);
        // Use the same input dimensions as above: the stored expression was generated for d0[1]
        // (previously this used d0[2], which does not match the expected expression)
        RankProfileSearchFixture searchFromStored = fixtureWith("tensor(d0[1],d1[784])(0.0)",
                                                                "tensorflow('mnist/saved')",
                                                                null,
                                                                null,
                                                                "input",
                                                                storedApplication);
        searchFromStored.assertFirstPhaseExpression(expression, "my_profile");
        // Assert on the fixture created from the stored state, not the original one
        assertSmallConstant("dnn_hidden2_Const", TensorType.fromSpec("tensor(d0[1])"), searchFromStored);
    }
    finally {
        IOUtils.recursiveDeleteDir(storedApplicationDirectory.toFile());
    }
}
}
/**
 * Verifies that both generated small constants and generated macros survive a
 * "deploy from stored state" round trip: the generated model files are copied to a fresh
 * application directory (simulating a rebuild from ZooKeeper, where the original TensorFlow
 * model is unavailable), and the profile built from the copy must reproduce the same
 * first-phase expression, small constants and macros.
 */
public void testImportingFromStoredExpressionsWithSmallConstants() throws IOException {
    final String expression = "join(reduce(join(join(join(constant(\"dnn_hidden2_Const\"), tf_macro_dnn_hidden2_add, f(a,b)(a * b)), tf_macro_dnn_hidden2_add, f(a,b)(max(a,b))), constant(\"dnn_outputs_weights_read\"), f(a,b)(a * b)), sum, d2), constant(\"dnn_outputs_bias_read\"), f(a,b)(a + b))";
    final String macroExpression1 = "join(reduce(join(rename(input, (d0, d1), (d0, d4)), constant(\"dnn_hidden1_weights_read\"), f(a,b)(a * b)), sum, d4), constant(\"dnn_hidden1_bias_read\"), f(a,b)(a + b))";
    final String macroExpression2 = "join(reduce(join(join(join(0.009999999776482582, tf_macro_dnn_hidden1_add, f(a,b)(a * b)), tf_macro_dnn_hidden1_add, f(a,b)(max(a,b))), constant(\"dnn_hidden2_weights_read\"), f(a,b)(a * b)), sum, d3), constant(\"dnn_hidden2_bias_read\"), f(a,b)(a + b))";

    StoringApplicationPackage application = new StoringApplicationPackage(applicationDir);
    RankProfileSearchFixture search = fixtureWith("tensor(d0[1],d1[784])(0.0)",
                                                  "tensorflow('mnist/saved')",
                                                  null,
                                                  null,
                                                  "input",
                                                  application);
    search.assertFirstPhaseExpression(expression, "my_profile");
    assertSmallConstant("dnn_hidden2_Const", TensorType.fromSpec("tensor(d0[1])"), search);
    search.assertMacro(macroExpression1, "tf_macro_dnn_hidden1_add", "my_profile");
    search.assertMacro(macroExpression2, "tf_macro_dnn_hidden2_add", "my_profile");

    // Copy only the generated model files, then import from the copy: this mimics
    // reloading from ZooKeeper where the TensorFlow model itself is gone.
    Path storedApplicationDirectory = applicationDir.getParentPath().append("copy");
    try {
        storedApplicationDirectory.toFile().mkdirs();
        IOUtils.copyDirectory(applicationDir.append(ApplicationPackage.MODELS_GENERATED_DIR).toFile(),
                              storedApplicationDirectory.append(ApplicationPackage.MODELS_GENERATED_DIR).toFile());
        StoringApplicationPackage storedApplication = new StoringApplicationPackage(storedApplicationDirectory);
        RankProfileSearchFixture searchFromStored = fixtureWith("tensor(d0[1],d1[784])(0.0)",
                                                                "tensorflow('mnist/saved')",
                                                                null,
                                                                null,
                                                                "input",
                                                                storedApplication);
        searchFromStored.assertFirstPhaseExpression(expression, "my_profile");
        // Assert on the fixture created from the stored state, not the original one
        // (previously this checked 'search' again, so the reload path was untested)
        assertSmallConstant("dnn_hidden2_Const", TensorType.fromSpec("tensor(d0[1])"), searchFromStored);
        searchFromStored.assertMacro(macroExpression1, "tf_macro_dnn_hidden1_add", "my_profile");
        searchFromStored.assertMacro(macroExpression2, "tf_macro_dnn_hidden2_add", "my_profile");
    }
    finally {
        IOUtils.recursiveDeleteDir(storedApplicationDirectory.toFile());
    }
}
class RankingExpressionWithTensorFlowTestCase { private final Path applicationDir = Path.fromString("src/test/integration/tensorflow/"); private final String vespaExpression = "join(reduce(join(rename(Placeholder, (d0, d1), (d0, d2)), constant(\"layer_Variable_read\"), f(a,b)(a * b)), sum, d2), constant(\"layer_Variable_1_read\"), f(a,b)(a + b))"; @After public void removeGeneratedConstantTensorFiles() { IOUtils.recursiveDeleteDir(applicationDir.append(ApplicationPackage.MODELS_GENERATED_DIR).toFile()); } @Test public void testTensorFlowReference() { RankProfileSearchFixture search = fixtureWith("tensor(d0[2],d1[784])(0.0)", "tensorflow('mnist_softmax/saved')"); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); assertLargeConstant("layer_Variable_1_read", search, Optional.of(10L)); assertLargeConstant("layer_Variable_read", search, Optional.of(7840L)); } @Test public void testTensorFlowReferenceWithConstantFeature() { RankProfileSearchFixture search = fixtureWith("constant(mytensor)", "tensorflow('mnist_softmax/saved')", "constant mytensor { file: ignored\ntype: tensor(d0[7],d1[784]) }", null); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); assertLargeConstant("layer_Variable_1_read", search, Optional.of(10L)); assertLargeConstant("layer_Variable_read", search, Optional.of(7840L)); } @Test public void testTensorFlowReferenceWithQueryFeature() { String queryProfile = "<query-profile id='default' type='root'/>"; String queryProfileType = "<query-profile-type id='root'>" + " <field name='query(mytensor)' type='tensor(d0[3],d1[784])'/>" + "</query-profile-type>"; StoringApplicationPackage application = new StoringApplicationPackage(applicationDir, queryProfile, queryProfileType); RankProfileSearchFixture search = fixtureWith("query(mytensor)", "tensorflow('mnist_softmax/saved')", null, null, "Placeholder", application); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); assertLargeConstant("layer_Variable_1_read", 
search, Optional.of(10L)); assertLargeConstant("layer_Variable_read", search, Optional.of(7840L)); } @Test public void testTensorFlowReferenceWithDocumentFeature() { StoringApplicationPackage application = new StoringApplicationPackage(applicationDir); RankProfileSearchFixture search = fixtureWith("attribute(mytensor)", "tensorflow('mnist_softmax/saved')", null, "field mytensor type tensor(d0[],d1[784]) { indexing: attribute }", "Placeholder", application); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); assertLargeConstant("layer_Variable_1_read", search, Optional.of(10L)); assertLargeConstant("layer_Variable_read", search, Optional.of(7840L)); } @Test public void testTensorFlowReferenceWithFeatureCombination() { String queryProfile = "<query-profile id='default' type='root'/>"; String queryProfileType = "<query-profile-type id='root'>" + " <field name='query(mytensor)' type='tensor(d0[3],d1[784],d2[10])'/>" + "</query-profile-type>"; StoringApplicationPackage application = new StoringApplicationPackage(applicationDir, queryProfile, queryProfileType); RankProfileSearchFixture search = fixtureWith("sum(query(mytensor) * attribute(mytensor) * constant(mytensor),d2)", "tensorflow('mnist_softmax/saved')", "constant mytensor { file: ignored\ntype: tensor(d0[7],d1[784]) }", "field mytensor type tensor(d0[],d1[784]) { indexing: attribute }", "Placeholder", application); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); assertLargeConstant("layer_Variable_1_read", search, Optional.of(10L)); assertLargeConstant("layer_Variable_read", search, Optional.of(7840L)); } @Test public void testNestedTensorFlowReference() { RankProfileSearchFixture search = fixtureWith("tensor(d0[2],d1[784])(0.0)", "5 + sum(tensorflow('mnist_softmax/saved'))"); search.assertFirstPhaseExpression("5 + reduce(" + vespaExpression + ", sum)", "my_profile"); assertLargeConstant("layer_Variable_1_read", search, Optional.of(10L)); assertLargeConstant("layer_Variable_read", 
search, Optional.of(7840L)); } @Test public void testTensorFlowReferenceSpecifyingSignature() { RankProfileSearchFixture search = fixtureWith("tensor(d0[2],d1[784])(0.0)", "tensorflow('mnist_softmax/saved', 'serving_default')"); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); } @Test public void testTensorFlowReferenceSpecifyingSignatureAndOutput() { RankProfileSearchFixture search = fixtureWith("tensor(d0[2],d1[784])(0.0)", "tensorflow('mnist_softmax/saved', 'serving_default', 'y')"); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); } @Test public void testTensorFlowReferenceMissingMacro() throws ParseException { try { RankProfileSearchFixture search = new RankProfileSearchFixture( new StoringApplicationPackage(applicationDir), new QueryProfileRegistry(), " rank-profile my_profile {\n" + " first-phase {\n" + " expression: tensorflow('mnist_softmax/saved')" + " }\n" + " }"); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); fail("Expecting exception"); } catch (IllegalArgumentException expected) { assertEquals("Rank profile 'my_profile' is invalid: Could not use tensorflow model from " + "tensorflow('mnist_softmax/saved'): " + "Model refers Placeholder 'Placeholder' of type tensor(d0[],d1[784]) but this macro is " + "not present in rank profile 'my_profile'", Exceptions.toMessageString(expected)); } } @Test public void testTensorFlowReferenceWithWrongMacroType() { try { RankProfileSearchFixture search = fixtureWith("tensor(d0[2],d5[10])(0.0)", "tensorflow('mnist_softmax/saved')"); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); fail("Expecting exception"); } catch (IllegalArgumentException expected) { assertEquals("Rank profile 'my_profile' is invalid: Could not use tensorflow model from " + "tensorflow('mnist_softmax/saved'): " + "Model refers Placeholder 'Placeholder' of type tensor(d0[],d1[784]) which must be produced " + "by a macro in the rank profile, but this macro produces type 
tensor(d0[2],d5[10])", Exceptions.toMessageString(expected)); } } @Test public void testTensorFlowReferenceSpecifyingNonExistingSignature() { try { RankProfileSearchFixture search = fixtureWith("tensor(d0[2],d1[784])(0.0)", "tensorflow('mnist_softmax/saved', 'serving_defaultz')"); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); fail("Expecting exception"); } catch (IllegalArgumentException expected) { assertEquals("Rank profile 'my_profile' is invalid: Could not use tensorflow model from " + "tensorflow('mnist_softmax/saved','serving_defaultz'): " + "Model does not have the specified signature 'serving_defaultz'", Exceptions.toMessageString(expected)); } } @Test public void testTensorFlowReferenceSpecifyingNonExistingOutput() { try { RankProfileSearchFixture search = fixtureWith("tensor(d0[2],d1[784])(0.0)", "tensorflow('mnist_softmax/saved', 'serving_default', 'x')"); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); fail("Expecting exception"); } catch (IllegalArgumentException expected) { assertEquals("Rank profile 'my_profile' is invalid: Could not use tensorflow model from " + "tensorflow('mnist_softmax/saved','serving_default','x'): " + "Model does not have the specified output 'x'", Exceptions.toMessageString(expected)); } } @Test public void testImportingFromStoredExpressions() throws IOException { RankProfileSearchFixture search = fixtureWith("tensor(d0[2],d1[784])(0.0)", "tensorflow('mnist_softmax/saved')"); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); assertLargeConstant("layer_Variable_1_read", search, Optional.of(10L)); assertLargeConstant("layer_Variable_read", search, Optional.of(7840L)); Path storedApplicationDirectory = applicationDir.getParentPath().append("copy"); try { storedApplicationDirectory.toFile().mkdirs(); IOUtils.copyDirectory(applicationDir.append(ApplicationPackage.MODELS_GENERATED_DIR).toFile(), storedApplicationDirectory.append(ApplicationPackage.MODELS_GENERATED_DIR).toFile()); 
StoringApplicationPackage storedApplication = new StoringApplicationPackage(storedApplicationDirectory); RankProfileSearchFixture searchFromStored = fixtureWith("tensor(d0[2],d1[784])(0.0)", "tensorflow('mnist_softmax/saved')", null, null, "Placeholder", storedApplication); searchFromStored.assertFirstPhaseExpression(vespaExpression, "my_profile"); assertLargeConstant("layer_Variable_1_read", searchFromStored, Optional.empty()); assertLargeConstant("layer_Variable_read", searchFromStored, Optional.empty()); } finally { IOUtils.recursiveDeleteDir(storedApplicationDirectory.toFile()); } } @Test public void testTensorFlowReduceBatchDimension() { final String expression = "join(join(reduce(join(reduce(rename(Placeholder, (d0, d1), (d0, d2)), sum, d0), constant(\"layer_Variable_read\"), f(a,b)(a * b)), sum, d2), constant(\"layer_Variable_1_read\"), f(a,b)(a + b)), tensor(d0[1])(1.0), f(a,b)(a * b))"; RankProfileSearchFixture search = fixtureWith("tensor(d0[1],d1[784])(0.0)", "tensorflow('mnist_softmax/saved')"); search.assertFirstPhaseExpression(expression, "my_profile"); assertLargeConstant("layer_Variable_1_read", search, Optional.of(10L)); assertLargeConstant("layer_Variable_read", search, Optional.of(7840L)); } @Test @Test public void testMacroGeneration() { final String expression = "join(reduce(join(join(join(constant(\"dnn_hidden2_Const\"), tf_macro_dnn_hidden2_add, f(a,b)(a * b)), tf_macro_dnn_hidden2_add, f(a,b)(max(a,b))), constant(\"dnn_outputs_weights_read\"), f(a,b)(a * b)), sum, d2), constant(\"dnn_outputs_bias_read\"), f(a,b)(a + b))"; final String macroExpression1 = "join(reduce(join(rename(input, (d0, d1), (d0, d4)), constant(\"dnn_hidden1_weights_read\"), f(a,b)(a * b)), sum, d4), constant(\"dnn_hidden1_bias_read\"), f(a,b)(a + b))"; final String macroExpression2 = "join(reduce(join(join(join(0.009999999776482582, tf_macro_dnn_hidden1_add, f(a,b)(a * b)), tf_macro_dnn_hidden1_add, f(a,b)(max(a,b))), constant(\"dnn_hidden2_weights_read\"), f(a,b)(a * 
b)), sum, d3), constant(\"dnn_hidden2_bias_read\"), f(a,b)(a + b))"; RankProfileSearchFixture search = fixtureWith("tensor(d0[1],d1[784])(0.0)", "tensorflow('mnist/saved')"); search.assertFirstPhaseExpression(expression, "my_profile"); search.assertMacro(macroExpression1, "tf_macro_dnn_hidden1_add", "my_profile"); search.assertMacro(macroExpression2, "tf_macro_dnn_hidden2_add", "my_profile"); } private void assertSmallConstant(String name, TensorType type, RankProfileSearchFixture search) { Value value = search.rankProfile("my_profile").getConstants().get(name); assertNotNull(value); assertEquals(type, value.type()); } /** * Verifies that the constant with the given name exists, and - only if an expected size is given - * that the content of the constant is available and has the expected size. */ private void assertLargeConstant(String name, RankProfileSearchFixture search, Optional<Long> expectedSize) { try { Path constantApplicationPackagePath = Path.fromString("models.generated/mnist_softmax/saved/constants").append(name + ".tbf"); RankingConstant rankingConstant = search.search().getRankingConstants().get(name); assertEquals(name, rankingConstant.getName()); assertTrue(rankingConstant.getFileName().endsWith(constantApplicationPackagePath.toString())); if (expectedSize.isPresent()) { Path constantPath = applicationDir.append(constantApplicationPackagePath); assertTrue("Constant file '" + constantPath + "' has been written", constantPath.toFile().exists()); Tensor deserializedConstant = TypedBinaryFormat.decode(Optional.empty(), GrowableByteBuffer.wrap(IOUtils.readFileBytes(constantPath.toFile()))); assertEquals(expectedSize.get().longValue(), deserializedConstant.size()); } } catch (IOException e) { throw new UncheckedIOException(e); } } private RankProfileSearchFixture fixtureWith(String placeholderExpression, String firstPhaseExpression) { return fixtureWith(placeholderExpression, firstPhaseExpression, null, null, "Placeholder", new 
StoringApplicationPackage(applicationDir)); } private RankProfileSearchFixture fixtureWith(String placeholderExpression, String firstPhaseExpression, String constant, String field) { return fixtureWith(placeholderExpression, firstPhaseExpression, constant, field, "Placeholder", new StoringApplicationPackage(applicationDir)); } private RankProfileSearchFixture fixtureWith(String macroExpression, String firstPhaseExpression, String constant, String field, String macroName, StoringApplicationPackage application) { try { return new RankProfileSearchFixture( application, application.getQueryProfiles(), " rank-profile my_profile {\n" + " macro " + macroName + "() {\n" + " expression: " + macroExpression + " }\n" + " first-phase {\n" + " expression: " + firstPhaseExpression + " }\n" + " }", constant, field); } catch (ParseException e) { throw new IllegalArgumentException(e); } } private static class StoringApplicationPackage extends MockApplicationPackage { private final File root; StoringApplicationPackage(Path applicationPackageWritableRoot) { this(applicationPackageWritableRoot, null, null); } StoringApplicationPackage(Path applicationPackageWritableRoot, String queryProfile, String queryProfileType) { super(null, null, Collections.emptyList(), null, null, null, false, queryProfile, queryProfileType); this.root = new File(applicationPackageWritableRoot.toString()); } @Override public File getFileReference(Path path) { return Path.fromString(root.toString()).append(path).toFile(); } @Override public ApplicationFile getFile(Path file) { return new StoringApplicationPackageFile(file, Path.fromString(root.toString())); } } private static class StoringApplicationPackageFile extends ApplicationFile { /** The path to the application package root */ private final Path root; /** The File pointing to the actual file represented by this */ private final File file; StoringApplicationPackageFile(Path filePath, Path applicationPackagePath) { super(filePath); this.root = 
applicationPackagePath; file = applicationPackagePath.append(filePath).toFile(); } @Override public boolean isDirectory() { return file.isDirectory(); } @Override public boolean exists() { return file.exists(); } @Override public Reader createReader() throws FileNotFoundException { try { if ( ! exists()) throw new FileNotFoundException("File '" + file + "' does not exist"); return IOUtils.createReader(file, "UTF-8"); } catch (IOException e) { throw new UncheckedIOException(e); } } @Override public InputStream createInputStream() throws FileNotFoundException { try { if ( ! exists()) throw new FileNotFoundException("File '" + file + "' does not exist"); return new BufferedInputStream(new FileInputStream(file)); } catch (IOException e) { throw new UncheckedIOException(e); } } @Override public ApplicationFile createDirectory() { file.mkdirs(); return this; } @Override public ApplicationFile writeFile(Reader input) { try { IOUtils.writeFile(file, IOUtils.readAll(input), false); return this; } catch (IOException e) { throw new UncheckedIOException(e); } } @Override public ApplicationFile appendFile(String value) { try { IOUtils.writeFile(file, value, true); return this; } catch (IOException e) { throw new UncheckedIOException(e); } } @Override public List<ApplicationFile> listFiles(PathFilter filter) { if ( ! 
isDirectory()) return Collections.emptyList(); return Arrays.stream(file.listFiles()).filter(f -> filter.accept(Path.fromString(f.toString()))) .map(f -> new StoringApplicationPackageFile(asApplicationRelativePath(f), root)) .collect(Collectors.toList()); } @Override public ApplicationFile delete() { file.delete(); return this; } @Override public MetaData getMetaData() { throw new UnsupportedOperationException(); } @Override public int compareTo(ApplicationFile other) { return this.getPath().getName().compareTo((other).getPath().getName()); } /** Strips the application package root path prefix from the path of the given file */ private Path asApplicationRelativePath(File file) { Path path = Path.fromString(file.toString()); Iterator<String> pathIterator = path.iterator(); for (Iterator<String> rootIterator = root.iterator(); rootIterator.hasNext(); ) { String rootElement = rootIterator.next(); String pathElement = pathIterator.next(); if ( ! rootElement.equals(pathElement)) throw new RuntimeException("Assumption broken"); } Path relative = Path.fromString(""); while (pathIterator.hasNext()) relative = relative.append(pathIterator.next()); return relative; } } }
class RankingExpressionWithTensorFlowTestCase { private final Path applicationDir = Path.fromString("src/test/integration/tensorflow/"); private final String vespaExpression = "join(reduce(join(rename(Placeholder, (d0, d1), (d0, d2)), constant(\"layer_Variable_read\"), f(a,b)(a * b)), sum, d2), constant(\"layer_Variable_1_read\"), f(a,b)(a + b))"; @After public void removeGeneratedConstantTensorFiles() { IOUtils.recursiveDeleteDir(applicationDir.append(ApplicationPackage.MODELS_GENERATED_DIR).toFile()); } @Test public void testTensorFlowReference() { RankProfileSearchFixture search = fixtureWith("tensor(d0[2],d1[784])(0.0)", "tensorflow('mnist_softmax/saved')"); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); assertLargeConstant("layer_Variable_1_read", search, Optional.of(10L)); assertLargeConstant("layer_Variable_read", search, Optional.of(7840L)); } @Test public void testTensorFlowReferenceWithConstantFeature() { RankProfileSearchFixture search = fixtureWith("constant(mytensor)", "tensorflow('mnist_softmax/saved')", "constant mytensor { file: ignored\ntype: tensor(d0[7],d1[784]) }", null); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); assertLargeConstant("layer_Variable_1_read", search, Optional.of(10L)); assertLargeConstant("layer_Variable_read", search, Optional.of(7840L)); } @Test public void testTensorFlowReferenceWithQueryFeature() { String queryProfile = "<query-profile id='default' type='root'/>"; String queryProfileType = "<query-profile-type id='root'>" + " <field name='query(mytensor)' type='tensor(d0[3],d1[784])'/>" + "</query-profile-type>"; StoringApplicationPackage application = new StoringApplicationPackage(applicationDir, queryProfile, queryProfileType); RankProfileSearchFixture search = fixtureWith("query(mytensor)", "tensorflow('mnist_softmax/saved')", null, null, "Placeholder", application); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); assertLargeConstant("layer_Variable_1_read", 
search, Optional.of(10L)); assertLargeConstant("layer_Variable_read", search, Optional.of(7840L)); } @Test public void testTensorFlowReferenceWithDocumentFeature() { StoringApplicationPackage application = new StoringApplicationPackage(applicationDir); RankProfileSearchFixture search = fixtureWith("attribute(mytensor)", "tensorflow('mnist_softmax/saved')", null, "field mytensor type tensor(d0[],d1[784]) { indexing: attribute }", "Placeholder", application); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); assertLargeConstant("layer_Variable_1_read", search, Optional.of(10L)); assertLargeConstant("layer_Variable_read", search, Optional.of(7840L)); } @Test public void testTensorFlowReferenceWithFeatureCombination() { String queryProfile = "<query-profile id='default' type='root'/>"; String queryProfileType = "<query-profile-type id='root'>" + " <field name='query(mytensor)' type='tensor(d0[3],d1[784],d2[10])'/>" + "</query-profile-type>"; StoringApplicationPackage application = new StoringApplicationPackage(applicationDir, queryProfile, queryProfileType); RankProfileSearchFixture search = fixtureWith("sum(query(mytensor) * attribute(mytensor) * constant(mytensor),d2)", "tensorflow('mnist_softmax/saved')", "constant mytensor { file: ignored\ntype: tensor(d0[7],d1[784]) }", "field mytensor type tensor(d0[],d1[784]) { indexing: attribute }", "Placeholder", application); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); assertLargeConstant("layer_Variable_1_read", search, Optional.of(10L)); assertLargeConstant("layer_Variable_read", search, Optional.of(7840L)); } @Test public void testNestedTensorFlowReference() { RankProfileSearchFixture search = fixtureWith("tensor(d0[2],d1[784])(0.0)", "5 + sum(tensorflow('mnist_softmax/saved'))"); search.assertFirstPhaseExpression("5 + reduce(" + vespaExpression + ", sum)", "my_profile"); assertLargeConstant("layer_Variable_1_read", search, Optional.of(10L)); assertLargeConstant("layer_Variable_read", 
search, Optional.of(7840L)); } @Test public void testTensorFlowReferenceSpecifyingSignature() { RankProfileSearchFixture search = fixtureWith("tensor(d0[2],d1[784])(0.0)", "tensorflow('mnist_softmax/saved', 'serving_default')"); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); } @Test public void testTensorFlowReferenceSpecifyingSignatureAndOutput() { RankProfileSearchFixture search = fixtureWith("tensor(d0[2],d1[784])(0.0)", "tensorflow('mnist_softmax/saved', 'serving_default', 'y')"); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); } @Test public void testTensorFlowReferenceMissingMacro() throws ParseException { try { RankProfileSearchFixture search = new RankProfileSearchFixture( new StoringApplicationPackage(applicationDir), new QueryProfileRegistry(), " rank-profile my_profile {\n" + " first-phase {\n" + " expression: tensorflow('mnist_softmax/saved')" + " }\n" + " }"); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); fail("Expecting exception"); } catch (IllegalArgumentException expected) { assertEquals("Rank profile 'my_profile' is invalid: Could not use tensorflow model from " + "tensorflow('mnist_softmax/saved'): " + "Model refers Placeholder 'Placeholder' of type tensor(d0[],d1[784]) but this macro is " + "not present in rank profile 'my_profile'", Exceptions.toMessageString(expected)); } } @Test public void testTensorFlowReferenceWithWrongMacroType() { try { RankProfileSearchFixture search = fixtureWith("tensor(d0[2],d5[10])(0.0)", "tensorflow('mnist_softmax/saved')"); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); fail("Expecting exception"); } catch (IllegalArgumentException expected) { assertEquals("Rank profile 'my_profile' is invalid: Could not use tensorflow model from " + "tensorflow('mnist_softmax/saved'): " + "Model refers Placeholder 'Placeholder' of type tensor(d0[],d1[784]) which must be produced " + "by a macro in the rank profile, but this macro produces type 
tensor(d0[2],d5[10])", Exceptions.toMessageString(expected)); } } @Test public void testTensorFlowReferenceSpecifyingNonExistingSignature() { try { RankProfileSearchFixture search = fixtureWith("tensor(d0[2],d1[784])(0.0)", "tensorflow('mnist_softmax/saved', 'serving_defaultz')"); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); fail("Expecting exception"); } catch (IllegalArgumentException expected) { assertEquals("Rank profile 'my_profile' is invalid: Could not use tensorflow model from " + "tensorflow('mnist_softmax/saved','serving_defaultz'): " + "Model does not have the specified signature 'serving_defaultz'", Exceptions.toMessageString(expected)); } } @Test public void testTensorFlowReferenceSpecifyingNonExistingOutput() { try { RankProfileSearchFixture search = fixtureWith("tensor(d0[2],d1[784])(0.0)", "tensorflow('mnist_softmax/saved', 'serving_default', 'x')"); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); fail("Expecting exception"); } catch (IllegalArgumentException expected) { assertEquals("Rank profile 'my_profile' is invalid: Could not use tensorflow model from " + "tensorflow('mnist_softmax/saved','serving_default','x'): " + "Model does not have the specified output 'x'", Exceptions.toMessageString(expected)); } } @Test public void testImportingFromStoredExpressions() throws IOException { RankProfileSearchFixture search = fixtureWith("tensor(d0[2],d1[784])(0.0)", "tensorflow('mnist_softmax/saved')"); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); assertLargeConstant("layer_Variable_1_read", search, Optional.of(10L)); assertLargeConstant("layer_Variable_read", search, Optional.of(7840L)); Path storedApplicationDirectory = applicationDir.getParentPath().append("copy"); try { storedApplicationDirectory.toFile().mkdirs(); IOUtils.copyDirectory(applicationDir.append(ApplicationPackage.MODELS_GENERATED_DIR).toFile(), storedApplicationDirectory.append(ApplicationPackage.MODELS_GENERATED_DIR).toFile()); 
StoringApplicationPackage storedApplication = new StoringApplicationPackage(storedApplicationDirectory); RankProfileSearchFixture searchFromStored = fixtureWith("tensor(d0[2],d1[784])(0.0)", "tensorflow('mnist_softmax/saved')", null, null, "Placeholder", storedApplication); searchFromStored.assertFirstPhaseExpression(vespaExpression, "my_profile"); assertLargeConstant("layer_Variable_1_read", searchFromStored, Optional.empty()); assertLargeConstant("layer_Variable_read", searchFromStored, Optional.empty()); } finally { IOUtils.recursiveDeleteDir(storedApplicationDirectory.toFile()); } } @Test public void testTensorFlowReduceBatchDimension() { final String expression = "join(join(reduce(join(reduce(rename(Placeholder, (d0, d1), (d0, d2)), sum, d0), constant(\"layer_Variable_read\"), f(a,b)(a * b)), sum, d2), constant(\"layer_Variable_1_read\"), f(a,b)(a + b)), tensor(d0[1])(1.0), f(a,b)(a * b))"; RankProfileSearchFixture search = fixtureWith("tensor(d0[1],d1[784])(0.0)", "tensorflow('mnist_softmax/saved')"); search.assertFirstPhaseExpression(expression, "my_profile"); assertLargeConstant("layer_Variable_1_read", search, Optional.of(10L)); assertLargeConstant("layer_Variable_read", search, Optional.of(7840L)); } @Test public void testMacroGeneration() { final String expression = "join(reduce(join(join(join(constant(\"dnn_hidden2_Const\"), tf_macro_dnn_hidden2_add, f(a,b)(a * b)), tf_macro_dnn_hidden2_add, f(a,b)(max(a,b))), constant(\"dnn_outputs_weights_read\"), f(a,b)(a * b)), sum, d2), constant(\"dnn_outputs_bias_read\"), f(a,b)(a + b))"; final String macroExpression1 = "join(reduce(join(rename(input, (d0, d1), (d0, d4)), constant(\"dnn_hidden1_weights_read\"), f(a,b)(a * b)), sum, d4), constant(\"dnn_hidden1_bias_read\"), f(a,b)(a + b))"; final String macroExpression2 = "join(reduce(join(join(join(0.009999999776482582, tf_macro_dnn_hidden1_add, f(a,b)(a * b)), tf_macro_dnn_hidden1_add, f(a,b)(max(a,b))), constant(\"dnn_hidden2_weights_read\"), f(a,b)(a * b)), 
sum, d3), constant(\"dnn_hidden2_bias_read\"), f(a,b)(a + b))"; RankProfileSearchFixture search = fixtureWith("tensor(d0[1],d1[784])(0.0)", "tensorflow('mnist/saved')"); search.assertFirstPhaseExpression(expression, "my_profile"); search.assertMacro(macroExpression1, "tf_macro_dnn_hidden1_add", "my_profile"); search.assertMacro(macroExpression2, "tf_macro_dnn_hidden2_add", "my_profile"); } @Test private void assertSmallConstant(String name, TensorType type, RankProfileSearchFixture search) { Value value = search.rankProfile("my_profile").getConstants().get(name); assertNotNull(value); assertEquals(type, value.type()); } /** * Verifies that the constant with the given name exists, and - only if an expected size is given - * that the content of the constant is available and has the expected size. */ private void assertLargeConstant(String name, RankProfileSearchFixture search, Optional<Long> expectedSize) { try { Path constantApplicationPackagePath = Path.fromString("models.generated/mnist_softmax/saved/constants").append(name + ".tbf"); RankingConstant rankingConstant = search.search().getRankingConstants().get(name); assertEquals(name, rankingConstant.getName()); assertTrue(rankingConstant.getFileName().endsWith(constantApplicationPackagePath.toString())); if (expectedSize.isPresent()) { Path constantPath = applicationDir.append(constantApplicationPackagePath); assertTrue("Constant file '" + constantPath + "' has been written", constantPath.toFile().exists()); Tensor deserializedConstant = TypedBinaryFormat.decode(Optional.empty(), GrowableByteBuffer.wrap(IOUtils.readFileBytes(constantPath.toFile()))); assertEquals(expectedSize.get().longValue(), deserializedConstant.size()); } } catch (IOException e) { throw new UncheckedIOException(e); } } private RankProfileSearchFixture fixtureWith(String placeholderExpression, String firstPhaseExpression) { return fixtureWith(placeholderExpression, firstPhaseExpression, null, null, "Placeholder", new 
StoringApplicationPackage(applicationDir)); } private RankProfileSearchFixture fixtureWith(String placeholderExpression, String firstPhaseExpression, String constant, String field) { return fixtureWith(placeholderExpression, firstPhaseExpression, constant, field, "Placeholder", new StoringApplicationPackage(applicationDir)); } private RankProfileSearchFixture fixtureWith(String macroExpression, String firstPhaseExpression, String constant, String field, String macroName, StoringApplicationPackage application) { try { return new RankProfileSearchFixture( application, application.getQueryProfiles(), " rank-profile my_profile {\n" + " macro " + macroName + "() {\n" + " expression: " + macroExpression + " }\n" + " first-phase {\n" + " expression: " + firstPhaseExpression + " }\n" + " }", constant, field); } catch (ParseException e) { throw new IllegalArgumentException(e); } } private static class StoringApplicationPackage extends MockApplicationPackage { private final File root; StoringApplicationPackage(Path applicationPackageWritableRoot) { this(applicationPackageWritableRoot, null, null); } StoringApplicationPackage(Path applicationPackageWritableRoot, String queryProfile, String queryProfileType) { super(null, null, Collections.emptyList(), null, null, null, false, queryProfile, queryProfileType); this.root = new File(applicationPackageWritableRoot.toString()); } @Override public File getFileReference(Path path) { return Path.fromString(root.toString()).append(path).toFile(); } @Override public ApplicationFile getFile(Path file) { return new StoringApplicationPackageFile(file, Path.fromString(root.toString())); } } private static class StoringApplicationPackageFile extends ApplicationFile { /** The path to the application package root */ private final Path root; /** The File pointing to the actual file represented by this */ private final File file; StoringApplicationPackageFile(Path filePath, Path applicationPackagePath) { super(filePath); this.root = 
applicationPackagePath; file = applicationPackagePath.append(filePath).toFile(); } @Override public boolean isDirectory() { return file.isDirectory(); } @Override public boolean exists() { return file.exists(); } @Override public Reader createReader() throws FileNotFoundException { try { if ( ! exists()) throw new FileNotFoundException("File '" + file + "' does not exist"); return IOUtils.createReader(file, "UTF-8"); } catch (IOException e) { throw new UncheckedIOException(e); } } @Override public InputStream createInputStream() throws FileNotFoundException { try { if ( ! exists()) throw new FileNotFoundException("File '" + file + "' does not exist"); return new BufferedInputStream(new FileInputStream(file)); } catch (IOException e) { throw new UncheckedIOException(e); } } @Override public ApplicationFile createDirectory() { file.mkdirs(); return this; } @Override public ApplicationFile writeFile(Reader input) { try { IOUtils.writeFile(file, IOUtils.readAll(input), false); return this; } catch (IOException e) { throw new UncheckedIOException(e); } } @Override public ApplicationFile appendFile(String value) { try { IOUtils.writeFile(file, value, true); return this; } catch (IOException e) { throw new UncheckedIOException(e); } } @Override public List<ApplicationFile> listFiles(PathFilter filter) { if ( ! 
isDirectory()) return Collections.emptyList(); return Arrays.stream(file.listFiles()).filter(f -> filter.accept(Path.fromString(f.toString()))) .map(f -> new StoringApplicationPackageFile(asApplicationRelativePath(f), root)) .collect(Collectors.toList()); } @Override public ApplicationFile delete() { file.delete(); return this; } @Override public MetaData getMetaData() { throw new UnsupportedOperationException(); } @Override public int compareTo(ApplicationFile other) { return this.getPath().getName().compareTo((other).getPath().getName()); } /** Strips the application package root path prefix from the path of the given file */ private Path asApplicationRelativePath(File file) { Path path = Path.fromString(file.toString()); Iterator<String> pathIterator = path.iterator(); for (Iterator<String> rootIterator = root.iterator(); rootIterator.hasNext(); ) { String rootElement = rootIterator.next(); String pathElement = pathIterator.next(); if ( ! rootElement.equals(pathElement)) throw new RuntimeException("Assumption broken"); } Path relative = Path.fromString(""); while (pathIterator.hasNext()) relative = relative.append(pathIterator.next()); return relative; } } }
// Ahh I see, thanks.  (stray code-review reply; commented out so it is not parsed as code)
/**
 * Verifies that a model with small (inline) constants can be imported, that the generated files can be
 * copied to another application package, and that importing from that stored copy yields the same profile.
 */
@Test // was missing: without @Test this method was silently never run by JUnit
public void testImportingFromStoredExpressionsWithSmallConstants() throws IOException {
    final String expression = "join(reduce(join(join(join(constant(\"dnn_hidden2_Const\"), tf_macro_dnn_hidden2_add, f(a,b)(a * b)), tf_macro_dnn_hidden2_add, f(a,b)(max(a,b))), constant(\"dnn_outputs_weights_read\"), f(a,b)(a * b)), sum, d2), constant(\"dnn_outputs_bias_read\"), f(a,b)(a + b))";
    StoringApplicationPackage application = new StoringApplicationPackage(applicationDir);
    RankProfileSearchFixture search = fixtureWith("tensor(d0[1],d1[784])(0.0)",
                                                  "tensorflow('mnist/saved')",
                                                  null,
                                                  null,
                                                  "input",
                                                  application);
    search.assertFirstPhaseExpression(expression, "my_profile");
    assertSmallConstant("dnn_hidden2_Const", TensorType.fromSpec("tensor(d0[1])"), search);

    // Copy the generated model files to a second application package and import from there
    Path storedApplicationDirectory = applicationDir.getParentPath().append("copy");
    try {
        storedApplicationDirectory.toFile().mkdirs();
        IOUtils.copyDirectory(applicationDir.append(ApplicationPackage.MODELS_GENERATED_DIR).toFile(),
                              storedApplicationDirectory.append(ApplicationPackage.MODELS_GENERATED_DIR).toFile());
        StoringApplicationPackage storedApplication = new StoringApplicationPackage(storedApplicationDirectory);
        // NOTE(review): the stored fixture declares d0[2] while the original declares d0[1] — confirm this is intentional
        RankProfileSearchFixture searchFromStored = fixtureWith("tensor(d0[2],d1[784])(0.0)",
                                                                "tensorflow('mnist/saved')",
                                                                null,
                                                                null,
                                                                "input",
                                                                storedApplication);
        searchFromStored.assertFirstPhaseExpression(expression, "my_profile");
        // Fixed: was asserting on 'search' (the original fixture), which made this check a no-op for the stored copy
        assertSmallConstant("dnn_hidden2_Const", TensorType.fromSpec("tensor(d0[1])"), searchFromStored);
    }
    finally {
        IOUtils.recursiveDeleteDir(storedApplicationDirectory.toFile());
    }
}
}
/**
 * Verifies that a model with small constants and generated macros survives a round trip through
 * generated-model files stored in a copied application package: both the first-phase expression,
 * the small constant and the generated macros must be identical when importing from the stored copy.
 */
@Test // was missing: without @Test this method was silently never run by JUnit
public void testImportingFromStoredExpressionsWithSmallConstants() throws IOException {
    final String expression = "join(reduce(join(join(join(constant(\"dnn_hidden2_Const\"), tf_macro_dnn_hidden2_add, f(a,b)(a * b)), tf_macro_dnn_hidden2_add, f(a,b)(max(a,b))), constant(\"dnn_outputs_weights_read\"), f(a,b)(a * b)), sum, d2), constant(\"dnn_outputs_bias_read\"), f(a,b)(a + b))";
    final String macroExpression1 = "join(reduce(join(rename(input, (d0, d1), (d0, d4)), constant(\"dnn_hidden1_weights_read\"), f(a,b)(a * b)), sum, d4), constant(\"dnn_hidden1_bias_read\"), f(a,b)(a + b))";
    final String macroExpression2 = "join(reduce(join(join(join(0.009999999776482582, tf_macro_dnn_hidden1_add, f(a,b)(a * b)), tf_macro_dnn_hidden1_add, f(a,b)(max(a,b))), constant(\"dnn_hidden2_weights_read\"), f(a,b)(a * b)), sum, d3), constant(\"dnn_hidden2_bias_read\"), f(a,b)(a + b))";

    StoringApplicationPackage application = new StoringApplicationPackage(applicationDir);
    RankProfileSearchFixture search = fixtureWith("tensor(d0[1],d1[784])(0.0)",
                                                  "tensorflow('mnist/saved')",
                                                  null,
                                                  null,
                                                  "input",
                                                  application);
    search.assertFirstPhaseExpression(expression, "my_profile");
    assertSmallConstant("dnn_hidden2_Const", TensorType.fromSpec("tensor(d0[1])"), search);
    search.assertMacro(macroExpression1, "tf_macro_dnn_hidden1_add", "my_profile");
    search.assertMacro(macroExpression2, "tf_macro_dnn_hidden2_add", "my_profile");

    // Copy the generated model files to a second application package and import from there
    Path storedApplicationDirectory = applicationDir.getParentPath().append("copy");
    try {
        storedApplicationDirectory.toFile().mkdirs();
        IOUtils.copyDirectory(applicationDir.append(ApplicationPackage.MODELS_GENERATED_DIR).toFile(),
                              storedApplicationDirectory.append(ApplicationPackage.MODELS_GENERATED_DIR).toFile());
        StoringApplicationPackage storedApplication = new StoringApplicationPackage(storedApplicationDirectory);
        RankProfileSearchFixture searchFromStored = fixtureWith("tensor(d0[1],d1[784])(0.0)",
                                                                "tensorflow('mnist/saved')",
                                                                null,
                                                                null,
                                                                "input",
                                                                storedApplication);
        searchFromStored.assertFirstPhaseExpression(expression, "my_profile");
        // Fixed: was asserting on 'search' (the original fixture), which made this check a no-op for the stored copy
        assertSmallConstant("dnn_hidden2_Const", TensorType.fromSpec("tensor(d0[1])"), searchFromStored);
        searchFromStored.assertMacro(macroExpression1, "tf_macro_dnn_hidden1_add", "my_profile");
        searchFromStored.assertMacro(macroExpression2, "tf_macro_dnn_hidden2_add", "my_profile");
    }
    finally {
        IOUtils.recursiveDeleteDir(storedApplicationDirectory.toFile());
    }
}
class RankingExpressionWithTensorFlowTestCase { private final Path applicationDir = Path.fromString("src/test/integration/tensorflow/"); private final String vespaExpression = "join(reduce(join(rename(Placeholder, (d0, d1), (d0, d2)), constant(\"layer_Variable_read\"), f(a,b)(a * b)), sum, d2), constant(\"layer_Variable_1_read\"), f(a,b)(a + b))"; @After public void removeGeneratedConstantTensorFiles() { IOUtils.recursiveDeleteDir(applicationDir.append(ApplicationPackage.MODELS_GENERATED_DIR).toFile()); } @Test public void testTensorFlowReference() { RankProfileSearchFixture search = fixtureWith("tensor(d0[2],d1[784])(0.0)", "tensorflow('mnist_softmax/saved')"); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); assertLargeConstant("layer_Variable_1_read", search, Optional.of(10L)); assertLargeConstant("layer_Variable_read", search, Optional.of(7840L)); } @Test public void testTensorFlowReferenceWithConstantFeature() { RankProfileSearchFixture search = fixtureWith("constant(mytensor)", "tensorflow('mnist_softmax/saved')", "constant mytensor { file: ignored\ntype: tensor(d0[7],d1[784]) }", null); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); assertLargeConstant("layer_Variable_1_read", search, Optional.of(10L)); assertLargeConstant("layer_Variable_read", search, Optional.of(7840L)); } @Test public void testTensorFlowReferenceWithQueryFeature() { String queryProfile = "<query-profile id='default' type='root'/>"; String queryProfileType = "<query-profile-type id='root'>" + " <field name='query(mytensor)' type='tensor(d0[3],d1[784])'/>" + "</query-profile-type>"; StoringApplicationPackage application = new StoringApplicationPackage(applicationDir, queryProfile, queryProfileType); RankProfileSearchFixture search = fixtureWith("query(mytensor)", "tensorflow('mnist_softmax/saved')", null, null, "Placeholder", application); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); assertLargeConstant("layer_Variable_1_read", 
search, Optional.of(10L)); assertLargeConstant("layer_Variable_read", search, Optional.of(7840L)); } @Test public void testTensorFlowReferenceWithDocumentFeature() { StoringApplicationPackage application = new StoringApplicationPackage(applicationDir); RankProfileSearchFixture search = fixtureWith("attribute(mytensor)", "tensorflow('mnist_softmax/saved')", null, "field mytensor type tensor(d0[],d1[784]) { indexing: attribute }", "Placeholder", application); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); assertLargeConstant("layer_Variable_1_read", search, Optional.of(10L)); assertLargeConstant("layer_Variable_read", search, Optional.of(7840L)); } @Test public void testTensorFlowReferenceWithFeatureCombination() { String queryProfile = "<query-profile id='default' type='root'/>"; String queryProfileType = "<query-profile-type id='root'>" + " <field name='query(mytensor)' type='tensor(d0[3],d1[784],d2[10])'/>" + "</query-profile-type>"; StoringApplicationPackage application = new StoringApplicationPackage(applicationDir, queryProfile, queryProfileType); RankProfileSearchFixture search = fixtureWith("sum(query(mytensor) * attribute(mytensor) * constant(mytensor),d2)", "tensorflow('mnist_softmax/saved')", "constant mytensor { file: ignored\ntype: tensor(d0[7],d1[784]) }", "field mytensor type tensor(d0[],d1[784]) { indexing: attribute }", "Placeholder", application); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); assertLargeConstant("layer_Variable_1_read", search, Optional.of(10L)); assertLargeConstant("layer_Variable_read", search, Optional.of(7840L)); } @Test public void testNestedTensorFlowReference() { RankProfileSearchFixture search = fixtureWith("tensor(d0[2],d1[784])(0.0)", "5 + sum(tensorflow('mnist_softmax/saved'))"); search.assertFirstPhaseExpression("5 + reduce(" + vespaExpression + ", sum)", "my_profile"); assertLargeConstant("layer_Variable_1_read", search, Optional.of(10L)); assertLargeConstant("layer_Variable_read", 
search, Optional.of(7840L)); } @Test public void testTensorFlowReferenceSpecifyingSignature() { RankProfileSearchFixture search = fixtureWith("tensor(d0[2],d1[784])(0.0)", "tensorflow('mnist_softmax/saved', 'serving_default')"); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); } @Test public void testTensorFlowReferenceSpecifyingSignatureAndOutput() { RankProfileSearchFixture search = fixtureWith("tensor(d0[2],d1[784])(0.0)", "tensorflow('mnist_softmax/saved', 'serving_default', 'y')"); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); } @Test public void testTensorFlowReferenceMissingMacro() throws ParseException { try { RankProfileSearchFixture search = new RankProfileSearchFixture( new StoringApplicationPackage(applicationDir), new QueryProfileRegistry(), " rank-profile my_profile {\n" + " first-phase {\n" + " expression: tensorflow('mnist_softmax/saved')" + " }\n" + " }"); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); fail("Expecting exception"); } catch (IllegalArgumentException expected) { assertEquals("Rank profile 'my_profile' is invalid: Could not use tensorflow model from " + "tensorflow('mnist_softmax/saved'): " + "Model refers Placeholder 'Placeholder' of type tensor(d0[],d1[784]) but this macro is " + "not present in rank profile 'my_profile'", Exceptions.toMessageString(expected)); } } @Test public void testTensorFlowReferenceWithWrongMacroType() { try { RankProfileSearchFixture search = fixtureWith("tensor(d0[2],d5[10])(0.0)", "tensorflow('mnist_softmax/saved')"); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); fail("Expecting exception"); } catch (IllegalArgumentException expected) { assertEquals("Rank profile 'my_profile' is invalid: Could not use tensorflow model from " + "tensorflow('mnist_softmax/saved'): " + "Model refers Placeholder 'Placeholder' of type tensor(d0[],d1[784]) which must be produced " + "by a macro in the rank profile, but this macro produces type 
tensor(d0[2],d5[10])", Exceptions.toMessageString(expected)); } } @Test public void testTensorFlowReferenceSpecifyingNonExistingSignature() { try { RankProfileSearchFixture search = fixtureWith("tensor(d0[2],d1[784])(0.0)", "tensorflow('mnist_softmax/saved', 'serving_defaultz')"); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); fail("Expecting exception"); } catch (IllegalArgumentException expected) { assertEquals("Rank profile 'my_profile' is invalid: Could not use tensorflow model from " + "tensorflow('mnist_softmax/saved','serving_defaultz'): " + "Model does not have the specified signature 'serving_defaultz'", Exceptions.toMessageString(expected)); } } @Test public void testTensorFlowReferenceSpecifyingNonExistingOutput() { try { RankProfileSearchFixture search = fixtureWith("tensor(d0[2],d1[784])(0.0)", "tensorflow('mnist_softmax/saved', 'serving_default', 'x')"); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); fail("Expecting exception"); } catch (IllegalArgumentException expected) { assertEquals("Rank profile 'my_profile' is invalid: Could not use tensorflow model from " + "tensorflow('mnist_softmax/saved','serving_default','x'): " + "Model does not have the specified output 'x'", Exceptions.toMessageString(expected)); } } @Test public void testImportingFromStoredExpressions() throws IOException { RankProfileSearchFixture search = fixtureWith("tensor(d0[2],d1[784])(0.0)", "tensorflow('mnist_softmax/saved')"); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); assertLargeConstant("layer_Variable_1_read", search, Optional.of(10L)); assertLargeConstant("layer_Variable_read", search, Optional.of(7840L)); Path storedApplicationDirectory = applicationDir.getParentPath().append("copy"); try { storedApplicationDirectory.toFile().mkdirs(); IOUtils.copyDirectory(applicationDir.append(ApplicationPackage.MODELS_GENERATED_DIR).toFile(), storedApplicationDirectory.append(ApplicationPackage.MODELS_GENERATED_DIR).toFile()); 
StoringApplicationPackage storedApplication = new StoringApplicationPackage(storedApplicationDirectory); RankProfileSearchFixture searchFromStored = fixtureWith("tensor(d0[2],d1[784])(0.0)", "tensorflow('mnist_softmax/saved')", null, null, "Placeholder", storedApplication); searchFromStored.assertFirstPhaseExpression(vespaExpression, "my_profile"); assertLargeConstant("layer_Variable_1_read", searchFromStored, Optional.empty()); assertLargeConstant("layer_Variable_read", searchFromStored, Optional.empty()); } finally { IOUtils.recursiveDeleteDir(storedApplicationDirectory.toFile()); } } @Test public void testTensorFlowReduceBatchDimension() { final String expression = "join(join(reduce(join(reduce(rename(Placeholder, (d0, d1), (d0, d2)), sum, d0), constant(\"layer_Variable_read\"), f(a,b)(a * b)), sum, d2), constant(\"layer_Variable_1_read\"), f(a,b)(a + b)), tensor(d0[1])(1.0), f(a,b)(a * b))"; RankProfileSearchFixture search = fixtureWith("tensor(d0[1],d1[784])(0.0)", "tensorflow('mnist_softmax/saved')"); search.assertFirstPhaseExpression(expression, "my_profile"); assertLargeConstant("layer_Variable_1_read", search, Optional.of(10L)); assertLargeConstant("layer_Variable_read", search, Optional.of(7840L)); } @Test @Test public void testMacroGeneration() { final String expression = "join(reduce(join(join(join(constant(\"dnn_hidden2_Const\"), tf_macro_dnn_hidden2_add, f(a,b)(a * b)), tf_macro_dnn_hidden2_add, f(a,b)(max(a,b))), constant(\"dnn_outputs_weights_read\"), f(a,b)(a * b)), sum, d2), constant(\"dnn_outputs_bias_read\"), f(a,b)(a + b))"; final String macroExpression1 = "join(reduce(join(rename(input, (d0, d1), (d0, d4)), constant(\"dnn_hidden1_weights_read\"), f(a,b)(a * b)), sum, d4), constant(\"dnn_hidden1_bias_read\"), f(a,b)(a + b))"; final String macroExpression2 = "join(reduce(join(join(join(0.009999999776482582, tf_macro_dnn_hidden1_add, f(a,b)(a * b)), tf_macro_dnn_hidden1_add, f(a,b)(max(a,b))), constant(\"dnn_hidden2_weights_read\"), f(a,b)(a * 
b)), sum, d3), constant(\"dnn_hidden2_bias_read\"), f(a,b)(a + b))"; RankProfileSearchFixture search = fixtureWith("tensor(d0[1],d1[784])(0.0)", "tensorflow('mnist/saved')"); search.assertFirstPhaseExpression(expression, "my_profile"); search.assertMacro(macroExpression1, "tf_macro_dnn_hidden1_add", "my_profile"); search.assertMacro(macroExpression2, "tf_macro_dnn_hidden2_add", "my_profile"); } private void assertSmallConstant(String name, TensorType type, RankProfileSearchFixture search) { Value value = search.rankProfile("my_profile").getConstants().get(name); assertNotNull(value); assertEquals(type, value.type()); } /** * Verifies that the constant with the given name exists, and - only if an expected size is given - * that the content of the constant is available and has the expected size. */ private void assertLargeConstant(String name, RankProfileSearchFixture search, Optional<Long> expectedSize) { try { Path constantApplicationPackagePath = Path.fromString("models.generated/mnist_softmax/saved/constants").append(name + ".tbf"); RankingConstant rankingConstant = search.search().getRankingConstants().get(name); assertEquals(name, rankingConstant.getName()); assertTrue(rankingConstant.getFileName().endsWith(constantApplicationPackagePath.toString())); if (expectedSize.isPresent()) { Path constantPath = applicationDir.append(constantApplicationPackagePath); assertTrue("Constant file '" + constantPath + "' has been written", constantPath.toFile().exists()); Tensor deserializedConstant = TypedBinaryFormat.decode(Optional.empty(), GrowableByteBuffer.wrap(IOUtils.readFileBytes(constantPath.toFile()))); assertEquals(expectedSize.get().longValue(), deserializedConstant.size()); } } catch (IOException e) { throw new UncheckedIOException(e); } } private RankProfileSearchFixture fixtureWith(String placeholderExpression, String firstPhaseExpression) { return fixtureWith(placeholderExpression, firstPhaseExpression, null, null, "Placeholder", new 
StoringApplicationPackage(applicationDir)); } private RankProfileSearchFixture fixtureWith(String placeholderExpression, String firstPhaseExpression, String constant, String field) { return fixtureWith(placeholderExpression, firstPhaseExpression, constant, field, "Placeholder", new StoringApplicationPackage(applicationDir)); } private RankProfileSearchFixture fixtureWith(String macroExpression, String firstPhaseExpression, String constant, String field, String macroName, StoringApplicationPackage application) { try { return new RankProfileSearchFixture( application, application.getQueryProfiles(), " rank-profile my_profile {\n" + " macro " + macroName + "() {\n" + " expression: " + macroExpression + " }\n" + " first-phase {\n" + " expression: " + firstPhaseExpression + " }\n" + " }", constant, field); } catch (ParseException e) { throw new IllegalArgumentException(e); } } private static class StoringApplicationPackage extends MockApplicationPackage { private final File root; StoringApplicationPackage(Path applicationPackageWritableRoot) { this(applicationPackageWritableRoot, null, null); } StoringApplicationPackage(Path applicationPackageWritableRoot, String queryProfile, String queryProfileType) { super(null, null, Collections.emptyList(), null, null, null, false, queryProfile, queryProfileType); this.root = new File(applicationPackageWritableRoot.toString()); } @Override public File getFileReference(Path path) { return Path.fromString(root.toString()).append(path).toFile(); } @Override public ApplicationFile getFile(Path file) { return new StoringApplicationPackageFile(file, Path.fromString(root.toString())); } } private static class StoringApplicationPackageFile extends ApplicationFile { /** The path to the application package root */ private final Path root; /** The File pointing to the actual file represented by this */ private final File file; StoringApplicationPackageFile(Path filePath, Path applicationPackagePath) { super(filePath); this.root = 
applicationPackagePath; file = applicationPackagePath.append(filePath).toFile(); } @Override public boolean isDirectory() { return file.isDirectory(); } @Override public boolean exists() { return file.exists(); } @Override public Reader createReader() throws FileNotFoundException { try { if ( ! exists()) throw new FileNotFoundException("File '" + file + "' does not exist"); return IOUtils.createReader(file, "UTF-8"); } catch (IOException e) { throw new UncheckedIOException(e); } } @Override public InputStream createInputStream() throws FileNotFoundException { try { if ( ! exists()) throw new FileNotFoundException("File '" + file + "' does not exist"); return new BufferedInputStream(new FileInputStream(file)); } catch (IOException e) { throw new UncheckedIOException(e); } } @Override public ApplicationFile createDirectory() { file.mkdirs(); return this; } @Override public ApplicationFile writeFile(Reader input) { try { IOUtils.writeFile(file, IOUtils.readAll(input), false); return this; } catch (IOException e) { throw new UncheckedIOException(e); } } @Override public ApplicationFile appendFile(String value) { try { IOUtils.writeFile(file, value, true); return this; } catch (IOException e) { throw new UncheckedIOException(e); } } @Override public List<ApplicationFile> listFiles(PathFilter filter) { if ( ! 
isDirectory()) return Collections.emptyList(); return Arrays.stream(file.listFiles()).filter(f -> filter.accept(Path.fromString(f.toString()))) .map(f -> new StoringApplicationPackageFile(asApplicationRelativePath(f), root)) .collect(Collectors.toList()); } @Override public ApplicationFile delete() { file.delete(); return this; } @Override public MetaData getMetaData() { throw new UnsupportedOperationException(); } @Override public int compareTo(ApplicationFile other) { return this.getPath().getName().compareTo((other).getPath().getName()); } /** Strips the application package root path prefix from the path of the given file */ private Path asApplicationRelativePath(File file) { Path path = Path.fromString(file.toString()); Iterator<String> pathIterator = path.iterator(); for (Iterator<String> rootIterator = root.iterator(); rootIterator.hasNext(); ) { String rootElement = rootIterator.next(); String pathElement = pathIterator.next(); if ( ! rootElement.equals(pathElement)) throw new RuntimeException("Assumption broken"); } Path relative = Path.fromString(""); while (pathIterator.hasNext()) relative = relative.append(pathIterator.next()); return relative; } } }
/**
 * Tests rank profiles that import TensorFlow models through the tensorflow(...) ranking feature:
 * conversion to Vespa ranking expressions, macro generation, and writing/reading of the
 * generated large constant tensors under the application package.
 */
class RankingExpressionWithTensorFlowTestCase { private final Path applicationDir = Path.fromString("src/test/integration/tensorflow/"); private final String vespaExpression = "join(reduce(join(rename(Placeholder, (d0, d1), (d0, d2)), constant(\"layer_Variable_read\"), f(a,b)(a * b)), sum, d2), constant(\"layer_Variable_1_read\"), f(a,b)(a + b))"; @After public void removeGeneratedConstantTensorFiles() { IOUtils.recursiveDeleteDir(applicationDir.append(ApplicationPackage.MODELS_GENERATED_DIR).toFile()); } @Test public void testTensorFlowReference() { RankProfileSearchFixture search = fixtureWith("tensor(d0[2],d1[784])(0.0)", "tensorflow('mnist_softmax/saved')"); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); assertLargeConstant("layer_Variable_1_read", search, Optional.of(10L)); assertLargeConstant("layer_Variable_read", search, Optional.of(7840L)); } @Test public void testTensorFlowReferenceWithConstantFeature() { RankProfileSearchFixture search = fixtureWith("constant(mytensor)", "tensorflow('mnist_softmax/saved')", "constant mytensor { file: ignored\ntype: tensor(d0[7],d1[784]) }", null); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); assertLargeConstant("layer_Variable_1_read", search, Optional.of(10L)); assertLargeConstant("layer_Variable_read", search, Optional.of(7840L)); } @Test public void testTensorFlowReferenceWithQueryFeature() { String queryProfile = "<query-profile id='default' type='root'/>"; String queryProfileType = "<query-profile-type id='root'>" + " <field name='query(mytensor)' type='tensor(d0[3],d1[784])'/>" + "</query-profile-type>"; StoringApplicationPackage application = new StoringApplicationPackage(applicationDir, queryProfile, queryProfileType); RankProfileSearchFixture search = fixtureWith("query(mytensor)", "tensorflow('mnist_softmax/saved')", null, null, "Placeholder", application); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); assertLargeConstant("layer_Variable_1_read",
search, Optional.of(10L)); assertLargeConstant("layer_Variable_read", search, Optional.of(7840L)); } @Test public void testTensorFlowReferenceWithDocumentFeature() { StoringApplicationPackage application = new StoringApplicationPackage(applicationDir); RankProfileSearchFixture search = fixtureWith("attribute(mytensor)", "tensorflow('mnist_softmax/saved')", null, "field mytensor type tensor(d0[],d1[784]) { indexing: attribute }", "Placeholder", application); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); assertLargeConstant("layer_Variable_1_read", search, Optional.of(10L)); assertLargeConstant("layer_Variable_read", search, Optional.of(7840L)); } @Test public void testTensorFlowReferenceWithFeatureCombination() { String queryProfile = "<query-profile id='default' type='root'/>"; String queryProfileType = "<query-profile-type id='root'>" + " <field name='query(mytensor)' type='tensor(d0[3],d1[784],d2[10])'/>" + "</query-profile-type>"; StoringApplicationPackage application = new StoringApplicationPackage(applicationDir, queryProfile, queryProfileType); RankProfileSearchFixture search = fixtureWith("sum(query(mytensor) * attribute(mytensor) * constant(mytensor),d2)", "tensorflow('mnist_softmax/saved')", "constant mytensor { file: ignored\ntype: tensor(d0[7],d1[784]) }", "field mytensor type tensor(d0[],d1[784]) { indexing: attribute }", "Placeholder", application); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); assertLargeConstant("layer_Variable_1_read", search, Optional.of(10L)); assertLargeConstant("layer_Variable_read", search, Optional.of(7840L)); } @Test public void testNestedTensorFlowReference() { RankProfileSearchFixture search = fixtureWith("tensor(d0[2],d1[784])(0.0)", "5 + sum(tensorflow('mnist_softmax/saved'))"); search.assertFirstPhaseExpression("5 + reduce(" + vespaExpression + ", sum)", "my_profile"); assertLargeConstant("layer_Variable_1_read", search, Optional.of(10L)); assertLargeConstant("layer_Variable_read",
search, Optional.of(7840L)); } @Test public void testTensorFlowReferenceSpecifyingSignature() { RankProfileSearchFixture search = fixtureWith("tensor(d0[2],d1[784])(0.0)", "tensorflow('mnist_softmax/saved', 'serving_default')"); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); } @Test public void testTensorFlowReferenceSpecifyingSignatureAndOutput() { RankProfileSearchFixture search = fixtureWith("tensor(d0[2],d1[784])(0.0)", "tensorflow('mnist_softmax/saved', 'serving_default', 'y')"); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); } @Test public void testTensorFlowReferenceMissingMacro() throws ParseException { try { RankProfileSearchFixture search = new RankProfileSearchFixture( new StoringApplicationPackage(applicationDir), new QueryProfileRegistry(), " rank-profile my_profile {\n" + " first-phase {\n" + " expression: tensorflow('mnist_softmax/saved')" + " }\n" + " }"); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); fail("Expecting exception"); } catch (IllegalArgumentException expected) { assertEquals("Rank profile 'my_profile' is invalid: Could not use tensorflow model from " + "tensorflow('mnist_softmax/saved'): " + "Model refers Placeholder 'Placeholder' of type tensor(d0[],d1[784]) but this macro is " + "not present in rank profile 'my_profile'", Exceptions.toMessageString(expected)); } } @Test public void testTensorFlowReferenceWithWrongMacroType() { try { RankProfileSearchFixture search = fixtureWith("tensor(d0[2],d5[10])(0.0)", "tensorflow('mnist_softmax/saved')"); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); fail("Expecting exception"); } catch (IllegalArgumentException expected) { assertEquals("Rank profile 'my_profile' is invalid: Could not use tensorflow model from " + "tensorflow('mnist_softmax/saved'): " + "Model refers Placeholder 'Placeholder' of type tensor(d0[],d1[784]) which must be produced " + "by a macro in the rank profile, but this macro produces type
tensor(d0[2],d5[10])", Exceptions.toMessageString(expected)); } } @Test public void testTensorFlowReferenceSpecifyingNonExistingSignature() { try { RankProfileSearchFixture search = fixtureWith("tensor(d0[2],d1[784])(0.0)", "tensorflow('mnist_softmax/saved', 'serving_defaultz')"); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); fail("Expecting exception"); } catch (IllegalArgumentException expected) { assertEquals("Rank profile 'my_profile' is invalid: Could not use tensorflow model from " + "tensorflow('mnist_softmax/saved','serving_defaultz'): " + "Model does not have the specified signature 'serving_defaultz'", Exceptions.toMessageString(expected)); } } @Test public void testTensorFlowReferenceSpecifyingNonExistingOutput() { try { RankProfileSearchFixture search = fixtureWith("tensor(d0[2],d1[784])(0.0)", "tensorflow('mnist_softmax/saved', 'serving_default', 'x')"); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); fail("Expecting exception"); } catch (IllegalArgumentException expected) { assertEquals("Rank profile 'my_profile' is invalid: Could not use tensorflow model from " + "tensorflow('mnist_softmax/saved','serving_default','x'): " + "Model does not have the specified output 'x'", Exceptions.toMessageString(expected)); } } @Test public void testImportingFromStoredExpressions() throws IOException { RankProfileSearchFixture search = fixtureWith("tensor(d0[2],d1[784])(0.0)", "tensorflow('mnist_softmax/saved')"); search.assertFirstPhaseExpression(vespaExpression, "my_profile"); assertLargeConstant("layer_Variable_1_read", search, Optional.of(10L)); assertLargeConstant("layer_Variable_read", search, Optional.of(7840L)); Path storedApplicationDirectory = applicationDir.getParentPath().append("copy"); try { storedApplicationDirectory.toFile().mkdirs(); IOUtils.copyDirectory(applicationDir.append(ApplicationPackage.MODELS_GENERATED_DIR).toFile(), storedApplicationDirectory.append(ApplicationPackage.MODELS_GENERATED_DIR).toFile());
StoringApplicationPackage storedApplication = new StoringApplicationPackage(storedApplicationDirectory); RankProfileSearchFixture searchFromStored = fixtureWith("tensor(d0[2],d1[784])(0.0)", "tensorflow('mnist_softmax/saved')", null, null, "Placeholder", storedApplication); searchFromStored.assertFirstPhaseExpression(vespaExpression, "my_profile"); assertLargeConstant("layer_Variable_1_read", searchFromStored, Optional.empty()); assertLargeConstant("layer_Variable_read", searchFromStored, Optional.empty()); } finally { IOUtils.recursiveDeleteDir(storedApplicationDirectory.toFile()); } } @Test public void testTensorFlowReduceBatchDimension() { final String expression = "join(join(reduce(join(reduce(rename(Placeholder, (d0, d1), (d0, d2)), sum, d0), constant(\"layer_Variable_read\"), f(a,b)(a * b)), sum, d2), constant(\"layer_Variable_1_read\"), f(a,b)(a + b)), tensor(d0[1])(1.0), f(a,b)(a * b))"; RankProfileSearchFixture search = fixtureWith("tensor(d0[1],d1[784])(0.0)", "tensorflow('mnist_softmax/saved')"); search.assertFirstPhaseExpression(expression, "my_profile"); assertLargeConstant("layer_Variable_1_read", search, Optional.of(10L)); assertLargeConstant("layer_Variable_read", search, Optional.of(7840L)); } @Test public void testMacroGeneration() { final String expression = "join(reduce(join(join(join(constant(\"dnn_hidden2_Const\"), tf_macro_dnn_hidden2_add, f(a,b)(a * b)), tf_macro_dnn_hidden2_add, f(a,b)(max(a,b))), constant(\"dnn_outputs_weights_read\"), f(a,b)(a * b)), sum, d2), constant(\"dnn_outputs_bias_read\"), f(a,b)(a + b))"; final String macroExpression1 = "join(reduce(join(rename(input, (d0, d1), (d0, d4)), constant(\"dnn_hidden1_weights_read\"), f(a,b)(a * b)), sum, d4), constant(\"dnn_hidden1_bias_read\"), f(a,b)(a + b))"; final String macroExpression2 = "join(reduce(join(join(join(0.009999999776482582, tf_macro_dnn_hidden1_add, f(a,b)(a * b)), tf_macro_dnn_hidden1_add, f(a,b)(max(a,b))), constant(\"dnn_hidden2_weights_read\"), f(a,b)(a * b)),
sum, d3), constant(\"dnn_hidden2_bias_read\"), f(a,b)(a + b))"; RankProfileSearchFixture search = fixtureWith("tensor(d0[1],d1[784])(0.0)", "tensorflow('mnist/saved')"); search.assertFirstPhaseExpression(expression, "my_profile"); search.assertMacro(macroExpression1, "tf_macro_dnn_hidden1_add", "my_profile"); search.assertMacro(macroExpression2, "tf_macro_dnn_hidden2_add", "my_profile"); } /* Stray @Test removed here: JUnit 4 requires test methods to be public, void and parameterless; this is a private assertion helper (currently unused), not a test. */ private void assertSmallConstant(String name, TensorType type, RankProfileSearchFixture search) { Value value = search.rankProfile("my_profile").getConstants().get(name); assertNotNull(value); assertEquals(type, value.type()); } /** * Verifies that the constant with the given name exists, and - only if an expected size is given - * that the content of the constant is available and has the expected size. */ private void assertLargeConstant(String name, RankProfileSearchFixture search, Optional<Long> expectedSize) { try { Path constantApplicationPackagePath = Path.fromString("models.generated/mnist_softmax/saved/constants").append(name + ".tbf"); RankingConstant rankingConstant = search.search().getRankingConstants().get(name); assertEquals(name, rankingConstant.getName()); assertTrue(rankingConstant.getFileName().endsWith(constantApplicationPackagePath.toString())); if (expectedSize.isPresent()) { Path constantPath = applicationDir.append(constantApplicationPackagePath); assertTrue("Constant file '" + constantPath + "' has been written", constantPath.toFile().exists()); Tensor deserializedConstant = TypedBinaryFormat.decode(Optional.empty(), GrowableByteBuffer.wrap(IOUtils.readFileBytes(constantPath.toFile()))); assertEquals(expectedSize.get().longValue(), deserializedConstant.size()); } } catch (IOException e) { throw new UncheckedIOException(e); } } private RankProfileSearchFixture fixtureWith(String placeholderExpression, String firstPhaseExpression) { return fixtureWith(placeholderExpression, firstPhaseExpression, null, null, "Placeholder", new
StoringApplicationPackage(applicationDir)); } private RankProfileSearchFixture fixtureWith(String placeholderExpression, String firstPhaseExpression, String constant, String field) { return fixtureWith(placeholderExpression, firstPhaseExpression, constant, field, "Placeholder", new StoringApplicationPackage(applicationDir)); } private RankProfileSearchFixture fixtureWith(String macroExpression, String firstPhaseExpression, String constant, String field, String macroName, StoringApplicationPackage application) { try { return new RankProfileSearchFixture( application, application.getQueryProfiles(), " rank-profile my_profile {\n" + " macro " + macroName + "() {\n" + " expression: " + macroExpression + " }\n" + " first-phase {\n" + " expression: " + firstPhaseExpression + " }\n" + " }", constant, field); } catch (ParseException e) { throw new IllegalArgumentException(e); } } private static class StoringApplicationPackage extends MockApplicationPackage { private final File root; StoringApplicationPackage(Path applicationPackageWritableRoot) { this(applicationPackageWritableRoot, null, null); } StoringApplicationPackage(Path applicationPackageWritableRoot, String queryProfile, String queryProfileType) { super(null, null, Collections.emptyList(), null, null, null, false, queryProfile, queryProfileType); this.root = new File(applicationPackageWritableRoot.toString()); } @Override public File getFileReference(Path path) { return Path.fromString(root.toString()).append(path).toFile(); } @Override public ApplicationFile getFile(Path file) { return new StoringApplicationPackageFile(file, Path.fromString(root.toString())); } } private static class StoringApplicationPackageFile extends ApplicationFile { /** The path to the application package root */ private final Path root; /** The File pointing to the actual file represented by this */ private final File file; StoringApplicationPackageFile(Path filePath, Path applicationPackagePath) { super(filePath); this.root =
applicationPackagePath; file = applicationPackagePath.append(filePath).toFile(); } @Override public boolean isDirectory() { return file.isDirectory(); } @Override public boolean exists() { return file.exists(); } @Override public Reader createReader() throws FileNotFoundException { try { if ( ! exists()) throw new FileNotFoundException("File '" + file + "' does not exist"); return IOUtils.createReader(file, "UTF-8"); } catch (IOException e) { throw new UncheckedIOException(e); } } @Override public InputStream createInputStream() throws FileNotFoundException { try { if ( ! exists()) throw new FileNotFoundException("File '" + file + "' does not exist"); return new BufferedInputStream(new FileInputStream(file)); } catch (IOException e) { throw new UncheckedIOException(e); } } @Override public ApplicationFile createDirectory() { file.mkdirs(); return this; } @Override public ApplicationFile writeFile(Reader input) { try { IOUtils.writeFile(file, IOUtils.readAll(input), false); return this; } catch (IOException e) { throw new UncheckedIOException(e); } } @Override public ApplicationFile appendFile(String value) { try { IOUtils.writeFile(file, value, true); return this; } catch (IOException e) { throw new UncheckedIOException(e); } } @Override public List<ApplicationFile> listFiles(PathFilter filter) { if ( !
isDirectory()) return Collections.emptyList(); return Arrays.stream(file.listFiles()).filter(f -> filter.accept(Path.fromString(f.toString()))) .map(f -> new StoringApplicationPackageFile(asApplicationRelativePath(f), root)) .collect(Collectors.toList()); } @Override public ApplicationFile delete() { file.delete(); return this; } @Override public MetaData getMetaData() { throw new UnsupportedOperationException(); } @Override public int compareTo(ApplicationFile other) { return this.getPath().getName().compareTo((other).getPath().getName()); } /** Strips the application package root path prefix from the path of the given file */ private Path asApplicationRelativePath(File file) { Path path = Path.fromString(file.toString()); Iterator<String> pathIterator = path.iterator(); for (Iterator<String> rootIterator = root.iterator(); rootIterator.hasNext(); ) { String rootElement = rootIterator.next(); String pathElement = pathIterator.next(); if ( ! rootElement.equals(pathElement)) throw new RuntimeException("Assumption broken"); } Path relative = Path.fromString(""); while (pathIterator.hasNext()) relative = relative.append(pathIterator.next()); return relative; } } }
Can collapse down to ```java return (bucketSpaceStats != null && bucketSpaceStats.mayHaveBucketsPending()); ```
public boolean mayHaveMergesPending(String bucketSpace, int contentNodeIndex) { if (!stats.hasUpdatesFromAllDistributors()) { return true; } ContentNodeStats nodeStats = stats.getStats().getContentNode(contentNodeIndex); if (nodeStats != null) { ContentNodeStats.BucketSpaceStats bucketSpaceStats = nodeStats.getBucketSpace(bucketSpace); if (bucketSpaceStats != null && bucketSpaceStats.mayHaveBucketsPending()) { return true; } else { return false; } } return true; }
return false;
public boolean mayHaveMergesPending(String bucketSpace, int contentNodeIndex) { if (!stats.hasUpdatesFromAllDistributors()) { return true; } ContentNodeStats nodeStats = stats.getStats().getContentNode(contentNodeIndex); if (nodeStats != null) { ContentNodeStats.BucketSpaceStats bucketSpaceStats = nodeStats.getBucketSpace(bucketSpace); return (bucketSpaceStats != null && bucketSpaceStats.mayHaveBucketsPending()); } return true; }
class AggregatedStatsMergePendingChecker implements MergePendingChecker { private final AggregatedClusterStats stats; public AggregatedStatsMergePendingChecker(AggregatedClusterStats stats) { this.stats = stats; } @Override public boolean mayHaveMergesPendingInGlobalSpace() { if (!stats.hasUpdatesFromAllDistributors()) { return true; } for (Iterator<ContentNodeStats> itr = stats.getStats().iterator(); itr.hasNext(); ) { ContentNodeStats stats = itr.next(); if (mayHaveMergesPending(FixedBucketSpaces.globalSpace(), stats.getNodeIndex())) { return true; } } return false; } }
class AggregatedStatsMergePendingChecker implements MergePendingChecker { private final AggregatedClusterStats stats; public AggregatedStatsMergePendingChecker(AggregatedClusterStats stats) { this.stats = stats; } @Override public boolean mayHaveMergesPendingInGlobalSpace() { if (!stats.hasUpdatesFromAllDistributors()) { return true; } for (Iterator<ContentNodeStats> itr = stats.getStats().iterator(); itr.hasNext(); ) { ContentNodeStats stats = itr.next(); if (mayHaveMergesPending(FixedBucketSpaces.globalSpace(), stats.getNodeIndex())) { return true; } } return false; } }
How about just removing the shortcut?
public History recordStateTransition(Node.State from, Node.State to, Agent agent, Instant at) { if (from == to && from != Node.State.reserved) return this; switch (to) { case provisioned: return this.with(new Event(Event.Type.provisioned, agent, at)); case ready: return this.withoutApplicationEvents().with(new Event(Event.Type.readied, agent, at)); case active: return this.with(new Event(Event.Type.activated, agent, at)); case inactive: return this.with(new Event(Event.Type.deactivated, agent, at)); case reserved: return this.with(new Event(Event.Type.reserved, agent, at)); case failed: return this.with(new Event(Event.Type.failed, agent, at)); case dirty: return this.with(new Event(Event.Type.deallocated, agent, at)); case parked: return this.with(new Event(Event.Type.parked, agent, at)); default: return this; } }
public History recordStateTransition(Node.State from, Node.State to, Agent agent, Instant at) { if (from == to && from != Node.State.reserved) return this; switch (to) { case provisioned: return this.with(new Event(Event.Type.provisioned, agent, at)); case ready: return this.withoutApplicationEvents().with(new Event(Event.Type.readied, agent, at)); case active: return this.with(new Event(Event.Type.activated, agent, at)); case inactive: return this.with(new Event(Event.Type.deactivated, agent, at)); case reserved: return this.with(new Event(Event.Type.reserved, agent, at)); case failed: return this.with(new Event(Event.Type.failed, agent, at)); case dirty: return this.with(new Event(Event.Type.deallocated, agent, at)); case parked: return this.with(new Event(Event.Type.parked, agent, at)); default: return this; } }
/**
 * An immutable record of events which have happened to a node, keyed by event type:
 * at most one event per type is retained, so recording an event replaces any earlier
 * event of the same type.
 */
class History {

    private final ImmutableMap<Event.Type, Event> events;

    public History(Collection<Event> events) { this(toImmutableMap(events)); }

    private History(ImmutableMap<Event.Type, Event> events) { this.events = events; }

    private static ImmutableMap<Event.Type, Event> toImmutableMap(Collection<Event> events) {
        ImmutableMap.Builder<Event.Type, Event> builder = new ImmutableMap.Builder<>();
        for (Event event : events)
            builder.put(event.type(), event);
        return builder.build();
    }

    /** Returns this event if it is present in this history */
    public Optional<Event> event(Event.Type type) { return Optional.ofNullable(events.get(type)); }

    /** Returns all events in this history */
    public Collection<Event> events() { return events.values(); }

    /** Returns a copy of this history with the given event added (replacing any event of the same type) */
    public History with(Event event) {
        // Remove any existing event of this type first: ImmutableMap.Builder rejects duplicate keys.
        ImmutableMap.Builder<Event.Type, Event> builder = builderWithout(event.type());
        builder.put(event.type(), event);
        return new History(builder.build());
    }

    /** Returns a copy of this history with the given event type removed (or an identical history if it was not present) */
    public History without(Event.Type type) {
        return new History(builderWithout(type).build());
    }

    /** Returns a builder containing every event of this except those of the given type */
    private ImmutableMap.Builder<Event.Type, Event> builderWithout(Event.Type type) {
        ImmutableMap.Builder<Event.Type, Event> builder = new ImmutableMap.Builder<>();
        for (Event event : events.values())
            if (event.type() != type)
                builder.put(event.type(), event);
        return builder;
    }

    /**
     * Events can be application or node level.
     * This returns a copy of this history with all application level events removed.
     */
    private History withoutApplicationEvents() {
        return new History(events().stream().filter(e -> ! e.type().isApplicationLevel()).collect(Collectors.toList()));
    }

    /** Returns the empty history */
    public static History empty() { return new History(Collections.emptyList()); }

    @Override
    public String toString() {
        if (events.isEmpty()) return "history: (empty)";
        StringBuilder b = new StringBuilder("history: ");
        for (Event e : events.values())
            b.append(e).append(", ");
        b.setLength(b.length() -2); // drop the trailing ", "
        return b.toString();
    }

    /** An event which may happen to a node */
    public static class Event {

        private final Instant at;
        private final Agent agent;
        private final Type type;

        public Event(Event.Type type, Agent agent, Instant at) {
            this.type = type;
            this.agent = agent;
            this.at = at;
        }

        public enum Type {
            provisioned(false), readied, reserved, activated, deactivated, deallocated, parked, retired, down, requested, rebooted(false), failed(false);

            private final boolean applicationLevel;

            /** Creates an application level event */
            Type() { this.applicationLevel = true; }

            Type(boolean applicationLevel) { this.applicationLevel = applicationLevel; }

            /** Returns true if this is an application level event and false if it is a node level event */
            public boolean isApplicationLevel() { return applicationLevel; }
        }

        /** Returns the type of event */
        public Event.Type type() { return type; }

        /** Returns the agent causing this event */
        public Agent agent() { return agent; }

        /** Returns the instant this event took place */
        public Instant at() { return at; }

        @Override
        public String toString() { return "'" + type + "' event at " + at + " by " + agent; }
    }
}
/**
 * An immutable record of events which have happened to a node, keyed by event type:
 * at most one event per type is retained, so recording an event replaces any earlier
 * event of the same type.
 */
class History {

    private final ImmutableMap<Event.Type, Event> events;

    public History(Collection<Event> events) { this(toImmutableMap(events)); }

    private History(ImmutableMap<Event.Type, Event> events) { this.events = events; }

    private static ImmutableMap<Event.Type, Event> toImmutableMap(Collection<Event> events) {
        ImmutableMap.Builder<Event.Type, Event> builder = new ImmutableMap.Builder<>();
        for (Event event : events)
            builder.put(event.type(), event);
        return builder.build();
    }

    /** Returns this event if it is present in this history */
    public Optional<Event> event(Event.Type type) { return Optional.ofNullable(events.get(type)); }

    /** Returns all events in this history */
    public Collection<Event> events() { return events.values(); }

    /** Returns a copy of this history with the given event added (replacing any event of the same type) */
    public History with(Event event) {
        // Remove any existing event of this type first: ImmutableMap.Builder rejects duplicate keys.
        ImmutableMap.Builder<Event.Type, Event> builder = builderWithout(event.type());
        builder.put(event.type(), event);
        return new History(builder.build());
    }

    /** Returns a copy of this history with the given event type removed (or an identical history if it was not present) */
    public History without(Event.Type type) {
        return new History(builderWithout(type).build());
    }

    /** Returns a builder containing every event of this except those of the given type */
    private ImmutableMap.Builder<Event.Type, Event> builderWithout(Event.Type type) {
        ImmutableMap.Builder<Event.Type, Event> builder = new ImmutableMap.Builder<>();
        for (Event event : events.values())
            if (event.type() != type)
                builder.put(event.type(), event);
        return builder;
    }

    /**
     * Events can be application or node level.
     * This returns a copy of this history with all application level events removed.
     */
    private History withoutApplicationEvents() {
        return new History(events().stream().filter(e -> ! e.type().isApplicationLevel()).collect(Collectors.toList()));
    }

    /** Returns the empty history */
    public static History empty() { return new History(Collections.emptyList()); }

    @Override
    public String toString() {
        if (events.isEmpty()) return "history: (empty)";
        StringBuilder b = new StringBuilder("history: ");
        for (Event e : events.values())
            b.append(e).append(", ");
        b.setLength(b.length() -2); // drop the trailing ", "
        return b.toString();
    }

    /** An event which may happen to a node */
    public static class Event {

        private final Instant at;
        private final Agent agent;
        private final Type type;

        public Event(Event.Type type, Agent agent, Instant at) {
            this.type = type;
            this.agent = agent;
            this.at = at;
        }

        public enum Type {
            provisioned(false), readied, reserved, activated, deactivated, deallocated, parked, retired, down, requested, rebooted(false), failed(false);

            private final boolean applicationLevel;

            /** Creates an application level event */
            Type() { this.applicationLevel = true; }

            Type(boolean applicationLevel) { this.applicationLevel = applicationLevel; }

            /** Returns true if this is an application level event and false if it is a node level event */
            public boolean isApplicationLevel() { return applicationLevel; }
        }

        /** Returns the type of event */
        public Event.Type type() { return type; }

        /** Returns the agent causing this event */
        public Agent agent() { return agent; }

        /** Returns the instant this event took place */
        public Instant at() { return at; }

        @Override
        public String toString() { return "'" + type + "' event at " + at + " by " + agent; }
    }
}
I assumed it was there to prevent accidental overrides of data, if a no-op transition was attempted. You have the context here, so feel free to take this call :)
public History recordStateTransition(Node.State from, Node.State to, Agent agent, Instant at) { if (from == to && from != Node.State.reserved) return this; switch (to) { case provisioned: return this.with(new Event(Event.Type.provisioned, agent, at)); case ready: return this.withoutApplicationEvents().with(new Event(Event.Type.readied, agent, at)); case active: return this.with(new Event(Event.Type.activated, agent, at)); case inactive: return this.with(new Event(Event.Type.deactivated, agent, at)); case reserved: return this.with(new Event(Event.Type.reserved, agent, at)); case failed: return this.with(new Event(Event.Type.failed, agent, at)); case dirty: return this.with(new Event(Event.Type.deallocated, agent, at)); case parked: return this.with(new Event(Event.Type.parked, agent, at)); default: return this; } }
public History recordStateTransition(Node.State from, Node.State to, Agent agent, Instant at) { if (from == to && from != Node.State.reserved) return this; switch (to) { case provisioned: return this.with(new Event(Event.Type.provisioned, agent, at)); case ready: return this.withoutApplicationEvents().with(new Event(Event.Type.readied, agent, at)); case active: return this.with(new Event(Event.Type.activated, agent, at)); case inactive: return this.with(new Event(Event.Type.deactivated, agent, at)); case reserved: return this.with(new Event(Event.Type.reserved, agent, at)); case failed: return this.with(new Event(Event.Type.failed, agent, at)); case dirty: return this.with(new Event(Event.Type.deallocated, agent, at)); case parked: return this.with(new Event(Event.Type.parked, agent, at)); default: return this; } }
/**
 * An immutable record of events which have happened to a node, keyed by event type:
 * at most one event per type is retained, so recording an event replaces any earlier
 * event of the same type.
 */
class History {

    private final ImmutableMap<Event.Type, Event> events;

    public History(Collection<Event> events) { this(toImmutableMap(events)); }

    private History(ImmutableMap<Event.Type, Event> events) { this.events = events; }

    private static ImmutableMap<Event.Type, Event> toImmutableMap(Collection<Event> events) {
        ImmutableMap.Builder<Event.Type, Event> builder = new ImmutableMap.Builder<>();
        for (Event event : events)
            builder.put(event.type(), event);
        return builder.build();
    }

    /** Returns this event if it is present in this history */
    public Optional<Event> event(Event.Type type) { return Optional.ofNullable(events.get(type)); }

    /** Returns all events in this history */
    public Collection<Event> events() { return events.values(); }

    /** Returns a copy of this history with the given event added (replacing any event of the same type) */
    public History with(Event event) {
        // Remove any existing event of this type first: ImmutableMap.Builder rejects duplicate keys.
        ImmutableMap.Builder<Event.Type, Event> builder = builderWithout(event.type());
        builder.put(event.type(), event);
        return new History(builder.build());
    }

    /** Returns a copy of this history with the given event type removed (or an identical history if it was not present) */
    public History without(Event.Type type) {
        return new History(builderWithout(type).build());
    }

    /** Returns a builder containing every event of this except those of the given type */
    private ImmutableMap.Builder<Event.Type, Event> builderWithout(Event.Type type) {
        ImmutableMap.Builder<Event.Type, Event> builder = new ImmutableMap.Builder<>();
        for (Event event : events.values())
            if (event.type() != type)
                builder.put(event.type(), event);
        return builder;
    }

    /**
     * Events can be application or node level.
     * This returns a copy of this history with all application level events removed.
     */
    private History withoutApplicationEvents() {
        return new History(events().stream().filter(e -> ! e.type().isApplicationLevel()).collect(Collectors.toList()));
    }

    /** Returns the empty history */
    public static History empty() { return new History(Collections.emptyList()); }

    @Override
    public String toString() {
        if (events.isEmpty()) return "history: (empty)";
        StringBuilder b = new StringBuilder("history: ");
        for (Event e : events.values())
            b.append(e).append(", ");
        b.setLength(b.length() -2); // drop the trailing ", "
        return b.toString();
    }

    /** An event which may happen to a node */
    public static class Event {

        private final Instant at;
        private final Agent agent;
        private final Type type;

        public Event(Event.Type type, Agent agent, Instant at) {
            this.type = type;
            this.agent = agent;
            this.at = at;
        }

        public enum Type {
            provisioned(false), readied, reserved, activated, deactivated, deallocated, parked, retired, down, requested, rebooted(false), failed(false);

            private final boolean applicationLevel;

            /** Creates an application level event */
            Type() { this.applicationLevel = true; }

            Type(boolean applicationLevel) { this.applicationLevel = applicationLevel; }

            /** Returns true if this is an application level event and false if it is a node level event */
            public boolean isApplicationLevel() { return applicationLevel; }
        }

        /** Returns the type of event */
        public Event.Type type() { return type; }

        /** Returns the agent causing this event */
        public Agent agent() { return agent; }

        /** Returns the instant this event took place */
        public Instant at() { return at; }

        @Override
        public String toString() { return "'" + type + "' event at " + at + " by " + agent; }
    }
}
/**
 * An immutable record of events that have happened to a node, keyed by event type:
 * adding an event replaces any previous event of the same type.
 */
class History {

    private final ImmutableMap<Event.Type, Event> events;

    public History(Collection<Event> events) {
        this(toImmutableMap(events));
    }

    private History(ImmutableMap<Event.Type, Event> events) {
        this.events = events;
    }

    /** Builds the type-keyed map; a later event of a type silently wins over an earlier one only via with() */
    private static ImmutableMap<Event.Type, Event> toImmutableMap(Collection<Event> events) {
        ImmutableMap.Builder<Event.Type, Event> builder = new ImmutableMap.Builder<>();
        for (Event event : events)
            builder.put(event.type(), event);
        return builder.build();
    }

    /** Returns this event if it is present in this history */
    public Optional<Event> event(Event.Type type) { return Optional.ofNullable(events.get(type)); }

    /** Returns all events in this history */
    public Collection<Event> events() { return events.values(); }

    /** Returns a copy of this history with the given event added (replacing any event of the same type) */
    public History with(Event event) {
        ImmutableMap.Builder<Event.Type, Event> builder = builderWithout(event.type());
        builder.put(event.type(), event);
        return new History(builder.build());
    }

    /** Returns a copy of this history with the given event type removed (or an identical history if it was not present) */
    public History without(Event.Type type) {
        return new History(builderWithout(type).build());
    }

    /** Returns a builder containing every event of this except those of the given type */
    private ImmutableMap.Builder<Event.Type, Event> builderWithout(Event.Type type) {
        ImmutableMap.Builder<Event.Type, Event> builder = new ImmutableMap.Builder<>();
        for (Event event : events.values())
            if (event.type() != type)
                builder.put(event.type(), event);
        return builder;
    }

    /**
     * Events can be application or node level.
     * This returns a copy of this history with all application level events removed.
     */
    private History withoutApplicationEvents() {
        return new History(events().stream().filter(e -> ! e.type().isApplicationLevel()).collect(Collectors.toList()));
    }

    /** Returns the empty history */
    public static History empty() { return new History(Collections.emptyList()); }

    @Override
    public String toString() {
        if (events.isEmpty()) return "history: (empty)";
        StringBuilder b = new StringBuilder("history: ");
        for (Event e : events.values())
            b.append(e).append(", ");
        b.setLength(b.length() - 2); // drop the trailing ", "
        return b.toString();
    }

    /** An event which may happen to a node */
    public static class Event {

        private final Instant at;
        private final Agent agent;
        private final Type type;

        public Event(Event.Type type, Agent agent, Instant at) {
            this.type = type;
            this.agent = agent;
            this.at = at;
        }

        public enum Type {
            // Constants created with (false) are node level; the rest are application level
            provisioned(false),
            readied, reserved, activated, deactivated, deallocated, parked, retired, down, requested,
            rebooted(false),
            failed(false);

            private final boolean applicationLevel;

            /** Creates an application level event */
            Type() { this.applicationLevel = true; }

            Type(boolean applicationLevel) { this.applicationLevel = applicationLevel; }

            /** Returns true if this is an application level event and false if it is a node level event */
            public boolean isApplicationLevel() { return applicationLevel; }
        }

        /** Returns the type of event */
        public Event.Type type() { return type; }

        /** Returns the agent causing this event */
        public Agent agent() { return agent; }

        /** Returns the instant this event took place */
        public Instant at() { return at; }

        @Override
        public String toString() { return "'" + type + "' event at " + at + " by " + agent; }

    }

}
Yes, you may be right ... at least I don't want to think about it now.
/** Returns a copy of this history with a record of this state transition added, if applicable */
public History recordStateTransition(Node.State from, Node.State to, Agent agent, Instant at) {
    // reserved -> reserved is deliberately recorded (refreshes the reservation event,
    // presumably for a new reservation attempt — NOTE(review): confirm intent)
    if (from == to && from != Node.State.reserved) return this;
    switch (to) {
        case provisioned: return this.with(new Event(Event.Type.provisioned, agent, at));
        // becoming ready also clears application level events from any previous allocation
        case ready:       return this.withoutApplicationEvents().with(new Event(Event.Type.readied, agent, at));
        case active:      return this.with(new Event(Event.Type.activated, agent, at));
        case inactive:    return this.with(new Event(Event.Type.deactivated, agent, at));
        case reserved:    return this.with(new Event(Event.Type.reserved, agent, at));
        case failed:      return this.with(new Event(Event.Type.failed, agent, at));
        case dirty:       return this.with(new Event(Event.Type.deallocated, agent, at));
        case parked:      return this.with(new Event(Event.Type.parked, agent, at));
        default:          return this; // transitions to other states leave history unchanged
    }
}
/** Returns a copy of this history with a record of this state transition added, if applicable */
public History recordStateTransition(Node.State from, Node.State to, Agent agent, Instant at) {
    // reserved -> reserved is deliberately recorded (refreshes the reservation event,
    // presumably for a new reservation attempt — NOTE(review): confirm intent)
    if (from == to && from != Node.State.reserved) return this;
    switch (to) {
        case provisioned: return this.with(new Event(Event.Type.provisioned, agent, at));
        // becoming ready also clears application level events from any previous allocation
        case ready:       return this.withoutApplicationEvents().with(new Event(Event.Type.readied, agent, at));
        case active:      return this.with(new Event(Event.Type.activated, agent, at));
        case inactive:    return this.with(new Event(Event.Type.deactivated, agent, at));
        case reserved:    return this.with(new Event(Event.Type.reserved, agent, at));
        case failed:      return this.with(new Event(Event.Type.failed, agent, at));
        case dirty:       return this.with(new Event(Event.Type.deallocated, agent, at));
        case parked:      return this.with(new Event(Event.Type.parked, agent, at));
        default:          return this; // transitions to other states leave history unchanged
    }
}
/**
 * An immutable record of events that have happened to a node, keyed by event type:
 * adding an event replaces any previous event of the same type.
 */
class History {

    private final ImmutableMap<Event.Type, Event> events;

    public History(Collection<Event> events) {
        this(toImmutableMap(events));
    }

    private History(ImmutableMap<Event.Type, Event> events) {
        this.events = events;
    }

    /** Builds the type-keyed map; a later event of a type silently wins over an earlier one only via with() */
    private static ImmutableMap<Event.Type, Event> toImmutableMap(Collection<Event> events) {
        ImmutableMap.Builder<Event.Type, Event> builder = new ImmutableMap.Builder<>();
        for (Event event : events)
            builder.put(event.type(), event);
        return builder.build();
    }

    /** Returns this event if it is present in this history */
    public Optional<Event> event(Event.Type type) { return Optional.ofNullable(events.get(type)); }

    /** Returns all events in this history */
    public Collection<Event> events() { return events.values(); }

    /** Returns a copy of this history with the given event added (replacing any event of the same type) */
    public History with(Event event) {
        ImmutableMap.Builder<Event.Type, Event> builder = builderWithout(event.type());
        builder.put(event.type(), event);
        return new History(builder.build());
    }

    /** Returns a copy of this history with the given event type removed (or an identical history if it was not present) */
    public History without(Event.Type type) {
        return new History(builderWithout(type).build());
    }

    /** Returns a builder containing every event of this except those of the given type */
    private ImmutableMap.Builder<Event.Type, Event> builderWithout(Event.Type type) {
        ImmutableMap.Builder<Event.Type, Event> builder = new ImmutableMap.Builder<>();
        for (Event event : events.values())
            if (event.type() != type)
                builder.put(event.type(), event);
        return builder;
    }

    /**
     * Events can be application or node level.
     * This returns a copy of this history with all application level events removed.
     */
    private History withoutApplicationEvents() {
        return new History(events().stream().filter(e -> ! e.type().isApplicationLevel()).collect(Collectors.toList()));
    }

    /** Returns the empty history */
    public static History empty() { return new History(Collections.emptyList()); }

    @Override
    public String toString() {
        if (events.isEmpty()) return "history: (empty)";
        StringBuilder b = new StringBuilder("history: ");
        for (Event e : events.values())
            b.append(e).append(", ");
        b.setLength(b.length() - 2); // drop the trailing ", "
        return b.toString();
    }

    /** An event which may happen to a node */
    public static class Event {

        private final Instant at;
        private final Agent agent;
        private final Type type;

        public Event(Event.Type type, Agent agent, Instant at) {
            this.type = type;
            this.agent = agent;
            this.at = at;
        }

        public enum Type {
            // Constants created with (false) are node level; the rest are application level
            provisioned(false),
            readied, reserved, activated, deactivated, deallocated, parked, retired, down, requested,
            rebooted(false),
            failed(false);

            private final boolean applicationLevel;

            /** Creates an application level event */
            Type() { this.applicationLevel = true; }

            Type(boolean applicationLevel) { this.applicationLevel = applicationLevel; }

            /** Returns true if this is an application level event and false if it is a node level event */
            public boolean isApplicationLevel() { return applicationLevel; }
        }

        /** Returns the type of event */
        public Event.Type type() { return type; }

        /** Returns the agent causing this event */
        public Agent agent() { return agent; }

        /** Returns the instant this event took place */
        public Instant at() { return at; }

        @Override
        public String toString() { return "'" + type + "' event at " + at + " by " + agent; }

    }

}
/**
 * An immutable record of events that have happened to a node, keyed by event type:
 * adding an event replaces any previous event of the same type.
 */
class History {

    private final ImmutableMap<Event.Type, Event> events;

    public History(Collection<Event> events) {
        this(toImmutableMap(events));
    }

    private History(ImmutableMap<Event.Type, Event> events) {
        this.events = events;
    }

    /** Builds the type-keyed map; a later event of a type silently wins over an earlier one only via with() */
    private static ImmutableMap<Event.Type, Event> toImmutableMap(Collection<Event> events) {
        ImmutableMap.Builder<Event.Type, Event> builder = new ImmutableMap.Builder<>();
        for (Event event : events)
            builder.put(event.type(), event);
        return builder.build();
    }

    /** Returns this event if it is present in this history */
    public Optional<Event> event(Event.Type type) { return Optional.ofNullable(events.get(type)); }

    /** Returns all events in this history */
    public Collection<Event> events() { return events.values(); }

    /** Returns a copy of this history with the given event added (replacing any event of the same type) */
    public History with(Event event) {
        ImmutableMap.Builder<Event.Type, Event> builder = builderWithout(event.type());
        builder.put(event.type(), event);
        return new History(builder.build());
    }

    /** Returns a copy of this history with the given event type removed (or an identical history if it was not present) */
    public History without(Event.Type type) {
        return new History(builderWithout(type).build());
    }

    /** Returns a builder containing every event of this except those of the given type */
    private ImmutableMap.Builder<Event.Type, Event> builderWithout(Event.Type type) {
        ImmutableMap.Builder<Event.Type, Event> builder = new ImmutableMap.Builder<>();
        for (Event event : events.values())
            if (event.type() != type)
                builder.put(event.type(), event);
        return builder;
    }

    /**
     * Events can be application or node level.
     * This returns a copy of this history with all application level events removed.
     */
    private History withoutApplicationEvents() {
        return new History(events().stream().filter(e -> ! e.type().isApplicationLevel()).collect(Collectors.toList()));
    }

    /** Returns the empty history */
    public static History empty() { return new History(Collections.emptyList()); }

    @Override
    public String toString() {
        if (events.isEmpty()) return "history: (empty)";
        StringBuilder b = new StringBuilder("history: ");
        for (Event e : events.values())
            b.append(e).append(", ");
        b.setLength(b.length() - 2); // drop the trailing ", "
        return b.toString();
    }

    /** An event which may happen to a node */
    public static class Event {

        private final Instant at;
        private final Agent agent;
        private final Type type;

        public Event(Event.Type type, Agent agent, Instant at) {
            this.type = type;
            this.agent = agent;
            this.at = at;
        }

        public enum Type {
            // Constants created with (false) are node level; the rest are application level
            provisioned(false),
            readied, reserved, activated, deactivated, deallocated, parked, retired, down, requested,
            rebooted(false),
            failed(false);

            private final boolean applicationLevel;

            /** Creates an application level event */
            Type() { this.applicationLevel = true; }

            Type(boolean applicationLevel) { this.applicationLevel = applicationLevel; }

            /** Returns true if this is an application level event and false if it is a node level event */
            public boolean isApplicationLevel() { return applicationLevel; }
        }

        /** Returns the type of event */
        public Event.Type type() { return type; }

        /** Returns the agent causing this event */
        public Agent agent() { return agent; }

        /** Returns the instant this event took place */
        public Instant at() { return at; }

        @Override
        public String toString() { return "'" + type + "' event at " + at + " by " + agent; }

    }

}
This should (probably) be `newNode`; `node` is the parent. Unless you wanted to say "Adding new Docker node _to_ `node`"?
/**
 * Adds a prospective new Docker node on each active Docker host that has capacity for the
 * requested flavor and does not already run a node of this application's cluster.
 * The created nodes are placeholders ("fake-" hostnames) subject to spare-host constraints.
 */
void addNewDockerNodes() {
    if (!isDocker) return;

    DockerHostCapacity capacity = new DockerHostCapacity(allNodes);
    ResourceCapacity wantedResourceCapacity = ResourceCapacity.of(getFlavor(requestedNodes));
    NodeList list = new NodeList(allNodes);

    for (Node node : allNodes) {
        // Only active, non-retiring Docker hosts are candidates
        if (node.type() != NodeType.host) continue;
        if (node.state() != Node.State.active) continue;
        if (node.status().wantToRetire()) continue;

        boolean hostHasCapacityForWantedFlavor = capacity.hasCapacity(node, wantedResourceCapacity);
        // Never place two nodes of the same cluster on the same host
        boolean conflictingCluster = list.childrenOf(node).owner(appId).asList().stream()
                .anyMatch(child -> child.allocation().get().membership().cluster().id().equals(clusterSpec.id()));

        if (!hostHasCapacityForWantedFlavor || conflictingCluster) continue;

        log.log(LogLevel.DEBUG, "Trying to add new Docker node on " + node);
        Set<String> ipAddresses = DockerHostCapacity.findFreeIps(node, allNodes);
        if (ipAddresses.isEmpty()) continue;

        String ipAddress = ipAddresses.stream().findFirst().get();
        Optional<String> hostname = nameResolver.getHostname(ipAddress);
        if (!hostname.isPresent()) {
            log.log(LogLevel.DEBUG, "Could not find hostname for " + ipAddress + ", skipping it");
            continue;
        }

        Node newNode = Node.createDockerNode("fake-" + hostname.get(),
                Collections.singleton(ipAddress),
                Collections.emptySet(),
                hostname.get(),
                Optional.of(node.hostname()),
                getFlavor(requestedNodes),
                NodeType.tenant);
        PrioritizableNode nodePri = toNodePriority(newNode, false, true);
        if (!nodePri.violatesSpares || isAllocatingForReplacement) {
            // Fixed: log the node being added (newNode), not the parent host (node)
            log.log(LogLevel.DEBUG, "Adding new Docker node " + newNode);
            nodes.put(newNode, nodePri);
        }
    }
}
// Fixed: log the node being added (newNode), not its parent host (node)
log.log(LogLevel.DEBUG, "Adding new Docker node " + newNode);
/**
 * Adds a prospective new Docker node on each active Docker host that has capacity for the
 * requested flavor and does not already run a node of this application's cluster.
 * The created nodes are placeholders ("fake-" hostnames) subject to spare-host constraints.
 */
void addNewDockerNodes() {
    if (!isDocker) return;

    DockerHostCapacity capacity = new DockerHostCapacity(allNodes);
    ResourceCapacity wantedResourceCapacity = ResourceCapacity.of(getFlavor(requestedNodes));
    NodeList list = new NodeList(allNodes);

    for (Node node : allNodes) {
        // Only active, non-retiring Docker hosts are candidates
        if (node.type() != NodeType.host) continue;
        if (node.state() != Node.State.active) continue;
        if (node.status().wantToRetire()) continue;

        boolean hostHasCapacityForWantedFlavor = capacity.hasCapacity(node, wantedResourceCapacity);
        // Never place two nodes of the same cluster on the same host
        boolean conflictingCluster = list.childrenOf(node).owner(appId).asList().stream()
                .anyMatch(child -> child.allocation().get().membership().cluster().id().equals(clusterSpec.id()));

        if (!hostHasCapacityForWantedFlavor || conflictingCluster) continue;

        log.log(LogLevel.DEBUG, "Trying to add new Docker node on " + node);
        Set<String> ipAddresses = DockerHostCapacity.findFreeIps(node, allNodes);
        if (ipAddresses.isEmpty()) continue;

        String ipAddress = ipAddresses.stream().findFirst().get();
        Optional<String> hostname = nameResolver.getHostname(ipAddress);
        if (!hostname.isPresent()) {
            log.log(LogLevel.DEBUG, "Could not find hostname for " + ipAddress + ", skipping it");
            continue;
        }

        // Placeholder node for the prospective allocation on this host
        Node newNode = Node.createDockerNode("fake-" + hostname.get(),
                Collections.singleton(ipAddress),
                Collections.emptySet(),
                hostname.get(),
                Optional.of(node.hostname()),
                getFlavor(requestedNodes),
                NodeType.tenant);
        PrioritizableNode nodePri = toNodePriority(newNode, false, true);
        if (!nodePri.violatesSpares || isAllocatingForReplacement) {
            log.log(LogLevel.DEBUG, "Adding new Docker node " + newNode);
            nodes.put(newNode, nodePri);
        }
    }
}
/**
 * Collects candidate nodes for an allocation request and computes the parameters
 * (spare/headroom violations, parent capacity, flavor preference, ...) used to
 * prioritize them for allocation.
 */
class NodePrioritizer {

    private final static Logger log = Logger.getLogger(NodePrioritizer.class.getName());

    // Candidates accumulated by the add* methods, keyed by node
    private final Map<Node, PrioritizableNode> nodes = new HashMap<>();
    private final List<Node> allNodes;
    private final DockerHostCapacity capacity;
    private final NodeSpec requestedNodes;
    private final ApplicationId appId;
    private final ClusterSpec clusterSpec;
    private final NameResolver nameResolver;
    private final boolean isDocker;
    private final boolean isAllocatingForReplacement;
    private final Set<Node> spareHosts;
    private final Map<Node, ResourceCapacity> headroomHosts;

    NodePrioritizer(List<Node> allNodes, ApplicationId appId, ClusterSpec clusterSpec, NodeSpec nodeSpec, NodeFlavors nodeFlavors, int spares, NameResolver nameResolver) {
        this.allNodes = Collections.unmodifiableList(allNodes);
        this.requestedNodes = nodeSpec;
        this.clusterSpec = clusterSpec;
        this.appId = appId;
        this.nameResolver = nameResolver;
        this.spareHosts = findSpareHosts(allNodes, spares);
        this.headroomHosts = findHeadroomHosts(allNodes, spareHosts, nodeFlavors);
        this.capacity = new DockerHostCapacity(allNodes);

        // Failed node count for this application/cluster: decides whether this
        // allocation is a replacement (which relaxes spare-host constraints)
        long nofFailedNodes = allNodes.stream()
                .filter(node -> node.state().equals(Node.State.failed))
                .filter(node -> node.allocation().isPresent())
                .filter(node -> node.allocation().get().owner().equals(appId))
                .filter(node -> node.allocation().get().membership().cluster().id().equals(clusterSpec.id()))
                .count();

        long nofNodesInCluster = allNodes.stream()
                .filter(node -> node.allocation().isPresent())
                .filter(node -> node.allocation().get().owner().equals(appId))
                .filter(node -> node.allocation().get().membership().cluster().id().equals(clusterSpec.id()))
                .count();

        this.isAllocatingForReplacement = isReplacement(nofNodesInCluster, nofFailedNodes);
        this.isDocker = isDocker();
    }

    /**
     * Spare hosts are the hosts in the system with the most free capacity,
     * limited to the requested number of spares.
     *
     * We do not count retired or inactive nodes as used capacity (as they could have been
     * moved to create space for the spare node in the first place).
     */
    private static Set<Node> findSpareHosts(List<Node> nodes, int spares) {
        DockerHostCapacity capacity = new DockerHostCapacity(new ArrayList<>(nodes));
        return nodes.stream()
                .filter(node -> node.type().equals(NodeType.host))
                .filter(dockerHost -> dockerHost.state().equals(Node.State.active))
                .filter(dockerHost -> capacity.freeIPs(dockerHost) > 0)
                .sorted(capacity::compareWithoutInactive)
                .limit(spares)
                .collect(Collectors.toSet());
    }

    /**
     * Headroom hosts are the hosts with the least but sufficient capacity for the requested headroom.
     *
     * If there is not enough headroom, the headroom-violating hosts are the ones that are closest
     * to fulfilling a headroom request.
     */
    private static Map<Node, ResourceCapacity> findHeadroomHosts(List<Node> nodes, Set<Node> spareNodes, NodeFlavors flavors) {
        DockerHostCapacity capacity = new DockerHostCapacity(nodes);
        Map<Node, ResourceCapacity> headroomHosts = new HashMap<>();

        List<Node> hostsSortedOnLeastCapacity = nodes.stream()
                .filter(n -> !spareNodes.contains(n))
                .filter(node -> node.type().equals(NodeType.host))
                .filter(dockerHost -> dockerHost.state().equals(Node.State.active))
                .filter(dockerHost -> capacity.freeIPs(dockerHost) > 0)
                .sorted((a, b) -> capacity.compareWithoutInactive(b, a))
                .collect(Collectors.toList());

        // For each flavor that wants headroom, pick hosts until the ideal headroom is reached
        for (Flavor flavor : flavors.getFlavors().stream().filter(f -> f.getIdealHeadroom() > 0).collect(Collectors.toList())) {
            Set<Node> tempHeadroom = new HashSet<>();
            Set<Node> notEnoughCapacity = new HashSet<>();
            ResourceCapacity headroomCapacity = ResourceCapacity.of(flavor);

            for (Node host : hostsSortedOnLeastCapacity) {
                if (headroomHosts.containsKey(host)) continue; // already claimed by another flavor
                if (capacity.hasCapacityWhenRetiredAndInactiveNodesAreGone(host, headroomCapacity)) {
                    headroomHosts.put(host, headroomCapacity);
                    tempHeadroom.add(host);
                } else {
                    notEnoughCapacity.add(host);
                }

                if (tempHeadroom.size() == flavor.getIdealHeadroom()) {
                    break;
                }
            }

            // Not enough headroom: mark the closest-to-sufficient hosts as (violating) headroom hosts
            if (tempHeadroom.size() < flavor.getIdealHeadroom()) {
                List<Node> violations = notEnoughCapacity.stream()
                        .sorted((a, b) -> capacity.compare(b, a))
                        .limit(flavor.getIdealHeadroom() - tempHeadroom.size())
                        .collect(Collectors.toList());

                for (Node hostViolatingHeadrom : violations) {
                    headroomHosts.put(hostViolatingHeadrom, headroomCapacity);
                }
            }
        }

        return headroomHosts;
    }

    /**
     * @return The list of nodes sorted by PrioritizableNode::compare
     */
    List<PrioritizableNode> prioritize() {
        List<PrioritizableNode> priorityList = new ArrayList<>(nodes.values());
        Collections.sort(priorityList);
        return priorityList;
    }

    /**
     * Add nodes that have been previously reserved to the same application from
     * an earlier downsizing of a cluster
     */
    void addSurplusNodes(List<Node> surplusNodes) {
        for (Node node : surplusNodes) {
            PrioritizableNode nodePri = toNodePriority(node, true, false);
            if (!nodePri.violatesSpares || isAllocatingForReplacement) {
                nodes.put(node, nodePri);
            }
        }
    }

    /**
     * Add existing nodes allocated to the application
     */
    void addApplicationNodes() {
        List<Node.State> legalStates = Arrays.asList(Node.State.active, Node.State.inactive, Node.State.reserved);
        allNodes.stream()
                .filter(node -> node.type().equals(requestedNodes.type()))
                .filter(node -> legalStates.contains(node.state()))
                .filter(node -> node.allocation().isPresent())
                .filter(node -> node.allocation().get().owner().equals(appId))
                .map(node -> toNodePriority(node, false, false))
                .forEach(prioritizableNode -> nodes.put(prioritizableNode.node, prioritizableNode));
    }

    /**
     * Add nodes already provisioned, but not allocated to any application
     */
    void addReadyNodes() {
        allNodes.stream()
                .filter(node -> node.type().equals(requestedNodes.type()))
                .filter(node -> node.state().equals(Node.State.ready))
                .map(node -> toNodePriority(node, false, false))
                .filter(n -> !n.violatesSpares || isAllocatingForReplacement)
                .forEach(prioritizableNode -> nodes.put(prioritizableNode.node, prioritizableNode));
    }

    /**
     * Convert a node to a node priority. This includes finding and calculating
     * the parameters used by the priority sorting procedure.
     */
    private PrioritizableNode toNodePriority(Node node, boolean isSurplusNode, boolean isNewNode) {
        PrioritizableNode pri = new PrioritizableNode();
        pri.node = node;
        pri.isSurplusNode = isSurplusNode;
        pri.isNewNode = isNewNode;
        pri.preferredOnFlavor = requestedNodes.specifiesNonStockFlavor() && node.flavor().equals(getFlavor(requestedNodes));
        pri.parent = findParentNode(node);

        if (pri.parent.isPresent()) {
            Node parent = pri.parent.get();
            pri.freeParentCapacity = capacity.freeCapacityOf(parent, false);

            if (spareHosts.contains(parent)) {
                pri.violatesSpares = true;
            }

            if (headroomHosts.containsKey(parent) && isPreferredNodeToBeReloacted(allNodes, node, parent)) {
                ResourceCapacity neededCapacity = headroomHosts.get(parent);

                // A new node on the host consumes capacity itself, so add it to the needed headroom
                if (isNewNode) {
                    neededCapacity = ResourceCapacity.composite(neededCapacity, new ResourceCapacity(node));
                }
                pri.violatesHeadroom = !capacity.hasCapacity(parent, neededCapacity);
            }
        }

        return pri;
    }

    /**
     * Returns whether the given node is the first of the parent's children in relocation
     * order (see compareForRelocation).
     * NOTE(review): method name has a typo ("Reloacted"); it is package-visible, so renaming
     * it could break external callers — left as-is here.
     */
    static boolean isPreferredNodeToBeReloacted(List<Node> nodes, Node node, Node parent) {
        NodeList list = new NodeList(nodes);
        return list.childrenOf(parent).asList().stream()
                .sorted(NodePrioritizer::compareForRelocation)
                .findFirst()
                .filter(n -> n.equals(node))
                .isPresent();
    }

    /** Returns true if we are allocating to replace failed nodes (wanted count exceeds healthy nodes) */
    private boolean isReplacement(long nofNodesInCluster, long nodeFailedNodes) {
        if (nodeFailedNodes == 0) return false;

        int wantedCount = 0;
        if (requestedNodes instanceof NodeSpec.CountNodeSpec) {
            NodeSpec.CountNodeSpec countSpec = (NodeSpec.CountNodeSpec) requestedNodes;
            wantedCount = countSpec.getCount();
        }

        return (wantedCount > nofNodesInCluster - nodeFailedNodes);
    }

    /** Returns the requested flavor, or null if the spec is not a count/flavor spec */
    private static Flavor getFlavor(NodeSpec requestedNodes) {
        if (requestedNodes instanceof NodeSpec.CountNodeSpec) {
            NodeSpec.CountNodeSpec countSpec = (NodeSpec.CountNodeSpec) requestedNodes;
            return countSpec.getFlavor();
        }
        return null;
    }

    /** Returns true if the requested flavor is a Docker container flavor */
    private boolean isDocker() {
        Flavor flavor = getFlavor(requestedNodes);
        return (flavor != null) && flavor.getType().equals(Flavor.Type.DOCKER_CONTAINER);
    }

    /** Returns the parent host of the given node, if it has one among allNodes */
    private Optional<Node> findParentNode(Node node) {
        if (!node.parentHostname().isPresent()) return Optional.empty();
        return allNodes.stream()
                .filter(n -> n.hostname().equals(node.parentHostname().orElse(" NOT A NODE")))
                .findAny();
    }

    /** Ordering used to choose which of a host's children is preferred for relocation */
    private static int compareForRelocation(Node a, Node b) {
        // Compare on capacity first
        int capacity = ResourceCapacity.of(a).compare(ResourceCapacity.of(b));
        if (capacity != 0) return capacity;

        // Unallocated nodes sort before allocated ones
        if (!a.allocation().isPresent() && b.allocation().isPresent()) return -1;
        if (a.allocation().isPresent() && !b.allocation().isPresent()) return 1;

        // Container cluster members sort before other cluster types
        if (a.allocation().isPresent() && b.allocation().isPresent()) {
            if (a.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container) &&
                    !b.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container))
                return -1;
            if (!a.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container) &&
                    b.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container))
                return 1;
        }

        // Tie-break on hostname for a deterministic order
        return a.hostname().compareTo(b.hostname());
    }
}
/**
 * Collects candidate nodes for an allocation request and computes the parameters
 * (spare/headroom violations, parent capacity, flavor preference, ...) used to
 * prioritize them for allocation.
 */
class NodePrioritizer {

    private final static Logger log = Logger.getLogger(NodePrioritizer.class.getName());

    // Candidates accumulated by the add* methods, keyed by node
    private final Map<Node, PrioritizableNode> nodes = new HashMap<>();
    private final List<Node> allNodes;
    private final DockerHostCapacity capacity;
    private final NodeSpec requestedNodes;
    private final ApplicationId appId;
    private final ClusterSpec clusterSpec;
    private final NameResolver nameResolver;
    private final boolean isDocker;
    private final boolean isAllocatingForReplacement;
    private final Set<Node> spareHosts;
    private final Map<Node, ResourceCapacity> headroomHosts;

    NodePrioritizer(List<Node> allNodes, ApplicationId appId, ClusterSpec clusterSpec, NodeSpec nodeSpec, NodeFlavors nodeFlavors, int spares, NameResolver nameResolver) {
        this.allNodes = Collections.unmodifiableList(allNodes);
        this.requestedNodes = nodeSpec;
        this.clusterSpec = clusterSpec;
        this.appId = appId;
        this.nameResolver = nameResolver;
        this.spareHosts = findSpareHosts(allNodes, spares);
        this.headroomHosts = findHeadroomHosts(allNodes, spareHosts, nodeFlavors);
        this.capacity = new DockerHostCapacity(allNodes);

        // Failed node count for this application/cluster: decides whether this
        // allocation is a replacement (which relaxes spare-host constraints)
        long nofFailedNodes = allNodes.stream()
                .filter(node -> node.state().equals(Node.State.failed))
                .filter(node -> node.allocation().isPresent())
                .filter(node -> node.allocation().get().owner().equals(appId))
                .filter(node -> node.allocation().get().membership().cluster().id().equals(clusterSpec.id()))
                .count();

        long nofNodesInCluster = allNodes.stream()
                .filter(node -> node.allocation().isPresent())
                .filter(node -> node.allocation().get().owner().equals(appId))
                .filter(node -> node.allocation().get().membership().cluster().id().equals(clusterSpec.id()))
                .count();

        this.isAllocatingForReplacement = isReplacement(nofNodesInCluster, nofFailedNodes);
        this.isDocker = isDocker();
    }

    /**
     * Spare hosts are the hosts in the system with the most free capacity,
     * limited to the requested number of spares.
     *
     * We do not count retired or inactive nodes as used capacity (as they could have been
     * moved to create space for the spare node in the first place).
     */
    private static Set<Node> findSpareHosts(List<Node> nodes, int spares) {
        DockerHostCapacity capacity = new DockerHostCapacity(new ArrayList<>(nodes));
        return nodes.stream()
                .filter(node -> node.type().equals(NodeType.host))
                .filter(dockerHost -> dockerHost.state().equals(Node.State.active))
                .filter(dockerHost -> capacity.freeIPs(dockerHost) > 0)
                .sorted(capacity::compareWithoutInactive)
                .limit(spares)
                .collect(Collectors.toSet());
    }

    /**
     * Headroom hosts are the hosts with the least but sufficient capacity for the requested headroom.
     *
     * If there is not enough headroom, the headroom-violating hosts are the ones that are closest
     * to fulfilling a headroom request.
     */
    private static Map<Node, ResourceCapacity> findHeadroomHosts(List<Node> nodes, Set<Node> spareNodes, NodeFlavors flavors) {
        DockerHostCapacity capacity = new DockerHostCapacity(nodes);
        Map<Node, ResourceCapacity> headroomHosts = new HashMap<>();

        List<Node> hostsSortedOnLeastCapacity = nodes.stream()
                .filter(n -> !spareNodes.contains(n))
                .filter(node -> node.type().equals(NodeType.host))
                .filter(dockerHost -> dockerHost.state().equals(Node.State.active))
                .filter(dockerHost -> capacity.freeIPs(dockerHost) > 0)
                .sorted((a, b) -> capacity.compareWithoutInactive(b, a))
                .collect(Collectors.toList());

        // For each flavor that wants headroom, pick hosts until the ideal headroom is reached
        for (Flavor flavor : flavors.getFlavors().stream().filter(f -> f.getIdealHeadroom() > 0).collect(Collectors.toList())) {
            Set<Node> tempHeadroom = new HashSet<>();
            Set<Node> notEnoughCapacity = new HashSet<>();
            ResourceCapacity headroomCapacity = ResourceCapacity.of(flavor);

            for (Node host : hostsSortedOnLeastCapacity) {
                if (headroomHosts.containsKey(host)) continue; // already claimed by another flavor
                if (capacity.hasCapacityWhenRetiredAndInactiveNodesAreGone(host, headroomCapacity)) {
                    headroomHosts.put(host, headroomCapacity);
                    tempHeadroom.add(host);
                } else {
                    notEnoughCapacity.add(host);
                }

                if (tempHeadroom.size() == flavor.getIdealHeadroom()) {
                    break;
                }
            }

            // Not enough headroom: mark the closest-to-sufficient hosts as (violating) headroom hosts
            if (tempHeadroom.size() < flavor.getIdealHeadroom()) {
                List<Node> violations = notEnoughCapacity.stream()
                        .sorted((a, b) -> capacity.compare(b, a))
                        .limit(flavor.getIdealHeadroom() - tempHeadroom.size())
                        .collect(Collectors.toList());

                for (Node hostViolatingHeadrom : violations) {
                    headroomHosts.put(hostViolatingHeadrom, headroomCapacity);
                }
            }
        }

        return headroomHosts;
    }

    /**
     * @return The list of nodes sorted by PrioritizableNode::compare
     */
    List<PrioritizableNode> prioritize() {
        List<PrioritizableNode> priorityList = new ArrayList<>(nodes.values());
        Collections.sort(priorityList);
        return priorityList;
    }

    /**
     * Add nodes that have been previously reserved to the same application from
     * an earlier downsizing of a cluster
     */
    void addSurplusNodes(List<Node> surplusNodes) {
        for (Node node : surplusNodes) {
            PrioritizableNode nodePri = toNodePriority(node, true, false);
            if (!nodePri.violatesSpares || isAllocatingForReplacement) {
                nodes.put(node, nodePri);
            }
        }
    }

    /**
     * Add existing nodes allocated to the application
     */
    void addApplicationNodes() {
        List<Node.State> legalStates = Arrays.asList(Node.State.active, Node.State.inactive, Node.State.reserved);
        allNodes.stream()
                .filter(node -> node.type().equals(requestedNodes.type()))
                .filter(node -> legalStates.contains(node.state()))
                .filter(node -> node.allocation().isPresent())
                .filter(node -> node.allocation().get().owner().equals(appId))
                .map(node -> toNodePriority(node, false, false))
                .forEach(prioritizableNode -> nodes.put(prioritizableNode.node, prioritizableNode));
    }

    /**
     * Add nodes already provisioned, but not allocated to any application
     */
    void addReadyNodes() {
        allNodes.stream()
                .filter(node -> node.type().equals(requestedNodes.type()))
                .filter(node -> node.state().equals(Node.State.ready))
                .map(node -> toNodePriority(node, false, false))
                .filter(n -> !n.violatesSpares || isAllocatingForReplacement)
                .forEach(prioritizableNode -> nodes.put(prioritizableNode.node, prioritizableNode));
    }

    /**
     * Convert a node to a node priority. This includes finding and calculating
     * the parameters used by the priority sorting procedure.
     */
    private PrioritizableNode toNodePriority(Node node, boolean isSurplusNode, boolean isNewNode) {
        PrioritizableNode pri = new PrioritizableNode();
        pri.node = node;
        pri.isSurplusNode = isSurplusNode;
        pri.isNewNode = isNewNode;
        pri.preferredOnFlavor = requestedNodes.specifiesNonStockFlavor() && node.flavor().equals(getFlavor(requestedNodes));
        pri.parent = findParentNode(node);

        if (pri.parent.isPresent()) {
            Node parent = pri.parent.get();
            pri.freeParentCapacity = capacity.freeCapacityOf(parent, false);

            if (spareHosts.contains(parent)) {
                pri.violatesSpares = true;
            }

            if (headroomHosts.containsKey(parent) && isPreferredNodeToBeReloacted(allNodes, node, parent)) {
                ResourceCapacity neededCapacity = headroomHosts.get(parent);

                // A new node on the host consumes capacity itself, so add it to the needed headroom
                if (isNewNode) {
                    neededCapacity = ResourceCapacity.composite(neededCapacity, new ResourceCapacity(node));
                }
                pri.violatesHeadroom = !capacity.hasCapacity(parent, neededCapacity);
            }
        }

        return pri;
    }

    /**
     * Returns whether the given node is the first of the parent's children in relocation
     * order (see compareForRelocation).
     * NOTE(review): method name has a typo ("Reloacted"); it is package-visible, so renaming
     * it could break external callers — left as-is here.
     */
    static boolean isPreferredNodeToBeReloacted(List<Node> nodes, Node node, Node parent) {
        NodeList list = new NodeList(nodes);
        return list.childrenOf(parent).asList().stream()
                .sorted(NodePrioritizer::compareForRelocation)
                .findFirst()
                .filter(n -> n.equals(node))
                .isPresent();
    }

    /** Returns true if we are allocating to replace failed nodes (wanted count exceeds healthy nodes) */
    private boolean isReplacement(long nofNodesInCluster, long nodeFailedNodes) {
        if (nodeFailedNodes == 0) return false;

        int wantedCount = 0;
        if (requestedNodes instanceof NodeSpec.CountNodeSpec) {
            NodeSpec.CountNodeSpec countSpec = (NodeSpec.CountNodeSpec) requestedNodes;
            wantedCount = countSpec.getCount();
        }

        return (wantedCount > nofNodesInCluster - nodeFailedNodes);
    }

    /** Returns the requested flavor, or null if the spec is not a count/flavor spec */
    private static Flavor getFlavor(NodeSpec requestedNodes) {
        if (requestedNodes instanceof NodeSpec.CountNodeSpec) {
            NodeSpec.CountNodeSpec countSpec = (NodeSpec.CountNodeSpec) requestedNodes;
            return countSpec.getFlavor();
        }
        return null;
    }

    /** Returns true if the requested flavor is a Docker container flavor */
    private boolean isDocker() {
        Flavor flavor = getFlavor(requestedNodes);
        return (flavor != null) && flavor.getType().equals(Flavor.Type.DOCKER_CONTAINER);
    }

    /** Returns the parent host of the given node, if it has one among allNodes */
    private Optional<Node> findParentNode(Node node) {
        if (!node.parentHostname().isPresent()) return Optional.empty();
        return allNodes.stream()
                .filter(n -> n.hostname().equals(node.parentHostname().orElse(" NOT A NODE")))
                .findAny();
    }

    /** Ordering used to choose which of a host's children is preferred for relocation */
    private static int compareForRelocation(Node a, Node b) {
        // Compare on capacity first
        int capacity = ResourceCapacity.of(a).compare(ResourceCapacity.of(b));
        if (capacity != 0) return capacity;

        // Unallocated nodes sort before allocated ones
        if (!a.allocation().isPresent() && b.allocation().isPresent()) return -1;
        if (a.allocation().isPresent() && !b.allocation().isPresent()) return 1;

        // Container cluster members sort before other cluster types
        if (a.allocation().isPresent() && b.allocation().isPresent()) {
            if (a.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container) &&
                    !b.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container))
                return -1;
            if (!a.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container) &&
                    b.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container))
                return 1;
        }

        // Tie-break on hostname for a deterministic order
        return a.hostname().compareTo(b.hostname());
    }
}
Yes, thanks, fixed.
void addNewDockerNodes() { if (!isDocker) return; DockerHostCapacity capacity = new DockerHostCapacity(allNodes); ResourceCapacity wantedResourceCapacity = ResourceCapacity.of(getFlavor(requestedNodes)); NodeList list = new NodeList(allNodes); for (Node node : allNodes) { if (node.type() != NodeType.host) continue; if (node.state() != Node.State.active) continue; if (node.status().wantToRetire()) continue; boolean hostHasCapacityForWantedFlavor = capacity.hasCapacity(node, wantedResourceCapacity); boolean conflictingCluster = list.childrenOf(node).owner(appId).asList().stream() .anyMatch(child -> child.allocation().get().membership().cluster().id().equals(clusterSpec.id())); if (!hostHasCapacityForWantedFlavor || conflictingCluster) continue; log.log(LogLevel.DEBUG, "Trying to add new Docker node on " + node); Set<String> ipAddresses = DockerHostCapacity.findFreeIps(node, allNodes); if (ipAddresses.isEmpty()) continue; String ipAddress = ipAddresses.stream().findFirst().get(); Optional<String> hostname = nameResolver.getHostname(ipAddress); if (!hostname.isPresent()) { log.log(LogLevel.DEBUG, "Could not find hostname for " + ipAddress + ", skipping it"); continue; } Node newNode = Node.createDockerNode("fake-" + hostname.get(), Collections.singleton(ipAddress), Collections.emptySet(), hostname.get(), Optional.of(node.hostname()), getFlavor(requestedNodes), NodeType.tenant); PrioritizableNode nodePri = toNodePriority(newNode, false, true); if (!nodePri.violatesSpares || isAllocatingForReplacement) { log.log(LogLevel.DEBUG, "Adding new Docker node " + node); nodes.put(newNode, nodePri); } } }
log.log(LogLevel.DEBUG, "Adding new Docker node " + node);
void addNewDockerNodes() { if (!isDocker) return; DockerHostCapacity capacity = new DockerHostCapacity(allNodes); ResourceCapacity wantedResourceCapacity = ResourceCapacity.of(getFlavor(requestedNodes)); NodeList list = new NodeList(allNodes); for (Node node : allNodes) { if (node.type() != NodeType.host) continue; if (node.state() != Node.State.active) continue; if (node.status().wantToRetire()) continue; boolean hostHasCapacityForWantedFlavor = capacity.hasCapacity(node, wantedResourceCapacity); boolean conflictingCluster = list.childrenOf(node).owner(appId).asList().stream() .anyMatch(child -> child.allocation().get().membership().cluster().id().equals(clusterSpec.id())); if (!hostHasCapacityForWantedFlavor || conflictingCluster) continue; log.log(LogLevel.DEBUG, "Trying to add new Docker node on " + node); Set<String> ipAddresses = DockerHostCapacity.findFreeIps(node, allNodes); if (ipAddresses.isEmpty()) continue; String ipAddress = ipAddresses.stream().findFirst().get(); Optional<String> hostname = nameResolver.getHostname(ipAddress); if (!hostname.isPresent()) { log.log(LogLevel.DEBUG, "Could not find hostname for " + ipAddress + ", skipping it"); continue; } Node newNode = Node.createDockerNode("fake-" + hostname.get(), Collections.singleton(ipAddress), Collections.emptySet(), hostname.get(), Optional.of(node.hostname()), getFlavor(requestedNodes), NodeType.tenant); PrioritizableNode nodePri = toNodePriority(newNode, false, true); if (!nodePri.violatesSpares || isAllocatingForReplacement) { log.log(LogLevel.DEBUG, "Adding new Docker node " + newNode); nodes.put(newNode, nodePri); } } }
class NodePrioritizer { private final static Logger log = Logger.getLogger(NodePrioritizer.class.getName()); private final Map<Node, PrioritizableNode> nodes = new HashMap<>(); private final List<Node> allNodes; private final DockerHostCapacity capacity; private final NodeSpec requestedNodes; private final ApplicationId appId; private final ClusterSpec clusterSpec; private final NameResolver nameResolver; private final boolean isDocker; private final boolean isAllocatingForReplacement; private final Set<Node> spareHosts; private final Map<Node, ResourceCapacity> headroomHosts; NodePrioritizer(List<Node> allNodes, ApplicationId appId, ClusterSpec clusterSpec, NodeSpec nodeSpec, NodeFlavors nodeFlavors, int spares, NameResolver nameResolver) { this.allNodes = Collections.unmodifiableList(allNodes); this.requestedNodes = nodeSpec; this.clusterSpec = clusterSpec; this.appId = appId; this.nameResolver = nameResolver; this.spareHosts = findSpareHosts(allNodes, spares); this.headroomHosts = findHeadroomHosts(allNodes, spareHosts, nodeFlavors); this.capacity = new DockerHostCapacity(allNodes); long nofFailedNodes = allNodes.stream() .filter(node -> node.state().equals(Node.State.failed)) .filter(node -> node.allocation().isPresent()) .filter(node -> node.allocation().get().owner().equals(appId)) .filter(node -> node.allocation().get().membership().cluster().id().equals(clusterSpec.id())) .count(); long nofNodesInCluster = allNodes.stream() .filter(node -> node.allocation().isPresent()) .filter(node -> node.allocation().get().owner().equals(appId)) .filter(node -> node.allocation().get().membership().cluster().id().equals(clusterSpec.id())) .count(); this.isAllocatingForReplacement = isReplacement(nofNodesInCluster, nofFailedNodes); this.isDocker = isDocker(); } /** * Spare hosts are the two hosts in the system with the most free capacity. 
* * We do not count retired or inactive nodes as used capacity (as they could have been * moved to create space for the spare node in the first place). */ private static Set<Node> findSpareHosts(List<Node> nodes, int spares) { DockerHostCapacity capacity = new DockerHostCapacity(new ArrayList<>(nodes)); return nodes.stream() .filter(node -> node.type().equals(NodeType.host)) .filter(dockerHost -> dockerHost.state().equals(Node.State.active)) .filter(dockerHost -> capacity.freeIPs(dockerHost) > 0) .sorted(capacity::compareWithoutInactive) .limit(spares) .collect(Collectors.toSet()); } /** * Headroom hosts are the host with the least but sufficient capacity for the requested headroom. * * If not enough headroom - the headroom violating hosts are the once that are closest to fulfill * a headroom request. */ private static Map<Node, ResourceCapacity> findHeadroomHosts(List<Node> nodes, Set<Node> spareNodes, NodeFlavors flavors) { DockerHostCapacity capacity = new DockerHostCapacity(nodes); Map<Node, ResourceCapacity> headroomHosts = new HashMap<>(); List<Node> hostsSortedOnLeastCapacity = nodes.stream() .filter(n -> !spareNodes.contains(n)) .filter(node -> node.type().equals(NodeType.host)) .filter(dockerHost -> dockerHost.state().equals(Node.State.active)) .filter(dockerHost -> capacity.freeIPs(dockerHost) > 0) .sorted((a, b) -> capacity.compareWithoutInactive(b, a)) .collect(Collectors.toList()); for (Flavor flavor : flavors.getFlavors().stream().filter(f -> f.getIdealHeadroom() > 0).collect(Collectors.toList())) { Set<Node> tempHeadroom = new HashSet<>(); Set<Node> notEnoughCapacity = new HashSet<>(); ResourceCapacity headroomCapacity = ResourceCapacity.of(flavor); for (Node host : hostsSortedOnLeastCapacity) { if (headroomHosts.containsKey(host)) continue; if (capacity.hasCapacityWhenRetiredAndInactiveNodesAreGone(host, headroomCapacity)) { headroomHosts.put(host, headroomCapacity); tempHeadroom.add(host); } else { notEnoughCapacity.add(host); } if 
(tempHeadroom.size() == flavor.getIdealHeadroom()) { break; } } if (tempHeadroom.size() < flavor.getIdealHeadroom()) { List<Node> violations = notEnoughCapacity.stream() .sorted((a, b) -> capacity.compare(b, a)) .limit(flavor.getIdealHeadroom() - tempHeadroom.size()) .collect(Collectors.toList()); for (Node hostViolatingHeadrom : violations) { headroomHosts.put(hostViolatingHeadrom, headroomCapacity); } } } return headroomHosts; } /** * @return The list of nodes sorted by PrioritizableNode::compare */ List<PrioritizableNode> prioritize() { List<PrioritizableNode> priorityList = new ArrayList<>(nodes.values()); Collections.sort(priorityList); return priorityList; } /** * Add nodes that have been previously reserved to the same application from * an earlier downsizing of a cluster */ void addSurplusNodes(List<Node> surplusNodes) { for (Node node : surplusNodes) { PrioritizableNode nodePri = toNodePriority(node, true, false); if (!nodePri.violatesSpares || isAllocatingForReplacement) { nodes.put(node, nodePri); } } } /** * Add a node on each docker host with enough capacity for the requested flavor */ /** * Add existing nodes allocated to the application */ void addApplicationNodes() { List<Node.State> legalStates = Arrays.asList(Node.State.active, Node.State.inactive, Node.State.reserved); allNodes.stream() .filter(node -> node.type().equals(requestedNodes.type())) .filter(node -> legalStates.contains(node.state())) .filter(node -> node.allocation().isPresent()) .filter(node -> node.allocation().get().owner().equals(appId)) .map(node -> toNodePriority(node, false, false)) .forEach(prioritizableNode -> nodes.put(prioritizableNode.node, prioritizableNode)); } /** * Add nodes already provisioned, but not allocated to any application */ void addReadyNodes() { allNodes.stream() .filter(node -> node.type().equals(requestedNodes.type())) .filter(node -> node.state().equals(Node.State.ready)) .map(node -> toNodePriority(node, false, false)) .filter(n -> !n.violatesSpares || 
isAllocatingForReplacement) .forEach(prioritizableNode -> nodes.put(prioritizableNode.node, prioritizableNode)); } /** * Convert a list of nodes to a list of node priorities. This includes finding, calculating * parameters to the priority sorting procedure. */ private PrioritizableNode toNodePriority(Node node, boolean isSurplusNode, boolean isNewNode) { PrioritizableNode pri = new PrioritizableNode(); pri.node = node; pri.isSurplusNode = isSurplusNode; pri.isNewNode = isNewNode; pri.preferredOnFlavor = requestedNodes.specifiesNonStockFlavor() && node.flavor().equals(getFlavor(requestedNodes)); pri.parent = findParentNode(node); if (pri.parent.isPresent()) { Node parent = pri.parent.get(); pri.freeParentCapacity = capacity.freeCapacityOf(parent, false); if (spareHosts.contains(parent)) { pri.violatesSpares = true; } if (headroomHosts.containsKey(parent) && isPreferredNodeToBeReloacted(allNodes, node, parent)) { ResourceCapacity neededCapacity = headroomHosts.get(parent); if (isNewNode) { neededCapacity = ResourceCapacity.composite(neededCapacity, new ResourceCapacity(node)); } pri.violatesHeadroom = !capacity.hasCapacity(parent, neededCapacity); } } return pri; } static boolean isPreferredNodeToBeReloacted(List<Node> nodes, Node node, Node parent) { NodeList list = new NodeList(nodes); return list.childrenOf(parent).asList().stream() .sorted(NodePrioritizer::compareForRelocation) .findFirst() .filter(n -> n.equals(node)) .isPresent(); } private boolean isReplacement(long nofNodesInCluster, long nodeFailedNodes) { if (nodeFailedNodes == 0) return false; int wantedCount = 0; if (requestedNodes instanceof NodeSpec.CountNodeSpec) { NodeSpec.CountNodeSpec countSpec = (NodeSpec.CountNodeSpec) requestedNodes; wantedCount = countSpec.getCount(); } return (wantedCount > nofNodesInCluster - nodeFailedNodes); } private static Flavor getFlavor(NodeSpec requestedNodes) { if (requestedNodes instanceof NodeSpec.CountNodeSpec) { NodeSpec.CountNodeSpec countSpec = 
(NodeSpec.CountNodeSpec) requestedNodes; return countSpec.getFlavor(); } return null; } private boolean isDocker() { Flavor flavor = getFlavor(requestedNodes); return (flavor != null) && flavor.getType().equals(Flavor.Type.DOCKER_CONTAINER); } private Optional<Node> findParentNode(Node node) { if (!node.parentHostname().isPresent()) return Optional.empty(); return allNodes.stream() .filter(n -> n.hostname().equals(node.parentHostname().orElse(" NOT A NODE"))) .findAny(); } private static int compareForRelocation(Node a, Node b) { int capacity = ResourceCapacity.of(a).compare(ResourceCapacity.of(b)); if (capacity != 0) return capacity; if (!a.allocation().isPresent() && b.allocation().isPresent()) return -1; if (a.allocation().isPresent() && !b.allocation().isPresent()) return 1; if (a.allocation().isPresent() && b.allocation().isPresent()) { if (a.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container) && !b.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container)) return -1; if (!a.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container) && b.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container)) return 1; } return a.hostname().compareTo(b.hostname()); } }
class NodePrioritizer { private final static Logger log = Logger.getLogger(NodePrioritizer.class.getName()); private final Map<Node, PrioritizableNode> nodes = new HashMap<>(); private final List<Node> allNodes; private final DockerHostCapacity capacity; private final NodeSpec requestedNodes; private final ApplicationId appId; private final ClusterSpec clusterSpec; private final NameResolver nameResolver; private final boolean isDocker; private final boolean isAllocatingForReplacement; private final Set<Node> spareHosts; private final Map<Node, ResourceCapacity> headroomHosts; NodePrioritizer(List<Node> allNodes, ApplicationId appId, ClusterSpec clusterSpec, NodeSpec nodeSpec, NodeFlavors nodeFlavors, int spares, NameResolver nameResolver) { this.allNodes = Collections.unmodifiableList(allNodes); this.requestedNodes = nodeSpec; this.clusterSpec = clusterSpec; this.appId = appId; this.nameResolver = nameResolver; this.spareHosts = findSpareHosts(allNodes, spares); this.headroomHosts = findHeadroomHosts(allNodes, spareHosts, nodeFlavors); this.capacity = new DockerHostCapacity(allNodes); long nofFailedNodes = allNodes.stream() .filter(node -> node.state().equals(Node.State.failed)) .filter(node -> node.allocation().isPresent()) .filter(node -> node.allocation().get().owner().equals(appId)) .filter(node -> node.allocation().get().membership().cluster().id().equals(clusterSpec.id())) .count(); long nofNodesInCluster = allNodes.stream() .filter(node -> node.allocation().isPresent()) .filter(node -> node.allocation().get().owner().equals(appId)) .filter(node -> node.allocation().get().membership().cluster().id().equals(clusterSpec.id())) .count(); this.isAllocatingForReplacement = isReplacement(nofNodesInCluster, nofFailedNodes); this.isDocker = isDocker(); } /** * Spare hosts are the two hosts in the system with the most free capacity. 
* * We do not count retired or inactive nodes as used capacity (as they could have been * moved to create space for the spare node in the first place). */ private static Set<Node> findSpareHosts(List<Node> nodes, int spares) { DockerHostCapacity capacity = new DockerHostCapacity(new ArrayList<>(nodes)); return nodes.stream() .filter(node -> node.type().equals(NodeType.host)) .filter(dockerHost -> dockerHost.state().equals(Node.State.active)) .filter(dockerHost -> capacity.freeIPs(dockerHost) > 0) .sorted(capacity::compareWithoutInactive) .limit(spares) .collect(Collectors.toSet()); } /** * Headroom hosts are the host with the least but sufficient capacity for the requested headroom. * * If not enough headroom - the headroom violating hosts are the once that are closest to fulfill * a headroom request. */ private static Map<Node, ResourceCapacity> findHeadroomHosts(List<Node> nodes, Set<Node> spareNodes, NodeFlavors flavors) { DockerHostCapacity capacity = new DockerHostCapacity(nodes); Map<Node, ResourceCapacity> headroomHosts = new HashMap<>(); List<Node> hostsSortedOnLeastCapacity = nodes.stream() .filter(n -> !spareNodes.contains(n)) .filter(node -> node.type().equals(NodeType.host)) .filter(dockerHost -> dockerHost.state().equals(Node.State.active)) .filter(dockerHost -> capacity.freeIPs(dockerHost) > 0) .sorted((a, b) -> capacity.compareWithoutInactive(b, a)) .collect(Collectors.toList()); for (Flavor flavor : flavors.getFlavors().stream().filter(f -> f.getIdealHeadroom() > 0).collect(Collectors.toList())) { Set<Node> tempHeadroom = new HashSet<>(); Set<Node> notEnoughCapacity = new HashSet<>(); ResourceCapacity headroomCapacity = ResourceCapacity.of(flavor); for (Node host : hostsSortedOnLeastCapacity) { if (headroomHosts.containsKey(host)) continue; if (capacity.hasCapacityWhenRetiredAndInactiveNodesAreGone(host, headroomCapacity)) { headroomHosts.put(host, headroomCapacity); tempHeadroom.add(host); } else { notEnoughCapacity.add(host); } if 
(tempHeadroom.size() == flavor.getIdealHeadroom()) { break; } } if (tempHeadroom.size() < flavor.getIdealHeadroom()) { List<Node> violations = notEnoughCapacity.stream() .sorted((a, b) -> capacity.compare(b, a)) .limit(flavor.getIdealHeadroom() - tempHeadroom.size()) .collect(Collectors.toList()); for (Node hostViolatingHeadrom : violations) { headroomHosts.put(hostViolatingHeadrom, headroomCapacity); } } } return headroomHosts; } /** * @return The list of nodes sorted by PrioritizableNode::compare */ List<PrioritizableNode> prioritize() { List<PrioritizableNode> priorityList = new ArrayList<>(nodes.values()); Collections.sort(priorityList); return priorityList; } /** * Add nodes that have been previously reserved to the same application from * an earlier downsizing of a cluster */ void addSurplusNodes(List<Node> surplusNodes) { for (Node node : surplusNodes) { PrioritizableNode nodePri = toNodePriority(node, true, false); if (!nodePri.violatesSpares || isAllocatingForReplacement) { nodes.put(node, nodePri); } } } /** * Add a node on each docker host with enough capacity for the requested flavor */ /** * Add existing nodes allocated to the application */ void addApplicationNodes() { List<Node.State> legalStates = Arrays.asList(Node.State.active, Node.State.inactive, Node.State.reserved); allNodes.stream() .filter(node -> node.type().equals(requestedNodes.type())) .filter(node -> legalStates.contains(node.state())) .filter(node -> node.allocation().isPresent()) .filter(node -> node.allocation().get().owner().equals(appId)) .map(node -> toNodePriority(node, false, false)) .forEach(prioritizableNode -> nodes.put(prioritizableNode.node, prioritizableNode)); } /** * Add nodes already provisioned, but not allocated to any application */ void addReadyNodes() { allNodes.stream() .filter(node -> node.type().equals(requestedNodes.type())) .filter(node -> node.state().equals(Node.State.ready)) .map(node -> toNodePriority(node, false, false)) .filter(n -> !n.violatesSpares || 
isAllocatingForReplacement) .forEach(prioritizableNode -> nodes.put(prioritizableNode.node, prioritizableNode)); } /** * Convert a list of nodes to a list of node priorities. This includes finding, calculating * parameters to the priority sorting procedure. */ private PrioritizableNode toNodePriority(Node node, boolean isSurplusNode, boolean isNewNode) { PrioritizableNode pri = new PrioritizableNode(); pri.node = node; pri.isSurplusNode = isSurplusNode; pri.isNewNode = isNewNode; pri.preferredOnFlavor = requestedNodes.specifiesNonStockFlavor() && node.flavor().equals(getFlavor(requestedNodes)); pri.parent = findParentNode(node); if (pri.parent.isPresent()) { Node parent = pri.parent.get(); pri.freeParentCapacity = capacity.freeCapacityOf(parent, false); if (spareHosts.contains(parent)) { pri.violatesSpares = true; } if (headroomHosts.containsKey(parent) && isPreferredNodeToBeReloacted(allNodes, node, parent)) { ResourceCapacity neededCapacity = headroomHosts.get(parent); if (isNewNode) { neededCapacity = ResourceCapacity.composite(neededCapacity, new ResourceCapacity(node)); } pri.violatesHeadroom = !capacity.hasCapacity(parent, neededCapacity); } } return pri; } static boolean isPreferredNodeToBeReloacted(List<Node> nodes, Node node, Node parent) { NodeList list = new NodeList(nodes); return list.childrenOf(parent).asList().stream() .sorted(NodePrioritizer::compareForRelocation) .findFirst() .filter(n -> n.equals(node)) .isPresent(); } private boolean isReplacement(long nofNodesInCluster, long nodeFailedNodes) { if (nodeFailedNodes == 0) return false; int wantedCount = 0; if (requestedNodes instanceof NodeSpec.CountNodeSpec) { NodeSpec.CountNodeSpec countSpec = (NodeSpec.CountNodeSpec) requestedNodes; wantedCount = countSpec.getCount(); } return (wantedCount > nofNodesInCluster - nodeFailedNodes); } private static Flavor getFlavor(NodeSpec requestedNodes) { if (requestedNodes instanceof NodeSpec.CountNodeSpec) { NodeSpec.CountNodeSpec countSpec = 
(NodeSpec.CountNodeSpec) requestedNodes; return countSpec.getFlavor(); } return null; } private boolean isDocker() { Flavor flavor = getFlavor(requestedNodes); return (flavor != null) && flavor.getType().equals(Flavor.Type.DOCKER_CONTAINER); } private Optional<Node> findParentNode(Node node) { if (!node.parentHostname().isPresent()) return Optional.empty(); return allNodes.stream() .filter(n -> n.hostname().equals(node.parentHostname().orElse(" NOT A NODE"))) .findAny(); } private static int compareForRelocation(Node a, Node b) { int capacity = ResourceCapacity.of(a).compare(ResourceCapacity.of(b)); if (capacity != 0) return capacity; if (!a.allocation().isPresent() && b.allocation().isPresent()) return -1; if (a.allocation().isPresent() && !b.allocation().isPresent()) return 1; if (a.allocation().isPresent() && b.allocation().isPresent()) { if (a.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container) && !b.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container)) return -1; if (!a.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container) && b.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container)) return 1; } return a.hostname().compareTo(b.hostname()); } }
With this gone, it does honour `Jimfs` :)
public TemplateFile(Path templatePath) { this.templatePath = templatePath; velocityEngine = new VelocityEngine(); velocityEngine.addProperty( Velocity.RUNTIME_LOG_LOGSYSTEM_CLASS, "org.apache.velocity.runtime.log.NullLogSystem"); velocityEngine.addProperty(Velocity.FILE_RESOURCE_LOADER_PATH, templatePath.getParent().toString()); velocityEngine.init(); }
velocityEngine.addProperty(Velocity.FILE_RESOURCE_LOADER_PATH, templatePath.getParent().toString());
public TemplateFile(Path templatePath) { this.templatePath = templatePath; velocityEngine = new VelocityEngine(); velocityEngine.addProperty( Velocity.RUNTIME_LOG_LOGSYSTEM_CLASS, "org.apache.velocity.runtime.log.NullLogSystem"); velocityEngine.addProperty(Velocity.FILE_RESOURCE_LOADER_PATH, templatePath.getParent().toString()); velocityEngine.init(); }
class TemplateFile { private final Path templatePath; private final VelocityEngine velocityEngine; private final VelocityContext velocityContext = new VelocityContext(); public TemplateFile set(String name, Object value) { velocityContext.put(name, value); return this; } public FileWriter getFileWriterTo(Path destinationPath) { return new FileWriter(destinationPath, this::render); } public String render() { Template template = velocityEngine.getTemplate(templatePath.getFileName().toString(), "UTF-8"); StringWriter writer = new StringWriter(); template.merge(velocityContext, writer); return writer.toString(); } }
class TemplateFile { private final Path templatePath; private final VelocityEngine velocityEngine; private final VelocityContext velocityContext = new VelocityContext(); public TemplateFile set(String name, Object value) { velocityContext.put(name, value); return this; } public FileWriter getFileWriterTo(Path destinationPath) { return new FileWriter(destinationPath, this::render); } private String render() { Template template = velocityEngine.getTemplate(templatePath.getFileName().toString(), "UTF-8"); StringWriter writer = new StringWriter(); template.merge(velocityContext, writer); return writer.toString(); } }
Remember to use the new getter after merging #5306
private static DiscFilterRequest toDiscFilterRequest(Request request) { DiscFilterRequest r = mock(DiscFilterRequest.class); when(r.getMethod()).thenReturn(request.method().name()); when(r.getUri()).thenReturn(URI.create("http: when(r.getRemoteAddr()).thenReturn(request.remoteAddr()); if (request.commonName().isPresent()) { X509Certificate cert = certificateFor(request.commonName().get(), keyPair()); when(r.getAttribute(ServletRequest.JDISC_REQUEST_X509CERT)) .thenReturn(new X509Certificate[]{cert}); } return r; }
when(r.getAttribute(ServletRequest.JDISC_REQUEST_X509CERT))
private static DiscFilterRequest toDiscFilterRequest(Request request) { DiscFilterRequest r = mock(DiscFilterRequest.class); when(r.getMethod()).thenReturn(request.method().name()); when(r.getUri()).thenReturn(URI.create("http: when(r.getRemoteAddr()).thenReturn(request.remoteAddr()); if (request.commonName().isPresent()) { X509Certificate cert = certificateFor(request.commonName().get(), keyPair()); when(r.getAttribute(ServletRequest.JDISC_REQUEST_X509CERT)) .thenReturn(new X509Certificate[]{cert}); } return r; }
class FilterTester { private final SecurityRequestFilter filter; public FilterTester(SecurityRequestFilter filter) { this.filter = filter; } public void assertSuccess(Request request) { assertFalse("No response written by filter", getResponse(request).isPresent()); } public void assertRequest(Request request, int status, String body) { Optional<Response> response = getResponse(request); assertTrue("Expected response from filter", response.isPresent()); assertEquals("Response body", body, response.get().body); assertEquals("Status code", status, response.get().status); } private Optional<Response> getResponse(Request request) { RequestHandlerTestDriver.MockResponseHandler handler = new RequestHandlerTestDriver.MockResponseHandler(); filter.filter(toDiscFilterRequest(request), handler); return Optional.ofNullable(handler.getResponse()) .map(response -> new Response(response.getStatus(), handler.readAll())); } /** Create a RSA public/private key pair */ private static KeyPair keyPair() { try { KeyPairGenerator keyGen = KeyPairGenerator.getInstance("RSA"); keyGen.initialize(2048); return keyGen.generateKeyPair(); } catch (NoSuchAlgorithmException e) { throw new RuntimeException(e); } } /** Create a self signed certificate for commonName using given public/private key pair */ private static X509Certificate certificateFor(String commonName, KeyPair keyPair) { try { ContentSigner contentSigner = new JcaContentSignerBuilder("SHA256WithRSA") .build(keyPair.getPrivate()); X500Name x500Name = new X500Name("CN=" + commonName); Instant now = Instant.now(); Date notBefore = Date.from(now); Date notAfter = Date.from(now.plus(Duration.ofDays(30))); X509v3CertificateBuilder certificateBuilder = new JcaX509v3CertificateBuilder( x500Name, BigInteger.valueOf(now.toEpochMilli()), notBefore, notAfter, x500Name, keyPair.getPublic() ).addExtension(Extension.basicConstraints, true, new BasicConstraints(true)); return new JcaX509CertificateConverter() .setProvider(new 
BouncyCastleProvider()) .getCertificate(certificateBuilder.build(contentSigner)); } catch (OperatorCreationException |IOException |CertificateException e) { throw new RuntimeException(e); } } private static class Response { private final int status; private final String body; private Response(int status, String body) { this.status = status; this.body = body; } } public static class Request { private final Method method; private final String path; private String remoteAddr; private String commonName; public Request(Method method, String path) { this.method = method; this.path = path; this.commonName = null; this.remoteAddr = "unit-test"; } public Method method() { return method; } public String path() { return path; } public String remoteAddr() { return remoteAddr; } public Optional<String> commonName() { return Optional.ofNullable(commonName); } public Request commonName(String commonName) { this.commonName = commonName; return this; } public Request remoteAddr(String remoteAddr) { this.remoteAddr = remoteAddr; return this; } } }
class FilterTester { private final SecurityRequestFilter filter; public FilterTester(SecurityRequestFilter filter) { this.filter = filter; } public void assertSuccess(Request request) { assertFalse("No response written by filter", getResponse(request).isPresent()); } public void assertRequest(Request request, int status, String body) { Optional<Response> response = getResponse(request); assertTrue("Expected response from filter", response.isPresent()); assertEquals("Response body", body, response.get().body); assertEquals("Status code", status, response.get().status); } private Optional<Response> getResponse(Request request) { RequestHandlerTestDriver.MockResponseHandler handler = new RequestHandlerTestDriver.MockResponseHandler(); filter.filter(toDiscFilterRequest(request), handler); return Optional.ofNullable(handler.getResponse()) .map(response -> new Response(response.getStatus(), handler.readAll())); } /** Create a RSA public/private key pair */ private static KeyPair keyPair() { try { KeyPairGenerator keyGen = KeyPairGenerator.getInstance("RSA"); keyGen.initialize(2048); return keyGen.generateKeyPair(); } catch (NoSuchAlgorithmException e) { throw new RuntimeException(e); } } /** Create a self signed certificate for commonName using given public/private key pair */ private static X509Certificate certificateFor(String commonName, KeyPair keyPair) { try { ContentSigner contentSigner = new JcaContentSignerBuilder("SHA256WithRSA") .build(keyPair.getPrivate()); X500Name x500Name = new X500Name("CN=" + commonName); Instant now = Instant.now(); Date notBefore = Date.from(now); Date notAfter = Date.from(now.plus(Duration.ofDays(30))); X509v3CertificateBuilder certificateBuilder = new JcaX509v3CertificateBuilder( x500Name, BigInteger.valueOf(now.toEpochMilli()), notBefore, notAfter, x500Name, keyPair.getPublic() ).addExtension(Extension.basicConstraints, true, new BasicConstraints(true)); return new JcaX509CertificateConverter() .setProvider(new 
BouncyCastleProvider()) .getCertificate(certificateBuilder.build(contentSigner)); } catch (OperatorCreationException |IOException |CertificateException e) { throw new RuntimeException(e); } } private static class Response { private final int status; private final String body; private Response(int status, String body) { this.status = status; this.body = body; } } public static class Request { private final Method method; private final String path; private String remoteAddr; private String commonName; public Request(Method method, String path) { this.method = method; this.path = path; this.commonName = null; this.remoteAddr = "unit-test"; } public Method method() { return method; } public String path() { return path; } public String remoteAddr() { return remoteAddr; } public Optional<String> commonName() { return Optional.ofNullable(commonName); } public Request commonName(String commonName) { this.commonName = commonName; return this; } public Request remoteAddr(String remoteAddr) { this.remoteAddr = remoteAddr; return this; } } }
Yes, will update it in a separate PR, thanks!
private static DiscFilterRequest toDiscFilterRequest(Request request) { DiscFilterRequest r = mock(DiscFilterRequest.class); when(r.getMethod()).thenReturn(request.method().name()); when(r.getUri()).thenReturn(URI.create("http: when(r.getRemoteAddr()).thenReturn(request.remoteAddr()); if (request.commonName().isPresent()) { X509Certificate cert = certificateFor(request.commonName().get(), keyPair()); when(r.getAttribute(ServletRequest.JDISC_REQUEST_X509CERT)) .thenReturn(new X509Certificate[]{cert}); } return r; }
when(r.getAttribute(ServletRequest.JDISC_REQUEST_X509CERT))
private static DiscFilterRequest toDiscFilterRequest(Request request) { DiscFilterRequest r = mock(DiscFilterRequest.class); when(r.getMethod()).thenReturn(request.method().name()); when(r.getUri()).thenReturn(URI.create("http: when(r.getRemoteAddr()).thenReturn(request.remoteAddr()); if (request.commonName().isPresent()) { X509Certificate cert = certificateFor(request.commonName().get(), keyPair()); when(r.getAttribute(ServletRequest.JDISC_REQUEST_X509CERT)) .thenReturn(new X509Certificate[]{cert}); } return r; }
class FilterTester { private final SecurityRequestFilter filter; public FilterTester(SecurityRequestFilter filter) { this.filter = filter; } public void assertSuccess(Request request) { assertFalse("No response written by filter", getResponse(request).isPresent()); } public void assertRequest(Request request, int status, String body) { Optional<Response> response = getResponse(request); assertTrue("Expected response from filter", response.isPresent()); assertEquals("Response body", body, response.get().body); assertEquals("Status code", status, response.get().status); } private Optional<Response> getResponse(Request request) { RequestHandlerTestDriver.MockResponseHandler handler = new RequestHandlerTestDriver.MockResponseHandler(); filter.filter(toDiscFilterRequest(request), handler); return Optional.ofNullable(handler.getResponse()) .map(response -> new Response(response.getStatus(), handler.readAll())); } /** Create a RSA public/private key pair */ private static KeyPair keyPair() { try { KeyPairGenerator keyGen = KeyPairGenerator.getInstance("RSA"); keyGen.initialize(2048); return keyGen.generateKeyPair(); } catch (NoSuchAlgorithmException e) { throw new RuntimeException(e); } } /** Create a self signed certificate for commonName using given public/private key pair */ private static X509Certificate certificateFor(String commonName, KeyPair keyPair) { try { ContentSigner contentSigner = new JcaContentSignerBuilder("SHA256WithRSA") .build(keyPair.getPrivate()); X500Name x500Name = new X500Name("CN=" + commonName); Instant now = Instant.now(); Date notBefore = Date.from(now); Date notAfter = Date.from(now.plus(Duration.ofDays(30))); X509v3CertificateBuilder certificateBuilder = new JcaX509v3CertificateBuilder( x500Name, BigInteger.valueOf(now.toEpochMilli()), notBefore, notAfter, x500Name, keyPair.getPublic() ).addExtension(Extension.basicConstraints, true, new BasicConstraints(true)); return new JcaX509CertificateConverter() .setProvider(new 
BouncyCastleProvider()) .getCertificate(certificateBuilder.build(contentSigner)); } catch (OperatorCreationException |IOException |CertificateException e) { throw new RuntimeException(e); } } private static class Response { private final int status; private final String body; private Response(int status, String body) { this.status = status; this.body = body; } } public static class Request { private final Method method; private final String path; private String remoteAddr; private String commonName; public Request(Method method, String path) { this.method = method; this.path = path; this.commonName = null; this.remoteAddr = "unit-test"; } public Method method() { return method; } public String path() { return path; } public String remoteAddr() { return remoteAddr; } public Optional<String> commonName() { return Optional.ofNullable(commonName); } public Request commonName(String commonName) { this.commonName = commonName; return this; } public Request remoteAddr(String remoteAddr) { this.remoteAddr = remoteAddr; return this; } } }
class FilterTester { private final SecurityRequestFilter filter; public FilterTester(SecurityRequestFilter filter) { this.filter = filter; } public void assertSuccess(Request request) { assertFalse("No response written by filter", getResponse(request).isPresent()); } public void assertRequest(Request request, int status, String body) { Optional<Response> response = getResponse(request); assertTrue("Expected response from filter", response.isPresent()); assertEquals("Response body", body, response.get().body); assertEquals("Status code", status, response.get().status); } private Optional<Response> getResponse(Request request) { RequestHandlerTestDriver.MockResponseHandler handler = new RequestHandlerTestDriver.MockResponseHandler(); filter.filter(toDiscFilterRequest(request), handler); return Optional.ofNullable(handler.getResponse()) .map(response -> new Response(response.getStatus(), handler.readAll())); } /** Create a RSA public/private key pair */ private static KeyPair keyPair() { try { KeyPairGenerator keyGen = KeyPairGenerator.getInstance("RSA"); keyGen.initialize(2048); return keyGen.generateKeyPair(); } catch (NoSuchAlgorithmException e) { throw new RuntimeException(e); } } /** Create a self signed certificate for commonName using given public/private key pair */ private static X509Certificate certificateFor(String commonName, KeyPair keyPair) { try { ContentSigner contentSigner = new JcaContentSignerBuilder("SHA256WithRSA") .build(keyPair.getPrivate()); X500Name x500Name = new X500Name("CN=" + commonName); Instant now = Instant.now(); Date notBefore = Date.from(now); Date notAfter = Date.from(now.plus(Duration.ofDays(30))); X509v3CertificateBuilder certificateBuilder = new JcaX509v3CertificateBuilder( x500Name, BigInteger.valueOf(now.toEpochMilli()), notBefore, notAfter, x500Name, keyPair.getPublic() ).addExtension(Extension.basicConstraints, true, new BasicConstraints(true)); return new JcaX509CertificateConverter() .setProvider(new 
BouncyCastleProvider()) .getCertificate(certificateBuilder.build(contentSigner)); } catch (OperatorCreationException |IOException |CertificateException e) { throw new RuntimeException(e); } } private static class Response { private final int status; private final String body; private Response(int status, String body) { this.status = status; this.body = body; } } public static class Request { private final Method method; private final String path; private String remoteAddr; private String commonName; public Request(Method method, String path) { this.method = method; this.path = path; this.commonName = null; this.remoteAddr = "unit-test"; } public Method method() { return method; } public String path() { return path; } public String remoteAddr() { return remoteAddr; } public Optional<String> commonName() { return Optional.ofNullable(commonName); } public Request commonName(String commonName) { this.commonName = commonName; return this; } public Request remoteAddr(String remoteAddr) { this.remoteAddr = remoteAddr; return this; } } }
Could be simplified to `request.getClientCertificateChain().stream().findFirst()`.
/**
 * Extracts the client's certificate from the request, if any.
 *
 * @param request the filter request to inspect
 * @return the first certificate of the client certificate chain, or empty when the chain is empty
 */
private static Optional<X509Certificate> getClientCertificate(DiscFilterRequest request) {
    // The first element of the chain is the client's own certificate.
    return request.getClientCertificateChain().stream().findFirst();
}
List<X509Certificate> chain = request.getClientCertificateChain();
/**
 * Extracts the client's certificate from the request, if any.
 *
 * @param request the filter request to inspect
 * @return the first certificate of the client certificate chain, or empty when the chain is empty
 */
private static Optional<X509Certificate> getClientCertificate(DiscFilterRequest request) {
    // The first element of the chain is the client's own certificate.
    return request.getClientCertificateChain().stream().findFirst();
}
class AthenzPrincipalFilter implements SecurityRequestFilter { private final NTokenValidator validator; private final String principalTokenHeader; /** * @param executor to preload the ZMS public keys with */ @Inject public AthenzPrincipalFilter(ZmsKeystore zmsKeystore, Executor executor, AthenzConfig config) { this(new NTokenValidator(zmsKeystore), executor, config.principalHeaderName()); } AthenzPrincipalFilter(NTokenValidator validator, Executor executor, String principalTokenHeader) { this.validator = validator; this.principalTokenHeader = principalTokenHeader; executor.execute(validator::preloadPublicKeys); } @Override public void filter(DiscFilterRequest request, ResponseHandler responseHandler) { try { Optional<AthenzPrincipal> certificatePrincipal = getClientCertificate(request) .map(AthenzIdentities::from) .map(AthenzPrincipal::new); Optional<AthenzPrincipal> nTokenPrincipal = getPrincipalToken(request, principalTokenHeader) .map(validator::validate); if (!certificatePrincipal.isPresent() && !nTokenPrincipal.isPresent()) { String errorMessage = "Unable to authenticate Athenz identity. 
" + "Either client certificate or principal token is required."; sendErrorResponse(responseHandler, Response.Status.UNAUTHORIZED, errorMessage); return; } if (certificatePrincipal.isPresent() && nTokenPrincipal.isPresent() && !certificatePrincipal.get().getIdentity().equals(nTokenPrincipal.get().getIdentity())) { String errorMessage = String.format( "Identity in principal token does not match x509 CN: token-identity=%s, cert-identity=%s", nTokenPrincipal.get().getIdentity().getFullName(), certificatePrincipal.get().getIdentity().getFullName()); sendErrorResponse(responseHandler, Response.Status.UNAUTHORIZED, errorMessage); return; } AthenzPrincipal principal = nTokenPrincipal.orElseGet(certificatePrincipal::get); request.setUserPrincipal(principal); request.setRemoteUser(principal.getName()); } catch (Exception e) { sendErrorResponse(responseHandler,Response.Status.UNAUTHORIZED, e.getMessage()); } } private static Optional<NToken> getPrincipalToken(DiscFilterRequest request, String principalTokenHeaderName) { return Optional.ofNullable(request.getHeader(principalTokenHeaderName)) .filter(token -> !token.isEmpty()) .map(NToken::new); } }
class AthenzPrincipalFilter implements SecurityRequestFilter { private final NTokenValidator validator; private final String principalTokenHeader; /** * @param executor to preload the ZMS public keys with */ @Inject public AthenzPrincipalFilter(ZmsKeystore zmsKeystore, Executor executor, AthenzConfig config) { this(new NTokenValidator(zmsKeystore), executor, config.principalHeaderName()); } AthenzPrincipalFilter(NTokenValidator validator, Executor executor, String principalTokenHeader) { this.validator = validator; this.principalTokenHeader = principalTokenHeader; executor.execute(validator::preloadPublicKeys); } @Override public void filter(DiscFilterRequest request, ResponseHandler responseHandler) { try { Optional<AthenzPrincipal> certificatePrincipal = getClientCertificate(request) .map(AthenzIdentities::from) .map(AthenzPrincipal::new); Optional<AthenzPrincipal> nTokenPrincipal = getPrincipalToken(request, principalTokenHeader) .map(validator::validate); if (!certificatePrincipal.isPresent() && !nTokenPrincipal.isPresent()) { String errorMessage = "Unable to authenticate Athenz identity. 
" + "Either client certificate or principal token is required."; sendErrorResponse(responseHandler, Response.Status.UNAUTHORIZED, errorMessage); return; } if (certificatePrincipal.isPresent() && nTokenPrincipal.isPresent() && !certificatePrincipal.get().getIdentity().equals(nTokenPrincipal.get().getIdentity())) { String errorMessage = String.format( "Identity in principal token does not match x509 CN: token-identity=%s, cert-identity=%s", nTokenPrincipal.get().getIdentity().getFullName(), certificatePrincipal.get().getIdentity().getFullName()); sendErrorResponse(responseHandler, Response.Status.UNAUTHORIZED, errorMessage); return; } AthenzPrincipal principal = nTokenPrincipal.orElseGet(certificatePrincipal::get); request.setUserPrincipal(principal); request.setRemoteUser(principal.getName()); } catch (Exception e) { sendErrorResponse(responseHandler,Response.Status.UNAUTHORIZED, e.getMessage()); } } private static Optional<NToken> getPrincipalToken(DiscFilterRequest request, String principalTokenHeaderName) { return Optional.ofNullable(request.getHeader(principalTokenHeaderName)) .filter(token -> !token.isEmpty()) .map(NToken::new); } }
Missing instanceof check, but I guess that can't happen.
/**
 * Returns the client's X.509 certificate chain from the request context.
 *
 * @return the certificate chain, or an empty list when the attribute is not set.
 *         The cast fails hard (ClassCastException) if the attribute is not an X509Certificate[],
 *         which is the intended behavior per review.
 */
public List<X509Certificate> getClientCertificateChain() {
    Object certificates = parent.context().get(ServletRequest.JDISC_REQUEST_X509CERT);
    if (certificates == null) {
        return Collections.emptyList();
    }
    return Arrays.asList((X509Certificate[]) certificates);
}
.map(X509Certificate[].class::cast)
/**
 * Returns the client's X.509 certificate chain from the request context.
 *
 * @return the certificate chain, or an empty list when the attribute is not set.
 *         The cast fails hard (ClassCastException) if the attribute is not an X509Certificate[],
 *         which is the intended behavior per review.
 */
public List<X509Certificate> getClientCertificateChain() {
    Object certificates = parent.context().get(ServletRequest.JDISC_REQUEST_X509CERT);
    if (certificates == null) {
        return Collections.emptyList();
    }
    return Arrays.asList((X509Certificate[]) certificates);
}
class JdiscFilterRequest extends DiscFilterRequest { private final HttpRequest parent; public JdiscFilterRequest(HttpRequest parent) { super(parent); this.parent = parent; } public HttpRequest getParentRequest() { return parent; } public void setUri(URI uri) { parent.setUri(uri); } @Override public String getMethod() { return parent.getMethod().name(); } @Override public String getParameter(String name) { if(parent.parameters().containsKey(name)) { return parent.parameters().get(name).get(0); } else { return null; } } @Override public Enumeration<String> getParameterNames() { return Collections.enumeration(parent.parameters().keySet()); } @Override public void addHeader(String name, String value) { parent.headers().add(name, value); } @Override public String getHeader(String name) { List<String> values = parent.headers().get(name); if (values == null || values.isEmpty()) { return null; } return values.get(values.size() - 1); } public Enumeration<String> getHeaderNames() { return Collections.enumeration(parent.headers().keySet()); } public List<String> getHeaderNamesAsList() { return new ArrayList<String>(parent.headers().keySet()); } @Override public Enumeration<String> getHeaders(String name) { return Collections.enumeration(getHeadersAsList(name)); } public List<String> getHeadersAsList(String name) { List<String> values = parent.headers().get(name); if(values == null) { return Collections.<String>emptyList(); } return parent.headers().get(name); } @Override public void removeHeaders(String name) { parent.headers().remove(name); } @Override public void setHeaders(String name, String value) { parent.headers().put(name, value); } @Override public void setHeaders(String name, List<String> values) { parent.headers().put(name, values); } @Override public Principal getUserPrincipal() { return parent.getUserPrincipal(); } @Override public void setUserPrincipal(Principal principal) { this.parent.setUserPrincipal(principal); } @Override @Override public void 
clearCookies() { parent.headers().remove(HttpHeaders.Names.COOKIE); } }
class JdiscFilterRequest extends DiscFilterRequest { private final HttpRequest parent; public JdiscFilterRequest(HttpRequest parent) { super(parent); this.parent = parent; } public HttpRequest getParentRequest() { return parent; } public void setUri(URI uri) { parent.setUri(uri); } @Override public String getMethod() { return parent.getMethod().name(); } @Override public String getParameter(String name) { if(parent.parameters().containsKey(name)) { return parent.parameters().get(name).get(0); } else { return null; } } @Override public Enumeration<String> getParameterNames() { return Collections.enumeration(parent.parameters().keySet()); } @Override public void addHeader(String name, String value) { parent.headers().add(name, value); } @Override public String getHeader(String name) { List<String> values = parent.headers().get(name); if (values == null || values.isEmpty()) { return null; } return values.get(values.size() - 1); } public Enumeration<String> getHeaderNames() { return Collections.enumeration(parent.headers().keySet()); } public List<String> getHeaderNamesAsList() { return new ArrayList<String>(parent.headers().keySet()); } @Override public Enumeration<String> getHeaders(String name) { return Collections.enumeration(getHeadersAsList(name)); } public List<String> getHeadersAsList(String name) { List<String> values = parent.headers().get(name); if(values == null) { return Collections.<String>emptyList(); } return parent.headers().get(name); } @Override public void removeHeaders(String name) { parent.headers().remove(name); } @Override public void setHeaders(String name, String value) { parent.headers().put(name, value); } @Override public void setHeaders(String name, List<String> values) { parent.headers().put(name, values); } @Override public Principal getUserPrincipal() { return parent.getUserPrincipal(); } @Override public void setUserPrincipal(Principal principal) { this.parent.setUserPrincipal(principal); } @Override @Override public void 
clearCookies() { parent.headers().remove(HttpHeaders.Names.COOKIE); } }
Same as above.
/**
 * Returns the client's X.509 certificate chain from the servlet request context.
 *
 * @return the certificate chain, or an empty list when the attribute is not set.
 *         The cast fails hard (ClassCastException) if the attribute is not an X509Certificate[],
 *         which is the intended behavior per review.
 */
public List<X509Certificate> getClientCertificateChain() {
    Object certificates = parent.context().get(ServletRequest.SERVLET_REQUEST_X509CERT);
    if (certificates == null) {
        return Collections.emptyList();
    }
    return Arrays.asList((X509Certificate[]) certificates);
}
.map(X509Certificate[].class::cast)
/**
 * Returns the client's X.509 certificate chain from the servlet request context.
 *
 * @return the certificate chain, or an empty list when the attribute is not set.
 *         The cast fails hard (ClassCastException) if the attribute is not an X509Certificate[],
 *         which is the intended behavior per review.
 */
public List<X509Certificate> getClientCertificateChain() {
    Object certificates = parent.context().get(ServletRequest.SERVLET_REQUEST_X509CERT);
    if (certificates == null) {
        return Collections.emptyList();
    }
    return Arrays.asList((X509Certificate[]) certificates);
}
class ServletFilterRequest extends DiscFilterRequest { private final ServletRequest parent; public ServletFilterRequest(ServletRequest parent) { super(parent); this.parent = parent; } ServletRequest getServletRequest() { return parent; } public void setUri(URI uri) { parent.setUri(uri); } @Override public String getMethod() { return parent.getRequest().getMethod(); } @Override public void setRemoteAddr(String remoteIpAddress) { throw new UnsupportedOperationException( "Setting remote address is not supported for " + this.getClass().getName()); } @Override public Enumeration<String> getAttributeNames() { Set<String> names = new HashSet<>(Collections.list(super.getAttributeNames())); names.addAll(Collections.list(parent.getRequest().getAttributeNames())); return Collections.enumeration(names); } @Override public Object getAttribute(String name) { Object jdiscAttribute = super.getAttribute(name); return jdiscAttribute != null ? jdiscAttribute : parent.getRequest().getAttribute(name); } @Override public void setAttribute(String name, Object value) { super.setAttribute(name, value); parent.getRequest().setAttribute(name, value); } @Override public boolean containsAttribute(String name) { return super.containsAttribute(name) || parent.getRequest().getAttribute(name) != null; } @Override public void removeAttribute(String name) { super.removeAttribute(name); parent.getRequest().removeAttribute(name); } @Override public String getParameter(String name) { return parent.getParameter(name); } @Override public Enumeration<String> getParameterNames() { return parent.getParameterNames(); } @Override public void addHeader(String name, String value) { parent.addHeader(name, value); } @Override public String getHeader(String name) { return parent.getHeader(name); } @Override public Enumeration<String> getHeaderNames() { return parent.getHeaderNames(); } public List<String> getHeaderNamesAsList() { return Collections.list(getHeaderNames()); } @Override public Enumeration<String> 
getHeaders(String name) { return parent.getHeaders(name); } @Override public List<String> getHeadersAsList(String name) { return Collections.list(getHeaders(name)); } @Override public void setHeaders(String name, String value) { parent.setHeaders(name, value); } @Override public void setHeaders(String name, List<String> values) { parent.setHeaders(name, values); } @Override public Principal getUserPrincipal() { return parent.getUserPrincipal(); } @Override public void setUserPrincipal(Principal principal) { parent.setUserPrincipal(principal); } @Override @Override public void removeHeaders(String name) { parent.removeHeaders(name); } @Override public void clearCookies() { parent.removeHeaders(HttpHeaders.Names.COOKIE); } @Override public void setCharacterEncoding(String encoding) { super.setCharacterEncoding(encoding); try { parent.setCharacterEncoding(encoding); } catch (UnsupportedEncodingException e) { throw new RuntimeException("Encoding not supported: " + encoding, e); } } }
class ServletFilterRequest extends DiscFilterRequest { private final ServletRequest parent; public ServletFilterRequest(ServletRequest parent) { super(parent); this.parent = parent; } ServletRequest getServletRequest() { return parent; } public void setUri(URI uri) { parent.setUri(uri); } @Override public String getMethod() { return parent.getRequest().getMethod(); } @Override public void setRemoteAddr(String remoteIpAddress) { throw new UnsupportedOperationException( "Setting remote address is not supported for " + this.getClass().getName()); } @Override public Enumeration<String> getAttributeNames() { Set<String> names = new HashSet<>(Collections.list(super.getAttributeNames())); names.addAll(Collections.list(parent.getRequest().getAttributeNames())); return Collections.enumeration(names); } @Override public Object getAttribute(String name) { Object jdiscAttribute = super.getAttribute(name); return jdiscAttribute != null ? jdiscAttribute : parent.getRequest().getAttribute(name); } @Override public void setAttribute(String name, Object value) { super.setAttribute(name, value); parent.getRequest().setAttribute(name, value); } @Override public boolean containsAttribute(String name) { return super.containsAttribute(name) || parent.getRequest().getAttribute(name) != null; } @Override public void removeAttribute(String name) { super.removeAttribute(name); parent.getRequest().removeAttribute(name); } @Override public String getParameter(String name) { return parent.getParameter(name); } @Override public Enumeration<String> getParameterNames() { return parent.getParameterNames(); } @Override public void addHeader(String name, String value) { parent.addHeader(name, value); } @Override public String getHeader(String name) { return parent.getHeader(name); } @Override public Enumeration<String> getHeaderNames() { return parent.getHeaderNames(); } public List<String> getHeaderNamesAsList() { return Collections.list(getHeaderNames()); } @Override public Enumeration<String> 
getHeaders(String name) { return parent.getHeaders(name); } @Override public List<String> getHeadersAsList(String name) { return Collections.list(getHeaders(name)); } @Override public void setHeaders(String name, String value) { parent.setHeaders(name, value); } @Override public void setHeaders(String name, List<String> values) { parent.setHeaders(name, values); } @Override public Principal getUserPrincipal() { return parent.getUserPrincipal(); } @Override public void setUserPrincipal(Principal principal) { parent.setUserPrincipal(principal); } @Override @Override public void removeHeaders(String name) { parent.removeHeaders(name); } @Override public void clearCookies() { parent.removeHeaders(HttpHeaders.Names.COOKIE); } @Override public void setCharacterEncoding(String encoding) { super.setCharacterEncoding(encoding); try { parent.setCharacterEncoding(encoding); } catch (UnsupportedEncodingException e) { throw new RuntimeException("Encoding not supported: " + encoding, e); } } }
`NullPointerException` is too generic and could hide an actual problem with the code. Something like ``` Optional.ofNullable(metrics.get(type)) .map(m -> m.get(name)) .map(ApplicationMetrics::metricsByDimensions) .ifPresent(m -> m.remove(dimensionsToRemove)); ``` would be better
/**
 * Removes the metrics stored for the given dimensions under the given metric name and type.
 * A no-op when the type, name or dimensions are unknown.
 */
public void deleteMetricByDimension(String name, Dimensions dimensionsToRemove, DimensionType type) {
    synchronized (monitor) {
        // FIX: navigate the nested maps null-safely instead of swallowing NullPointerException,
        // which could mask unrelated bugs; also take the class monitor like the other accessors.
        Optional.ofNullable(metrics.get(type))
                .map(m -> m.get(name))
                .map(ApplicationMetrics::metricsByDimensions)
                .ifPresent(m -> m.remove(dimensionsToRemove));
    }
}
} catch (NullPointerException e) {}
/**
 * Deletes the metrics registered for the given dimensions under the given name and type.
 * Does nothing when no such type, name or dimensions entry exists.
 */
public void deleteMetricByDimension(String name, Dimensions dimensionsToRemove, DimensionType type) {
    synchronized (monitor) {
        Optional.ofNullable(metrics.get(type))
                .map(applicationMetrics -> applicationMetrics.get(name))
                .map(ApplicationMetrics::metricsByDimensions)
                .ifPresent(byDimensions -> byDimensions.remove(dimensionsToRemove));
    }
}
class MetricReceiverWrapper { public static final String APPLICATION_DOCKER = "docker"; public static final String APPLICATION_HOST = "vespa.host"; public static final String APPLICATION_NODE = "vespa.node"; private final Object monitor = new Object(); private final Map<DimensionType, Map<String, ApplicationMetrics>> metrics = new HashMap<>(); private final MetricReceiver metricReceiver; @Inject public MetricReceiverWrapper(MetricReceiver metricReceiver) { this.metricReceiver = metricReceiver; } /** * Declaring the same dimensions and name results in the same CounterWrapper instance (idempotent). */ public CounterWrapper declareCounter(String application, Dimensions dimensions, String name) { return declareCounter(application, dimensions, name, DimensionType.DEFAULT); } public CounterWrapper declareCounter(String application, Dimensions dimensions, String name, DimensionType type) { synchronized (monitor) { Map<Dimensions, Map<String, MetricValue>> metricsByDimensions = getOrCreateApplicationMetrics(application, type); if (!metricsByDimensions.containsKey(dimensions)) metricsByDimensions.put(dimensions, new HashMap<>()); if (!metricsByDimensions.get(dimensions).containsKey(name)) { CounterWrapper counter = new CounterWrapper(metricReceiver.declareCounter(name, new Point(dimensions.dimensionsMap))); metricsByDimensions.get(dimensions).put(name, counter); } return (CounterWrapper) metricsByDimensions.get(dimensions).get(name); } } /** * Declaring the same dimensions and name results in the same GaugeWrapper instance (idempotent). 
*/ public GaugeWrapper declareGauge(String application, Dimensions dimensions, String name) { return declareGauge(application, dimensions, name, DimensionType.DEFAULT); } public GaugeWrapper declareGauge(String application, Dimensions dimensions, String name, DimensionType type) { synchronized (monitor) { Map<Dimensions, Map<String, MetricValue>> metricsByDimensions = getOrCreateApplicationMetrics(application, type); if (!metricsByDimensions.containsKey(dimensions)) metricsByDimensions.put(dimensions, new HashMap<>()); if (!metricsByDimensions.get(dimensions).containsKey(name)) { GaugeWrapper gauge = new GaugeWrapper(metricReceiver.declareGauge(name, new Point(dimensions.dimensionsMap))); metricsByDimensions.get(dimensions).put(name, gauge); } return (GaugeWrapper) metricsByDimensions.get(dimensions).get(name); } } public List<DimensionMetrics> getDefaultMetrics() { return getMetricsByType(DimensionType.DEFAULT); } public Set<Map<String, Object>> getDefaultMetricsRaw() { synchronized (monitor) { Set<Map<String, Object>> dimensionMetrics = new HashSet<>(); metrics.getOrDefault(DimensionType.DEFAULT, new HashMap<>()) .forEach((application, applicationMetrics) -> applicationMetrics.metricsByDimensions().entrySet().stream() .map(entry -> new DimensionMetrics(application, entry.getKey(), entry.getValue().entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, value -> value.getValue().getValue())))) .map(DimensionMetrics::getMetrics) .forEach(dimensionMetrics::add)); return dimensionMetrics; } } public List<DimensionMetrics> getMetricsByType(DimensionType type) { synchronized (monitor) { List<DimensionMetrics> dimensionMetrics = new ArrayList<>(); metrics.getOrDefault(type, new HashMap<>()) .forEach((application, applicationMetrics) -> applicationMetrics.metricsByDimensions().entrySet().stream() .map(entry -> new DimensionMetrics(application, entry.getKey(), entry.getValue().entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, value -> 
value.getValue().getValue())))) .forEach(dimensionMetrics::add)); return dimensionMetrics; } } Map<String, Number> getMetricsForDimension(String application, Dimensions dimensions) { synchronized (monitor) { Map<Dimensions, Map<String, MetricValue>> metricsByDimensions = getOrCreateApplicationMetrics(application, DimensionType.DEFAULT); try { return metricsByDimensions.get(dimensions).entrySet().stream().collect( Collectors.toMap(Map.Entry::getKey, entry -> entry.getValue().getValue())); } catch (NullPointerException e) { return new HashMap<>(); } } } private Map<Dimensions, Map<String, MetricValue>> getOrCreateApplicationMetrics(String application, DimensionType type) { Map<String, ApplicationMetrics> applicationMetrics = metrics.computeIfAbsent(type, m -> new HashMap<>()); if (! applicationMetrics.containsKey(application)) { ApplicationMetrics metrics = new ApplicationMetrics(); applicationMetrics.put(application, metrics); } return applicationMetrics.get(application).metricsByDimensions(); } private static class ApplicationMetrics { private final Map<Dimensions, Map<String, MetricValue>> metricsByDimensions = new LinkedHashMap<>(); Map<Dimensions, Map<String, MetricValue>> metricsByDimensions() { return metricsByDimensions; } } public enum DimensionType {DEFAULT, PRETAGGED} }
class MetricReceiverWrapper { public static final String APPLICATION_DOCKER = "docker"; public static final String APPLICATION_HOST = "vespa.host"; public static final String APPLICATION_NODE = "vespa.node"; private final Object monitor = new Object(); private final Map<DimensionType, Map<String, ApplicationMetrics>> metrics = new HashMap<>(); private final MetricReceiver metricReceiver; @Inject public MetricReceiverWrapper(MetricReceiver metricReceiver) { this.metricReceiver = metricReceiver; } /** * Declaring the same dimensions and name results in the same CounterWrapper instance (idempotent). */ public CounterWrapper declareCounter(String application, Dimensions dimensions, String name) { return declareCounter(application, dimensions, name, DimensionType.DEFAULT); } public CounterWrapper declareCounter(String application, Dimensions dimensions, String name, DimensionType type) { synchronized (monitor) { Map<Dimensions, Map<String, MetricValue>> metricsByDimensions = getOrCreateApplicationMetrics(application, type); if (!metricsByDimensions.containsKey(dimensions)) metricsByDimensions.put(dimensions, new HashMap<>()); if (!metricsByDimensions.get(dimensions).containsKey(name)) { CounterWrapper counter = new CounterWrapper(metricReceiver.declareCounter(name, new Point(dimensions.dimensionsMap))); metricsByDimensions.get(dimensions).put(name, counter); } return (CounterWrapper) metricsByDimensions.get(dimensions).get(name); } } /** * Declaring the same dimensions and name results in the same GaugeWrapper instance (idempotent). 
*/ public GaugeWrapper declareGauge(String application, Dimensions dimensions, String name) { return declareGauge(application, dimensions, name, DimensionType.DEFAULT); } public GaugeWrapper declareGauge(String application, Dimensions dimensions, String name, DimensionType type) { synchronized (monitor) { Map<Dimensions, Map<String, MetricValue>> metricsByDimensions = getOrCreateApplicationMetrics(application, type); if (!metricsByDimensions.containsKey(dimensions)) metricsByDimensions.put(dimensions, new HashMap<>()); if (!metricsByDimensions.get(dimensions).containsKey(name)) { GaugeWrapper gauge = new GaugeWrapper(metricReceiver.declareGauge(name, new Point(dimensions.dimensionsMap))); metricsByDimensions.get(dimensions).put(name, gauge); } return (GaugeWrapper) metricsByDimensions.get(dimensions).get(name); } } public List<DimensionMetrics> getDefaultMetrics() { return getMetricsByType(DimensionType.DEFAULT); } public Set<Map<String, Object>> getDefaultMetricsRaw() { synchronized (monitor) { Set<Map<String, Object>> dimensionMetrics = new HashSet<>(); metrics.getOrDefault(DimensionType.DEFAULT, new HashMap<>()) .forEach((application, applicationMetrics) -> applicationMetrics.metricsByDimensions().entrySet().stream() .map(entry -> new DimensionMetrics(application, entry.getKey(), entry.getValue().entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, value -> value.getValue().getValue())))) .map(DimensionMetrics::getMetrics) .forEach(dimensionMetrics::add)); return dimensionMetrics; } } public List<DimensionMetrics> getMetricsByType(DimensionType type) { synchronized (monitor) { List<DimensionMetrics> dimensionMetrics = new ArrayList<>(); metrics.getOrDefault(type, new HashMap<>()) .forEach((application, applicationMetrics) -> applicationMetrics.metricsByDimensions().entrySet().stream() .map(entry -> new DimensionMetrics(application, entry.getKey(), entry.getValue().entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, value -> 
value.getValue().getValue())))) .forEach(dimensionMetrics::add)); return dimensionMetrics; } } Map<String, Number> getMetricsForDimension(String application, Dimensions dimensions) { synchronized (monitor) { Map<Dimensions, Map<String, MetricValue>> metricsByDimensions = getOrCreateApplicationMetrics(application, DimensionType.DEFAULT); return metricsByDimensions.getOrDefault(dimensions, Collections.emptyMap()) .entrySet() .stream() .collect(Collectors.toMap(Map.Entry::getKey, entry -> entry.getValue().getValue())); } } private Map<Dimensions, Map<String, MetricValue>> getOrCreateApplicationMetrics(String application, DimensionType type) { Map<String, ApplicationMetrics> applicationMetrics = metrics.computeIfAbsent(type, m -> new HashMap<>()); if (! applicationMetrics.containsKey(application)) { ApplicationMetrics metrics = new ApplicationMetrics(); applicationMetrics.put(application, metrics); } return applicationMetrics.get(application).metricsByDimensions(); } private static class ApplicationMetrics { private final Map<Dimensions, Map<String, MetricValue>> metricsByDimensions = new LinkedHashMap<>(); Map<Dimensions, Map<String, MetricValue>> metricsByDimensions() { return metricsByDimensions; } } public enum DimensionType {DEFAULT, PRETAGGED} }
I want it to fail hard if it's not `X509Certificate[]`.
/**
 * Returns the client's X.509 certificate chain from the request context,
 * or an empty list when no certificate attribute is present.
 */
public List<X509Certificate> getClientCertificateChain() {
    Object certificates = parent.context().get(ServletRequest.JDISC_REQUEST_X509CERT);
    if (certificates == null) {
        return Collections.emptyList();
    }
    // Intentionally fails hard (ClassCastException) if the attribute is not an X509Certificate[].
    return Arrays.asList((X509Certificate[]) certificates);
}
.map(X509Certificate[].class::cast)
/**
 * Returns the client's X.509 certificate chain from the request context,
 * or an empty list when no certificate attribute is present.
 * Intentionally fails hard (ClassCastException) if the attribute is not an X509Certificate[].
 */
public List<X509Certificate> getClientCertificateChain() {
    return Optional.ofNullable(parent.context().get(ServletRequest.JDISC_REQUEST_X509CERT))
            .map(X509Certificate[].class::cast) // hard cast by design — see method contract
            .map(Arrays::asList)
            .orElse(Collections.emptyList());
}
class JdiscFilterRequest extends DiscFilterRequest { private final HttpRequest parent; public JdiscFilterRequest(HttpRequest parent) { super(parent); this.parent = parent; } public HttpRequest getParentRequest() { return parent; } public void setUri(URI uri) { parent.setUri(uri); } @Override public String getMethod() { return parent.getMethod().name(); } @Override public String getParameter(String name) { if(parent.parameters().containsKey(name)) { return parent.parameters().get(name).get(0); } else { return null; } } @Override public Enumeration<String> getParameterNames() { return Collections.enumeration(parent.parameters().keySet()); } @Override public void addHeader(String name, String value) { parent.headers().add(name, value); } @Override public String getHeader(String name) { List<String> values = parent.headers().get(name); if (values == null || values.isEmpty()) { return null; } return values.get(values.size() - 1); } public Enumeration<String> getHeaderNames() { return Collections.enumeration(parent.headers().keySet()); } public List<String> getHeaderNamesAsList() { return new ArrayList<String>(parent.headers().keySet()); } @Override public Enumeration<String> getHeaders(String name) { return Collections.enumeration(getHeadersAsList(name)); } public List<String> getHeadersAsList(String name) { List<String> values = parent.headers().get(name); if(values == null) { return Collections.<String>emptyList(); } return parent.headers().get(name); } @Override public void removeHeaders(String name) { parent.headers().remove(name); } @Override public void setHeaders(String name, String value) { parent.headers().put(name, value); } @Override public void setHeaders(String name, List<String> values) { parent.headers().put(name, values); } @Override public Principal getUserPrincipal() { return parent.getUserPrincipal(); } @Override public void setUserPrincipal(Principal principal) { this.parent.setUserPrincipal(principal); } @Override @Override public void 
clearCookies() { parent.headers().remove(HttpHeaders.Names.COOKIE); } }
class JdiscFilterRequest extends DiscFilterRequest { private final HttpRequest parent; public JdiscFilterRequest(HttpRequest parent) { super(parent); this.parent = parent; } public HttpRequest getParentRequest() { return parent; } public void setUri(URI uri) { parent.setUri(uri); } @Override public String getMethod() { return parent.getMethod().name(); } @Override public String getParameter(String name) { if(parent.parameters().containsKey(name)) { return parent.parameters().get(name).get(0); } else { return null; } } @Override public Enumeration<String> getParameterNames() { return Collections.enumeration(parent.parameters().keySet()); } @Override public void addHeader(String name, String value) { parent.headers().add(name, value); } @Override public String getHeader(String name) { List<String> values = parent.headers().get(name); if (values == null || values.isEmpty()) { return null; } return values.get(values.size() - 1); } public Enumeration<String> getHeaderNames() { return Collections.enumeration(parent.headers().keySet()); } public List<String> getHeaderNamesAsList() { return new ArrayList<String>(parent.headers().keySet()); } @Override public Enumeration<String> getHeaders(String name) { return Collections.enumeration(getHeadersAsList(name)); } public List<String> getHeadersAsList(String name) { List<String> values = parent.headers().get(name); if(values == null) { return Collections.<String>emptyList(); } return parent.headers().get(name); } @Override public void removeHeaders(String name) { parent.headers().remove(name); } @Override public void setHeaders(String name, String value) { parent.headers().put(name, value); } @Override public void setHeaders(String name, List<String> values) { parent.headers().put(name, values); } @Override public Principal getUserPrincipal() { return parent.getUserPrincipal(); } @Override public void setUserPrincipal(Principal principal) { this.parent.setUserPrincipal(principal); } @Override @Override public void 
clearCookies() { parent.headers().remove(HttpHeaders.Names.COOKIE); } }
Thanks for the tip. Completely forgot that one :)
/** Returns the leaf (first) certificate of the client's chain, or empty if none. */
private static Optional<X509Certificate> getClientCertificate(DiscFilterRequest request) {
    return request.getClientCertificateChain().stream().findFirst();
}
List<X509Certificate> chain = request.getClientCertificateChain();
/**
 * Returns the leaf (first) certificate of the client certificate chain,
 * or empty if the request carries no client certificate.
 */
private static Optional<X509Certificate> getClientCertificate(DiscFilterRequest request) {
    List<X509Certificate> chain = request.getClientCertificateChain();
    if (chain.isEmpty()) return Optional.empty();
    return Optional.of(chain.get(0)); // leaf certificate comes first in the chain
}
/**
 * Security filter that authenticates an Athenz identity from either the client
 * certificate or the Athenz principal token (NToken). Requests with neither, or
 * with mismatching identities, are rejected with 401 Unauthorized.
 */
class AthenzPrincipalFilter implements SecurityRequestFilter {

    private final NTokenValidator validator;
    private final String principalTokenHeader;

    /**
     * @param executor to preload the ZMS public keys with
     */
    @Inject
    public AthenzPrincipalFilter(ZmsKeystore zmsKeystore, Executor executor, AthenzConfig config) {
        this(new NTokenValidator(zmsKeystore), executor, config.principalHeaderName());
    }

    AthenzPrincipalFilter(NTokenValidator validator, Executor executor, String principalTokenHeader) {
        this.validator = validator;
        this.principalTokenHeader = principalTokenHeader;
        // Warm the public-key cache off the request path.
        executor.execute(validator::preloadPublicKeys);
    }

    @Override
    public void filter(DiscFilterRequest request, ResponseHandler responseHandler) {
        try {
            // Identity claimed by the client certificate, if one was presented.
            Optional<AthenzPrincipal> certificatePrincipal = getClientCertificate(request)
                    .map(AthenzIdentities::from)
                    .map(AthenzPrincipal::new);
            // Identity claimed by the principal token header, if present and valid.
            Optional<AthenzPrincipal> nTokenPrincipal = getPrincipalToken(request, principalTokenHeader)
                    .map(validator::validate);
            if (!certificatePrincipal.isPresent() && !nTokenPrincipal.isPresent()) {
                String errorMessage = "Unable to authenticate Athenz identity. " +
                        "Either client certificate or principal token is required.";
                sendErrorResponse(responseHandler, Response.Status.UNAUTHORIZED, errorMessage);
                return;
            }
            // When both credentials are present they must agree on the identity.
            if (certificatePrincipal.isPresent() && nTokenPrincipal.isPresent()
                    && !certificatePrincipal.get().getIdentity().equals(nTokenPrincipal.get().getIdentity())) {
                String errorMessage = String.format(
                        "Identity in principal token does not match x509 CN: token-identity=%s, cert-identity=%s",
                        nTokenPrincipal.get().getIdentity().getFullName(),
                        certificatePrincipal.get().getIdentity().getFullName());
                sendErrorResponse(responseHandler, Response.Status.UNAUTHORIZED, errorMessage);
                return;
            }
            // Prefer the NToken principal when both are present (identities match at this point).
            AthenzPrincipal principal = nTokenPrincipal.orElseGet(certificatePrincipal::get);
            request.setUserPrincipal(principal);
            request.setRemoteUser(principal.getName());
        } catch (Exception e) {
            // Any validation failure (e.g. invalid token) is treated as unauthorized.
            sendErrorResponse(responseHandler,Response.Status.UNAUTHORIZED, e.getMessage());
        }
    }

    /** Reads the principal token header, treating a missing or empty header as absent. */
    private static Optional<NToken> getPrincipalToken(DiscFilterRequest request, String principalTokenHeaderName) {
        return Optional.ofNullable(request.getHeader(principalTokenHeaderName))
                .filter(token -> !token.isEmpty())
                .map(NToken::new);
    }
}
/**
 * Security filter that authenticates an Athenz identity from either the client
 * certificate or the Athenz principal token (NToken). Requests with neither, or
 * with mismatching identities, are rejected with 401 Unauthorized.
 */
class AthenzPrincipalFilter implements SecurityRequestFilter {

    private final NTokenValidator validator;
    private final String principalTokenHeader;

    /**
     * @param executor to preload the ZMS public keys with
     */
    @Inject
    public AthenzPrincipalFilter(ZmsKeystore zmsKeystore, Executor executor, AthenzConfig config) {
        this(new NTokenValidator(zmsKeystore), executor, config.principalHeaderName());
    }

    AthenzPrincipalFilter(NTokenValidator validator, Executor executor, String principalTokenHeader) {
        this.validator = validator;
        this.principalTokenHeader = principalTokenHeader;
        // Warm the public-key cache off the request path.
        executor.execute(validator::preloadPublicKeys);
    }

    @Override
    public void filter(DiscFilterRequest request, ResponseHandler responseHandler) {
        try {
            // Identity claimed by the client certificate, if one was presented.
            Optional<AthenzPrincipal> certificatePrincipal = getClientCertificate(request)
                    .map(AthenzIdentities::from)
                    .map(AthenzPrincipal::new);
            // Identity claimed by the principal token header, if present and valid.
            Optional<AthenzPrincipal> nTokenPrincipal = getPrincipalToken(request, principalTokenHeader)
                    .map(validator::validate);
            if (!certificatePrincipal.isPresent() && !nTokenPrincipal.isPresent()) {
                String errorMessage = "Unable to authenticate Athenz identity. " +
                        "Either client certificate or principal token is required.";
                sendErrorResponse(responseHandler, Response.Status.UNAUTHORIZED, errorMessage);
                return;
            }
            // When both credentials are present they must agree on the identity.
            if (certificatePrincipal.isPresent() && nTokenPrincipal.isPresent()
                    && !certificatePrincipal.get().getIdentity().equals(nTokenPrincipal.get().getIdentity())) {
                String errorMessage = String.format(
                        "Identity in principal token does not match x509 CN: token-identity=%s, cert-identity=%s",
                        nTokenPrincipal.get().getIdentity().getFullName(),
                        certificatePrincipal.get().getIdentity().getFullName());
                sendErrorResponse(responseHandler, Response.Status.UNAUTHORIZED, errorMessage);
                return;
            }
            // Prefer the NToken principal when both are present (identities match at this point).
            AthenzPrincipal principal = nTokenPrincipal.orElseGet(certificatePrincipal::get);
            request.setUserPrincipal(principal);
            request.setRemoteUser(principal.getName());
        } catch (Exception e) {
            // Any validation failure (e.g. invalid token) is treated as unauthorized.
            sendErrorResponse(responseHandler,Response.Status.UNAUTHORIZED, e.getMessage());
        }
    }

    /** Reads the principal token header, treating a missing or empty header as absent. */
    private static Optional<NToken> getPrincipalToken(DiscFilterRequest request, String principalTokenHeaderName) {
        return Optional.ofNullable(request.getHeader(principalTokenHeaderName))
                .filter(token -> !token.isEmpty())
                .map(NToken::new);
    }
}
Please add a method on Environment instead of matching against the free-form, human-readable region string.
/**
 * Populates a clean container data directory with node-specific files:
 * config server files (required) and MOTD/prompt files (cosmetic).
 */
private void createContainerData(ContainerNodeSpec nodeSpec) {
    ContainerData containerData = ContainerData.createClean(environment, ContainerName.fromHostname(nodeSpec.hostname));
    if (nodeSpec.nodeType.equals(NodeType.config.name())) {
        logger.info("Creating files needed by config server");
        new ConfigServerContainerData(environment, nodeSpec.hostname).writeTo(containerData);
    }
    // NOTE(review): inferring the cloud from a free-form region prefix is fragile;
    // prefer an explicit accessor on Environment (e.g. getCloud()) — TODO confirm.
    if (environment.getRegion().startsWith("aws-")) {
        logger.info("Creating files for message of the day and the bash prompt");
        new MotdContainerData(nodeSpec, environment).writeTo(containerData);
        new PromptContainerData(environment).writeTo(containerData);
    }
}
if (environment.getRegion().startsWith("aws-")) {
/**
 * Populates a clean container data directory with node-specific files:
 * config server files (required) and MOTD/prompt files (cosmetic).
 */
private void createContainerData(ContainerNodeSpec nodeSpec) {
    ContainerData containerData = ContainerData.createClean(environment, ContainerName.fromHostname(nodeSpec.hostname));
    if (nodeSpec.nodeType.equals(NodeType.config.name())) {
        logger.info("Creating files needed by config server");
        new ConfigServerContainerData(environment, nodeSpec.hostname).writeTo(containerData);
    }
    // NOTE(review): inferring the cloud from a free-form region prefix is fragile;
    // prefer an explicit accessor on Environment (e.g. getCloud()) — TODO confirm.
    if (environment.getRegion().startsWith("aws-")) {
        logger.info("Creating files for message of the day and the bash prompt");
        new MotdContainerData(nodeSpec, environment).writeTo(containerData);
        new PromptContainerData(environment).writeTo(containerData);
    }
}
/**
 * Tracks CPU-time deltas between successive samples for a docker container and
 * exposes the container's share of total system CPU time over that interval.
 * Ratios are NaN until two samples have been recorded.
 */
class CpuUsageReporter {
    // Cumulative counters from the previous sample (0 before the first update).
    private long containerKernelUsage = 0;
    private long totalContainerUsage = 0;
    private long totalSystemUsage = 0;

    // Deltas between the last two samples.
    private long deltaContainerKernelUsage;
    private long deltaContainerUsage;
    private long deltaSystemUsage;

    private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) {
        // The very first sample has no previous system reading, so its delta is defined as 0.
        if (this.totalSystemUsage == 0) {
            deltaSystemUsage = 0;
        } else {
            deltaSystemUsage = totalSystemUsage - this.totalSystemUsage;
        }
        deltaContainerUsage = totalContainerUsage - this.totalContainerUsage;
        deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage;

        this.totalSystemUsage = totalSystemUsage;
        this.totalContainerUsage = totalContainerUsage;
        this.containerKernelUsage = containerKernelUsage;
    }

    /**
     * Returns the CPU usage ratio for the docker container that this NodeAgent is managing
     * in the time between the last two times updateCpuDeltas() was called. This is calculated
     * by dividing the CPU time used by the container with the CPU time used by the entire system.
     */
    double getCpuUsageRatio() {
        return ratioOfSystemDelta(deltaContainerUsage);
    }

    /** Kernel-mode variant of {@link #getCpuUsageRatio()}. */
    double getCpuKernelUsageRatio() {
        return ratioOfSystemDelta(deltaContainerKernelUsage);
    }

    /** Divides the given delta by the system delta, or NaN if there is no system delta yet. */
    private double ratioOfSystemDelta(long delta) {
        return deltaSystemUsage == 0 ? Double.NaN : (double) delta / deltaSystemUsage;
    }
}
/**
 * Tracks CPU-time deltas between successive samples for a docker container.
 * Ratios are NaN until two samples have been recorded.
 * No synchronization — assumes a single sampling thread (TODO confirm).
 */
class CpuUsageReporter {
    // Cumulative counters from the previous sample (0 before the first update).
    private long containerKernelUsage = 0;
    private long totalContainerUsage = 0;
    private long totalSystemUsage = 0;

    // Deltas between the last two samples.
    private long deltaContainerKernelUsage;
    private long deltaContainerUsage;
    private long deltaSystemUsage;

    private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) {
        // First sample has no previous system reading, so its delta is defined as 0.
        deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage);
        deltaContainerUsage = totalContainerUsage - this.totalContainerUsage;
        deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage;
        this.totalSystemUsage = totalSystemUsage;
        this.totalContainerUsage = totalContainerUsage;
        this.containerKernelUsage = containerKernelUsage;
    }

    /**
     * Returns the CPU usage ratio for the docker container that this NodeAgent is managing
     * in the time between the last two times updateCpuDeltas() was called. This is calculated
     * by dividing the CPU time used by the container with the CPU time used by the entire system.
     */
    double getCpuUsageRatio() {
        return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage;
    }

    /** Kernel-mode variant of {@link #getCpuUsageRatio()}. */
    double getCpuKernelUsageRatio() {
        return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage;
    }
}
Yes, we should do this properly. I just wanted to un-break CD ASAP. So, a `Cloud getCloud()`, or something, is probably a good idea.
/**
 * Populates a clean container data directory with node-specific files:
 * config server files (required) and MOTD/prompt files (cosmetic).
 */
private void createContainerData(ContainerNodeSpec nodeSpec) {
    ContainerData containerData = ContainerData.createClean(environment, ContainerName.fromHostname(nodeSpec.hostname));
    if (nodeSpec.nodeType.equals(NodeType.config.name())) {
        logger.info("Creating files needed by config server");
        new ConfigServerContainerData(environment, nodeSpec.hostname).writeTo(containerData);
    }
    // NOTE(review): inferring the cloud from a free-form region prefix is fragile;
    // prefer an explicit accessor on Environment (e.g. getCloud()) — TODO confirm.
    if (environment.getRegion().startsWith("aws-")) {
        logger.info("Creating files for message of the day and the bash prompt");
        new MotdContainerData(nodeSpec, environment).writeTo(containerData);
        new PromptContainerData(environment).writeTo(containerData);
    }
}
if (environment.getRegion().startsWith("aws-")) {
/**
 * Populates a clean container data directory with node-specific files:
 * config server files (required) and MOTD/prompt files (cosmetic).
 */
private void createContainerData(ContainerNodeSpec nodeSpec) {
    ContainerData containerData = ContainerData.createClean(environment, ContainerName.fromHostname(nodeSpec.hostname));
    if (nodeSpec.nodeType.equals(NodeType.config.name())) {
        logger.info("Creating files needed by config server");
        new ConfigServerContainerData(environment, nodeSpec.hostname).writeTo(containerData);
    }
    // NOTE(review): inferring the cloud from a free-form region prefix is fragile;
    // prefer an explicit accessor on Environment (e.g. getCloud()) — TODO confirm.
    if (environment.getRegion().startsWith("aws-")) {
        logger.info("Creating files for message of the day and the bash prompt");
        new MotdContainerData(nodeSpec, environment).writeTo(containerData);
        new PromptContainerData(environment).writeTo(containerData);
    }
}
/**
 * Tracks CPU-time deltas between successive samples for a docker container.
 * Ratios are NaN until two samples have been recorded.
 * No synchronization — assumes a single sampling thread (TODO confirm).
 */
class CpuUsageReporter {
    // Cumulative counters from the previous sample (0 before the first update).
    private long containerKernelUsage = 0;
    private long totalContainerUsage = 0;
    private long totalSystemUsage = 0;

    // Deltas between the last two samples.
    private long deltaContainerKernelUsage;
    private long deltaContainerUsage;
    private long deltaSystemUsage;

    private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) {
        // First sample has no previous system reading, so its delta is defined as 0.
        deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage);
        deltaContainerUsage = totalContainerUsage - this.totalContainerUsage;
        deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage;
        this.totalSystemUsage = totalSystemUsage;
        this.totalContainerUsage = totalContainerUsage;
        this.containerKernelUsage = containerKernelUsage;
    }

    /**
     * Returns the CPU usage ratio for the docker container that this NodeAgent is managing
     * in the time between the last two times updateCpuDeltas() was called. This is calculated
     * by dividing the CPU time used by the container with the CPU time used by the entire system.
     */
    double getCpuUsageRatio() {
        return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage;
    }

    /** Kernel-mode variant of {@link #getCpuUsageRatio()}. */
    double getCpuKernelUsageRatio() {
        return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage;
    }
}
/**
 * Tracks CPU-time deltas between successive samples for a docker container.
 * Ratios are NaN until two samples have been recorded.
 * No synchronization — assumes a single sampling thread (TODO confirm).
 */
class CpuUsageReporter {
    // Cumulative counters from the previous sample (0 before the first update).
    private long containerKernelUsage = 0;
    private long totalContainerUsage = 0;
    private long totalSystemUsage = 0;

    // Deltas between the last two samples.
    private long deltaContainerKernelUsage;
    private long deltaContainerUsage;
    private long deltaSystemUsage;

    private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) {
        // First sample has no previous system reading, so its delta is defined as 0.
        deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage);
        deltaContainerUsage = totalContainerUsage - this.totalContainerUsage;
        deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage;
        this.totalSystemUsage = totalSystemUsage;
        this.totalContainerUsage = totalContainerUsage;
        this.containerKernelUsage = containerKernelUsage;
    }

    /**
     * Returns the CPU usage ratio for the docker container that this NodeAgent is managing
     * in the time between the last two times updateCpuDeltas() was called. This is calculated
     * by dividing the CPU time used by the container with the CPU time used by the entire system.
     */
    double getCpuUsageRatio() {
        return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage;
    }

    /** Kernel-mode variant of {@link #getCpuUsageRatio()}. */
    double getCpuKernelUsageRatio() {
        return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage;
    }
}
Ah, that was harder than expected ...
/**
 * Populates a clean container data directory with node-specific files:
 * config server files (required) and MOTD/prompt files (cosmetic).
 */
private void createContainerData(ContainerNodeSpec nodeSpec) {
    ContainerData containerData = ContainerData.createClean(environment, ContainerName.fromHostname(nodeSpec.hostname));
    if (nodeSpec.nodeType.equals(NodeType.config.name())) {
        logger.info("Creating files needed by config server");
        new ConfigServerContainerData(environment, nodeSpec.hostname).writeTo(containerData);
    }
    // NOTE(review): inferring the cloud from a free-form region prefix is fragile;
    // prefer an explicit accessor on Environment (e.g. getCloud()) — TODO confirm.
    if (environment.getRegion().startsWith("aws-")) {
        logger.info("Creating files for message of the day and the bash prompt");
        new MotdContainerData(nodeSpec, environment).writeTo(containerData);
        new PromptContainerData(environment).writeTo(containerData);
    }
}
if (environment.getRegion().startsWith("aws-")) {
/**
 * Populates a clean container data directory with node-specific files:
 * config server files (required) and MOTD/prompt files (cosmetic).
 */
private void createContainerData(ContainerNodeSpec nodeSpec) {
    ContainerData containerData = ContainerData.createClean(environment, ContainerName.fromHostname(nodeSpec.hostname));
    if (nodeSpec.nodeType.equals(NodeType.config.name())) {
        logger.info("Creating files needed by config server");
        new ConfigServerContainerData(environment, nodeSpec.hostname).writeTo(containerData);
    }
    // NOTE(review): inferring the cloud from a free-form region prefix is fragile;
    // prefer an explicit accessor on Environment (e.g. getCloud()) — TODO confirm.
    if (environment.getRegion().startsWith("aws-")) {
        logger.info("Creating files for message of the day and the bash prompt");
        new MotdContainerData(nodeSpec, environment).writeTo(containerData);
        new PromptContainerData(environment).writeTo(containerData);
    }
}
/**
 * Tracks CPU-time deltas between successive samples for a docker container.
 * Ratios are NaN until two samples have been recorded.
 * No synchronization — assumes a single sampling thread (TODO confirm).
 */
class CpuUsageReporter {
    // Cumulative counters from the previous sample (0 before the first update).
    private long containerKernelUsage = 0;
    private long totalContainerUsage = 0;
    private long totalSystemUsage = 0;

    // Deltas between the last two samples.
    private long deltaContainerKernelUsage;
    private long deltaContainerUsage;
    private long deltaSystemUsage;

    private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) {
        // First sample has no previous system reading, so its delta is defined as 0.
        deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage);
        deltaContainerUsage = totalContainerUsage - this.totalContainerUsage;
        deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage;
        this.totalSystemUsage = totalSystemUsage;
        this.totalContainerUsage = totalContainerUsage;
        this.containerKernelUsage = containerKernelUsage;
    }

    /**
     * Returns the CPU usage ratio for the docker container that this NodeAgent is managing
     * in the time between the last two times updateCpuDeltas() was called. This is calculated
     * by dividing the CPU time used by the container with the CPU time used by the entire system.
     */
    double getCpuUsageRatio() {
        return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage;
    }

    /** Kernel-mode variant of {@link #getCpuUsageRatio()}. */
    double getCpuKernelUsageRatio() {
        return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage;
    }
}
/**
 * Tracks CPU-time deltas between successive samples for a docker container.
 * Ratios are NaN until two samples have been recorded.
 * No synchronization — assumes a single sampling thread (TODO confirm).
 */
class CpuUsageReporter {
    // Cumulative counters from the previous sample (0 before the first update).
    private long containerKernelUsage = 0;
    private long totalContainerUsage = 0;
    private long totalSystemUsage = 0;

    // Deltas between the last two samples.
    private long deltaContainerKernelUsage;
    private long deltaContainerUsage;
    private long deltaSystemUsage;

    private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) {
        // First sample has no previous system reading, so its delta is defined as 0.
        deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage);
        deltaContainerUsage = totalContainerUsage - this.totalContainerUsage;
        deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage;
        this.totalSystemUsage = totalSystemUsage;
        this.totalContainerUsage = totalContainerUsage;
        this.containerKernelUsage = containerKernelUsage;
    }

    /**
     * Returns the CPU usage ratio for the docker container that this NodeAgent is managing
     * in the time between the last two times updateCpuDeltas() was called. This is calculated
     * by dividing the CPU time used by the container with the CPU time used by the entire system.
     */
    double getCpuUsageRatio() {
        return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage;
    }

    /** Kernel-mode variant of {@link #getCpuUsageRatio()}. */
    double getCpuKernelUsageRatio() {
        return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage;
    }
}
Checking against AWS is artificial in any case. Could we just check directly whether using the ContainerData would work?
/**
 * Populates a clean container data directory with node-specific files:
 * config server files (required) and MOTD/prompt files (cosmetic).
 */
private void createContainerData(ContainerNodeSpec nodeSpec) {
    ContainerData containerData = ContainerData.createClean(environment, ContainerName.fromHostname(nodeSpec.hostname));
    if (nodeSpec.nodeType.equals(NodeType.config.name())) {
        logger.info("Creating files needed by config server");
        new ConfigServerContainerData(environment, nodeSpec.hostname).writeTo(containerData);
    }
    // NOTE(review): inferring the cloud from a free-form region prefix is fragile;
    // prefer an explicit accessor on Environment (e.g. getCloud()) — TODO confirm.
    if (environment.getRegion().startsWith("aws-")) {
        logger.info("Creating files for message of the day and the bash prompt");
        new MotdContainerData(nodeSpec, environment).writeTo(containerData);
        new PromptContainerData(environment).writeTo(containerData);
    }
}
if (environment.getRegion().startsWith("aws-")) {
/**
 * Populates a clean container data directory with node-specific files:
 * config server files (required) and MOTD/prompt files (cosmetic).
 */
private void createContainerData(ContainerNodeSpec nodeSpec) {
    ContainerData containerData = ContainerData.createClean(environment, ContainerName.fromHostname(nodeSpec.hostname));
    if (nodeSpec.nodeType.equals(NodeType.config.name())) {
        logger.info("Creating files needed by config server");
        new ConfigServerContainerData(environment, nodeSpec.hostname).writeTo(containerData);
    }
    // NOTE(review): inferring the cloud from a free-form region prefix is fragile;
    // prefer an explicit accessor on Environment (e.g. getCloud()) — TODO confirm.
    if (environment.getRegion().startsWith("aws-")) {
        logger.info("Creating files for message of the day and the bash prompt");
        new MotdContainerData(nodeSpec, environment).writeTo(containerData);
        new PromptContainerData(environment).writeTo(containerData);
    }
}
/**
 * Tracks CPU-time deltas between successive samples for a docker container.
 * Ratios are NaN until two samples have been recorded.
 * No synchronization — assumes a single sampling thread (TODO confirm).
 */
class CpuUsageReporter {
    // Cumulative counters from the previous sample (0 before the first update).
    private long containerKernelUsage = 0;
    private long totalContainerUsage = 0;
    private long totalSystemUsage = 0;

    // Deltas between the last two samples.
    private long deltaContainerKernelUsage;
    private long deltaContainerUsage;
    private long deltaSystemUsage;

    private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) {
        // First sample has no previous system reading, so its delta is defined as 0.
        deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage);
        deltaContainerUsage = totalContainerUsage - this.totalContainerUsage;
        deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage;
        this.totalSystemUsage = totalSystemUsage;
        this.totalContainerUsage = totalContainerUsage;
        this.containerKernelUsage = containerKernelUsage;
    }

    /**
     * Returns the CPU usage ratio for the docker container that this NodeAgent is managing
     * in the time between the last two times updateCpuDeltas() was called. This is calculated
     * by dividing the CPU time used by the container with the CPU time used by the entire system.
     */
    double getCpuUsageRatio() {
        return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage;
    }

    /** Kernel-mode variant of {@link #getCpuUsageRatio()}. */
    double getCpuKernelUsageRatio() {
        return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage;
    }
}
/**
 * Tracks CPU-time deltas between successive samples for a docker container.
 * Ratios are NaN until two samples have been recorded.
 * No synchronization — assumes a single sampling thread (TODO confirm).
 */
class CpuUsageReporter {
    // Cumulative counters from the previous sample (0 before the first update).
    private long containerKernelUsage = 0;
    private long totalContainerUsage = 0;
    private long totalSystemUsage = 0;

    // Deltas between the last two samples.
    private long deltaContainerKernelUsage;
    private long deltaContainerUsage;
    private long deltaSystemUsage;

    private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) {
        // First sample has no previous system reading, so its delta is defined as 0.
        deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage);
        deltaContainerUsage = totalContainerUsage - this.totalContainerUsage;
        deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage;
        this.totalSystemUsage = totalSystemUsage;
        this.totalContainerUsage = totalContainerUsage;
        this.containerKernelUsage = containerKernelUsage;
    }

    /**
     * Returns the CPU usage ratio for the docker container that this NodeAgent is managing
     * in the time between the last two times updateCpuDeltas() was called. This is calculated
     * by dividing the CPU time used by the container with the CPU time used by the entire system.
     */
    double getCpuUsageRatio() {
        return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage;
    }

    /** Kernel-mode variant of {@link #getCpuUsageRatio()}. */
    double getCpuKernelUsageRatio() {
        return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage;
    }
}
`whoami = "root"`
/**
 * Populates a clean container data directory with node-specific files:
 * config server files (required) and MOTD/prompt files (cosmetic).
 */
private void createContainerData(ContainerNodeSpec nodeSpec) {
    ContainerData containerData = ContainerData.createClean(environment, ContainerName.fromHostname(nodeSpec.hostname));
    if (nodeSpec.nodeType.equals(NodeType.config.name())) {
        logger.info("Creating files needed by config server");
        new ConfigServerContainerData(environment, nodeSpec.hostname).writeTo(containerData);
    }
    // NOTE(review): inferring the cloud from a free-form region prefix is fragile;
    // prefer an explicit accessor on Environment (e.g. getCloud()) — TODO confirm.
    if (environment.getRegion().startsWith("aws-")) {
        logger.info("Creating files for message of the day and the bash prompt");
        new MotdContainerData(nodeSpec, environment).writeTo(containerData);
        new PromptContainerData(environment).writeTo(containerData);
    }
}
if (environment.getRegion().startsWith("aws-")) {
/**
 * Populates a clean container data directory with node-specific files:
 * config server files (required) and MOTD/prompt files (cosmetic).
 */
private void createContainerData(ContainerNodeSpec nodeSpec) {
    ContainerData containerData = ContainerData.createClean(environment, ContainerName.fromHostname(nodeSpec.hostname));
    if (nodeSpec.nodeType.equals(NodeType.config.name())) {
        logger.info("Creating files needed by config server");
        new ConfigServerContainerData(environment, nodeSpec.hostname).writeTo(containerData);
    }
    // NOTE(review): inferring the cloud from a free-form region prefix is fragile;
    // prefer an explicit accessor on Environment (e.g. getCloud()) — TODO confirm.
    if (environment.getRegion().startsWith("aws-")) {
        logger.info("Creating files for message of the day and the bash prompt");
        new MotdContainerData(nodeSpec, environment).writeTo(containerData);
        new PromptContainerData(environment).writeTo(containerData);
    }
}
/**
 * Tracks CPU-time deltas between successive samples for a docker container.
 * Ratios are NaN until two samples have been recorded.
 * No synchronization — assumes a single sampling thread (TODO confirm).
 */
class CpuUsageReporter {
    // Cumulative counters from the previous sample (0 before the first update).
    private long containerKernelUsage = 0;
    private long totalContainerUsage = 0;
    private long totalSystemUsage = 0;

    // Deltas between the last two samples.
    private long deltaContainerKernelUsage;
    private long deltaContainerUsage;
    private long deltaSystemUsage;

    private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) {
        // First sample has no previous system reading, so its delta is defined as 0.
        deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage);
        deltaContainerUsage = totalContainerUsage - this.totalContainerUsage;
        deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage;
        this.totalSystemUsage = totalSystemUsage;
        this.totalContainerUsage = totalContainerUsage;
        this.containerKernelUsage = containerKernelUsage;
    }

    /**
     * Returns the CPU usage ratio for the docker container that this NodeAgent is managing
     * in the time between the last two times updateCpuDeltas() was called. This is calculated
     * by dividing the CPU time used by the container with the CPU time used by the entire system.
     */
    double getCpuUsageRatio() {
        return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage;
    }

    /** Kernel-mode variant of {@link #getCpuUsageRatio()}. */
    double getCpuKernelUsageRatio() {
        return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage;
    }
}
/**
 * Tracks CPU-time deltas between successive samples for a docker container.
 * Ratios are NaN until two samples have been recorded.
 * No synchronization — assumes a single sampling thread (TODO confirm).
 */
class CpuUsageReporter {
    // Cumulative counters from the previous sample (0 before the first update).
    private long containerKernelUsage = 0;
    private long totalContainerUsage = 0;
    private long totalSystemUsage = 0;

    // Deltas between the last two samples.
    private long deltaContainerKernelUsage;
    private long deltaContainerUsage;
    private long deltaSystemUsage;

    private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) {
        // First sample has no previous system reading, so its delta is defined as 0.
        deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage);
        deltaContainerUsage = totalContainerUsage - this.totalContainerUsage;
        deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage;
        this.totalSystemUsage = totalSystemUsage;
        this.totalContainerUsage = totalContainerUsage;
        this.containerKernelUsage = containerKernelUsage;
    }

    /**
     * Returns the CPU usage ratio for the docker container that this NodeAgent is managing
     * in the time between the last two times updateCpuDeltas() was called. This is calculated
     * by dividing the CPU time used by the container with the CPU time used by the entire system.
     */
    double getCpuUsageRatio() {
        return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage;
    }

    /** Kernel-mode variant of {@link #getCpuUsageRatio()}. */
    double getCpuKernelUsageRatio() {
        return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage;
    }
}
Maybe something like `if (Files.isWritable(ContainerData.containerDataPath)) {`? Add that as a function to ContainerData?
/**
 * Populates a clean container data directory with node-specific files:
 * config server files (required) and MOTD/prompt files (cosmetic).
 */
private void createContainerData(ContainerNodeSpec nodeSpec) {
    ContainerData containerData = ContainerData.createClean(environment, ContainerName.fromHostname(nodeSpec.hostname));
    if (nodeSpec.nodeType.equals(NodeType.config.name())) {
        logger.info("Creating files needed by config server");
        new ConfigServerContainerData(environment, nodeSpec.hostname).writeTo(containerData);
    }
    // NOTE(review): inferring the cloud from a free-form region prefix is fragile;
    // prefer an explicit accessor on Environment (e.g. getCloud()) — TODO confirm.
    if (environment.getRegion().startsWith("aws-")) {
        logger.info("Creating files for message of the day and the bash prompt");
        new MotdContainerData(nodeSpec, environment).writeTo(containerData);
        new PromptContainerData(environment).writeTo(containerData);
    }
}
if (environment.getRegion().startsWith("aws-")) {
/**
 * Populates a clean container data directory with node-specific files:
 * config server files (required) and MOTD/prompt files (cosmetic).
 */
private void createContainerData(ContainerNodeSpec nodeSpec) {
    ContainerData containerData = ContainerData.createClean(environment, ContainerName.fromHostname(nodeSpec.hostname));
    if (nodeSpec.nodeType.equals(NodeType.config.name())) {
        logger.info("Creating files needed by config server");
        new ConfigServerContainerData(environment, nodeSpec.hostname).writeTo(containerData);
    }
    // NOTE(review): inferring the cloud from a free-form region prefix is fragile;
    // prefer an explicit accessor on Environment (e.g. getCloud()) — TODO confirm.
    if (environment.getRegion().startsWith("aws-")) {
        logger.info("Creating files for message of the day and the bash prompt");
        new MotdContainerData(nodeSpec, environment).writeTo(containerData);
        new PromptContainerData(environment).writeTo(containerData);
    }
}
/**
 * Tracks CPU-time deltas between successive samples for a docker container.
 * Ratios are NaN until two samples have been recorded.
 * No synchronization — assumes a single sampling thread (TODO confirm).
 */
class CpuUsageReporter {
    // Cumulative counters from the previous sample (0 before the first update).
    private long containerKernelUsage = 0;
    private long totalContainerUsage = 0;
    private long totalSystemUsage = 0;

    // Deltas between the last two samples.
    private long deltaContainerKernelUsage;
    private long deltaContainerUsage;
    private long deltaSystemUsage;

    private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) {
        // First sample has no previous system reading, so its delta is defined as 0.
        deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage);
        deltaContainerUsage = totalContainerUsage - this.totalContainerUsage;
        deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage;
        this.totalSystemUsage = totalSystemUsage;
        this.totalContainerUsage = totalContainerUsage;
        this.containerKernelUsage = containerKernelUsage;
    }

    /**
     * Returns the CPU usage ratio for the docker container that this NodeAgent is managing
     * in the time between the last two times updateCpuDeltas() was called. This is calculated
     * by dividing the CPU time used by the container with the CPU time used by the entire system.
     */
    double getCpuUsageRatio() {
        return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage;
    }

    /** Kernel-mode variant of {@link #getCpuUsageRatio()}. */
    double getCpuKernelUsageRatio() {
        return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage;
    }
}
class CpuUsageReporter { private long containerKernelUsage = 0; private long totalContainerUsage = 0; private long totalSystemUsage = 0; private long deltaContainerKernelUsage; private long deltaContainerUsage; private long deltaSystemUsage; private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) { deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage); deltaContainerUsage = totalContainerUsage - this.totalContainerUsage; deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage; this.totalSystemUsage = totalSystemUsage; this.totalContainerUsage = totalContainerUsage; this.containerKernelUsage = containerKernelUsage; } /** * Returns the CPU usage ratio for the docker container that this NodeAgent is managing * in the time between the last two times updateCpuDeltas() was called. This is calculated * by dividing the CPU time used by the container with the CPU time used by the entire system. */ double getCpuUsageRatio() { return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage; } double getCpuKernelUsageRatio() { return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage; } }
Yes, something like that is clearly the right way when writing is optional. I think the right thing here is to throw an exception when we can't allow this to fail, which is the case for the config server usage. For the MotD and the prompt, it's fine to just skip the writing if we can't do it. I'll add a fix like this, but I may choose try-catch in case there are other things that will make us fail that we haven't thought about yet :)
private void createContainerData(ContainerNodeSpec nodeSpec) { ContainerData containerData = ContainerData.createClean(environment, ContainerName.fromHostname(nodeSpec.hostname)); if (nodeSpec.nodeType.equals(NodeType.config.name())) { logger.info("Creating files needed by config server"); new ConfigServerContainerData(environment, nodeSpec.hostname).writeTo(containerData); } if (environment.getRegion().startsWith("aws-")) { logger.info("Creating files for message of the day and the bash prompt"); new MotdContainerData(nodeSpec, environment).writeTo(containerData); new PromptContainerData(environment).writeTo(containerData); } }
if (environment.getRegion().startsWith("aws-")) {
private void createContainerData(ContainerNodeSpec nodeSpec) { ContainerData containerData = ContainerData.createClean(environment, ContainerName.fromHostname(nodeSpec.hostname)); if (nodeSpec.nodeType.equals(NodeType.config.name())) { logger.info("Creating files needed by config server"); new ConfigServerContainerData(environment, nodeSpec.hostname).writeTo(containerData); } if (environment.getRegion().startsWith("aws-")) { logger.info("Creating files for message of the day and the bash prompt"); new MotdContainerData(nodeSpec, environment).writeTo(containerData); new PromptContainerData(environment).writeTo(containerData); } }
class CpuUsageReporter { private long containerKernelUsage = 0; private long totalContainerUsage = 0; private long totalSystemUsage = 0; private long deltaContainerKernelUsage; private long deltaContainerUsage; private long deltaSystemUsage; private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) { deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage); deltaContainerUsage = totalContainerUsage - this.totalContainerUsage; deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage; this.totalSystemUsage = totalSystemUsage; this.totalContainerUsage = totalContainerUsage; this.containerKernelUsage = containerKernelUsage; } /** * Returns the CPU usage ratio for the docker container that this NodeAgent is managing * in the time between the last two times updateCpuDeltas() was called. This is calculated * by dividing the CPU time used by the container with the CPU time used by the entire system. */ double getCpuUsageRatio() { return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage; } double getCpuKernelUsageRatio() { return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage; } }
class CpuUsageReporter { private long containerKernelUsage = 0; private long totalContainerUsage = 0; private long totalSystemUsage = 0; private long deltaContainerKernelUsage; private long deltaContainerUsage; private long deltaSystemUsage; private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) { deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage); deltaContainerUsage = totalContainerUsage - this.totalContainerUsage; deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage; this.totalSystemUsage = totalSystemUsage; this.totalContainerUsage = totalContainerUsage; this.containerKernelUsage = containerKernelUsage; } /** * Returns the CPU usage ratio for the docker container that this NodeAgent is managing * in the time between the last two times updateCpuDeltas() was called. This is calculated * by dividing the CPU time used by the container with the CPU time used by the entire system. */ double getCpuUsageRatio() { return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage; } double getCpuKernelUsageRatio() { return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage; } }
It would be nice to know in a catch, whether the exception is an error and should be logged as such w/stacktrace, or whether it is fine and can be ignored. Testing for Files.isWriteable is better in that respect.
private void createContainerData(ContainerNodeSpec nodeSpec) { ContainerData containerData = ContainerData.createClean(environment, ContainerName.fromHostname(nodeSpec.hostname)); if (nodeSpec.nodeType.equals(NodeType.config.name())) { logger.info("Creating files needed by config server"); new ConfigServerContainerData(environment, nodeSpec.hostname).writeTo(containerData); } if (environment.getRegion().startsWith("aws-")) { logger.info("Creating files for message of the day and the bash prompt"); new MotdContainerData(nodeSpec, environment).writeTo(containerData); new PromptContainerData(environment).writeTo(containerData); } }
if (environment.getRegion().startsWith("aws-")) {
private void createContainerData(ContainerNodeSpec nodeSpec) { ContainerData containerData = ContainerData.createClean(environment, ContainerName.fromHostname(nodeSpec.hostname)); if (nodeSpec.nodeType.equals(NodeType.config.name())) { logger.info("Creating files needed by config server"); new ConfigServerContainerData(environment, nodeSpec.hostname).writeTo(containerData); } if (environment.getRegion().startsWith("aws-")) { logger.info("Creating files for message of the day and the bash prompt"); new MotdContainerData(nodeSpec, environment).writeTo(containerData); new PromptContainerData(environment).writeTo(containerData); } }
class CpuUsageReporter { private long containerKernelUsage = 0; private long totalContainerUsage = 0; private long totalSystemUsage = 0; private long deltaContainerKernelUsage; private long deltaContainerUsage; private long deltaSystemUsage; private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) { deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage); deltaContainerUsage = totalContainerUsage - this.totalContainerUsage; deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage; this.totalSystemUsage = totalSystemUsage; this.totalContainerUsage = totalContainerUsage; this.containerKernelUsage = containerKernelUsage; } /** * Returns the CPU usage ratio for the docker container that this NodeAgent is managing * in the time between the last two times updateCpuDeltas() was called. This is calculated * by dividing the CPU time used by the container with the CPU time used by the entire system. */ double getCpuUsageRatio() { return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage; } double getCpuKernelUsageRatio() { return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage; } }
class CpuUsageReporter { private long containerKernelUsage = 0; private long totalContainerUsage = 0; private long totalSystemUsage = 0; private long deltaContainerKernelUsage; private long deltaContainerUsage; private long deltaSystemUsage; private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) { deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage); deltaContainerUsage = totalContainerUsage - this.totalContainerUsage; deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage; this.totalSystemUsage = totalSystemUsage; this.totalContainerUsage = totalContainerUsage; this.containerKernelUsage = containerKernelUsage; } /** * Returns the CPU usage ratio for the docker container that this NodeAgent is managing * in the time between the last two times updateCpuDeltas() was called. This is calculated * by dividing the CPU time used by the container with the CPU time used by the entire system. */ double getCpuUsageRatio() { return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage; } double getCpuKernelUsageRatio() { return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage; } }
That's why I was thinking to try-catch only the part that is allowed to fail.
private void createContainerData(ContainerNodeSpec nodeSpec) { ContainerData containerData = ContainerData.createClean(environment, ContainerName.fromHostname(nodeSpec.hostname)); if (nodeSpec.nodeType.equals(NodeType.config.name())) { logger.info("Creating files needed by config server"); new ConfigServerContainerData(environment, nodeSpec.hostname).writeTo(containerData); } if (environment.getRegion().startsWith("aws-")) { logger.info("Creating files for message of the day and the bash prompt"); new MotdContainerData(nodeSpec, environment).writeTo(containerData); new PromptContainerData(environment).writeTo(containerData); } }
if (environment.getRegion().startsWith("aws-")) {
private void createContainerData(ContainerNodeSpec nodeSpec) { ContainerData containerData = ContainerData.createClean(environment, ContainerName.fromHostname(nodeSpec.hostname)); if (nodeSpec.nodeType.equals(NodeType.config.name())) { logger.info("Creating files needed by config server"); new ConfigServerContainerData(environment, nodeSpec.hostname).writeTo(containerData); } if (environment.getRegion().startsWith("aws-")) { logger.info("Creating files for message of the day and the bash prompt"); new MotdContainerData(nodeSpec, environment).writeTo(containerData); new PromptContainerData(environment).writeTo(containerData); } }
class CpuUsageReporter { private long containerKernelUsage = 0; private long totalContainerUsage = 0; private long totalSystemUsage = 0; private long deltaContainerKernelUsage; private long deltaContainerUsage; private long deltaSystemUsage; private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) { deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage); deltaContainerUsage = totalContainerUsage - this.totalContainerUsage; deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage; this.totalSystemUsage = totalSystemUsage; this.totalContainerUsage = totalContainerUsage; this.containerKernelUsage = containerKernelUsage; } /** * Returns the CPU usage ratio for the docker container that this NodeAgent is managing * in the time between the last two times updateCpuDeltas() was called. This is calculated * by dividing the CPU time used by the container with the CPU time used by the entire system. */ double getCpuUsageRatio() { return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage; } double getCpuKernelUsageRatio() { return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage; } }
class CpuUsageReporter { private long containerKernelUsage = 0; private long totalContainerUsage = 0; private long totalSystemUsage = 0; private long deltaContainerKernelUsage; private long deltaContainerUsage; private long deltaSystemUsage; private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) { deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage); deltaContainerUsage = totalContainerUsage - this.totalContainerUsage; deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage; this.totalSystemUsage = totalSystemUsage; this.totalContainerUsage = totalContainerUsage; this.containerKernelUsage = containerKernelUsage; } /** * Returns the CPU usage ratio for the docker container that this NodeAgent is managing * in the time between the last two times updateCpuDeltas() was called. This is calculated * by dividing the CPU time used by the container with the CPU time used by the entire system. */ double getCpuUsageRatio() { return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage; } double getCpuKernelUsageRatio() { return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage; } }
Ok, my point was that I think it's better to `try-catch`, or check `Files.isWritable`, only for the part where we can continue without success.
private void createContainerData(ContainerNodeSpec nodeSpec) { ContainerData containerData = ContainerData.createClean(environment, ContainerName.fromHostname(nodeSpec.hostname)); if (nodeSpec.nodeType.equals(NodeType.config.name())) { logger.info("Creating files needed by config server"); new ConfigServerContainerData(environment, nodeSpec.hostname).writeTo(containerData); } if (environment.getRegion().startsWith("aws-")) { logger.info("Creating files for message of the day and the bash prompt"); new MotdContainerData(nodeSpec, environment).writeTo(containerData); new PromptContainerData(environment).writeTo(containerData); } }
if (environment.getRegion().startsWith("aws-")) {
private void createContainerData(ContainerNodeSpec nodeSpec) { ContainerData containerData = ContainerData.createClean(environment, ContainerName.fromHostname(nodeSpec.hostname)); if (nodeSpec.nodeType.equals(NodeType.config.name())) { logger.info("Creating files needed by config server"); new ConfigServerContainerData(environment, nodeSpec.hostname).writeTo(containerData); } if (environment.getRegion().startsWith("aws-")) { logger.info("Creating files for message of the day and the bash prompt"); new MotdContainerData(nodeSpec, environment).writeTo(containerData); new PromptContainerData(environment).writeTo(containerData); } }
class CpuUsageReporter { private long containerKernelUsage = 0; private long totalContainerUsage = 0; private long totalSystemUsage = 0; private long deltaContainerKernelUsage; private long deltaContainerUsage; private long deltaSystemUsage; private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) { deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage); deltaContainerUsage = totalContainerUsage - this.totalContainerUsage; deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage; this.totalSystemUsage = totalSystemUsage; this.totalContainerUsage = totalContainerUsage; this.containerKernelUsage = containerKernelUsage; } /** * Returns the CPU usage ratio for the docker container that this NodeAgent is managing * in the time between the last two times updateCpuDeltas() was called. This is calculated * by dividing the CPU time used by the container with the CPU time used by the entire system. */ double getCpuUsageRatio() { return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage; } double getCpuKernelUsageRatio() { return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage; } }
class CpuUsageReporter { private long containerKernelUsage = 0; private long totalContainerUsage = 0; private long totalSystemUsage = 0; private long deltaContainerKernelUsage; private long deltaContainerUsage; private long deltaSystemUsage; private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) { deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage); deltaContainerUsage = totalContainerUsage - this.totalContainerUsage; deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage; this.totalSystemUsage = totalSystemUsage; this.totalContainerUsage = totalContainerUsage; this.containerKernelUsage = containerKernelUsage; } /** * Returns the CPU usage ratio for the docker container that this NodeAgent is managing * in the time between the last two times updateCpuDeltas() was called. This is calculated * by dividing the CPU time used by the container with the CPU time used by the entire system. */ double getCpuUsageRatio() { return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage; } double getCpuKernelUsageRatio() { return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage; } }
```suggestion LOG.info("table {} update colocation info failed after insert overwrite, {}.", tableId, e.getMessage()); ```
private void onFinished(Database db, OlapTable targetTable) throws AlterCancelException { try { tmpPartitionNames = getTmpPartitionIds().stream() .map(partitionId -> targetTable.getPartition(partitionId).getName()) .collect(Collectors.toList()); Map<String, Long> partitionLastVersion = Maps.newHashMap(); optimizeClause.getSourcePartitionIds().stream() .map(partitionId -> targetTable.getPartition(partitionId)).forEach( partition -> { sourcePartitionNames.add(partition.getName()); partitionLastVersion.put(partition.getName(), partition.getSubPartitions().stream() .mapToLong(PhysicalPartition::getVisibleVersion).sum()); } ); boolean hasFailedTask = false; for (OptimizeTask rewriteTask : rewriteTasks) { if (rewriteTask.getOptimizeTaskState() == Constants.TaskRunState.FAILED || partitionLastVersion.get(rewriteTask.getPartitionName()) != rewriteTask.getLastVersion()) { LOG.info("optimize job {} rewrite task {} state {} failed or partition {} version {} change to {}", jobId, rewriteTask.getName(), rewriteTask.getOptimizeTaskState(), rewriteTask.getPartitionName(), rewriteTask.getLastVersion(), partitionLastVersion.get(rewriteTask.getPartitionName())); sourcePartitionNames.remove(rewriteTask.getPartitionName()); tmpPartitionNames.remove(rewriteTask.getTempPartitionName()); targetTable.dropTempPartition(rewriteTask.getTempPartitionName(), true); hasFailedTask = true; } } if (sourcePartitionNames.isEmpty()) { throw new AlterCancelException("all partitions rewrite failed"); } if (hasFailedTask && (optimizeClause.getKeysDesc() != null || optimizeClause.getSortKeys() != null)) { rewriteTasks.forEach( rewriteTask -> targetTable.dropTempPartition(rewriteTask.getTempPartitionName(), true)); throw new AlterCancelException("optimize keysType or sort keys failed since some partitions rewrite failed"); } Set<Tablet> sourceTablets = Sets.newHashSet(); sourcePartitionNames.forEach(name -> { Partition partition = targetTable.getPartition(name); for (MaterializedIndex index : 
partition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) { sourceTablets.addAll(index.getTablets()); } }); allPartitionOptimized = false; if (!hasFailedTask && optimizeClause.getDistributionDesc() != null) { Set<String> targetPartitionNames = targetTable.getPartitionNames(); long targetPartitionNum = targetPartitionNames.size(); targetPartitionNames.retainAll(sourcePartitionNames); if (targetPartitionNames.size() == targetPartitionNum && targetPartitionNum == sourcePartitionNames.size()) { allPartitionOptimized = true; } else if (optimizeClause.getDistributionDesc().getType() != targetTable.getDefaultDistributionInfo().getType()) { throw new AlterCancelException("can not change distribution type of target table" + "since partial partitions are not optimized"); } } PartitionInfo partitionInfo = targetTable.getPartitionInfo(); if (partitionInfo.isRangePartition() || partitionInfo.getType() == PartitionType.LIST) { targetTable.replaceTempPartitions(sourcePartitionNames, tmpPartitionNames, true, false); } else if (partitionInfo instanceof SinglePartitionInfo) { Preconditions.checkState(sourcePartitionNames.size() == 1 && tmpPartitionNames.size() == 1); targetTable.replacePartition(sourcePartitionNames.get(0), tmpPartitionNames.get(0)); } else { throw new AlterCancelException("partition type " + partitionInfo.getType() + " is not supported"); } ReplacePartitionOperationLog info = new ReplacePartitionOperationLog(db.getId(), targetTable.getId(), sourcePartitionNames, tmpPartitionNames, true, false, partitionInfo instanceof SinglePartitionInfo); GlobalStateMgr.getCurrentState().getEditLog().logReplaceTempPartition(info); sourceTablets.forEach(GlobalStateMgr.getCurrentInvertedIndex()::markTabletForceDelete); try { GlobalStateMgr.getCurrentColocateIndex().updateLakeTableColocationInfo(targetTable, true /* isJoin */, null /* expectGroupId */); } catch (DdlException e) { LOG.error("table {} update colocation info failed after insert overwrite, {}.", tableId, 
e.getMessage()); } targetTable.lastSchemaUpdateTime.set(System.currentTimeMillis()); if (allPartitionOptimized) { this.distributionInfo = optimizeClause.getDistributionDesc().toDistributionInfo(targetTable.getColumns()); targetTable.setDefaultDistributionInfo(distributionInfo); } targetTable.setState(OlapTableState.NORMAL); LOG.info("optimize job {} finish replace partitions dbId:{}, tableId:{}," + "source partitions:{}, tmp partitions:{}, allOptimized:{}", jobId, dbId, tableId, sourcePartitionNames, tmpPartitionNames, allPartitionOptimized); } catch (Exception e) { LOG.warn("replace partitions failed when insert overwrite into dbId:{}, tableId:{}", dbId, tableId, e); throw new AlterCancelException("replace partitions failed " + e); } }
LOG.error("table {} update colocation info failed after insert overwrite, {}.", tableId, e.getMessage());
private void onFinished(Database db, OlapTable targetTable) throws AlterCancelException { try { tmpPartitionNames = getTmpPartitionIds().stream() .map(partitionId -> targetTable.getPartition(partitionId).getName()) .collect(Collectors.toList()); Map<String, Long> partitionLastVersion = Maps.newHashMap(); optimizeClause.getSourcePartitionIds().stream() .map(partitionId -> targetTable.getPartition(partitionId)).forEach( partition -> { sourcePartitionNames.add(partition.getName()); partitionLastVersion.put(partition.getName(), partition.getSubPartitions().stream() .mapToLong(PhysicalPartition::getVisibleVersion).sum()); } ); boolean hasFailedTask = false; for (OptimizeTask rewriteTask : rewriteTasks) { if (rewriteTask.getOptimizeTaskState() == Constants.TaskRunState.FAILED || partitionLastVersion.get(rewriteTask.getPartitionName()) != rewriteTask.getLastVersion()) { LOG.info("optimize job {} rewrite task {} state {} failed or partition {} version {} change to {}", jobId, rewriteTask.getName(), rewriteTask.getOptimizeTaskState(), rewriteTask.getPartitionName(), rewriteTask.getLastVersion(), partitionLastVersion.get(rewriteTask.getPartitionName())); sourcePartitionNames.remove(rewriteTask.getPartitionName()); tmpPartitionNames.remove(rewriteTask.getTempPartitionName()); targetTable.dropTempPartition(rewriteTask.getTempPartitionName(), true); hasFailedTask = true; } } if (sourcePartitionNames.isEmpty()) { throw new AlterCancelException("all partitions rewrite failed"); } if (hasFailedTask && (optimizeClause.getKeysDesc() != null || optimizeClause.getSortKeys() != null)) { rewriteTasks.forEach( rewriteTask -> targetTable.dropTempPartition(rewriteTask.getTempPartitionName(), true)); throw new AlterCancelException("optimize keysType or sort keys failed since some partitions rewrite failed"); } Set<Tablet> sourceTablets = Sets.newHashSet(); sourcePartitionNames.forEach(name -> { Partition partition = targetTable.getPartition(name); for (MaterializedIndex index : 
partition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) { sourceTablets.addAll(index.getTablets()); } }); allPartitionOptimized = false; if (!hasFailedTask && optimizeClause.getDistributionDesc() != null) { Set<String> targetPartitionNames = targetTable.getPartitionNames(); long targetPartitionNum = targetPartitionNames.size(); targetPartitionNames.retainAll(sourcePartitionNames); if (targetPartitionNames.size() == targetPartitionNum && targetPartitionNum == sourcePartitionNames.size()) { allPartitionOptimized = true; } else if (optimizeClause.getDistributionDesc().getType() != targetTable.getDefaultDistributionInfo().getType()) { throw new AlterCancelException("can not change distribution type of target table" + "since partial partitions are not optimized"); } } PartitionInfo partitionInfo = targetTable.getPartitionInfo(); if (partitionInfo.isRangePartition() || partitionInfo.getType() == PartitionType.LIST) { targetTable.replaceTempPartitions(sourcePartitionNames, tmpPartitionNames, true, false); } else if (partitionInfo instanceof SinglePartitionInfo) { Preconditions.checkState(sourcePartitionNames.size() == 1 && tmpPartitionNames.size() == 1); targetTable.replacePartition(sourcePartitionNames.get(0), tmpPartitionNames.get(0)); } else { throw new AlterCancelException("partition type " + partitionInfo.getType() + " is not supported"); } ReplacePartitionOperationLog info = new ReplacePartitionOperationLog(db.getId(), targetTable.getId(), sourcePartitionNames, tmpPartitionNames, true, false, partitionInfo instanceof SinglePartitionInfo); GlobalStateMgr.getCurrentState().getEditLog().logReplaceTempPartition(info); sourceTablets.forEach(GlobalStateMgr.getCurrentInvertedIndex()::markTabletForceDelete); try { GlobalStateMgr.getCurrentColocateIndex().updateLakeTableColocationInfo(targetTable, true /* isJoin */, null /* expectGroupId */); } catch (DdlException e) { LOG.error("table {} update colocation info failed after insert overwrite, {}.", tableId, 
e.getMessage()); } targetTable.lastSchemaUpdateTime.set(System.currentTimeMillis()); if (allPartitionOptimized) { this.distributionInfo = optimizeClause.getDistributionDesc().toDistributionInfo(targetTable.getColumns()); targetTable.setDefaultDistributionInfo(distributionInfo); } targetTable.setState(OlapTableState.NORMAL); LOG.info("optimize job {} finish replace partitions dbId:{}, tableId:{}," + "source partitions:{}, tmp partitions:{}, allOptimized:{}", jobId, dbId, tableId, sourcePartitionNames, tmpPartitionNames, allPartitionOptimized); } catch (Exception e) { LOG.warn("replace partitions failed when insert overwrite into dbId:{}, tableId:{}", dbId, tableId, e); throw new AlterCancelException("replace partitions failed " + e); } }
class OptimizeJobV2 extends AlterJobV2 implements GsonPostProcessable { private static final Logger LOG = LogManager.getLogger(OptimizeJobV2.class); @SerializedName(value = "watershedTxnId") protected long watershedTxnId = -1; private final String postfix; @SerializedName(value = "tmpPartitionIds") private List<Long> tmpPartitionIds = Lists.newArrayList(); private OptimizeClause optimizeClause; private String dbName = ""; private Map<String, String> properties = Maps.newHashMap(); @SerializedName(value = "rewriteTasks") private List<OptimizeTask> rewriteTasks = Lists.newArrayList(); private int progress = 0; @SerializedName(value = "sourcePartitionNames") private List<String> sourcePartitionNames = Lists.newArrayList(); @SerializedName(value = "tmpPartitionNames") private List<String> tmpPartitionNames = Lists.newArrayList(); @SerializedName(value = "allPartitionOptimized") private Boolean allPartitionOptimized = false; @SerializedName(value = "distributionInfo") private DistributionInfo distributionInfo; @SerializedName(value = "optimizeOperation") private String optimizeOperation = ""; public OptimizeJobV2(long jobId, long dbId, long tableId, String tableName, long timeoutMs, OptimizeClause optimizeClause) { this(jobId, dbId, tableId, tableName, timeoutMs); this.optimizeClause = optimizeClause; } public OptimizeJobV2(long jobId, long dbId, long tableId, String tableName, long timeoutMs) { super(jobId, JobType.OPTIMIZE, dbId, tableId, tableName, timeoutMs); this.postfix = "_" + jobId; } public List<Long> getTmpPartitionIds() { return tmpPartitionIds; } public void setTmpPartitionIds(List<Long> tmpPartitionIds) { this.tmpPartitionIds = tmpPartitionIds; } public String getName() { return "optimize-" + this.postfix; } public Map<String, String> getProperties() { return properties; } public List<OptimizeTask> getOptimizeTasks() { return rewriteTasks; } private OlapTable checkAndGetTable(Database db, long tableId) throws AlterCancelException { Table table = 
db.getTable(tableId); if (table == null) { throw new AlterCancelException("table: " + tableId + " does not exist in database: " + db.getFullName()); } Preconditions.checkState(table instanceof OlapTable); return (OlapTable) table; } /** * runPendingJob(): * 1. Create all temp partitions and wait them finished. * 2. Get a new transaction id, then set job's state to WAITING_TXN */ @Override protected void runPendingJob() throws AlterCancelException { Preconditions.checkState(jobState == JobState.PENDING, jobState); LOG.info("begin to send create temp partitions. job: {}", jobId); Database db = GlobalStateMgr.getCurrentState().getDb(dbId); if (db == null) { throw new AlterCancelException("Database " + dbId + " does not exist"); } if (!checkTableStable(db)) { return; } if (optimizeClause == null) { throw new AlterCancelException("optimize clause is null since FE restart, job: " + jobId); } for (int i = 0; i < optimizeClause.getSourcePartitionIds().size(); ++i) { tmpPartitionIds.add(GlobalStateMgr.getCurrentState().getNextId()); } long createPartitionStartTimestamp = System.currentTimeMillis(); OlapTable targetTable; Locker locker = new Locker(); locker.lockDatabase(db, LockType.READ); try { targetTable = checkAndGetTable(db, tableId); } finally { locker.unLockDatabase(db, LockType.READ); } try { PartitionUtils.createAndAddTempPartitionsForTable(db, targetTable, postfix, optimizeClause.getSourcePartitionIds(), getTmpPartitionIds(), optimizeClause.getDistributionDesc()); LOG.debug("create temp partitions {} success. 
job: {}", getTmpPartitionIds(), jobId); } catch (Exception e) { LOG.warn("create temp partitions failed", e); throw new AlterCancelException("create temp partitions failed " + e); } long createPartitionElapse = System.currentTimeMillis() - createPartitionStartTimestamp; this.watershedTxnId = GlobalStateMgr.getCurrentGlobalTransactionMgr().getTransactionIDGenerator().getNextTransactionId(); this.jobState = JobState.WAITING_TXN; this.optimizeOperation = optimizeClause.toString(); span.setAttribute("createPartitionElapse", createPartitionElapse); span.setAttribute("watershedTxnId", this.watershedTxnId); span.addEvent("setWaitingTxn"); GlobalStateMgr.getCurrentState().getEditLog().logAlterJob(this); LOG.info("transfer optimize job {} state to {}, watershed txn_id: {}", jobId, this.jobState, watershedTxnId); } /** * runWaitingTxnJob(): * 1. Wait the transactions before the watershedTxnId to be finished. * 2. If all previous transactions finished, start insert into data to temp partitions. * 3. Change job state to RUNNING. */ @Override protected void runWaitingTxnJob() throws AlterCancelException { Preconditions.checkState(jobState == JobState.WAITING_TXN, jobState); if (optimizeClause == null) { throw new AlterCancelException("optimize clause is null since FE restart, job: " + jobId); } try { if (!isPreviousLoadFinished()) { LOG.info("wait transactions before {} to be finished, optimize job: {}", watershedTxnId, jobId); return; } } catch (AnalysisException e) { throw new AlterCancelException(e.getMessage()); } LOG.info("previous transactions are all finished, begin to optimize table. 
job: {}", jobId); List<String> tmpPartitionNames; List<String> partitionNames = Lists.newArrayList(); List<Long> partitionLastVersion = Lists.newArrayList(); List<String> tableCoumnNames = Lists.newArrayList(); Database db = GlobalStateMgr.getCurrentState().getDb(dbId); if (db == null) { throw new AlterCancelException("database id: " + dbId + " does not exist"); } Locker locker = new Locker(); if (!locker.lockAndCheckExist(db, LockType.READ)) { throw new AlterCancelException("insert overwrite commit failed because locking db: " + dbId + " failed"); } try { dbName = db.getFullName(); OlapTable targetTable = checkAndGetTable(db, tableId); if (getTmpPartitionIds().stream().anyMatch(id -> targetTable.getPartition(id) == null)) { throw new AlterCancelException("partitions changed during insert"); } tmpPartitionNames = getTmpPartitionIds().stream() .map(partitionId -> targetTable.getPartition(partitionId).getName()) .collect(Collectors.toList()); optimizeClause.getSourcePartitionIds().stream() .map(partitionId -> targetTable.getPartition(partitionId)).forEach( partition -> { partitionNames.add(partition.getName()); partitionLastVersion.add(partition.getSubPartitions().stream() .mapToLong(PhysicalPartition::getVisibleVersion).sum()); } ); tableCoumnNames = targetTable.getBaseSchema().stream().filter(column -> !column.isGeneratedColumn()) .map(Column::getName).collect(Collectors.toList()); } finally { locker.unLockDatabase(db, LockType.READ); } for (int i = 0; i < tmpPartitionNames.size(); ++i) { String tmpPartitionName = tmpPartitionNames.get(i); String partitionName = partitionNames.get(i); String rewriteSql = "insert into " + tableName + " TEMPORARY PARTITION (" + tmpPartitionName + ") select " + Joiner.on(", ").join(tableCoumnNames) + " from " + tableName + " partition (" + partitionName + ")"; String taskName = getName() + "_" + tmpPartitionName; OptimizeTask rewriteTask = TaskBuilder.buildOptimizeTask(taskName, properties, rewriteSql, dbName); 
rewriteTask.setPartitionName(partitionName); rewriteTask.setTempPartitionName(tmpPartitionName); rewriteTask.setLastVersion(partitionLastVersion.get(i)); rewriteTasks.add(rewriteTask); } TaskManager taskManager = GlobalStateMgr.getCurrentState().getTaskManager(); for (OptimizeTask rewriteTask : rewriteTasks) { try { taskManager.createTask(rewriteTask, false); taskManager.executeTask(rewriteTask.getName()); LOG.debug("create rewrite task {}", rewriteTask.toString()); } catch (DdlException e) { rewriteTask.setOptimizeTaskState(Constants.TaskRunState.FAILED); LOG.warn("create rewrite task failed", e); } } this.jobState = JobState.RUNNING; span.addEvent("setRunning"); LOG.info("transfer optimize job {} state to {}", jobId, this.jobState); } /** * runRunningJob() * 1. Wait insert into tasks to be finished. * 2. Replace partitions with temp partitions. * 3. Set job'state as FINISHED. */ @Override protected void runRunningJob() throws AlterCancelException { Preconditions.checkState(jobState == JobState.RUNNING, jobState); Database db = GlobalStateMgr.getCurrentState().getDb(dbId); if (db == null) { throw new AlterCancelException("Databasee " + dbId + " does not exist"); } OlapTable tbl = null; Locker locker = new Locker(); locker.lockDatabase(db, LockType.READ); try { tbl = (OlapTable) db.getTable(tableId); if (tbl == null) { throw new AlterCancelException("Table " + tableId + " does not exist"); } } finally { locker.unLockDatabase(db, LockType.READ); } boolean allFinished = true; int progress = 0; for (OptimizeTask rewriteTask : rewriteTasks) { if (rewriteTask.getOptimizeTaskState() == Constants.TaskRunState.FAILED || rewriteTask.getOptimizeTaskState() == Constants.TaskRunState.SUCCESS) { progress += 100 / rewriteTasks.size(); continue; } TaskRun taskRun = GlobalStateMgr.getCurrentState().getTaskManager().getTaskRunManager() .getRunnableTaskRun(rewriteTask.getId()); if (taskRun != null) { if (taskRun.getStatus() != null) { progress += taskRun.getStatus().getProgress() / 
rewriteTasks.size(); } allFinished = false; continue; } TaskRunStatus status = GlobalStateMgr.getCurrentState().getTaskManager() .getTaskRunManager().getTaskRunHistory().getTaskByName(rewriteTask.getName()); if (status == null) { allFinished = false; continue; } if (status.getState() == Constants.TaskRunState.FAILED) { LOG.warn("optimize task {} failed", rewriteTask.getName()); rewriteTask.setOptimizeTaskState(Constants.TaskRunState.FAILED); } progress += 100 / rewriteTasks.size(); } if (!allFinished) { LOG.debug("wait insert tasks to be finished, optimize job: {}", jobId); this.progress = progress; return; } this.progress = 99; LOG.debug("all insert overwrite tasks finished, optimize job: {}", jobId); locker.lockDatabase(db, LockType.WRITE); try { onFinished(db, tbl); } finally { locker.unLockDatabase(db, LockType.WRITE); } this.progress = 100; this.jobState = JobState.FINISHED; this.finishedTimeMs = System.currentTimeMillis(); GlobalStateMgr.getCurrentState().getEditLog().logAlterJob(this); LOG.info("optimize job finished: {}", jobId); this.span.end(); } @Override protected void runFinishedRewritingJob() { } /** * cancelImpl() can be called any time any place. * We need to clean any possible residual of this job. 
*/ @Override protected synchronized boolean cancelImpl(String errMsg) { if (jobState.isFinalState()) { return false; } cancelInternal(); jobState = JobState.CANCELLED; this.errMsg = errMsg; this.finishedTimeMs = System.currentTimeMillis(); LOG.info("cancel {} job {}, err: {}", this.type, jobId, errMsg); GlobalStateMgr.getCurrentState().getEditLog().logAlterJob(this); span.setStatus(StatusCode.ERROR, errMsg); span.end(); return true; } private void cancelInternal() { Database db = null; Locker locker = new Locker(); try { db = GlobalStateMgr.getCurrentState().getDb(dbId); if (db == null) { throw new AlterCancelException("database id:" + dbId + " does not exist"); } if (!locker.lockAndCheckExist(db, LockType.WRITE)) { throw new AlterCancelException("insert overwrite commit failed because locking db:" + dbId + " failed"); } } catch (Exception e) { LOG.warn("get and write lock database failed when cancel job: {}", jobId, e); return; } try { Table table = db.getTable(tableId); if (table == null) { throw new AlterCancelException("table:" + tableId + " does not exist in database:" + db.getFullName()); } Preconditions.checkState(table instanceof OlapTable); OlapTable targetTable = (OlapTable) table; Set<Tablet> sourceTablets = Sets.newHashSet(); if (getTmpPartitionIds() != null) { for (long pid : getTmpPartitionIds()) { LOG.info("optimize job {} drop temp partition:{}", jobId, pid); Partition partition = targetTable.getPartition(pid); if (partition != null) { for (MaterializedIndex index : partition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) { sourceTablets.addAll(index.getTablets()); } targetTable.dropTempPartition(partition.getName(), true); } else { LOG.warn("partition {} is null", pid); } } } sourceTablets.forEach(GlobalStateMgr.getCurrentInvertedIndex()::markTabletForceDelete); targetTable.setState(OlapTableState.NORMAL); } catch (Exception e) { LOG.warn("exception when cancel optimize job.", e); } finally { locker.unLockDatabase(db, LockType.WRITE); 
} } protected boolean isPreviousLoadFinished() throws AnalysisException { return GlobalStateMgr.getCurrentGlobalTransactionMgr() .isPreviousTransactionsFinished(watershedTxnId, dbId, Lists.newArrayList(tableId)); } /** * Replay job in PENDING state. * Should replay all changes before this job's state transfer to PENDING. */ private void replayPending(OptimizeJobV2 replayedJob) { Database db = GlobalStateMgr.getCurrentState().getDb(dbId); if (db == null) { return; } Locker locker = new Locker(); locker.lockDatabase(db, LockType.WRITE); try { OlapTable tbl = (OlapTable) db.getTable(tableId); if (tbl == null) { return; } tbl.setState(OlapTableState.SCHEMA_CHANGE); } finally { locker.unLockDatabase(db, LockType.WRITE); } this.jobState = JobState.PENDING; this.watershedTxnId = replayedJob.watershedTxnId; this.optimizeOperation = replayedJob.optimizeOperation; LOG.info("replay pending optimize job: {}", jobId); } /** * Replay job in WAITING_TXN state. * Should replay all changes in runPendingJob() */ private void replayWaitingTxn(OptimizeJobV2 replayedJob) { Database db = GlobalStateMgr.getCurrentState().getDb(dbId); if (db == null) { return; } OlapTable tbl = null; Locker locker = new Locker(); locker.lockDatabase(db, LockType.WRITE); try { tbl = (OlapTable) db.getTable(tableId); if (tbl == null) { return; } } finally { locker.unLockDatabase(db, LockType.WRITE); } for (long id : replayedJob.getTmpPartitionIds()) { tmpPartitionIds.add(id); } this.jobState = JobState.WAITING_TXN; this.watershedTxnId = replayedJob.watershedTxnId; this.optimizeOperation = replayedJob.optimizeOperation; LOG.info("replay waiting txn optimize job: {}", jobId); } private void onReplayFinished(OptimizeJobV2 replayedJob, OlapTable targetTable) { this.sourcePartitionNames = replayedJob.sourcePartitionNames; this.tmpPartitionNames = replayedJob.tmpPartitionNames; this.allPartitionOptimized = replayedJob.allPartitionOptimized; this.optimizeOperation = replayedJob.optimizeOperation; Set<Tablet> 
sourceTablets = Sets.newHashSet(); for (long id : replayedJob.getTmpPartitionIds()) { Partition partition = targetTable.getPartition(id); if (partition != null) { for (MaterializedIndex index : partition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) { sourceTablets.addAll(index.getTablets()); } targetTable.dropTempPartition(partition.getName(), true); } } sourceTablets.forEach(GlobalStateMgr.getCurrentInvertedIndex()::markTabletForceDelete); if (allPartitionOptimized) { this.distributionInfo = replayedJob.distributionInfo; LOG.debug("set distribution info to table: {}", distributionInfo); targetTable.setDefaultDistributionInfo(distributionInfo); } targetTable.setState(OlapTableState.NORMAL); LOG.info("finish replay optimize job {} dbId:{}, tableId:{}," + "source partitions:{}, tmp partitions:{}, allOptimized:{}", jobId, dbId, tableId, sourcePartitionNames, tmpPartitionNames, allPartitionOptimized); } /** * Replay job in FINISHED state. * Should replay all changes in runRuningJob() */ private void replayFinished(OptimizeJobV2 replayedJob) { Database db = GlobalStateMgr.getCurrentState().getDb(dbId); if (db != null) { Locker locker = new Locker(); locker.lockDatabase(db, LockType.WRITE); try { OlapTable tbl = (OlapTable) db.getTable(tableId); if (tbl != null) { onReplayFinished(replayedJob, tbl); } } finally { locker.unLockDatabase(db, LockType.WRITE); } } this.jobState = JobState.FINISHED; this.finishedTimeMs = replayedJob.finishedTimeMs; LOG.info("replay finished optimize job: {}", jobId); } /** * Replay job in CANCELLED state. 
*/ private void replayCancelled(OptimizeJobV2 replayedJob) { cancelInternal(); this.jobState = JobState.CANCELLED; this.finishedTimeMs = replayedJob.finishedTimeMs; this.errMsg = replayedJob.errMsg; LOG.info("replay cancelled optimize job: {}", jobId); } @Override public void replay(AlterJobV2 replayedJob) { OptimizeJobV2 replayedOptimizeJob = (OptimizeJobV2) replayedJob; switch (replayedJob.jobState) { case PENDING: replayPending(replayedOptimizeJob); break; case WAITING_TXN: replayWaitingTxn(replayedOptimizeJob); break; case FINISHED: replayFinished(replayedOptimizeJob); break; case CANCELLED: replayCancelled(replayedOptimizeJob); break; default: break; } } @Override protected void getInfo(List<List<Comparable>> infos) { List<Comparable> info = Lists.newArrayList(); info.add(jobId); info.add(tableName); info.add(TimeUtils.longToTimeString(createTimeMs)); info.add(TimeUtils.longToTimeString(finishedTimeMs)); info.add(optimizeOperation != null ? optimizeOperation : ""); info.add(watershedTxnId); info.add(jobState.name()); info.add(errMsg); info.add(progress); info.add(timeoutMs / 1000); infos.add(info); } public void setJobState(JobState jobState) { this.jobState = jobState; } @Override public void write(DataOutput out) throws IOException { String json = GsonUtils.GSON.toJson(this, OptimizeJobV2.class); Text.writeString(out, json); } @Override public void gsonPostProcess() throws IOException { if (jobState != JobState.PENDING) { return; } } @Override public Optional<Long> getTransactionId() { return watershedTxnId < 0 ? Optional.empty() : Optional.of(watershedTxnId); } }
class OptimizeJobV2 extends AlterJobV2 implements GsonPostProcessable { private static final Logger LOG = LogManager.getLogger(OptimizeJobV2.class); @SerializedName(value = "watershedTxnId") protected long watershedTxnId = -1; private final String postfix; @SerializedName(value = "tmpPartitionIds") private List<Long> tmpPartitionIds = Lists.newArrayList(); private OptimizeClause optimizeClause; private String dbName = ""; private Map<String, String> properties = Maps.newHashMap(); @SerializedName(value = "rewriteTasks") private List<OptimizeTask> rewriteTasks = Lists.newArrayList(); private int progress = 0; @SerializedName(value = "sourcePartitionNames") private List<String> sourcePartitionNames = Lists.newArrayList(); @SerializedName(value = "tmpPartitionNames") private List<String> tmpPartitionNames = Lists.newArrayList(); @SerializedName(value = "allPartitionOptimized") private Boolean allPartitionOptimized = false; @SerializedName(value = "distributionInfo") private DistributionInfo distributionInfo; @SerializedName(value = "optimizeOperation") private String optimizeOperation = ""; public OptimizeJobV2(long jobId, long dbId, long tableId, String tableName, long timeoutMs, OptimizeClause optimizeClause) { this(jobId, dbId, tableId, tableName, timeoutMs); this.optimizeClause = optimizeClause; } public OptimizeJobV2(long jobId, long dbId, long tableId, String tableName, long timeoutMs) { super(jobId, JobType.OPTIMIZE, dbId, tableId, tableName, timeoutMs); this.postfix = "_" + jobId; } public List<Long> getTmpPartitionIds() { return tmpPartitionIds; } public void setTmpPartitionIds(List<Long> tmpPartitionIds) { this.tmpPartitionIds = tmpPartitionIds; } public String getName() { return "optimize-" + this.postfix; } public Map<String, String> getProperties() { return properties; } public List<OptimizeTask> getOptimizeTasks() { return rewriteTasks; } private OlapTable checkAndGetTable(Database db, long tableId) throws AlterCancelException { Table table = 
db.getTable(tableId); if (table == null) { throw new AlterCancelException("table: " + tableId + " does not exist in database: " + db.getFullName()); } Preconditions.checkState(table instanceof OlapTable); return (OlapTable) table; } /** * runPendingJob(): * 1. Create all temp partitions and wait them finished. * 2. Get a new transaction id, then set job's state to WAITING_TXN */ @Override protected void runPendingJob() throws AlterCancelException { Preconditions.checkState(jobState == JobState.PENDING, jobState); LOG.info("begin to send create temp partitions. job: {}", jobId); Database db = GlobalStateMgr.getCurrentState().getDb(dbId); if (db == null) { throw new AlterCancelException("Database " + dbId + " does not exist"); } if (!checkTableStable(db)) { return; } if (optimizeClause == null) { throw new AlterCancelException("optimize clause is null since FE restart, job: " + jobId); } for (int i = 0; i < optimizeClause.getSourcePartitionIds().size(); ++i) { tmpPartitionIds.add(GlobalStateMgr.getCurrentState().getNextId()); } long createPartitionStartTimestamp = System.currentTimeMillis(); OlapTable targetTable; Locker locker = new Locker(); locker.lockDatabase(db, LockType.READ); try { targetTable = checkAndGetTable(db, tableId); } finally { locker.unLockDatabase(db, LockType.READ); } try { PartitionUtils.createAndAddTempPartitionsForTable(db, targetTable, postfix, optimizeClause.getSourcePartitionIds(), getTmpPartitionIds(), optimizeClause.getDistributionDesc()); LOG.debug("create temp partitions {} success. 
job: {}", getTmpPartitionIds(), jobId); } catch (Exception e) { LOG.warn("create temp partitions failed", e); throw new AlterCancelException("create temp partitions failed " + e); } long createPartitionElapse = System.currentTimeMillis() - createPartitionStartTimestamp; this.watershedTxnId = GlobalStateMgr.getCurrentGlobalTransactionMgr().getTransactionIDGenerator().getNextTransactionId(); this.jobState = JobState.WAITING_TXN; this.optimizeOperation = optimizeClause.toString(); span.setAttribute("createPartitionElapse", createPartitionElapse); span.setAttribute("watershedTxnId", this.watershedTxnId); span.addEvent("setWaitingTxn"); GlobalStateMgr.getCurrentState().getEditLog().logAlterJob(this); LOG.info("transfer optimize job {} state to {}, watershed txn_id: {}", jobId, this.jobState, watershedTxnId); } /** * runWaitingTxnJob(): * 1. Wait the transactions before the watershedTxnId to be finished. * 2. If all previous transactions finished, start insert into data to temp partitions. * 3. Change job state to RUNNING. */ @Override protected void runWaitingTxnJob() throws AlterCancelException { Preconditions.checkState(jobState == JobState.WAITING_TXN, jobState); if (optimizeClause == null) { throw new AlterCancelException("optimize clause is null since FE restart, job: " + jobId); } try { if (!isPreviousLoadFinished()) { LOG.info("wait transactions before {} to be finished, optimize job: {}", watershedTxnId, jobId); return; } } catch (AnalysisException e) { throw new AlterCancelException(e.getMessage()); } LOG.info("previous transactions are all finished, begin to optimize table. 
job: {}", jobId); List<String> tmpPartitionNames; List<String> partitionNames = Lists.newArrayList(); List<Long> partitionLastVersion = Lists.newArrayList(); List<String> tableCoumnNames = Lists.newArrayList(); Database db = GlobalStateMgr.getCurrentState().getDb(dbId); if (db == null) { throw new AlterCancelException("database id: " + dbId + " does not exist"); } Locker locker = new Locker(); if (!locker.lockAndCheckExist(db, LockType.READ)) { throw new AlterCancelException("insert overwrite commit failed because locking db: " + dbId + " failed"); } try { dbName = db.getFullName(); OlapTable targetTable = checkAndGetTable(db, tableId); if (getTmpPartitionIds().stream().anyMatch(id -> targetTable.getPartition(id) == null)) { throw new AlterCancelException("partitions changed during insert"); } tmpPartitionNames = getTmpPartitionIds().stream() .map(partitionId -> targetTable.getPartition(partitionId).getName()) .collect(Collectors.toList()); optimizeClause.getSourcePartitionIds().stream() .map(partitionId -> targetTable.getPartition(partitionId)).forEach( partition -> { partitionNames.add(partition.getName()); partitionLastVersion.add(partition.getSubPartitions().stream() .mapToLong(PhysicalPartition::getVisibleVersion).sum()); } ); tableCoumnNames = targetTable.getBaseSchema().stream().filter(column -> !column.isGeneratedColumn()) .map(Column::getName).collect(Collectors.toList()); } finally { locker.unLockDatabase(db, LockType.READ); } for (int i = 0; i < tmpPartitionNames.size(); ++i) { String tmpPartitionName = tmpPartitionNames.get(i); String partitionName = partitionNames.get(i); String rewriteSql = "insert into " + tableName + " TEMPORARY PARTITION (" + tmpPartitionName + ") select " + Joiner.on(", ").join(tableCoumnNames) + " from " + tableName + " partition (" + partitionName + ")"; String taskName = getName() + "_" + tmpPartitionName; OptimizeTask rewriteTask = TaskBuilder.buildOptimizeTask(taskName, properties, rewriteSql, dbName); 
rewriteTask.setPartitionName(partitionName); rewriteTask.setTempPartitionName(tmpPartitionName); rewriteTask.setLastVersion(partitionLastVersion.get(i)); rewriteTasks.add(rewriteTask); } TaskManager taskManager = GlobalStateMgr.getCurrentState().getTaskManager(); for (OptimizeTask rewriteTask : rewriteTasks) { try { taskManager.createTask(rewriteTask, false); taskManager.executeTask(rewriteTask.getName()); LOG.debug("create rewrite task {}", rewriteTask.toString()); } catch (DdlException e) { rewriteTask.setOptimizeTaskState(Constants.TaskRunState.FAILED); LOG.warn("create rewrite task failed", e); } } this.jobState = JobState.RUNNING; span.addEvent("setRunning"); LOG.info("transfer optimize job {} state to {}", jobId, this.jobState); } /** * runRunningJob() * 1. Wait insert into tasks to be finished. * 2. Replace partitions with temp partitions. * 3. Set job'state as FINISHED. */ @Override protected void runRunningJob() throws AlterCancelException { Preconditions.checkState(jobState == JobState.RUNNING, jobState); Database db = GlobalStateMgr.getCurrentState().getDb(dbId); if (db == null) { throw new AlterCancelException("Databasee " + dbId + " does not exist"); } OlapTable tbl = null; Locker locker = new Locker(); locker.lockDatabase(db, LockType.READ); try { tbl = (OlapTable) db.getTable(tableId); if (tbl == null) { throw new AlterCancelException("Table " + tableId + " does not exist"); } } finally { locker.unLockDatabase(db, LockType.READ); } boolean allFinished = true; int progress = 0; for (OptimizeTask rewriteTask : rewriteTasks) { if (rewriteTask.getOptimizeTaskState() == Constants.TaskRunState.FAILED || rewriteTask.getOptimizeTaskState() == Constants.TaskRunState.SUCCESS) { progress += 100 / rewriteTasks.size(); continue; } TaskRun taskRun = GlobalStateMgr.getCurrentState().getTaskManager().getTaskRunManager() .getRunnableTaskRun(rewriteTask.getId()); if (taskRun != null) { if (taskRun.getStatus() != null) { progress += taskRun.getStatus().getProgress() / 
rewriteTasks.size(); } allFinished = false; continue; } TaskRunStatus status = GlobalStateMgr.getCurrentState().getTaskManager() .getTaskRunManager().getTaskRunHistory().getTaskByName(rewriteTask.getName()); if (status == null) { allFinished = false; continue; } if (status.getState() == Constants.TaskRunState.FAILED) { LOG.warn("optimize task {} failed", rewriteTask.getName()); rewriteTask.setOptimizeTaskState(Constants.TaskRunState.FAILED); } progress += 100 / rewriteTasks.size(); } if (!allFinished) { LOG.debug("wait insert tasks to be finished, optimize job: {}", jobId); this.progress = progress; return; } this.progress = 99; LOG.debug("all insert overwrite tasks finished, optimize job: {}", jobId); locker.lockDatabase(db, LockType.WRITE); try { onFinished(db, tbl); } finally { locker.unLockDatabase(db, LockType.WRITE); } this.progress = 100; this.jobState = JobState.FINISHED; this.finishedTimeMs = System.currentTimeMillis(); GlobalStateMgr.getCurrentState().getEditLog().logAlterJob(this); LOG.info("optimize job finished: {}", jobId); this.span.end(); } @Override protected void runFinishedRewritingJob() { } /** * cancelImpl() can be called any time any place. * We need to clean any possible residual of this job. 
*/ @Override protected synchronized boolean cancelImpl(String errMsg) { if (jobState.isFinalState()) { return false; } cancelInternal(); jobState = JobState.CANCELLED; this.errMsg = errMsg; this.finishedTimeMs = System.currentTimeMillis(); LOG.info("cancel {} job {}, err: {}", this.type, jobId, errMsg); GlobalStateMgr.getCurrentState().getEditLog().logAlterJob(this); span.setStatus(StatusCode.ERROR, errMsg); span.end(); return true; } private void cancelInternal() { Database db = null; Locker locker = new Locker(); try { db = GlobalStateMgr.getCurrentState().getDb(dbId); if (db == null) { throw new AlterCancelException("database id:" + dbId + " does not exist"); } if (!locker.lockAndCheckExist(db, LockType.WRITE)) { throw new AlterCancelException("insert overwrite commit failed because locking db:" + dbId + " failed"); } } catch (Exception e) { LOG.warn("get and write lock database failed when cancel job: {}", jobId, e); return; } try { Table table = db.getTable(tableId); if (table == null) { throw new AlterCancelException("table:" + tableId + " does not exist in database:" + db.getFullName()); } Preconditions.checkState(table instanceof OlapTable); OlapTable targetTable = (OlapTable) table; Set<Tablet> sourceTablets = Sets.newHashSet(); if (getTmpPartitionIds() != null) { for (long pid : getTmpPartitionIds()) { LOG.info("optimize job {} drop temp partition:{}", jobId, pid); Partition partition = targetTable.getPartition(pid); if (partition != null) { for (MaterializedIndex index : partition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) { sourceTablets.addAll(index.getTablets()); } targetTable.dropTempPartition(partition.getName(), true); } else { LOG.warn("partition {} is null", pid); } } } sourceTablets.forEach(GlobalStateMgr.getCurrentInvertedIndex()::markTabletForceDelete); targetTable.setState(OlapTableState.NORMAL); } catch (Exception e) { LOG.warn("exception when cancel optimize job.", e); } finally { locker.unLockDatabase(db, LockType.WRITE); 
} } protected boolean isPreviousLoadFinished() throws AnalysisException { return GlobalStateMgr.getCurrentGlobalTransactionMgr() .isPreviousTransactionsFinished(watershedTxnId, dbId, Lists.newArrayList(tableId)); } /** * Replay job in PENDING state. * Should replay all changes before this job's state transfer to PENDING. */ private void replayPending(OptimizeJobV2 replayedJob) { Database db = GlobalStateMgr.getCurrentState().getDb(dbId); if (db == null) { return; } Locker locker = new Locker(); locker.lockDatabase(db, LockType.WRITE); try { OlapTable tbl = (OlapTable) db.getTable(tableId); if (tbl == null) { return; } tbl.setState(OlapTableState.SCHEMA_CHANGE); } finally { locker.unLockDatabase(db, LockType.WRITE); } this.jobState = JobState.PENDING; this.watershedTxnId = replayedJob.watershedTxnId; this.optimizeOperation = replayedJob.optimizeOperation; LOG.info("replay pending optimize job: {}", jobId); } /** * Replay job in WAITING_TXN state. * Should replay all changes in runPendingJob() */ private void replayWaitingTxn(OptimizeJobV2 replayedJob) { Database db = GlobalStateMgr.getCurrentState().getDb(dbId); if (db == null) { return; } OlapTable tbl = null; Locker locker = new Locker(); locker.lockDatabase(db, LockType.WRITE); try { tbl = (OlapTable) db.getTable(tableId); if (tbl == null) { return; } } finally { locker.unLockDatabase(db, LockType.WRITE); } for (long id : replayedJob.getTmpPartitionIds()) { tmpPartitionIds.add(id); } this.jobState = JobState.WAITING_TXN; this.watershedTxnId = replayedJob.watershedTxnId; this.optimizeOperation = replayedJob.optimizeOperation; LOG.info("replay waiting txn optimize job: {}", jobId); } private void onReplayFinished(OptimizeJobV2 replayedJob, OlapTable targetTable) { this.sourcePartitionNames = replayedJob.sourcePartitionNames; this.tmpPartitionNames = replayedJob.tmpPartitionNames; this.allPartitionOptimized = replayedJob.allPartitionOptimized; this.optimizeOperation = replayedJob.optimizeOperation; Set<Tablet> 
sourceTablets = Sets.newHashSet(); for (long id : replayedJob.getTmpPartitionIds()) { Partition partition = targetTable.getPartition(id); if (partition != null) { for (MaterializedIndex index : partition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) { sourceTablets.addAll(index.getTablets()); } targetTable.dropTempPartition(partition.getName(), true); } } sourceTablets.forEach(GlobalStateMgr.getCurrentInvertedIndex()::markTabletForceDelete); if (allPartitionOptimized) { this.distributionInfo = replayedJob.distributionInfo; LOG.debug("set distribution info to table: {}", distributionInfo); targetTable.setDefaultDistributionInfo(distributionInfo); } targetTable.setState(OlapTableState.NORMAL); LOG.info("finish replay optimize job {} dbId:{}, tableId:{}," + "source partitions:{}, tmp partitions:{}, allOptimized:{}", jobId, dbId, tableId, sourcePartitionNames, tmpPartitionNames, allPartitionOptimized); } /** * Replay job in FINISHED state. * Should replay all changes in runRuningJob() */ private void replayFinished(OptimizeJobV2 replayedJob) { Database db = GlobalStateMgr.getCurrentState().getDb(dbId); if (db != null) { Locker locker = new Locker(); locker.lockDatabase(db, LockType.WRITE); try { OlapTable tbl = (OlapTable) db.getTable(tableId); if (tbl != null) { onReplayFinished(replayedJob, tbl); } } finally { locker.unLockDatabase(db, LockType.WRITE); } } this.jobState = JobState.FINISHED; this.finishedTimeMs = replayedJob.finishedTimeMs; LOG.info("replay finished optimize job: {}", jobId); } /** * Replay job in CANCELLED state. 
*/ private void replayCancelled(OptimizeJobV2 replayedJob) { cancelInternal(); this.jobState = JobState.CANCELLED; this.finishedTimeMs = replayedJob.finishedTimeMs; this.errMsg = replayedJob.errMsg; LOG.info("replay cancelled optimize job: {}", jobId); } @Override public void replay(AlterJobV2 replayedJob) { OptimizeJobV2 replayedOptimizeJob = (OptimizeJobV2) replayedJob; switch (replayedJob.jobState) { case PENDING: replayPending(replayedOptimizeJob); break; case WAITING_TXN: replayWaitingTxn(replayedOptimizeJob); break; case FINISHED: replayFinished(replayedOptimizeJob); break; case CANCELLED: replayCancelled(replayedOptimizeJob); break; default: break; } } @Override protected void getInfo(List<List<Comparable>> infos) { List<Comparable> info = Lists.newArrayList(); info.add(jobId); info.add(tableName); info.add(TimeUtils.longToTimeString(createTimeMs)); info.add(TimeUtils.longToTimeString(finishedTimeMs)); info.add(optimizeOperation != null ? optimizeOperation : ""); info.add(watershedTxnId); info.add(jobState.name()); info.add(errMsg); info.add(progress); info.add(timeoutMs / 1000); infos.add(info); } public void setJobState(JobState jobState) { this.jobState = jobState; } @Override public void write(DataOutput out) throws IOException { String json = GsonUtils.GSON.toJson(this, OptimizeJobV2.class); Text.writeString(out, json); } @Override public void gsonPostProcess() throws IOException { if (jobState != JobState.PENDING) { return; } } @Override public Optional<Long> getTransactionId() { return watershedTxnId < 0 ? Optional.empty() : Optional.of(watershedTxnId); } }
When is this the case?
/**
 * Fails validation when a container cluster in a hosted production zone exposes
 * write-capable handlers or servlets without access control enabled for writes.
 *
 * @param model       the model under validation
 * @param deployState deployment context (hosted flag, zone)
 * @throws IllegalArgumentException listing the offending cluster names
 */
public void validate(VespaModel model, DeployState deployState) {
    // Only enforced for hosted production zones; everything else passes unconditionally.
    if ( ! deployState.isHosted() || ! deployState.zone().environment().isProduction()) return;

    List<String> offendingClusters = new ArrayList<>();
    for (ContainerCluster cluster : model.getContainerClusters().values()) {
        // Writes are protected only when an http element, an access-control element
        // and writeEnabled are all present/true (short-circuits on missing http).
        boolean writeProtected = cluster.getHttp() != null
                                 && cluster.getHttp().getAccessControl().isPresent()
                                 && cluster.getHttp().getAccessControl().get().writeEnabled;
        // Unprotected cluster with a write surface (handler needing protection, or any servlet).
        if ( ! writeProtected
             && (hasHandlerThatNeedsProtection(cluster) || ! cluster.getAllServlets().isEmpty()))
            offendingClusters.add(cluster.getName());
    }
    if ( ! offendingClusters.isEmpty())
        throw new IllegalArgumentException(
                "Access-control must be enabled for write operations to container clusters in production zones: " +
                mkString(offendingClusters, "[", ", ", "]."));
}
if (cluster.getHttp() == null
/**
 * Fails validation when a container cluster in a hosted production zone exposes
 * write-capable handlers or servlets without access control enabled for writes.
 * Only applies to default application types, and only in the CD system.
 *
 * @param model       the model under validation
 * @param deployState deployment context (hosted flag, zone, system)
 * @throws IllegalArgumentException listing the offending cluster names
 */
public void validate(VespaModel model, DeployState deployState) {
    // Guard clauses: check applies only to hosted production zones of default
    // application types, and (for now) only in the CD system.
    if ( ! deployState.isHosted()) return;
    if ( ! deployState.zone().environment().isProduction()) return;
    if (model.getAdmin().getApplicationType() != ApplicationType.DEFAULT) return;
    if (deployState.zone().system() != SystemName.cd) return;

    List<String> offenders = new ArrayList<>();
    for (ContainerCluster cluster : model.getContainerClusters().values()) {
        // Writes are protected only when an http element, an access-control element
        // and writeEnabled are all present/true (short-circuits on missing http).
        boolean accessControlEnabledForWrites = cluster.getHttp() != null
                                                && cluster.getHttp().getAccessControl().isPresent()
                                                && cluster.getHttp().getAccessControl().get().writeEnabled;
        if (accessControlEnabledForWrites) continue;
        // Unprotected cluster with a write surface (handler needing protection, or any servlet).
        if (hasHandlerThatNeedsProtection(cluster) || ! cluster.getAllServlets().isEmpty())
            offenders.add(cluster.getName());
    }
    if ( ! offenders.isEmpty())
        throw new IllegalArgumentException(
                "Access-control must be enabled for write operations to container clusters in production zones: " +
                mkString(offenders, "[", ", ", "]."));
}
class AccessControlValidator extends Validator { @Override private boolean hasHandlerThatNeedsProtection(ContainerCluster cluster) { return cluster.getHandlers().stream().anyMatch(this::handlerNeedsProtection); } private boolean handlerNeedsProtection(Handler<?> handler) { return ! isBuiltinGetOnly(handler) && hasNonMbusBinding(handler); } private boolean hasNonMbusBinding(Handler<?> handler) { return handler.getServerBindings().stream().anyMatch(binding -> ! binding.startsWith("mbus")); } }
class AccessControlValidator extends Validator { @Override private boolean hasHandlerThatNeedsProtection(ContainerCluster cluster) { return cluster.getHandlers().stream().anyMatch(this::handlerNeedsProtection); } private boolean handlerNeedsProtection(Handler<?> handler) { return ! isBuiltinGetOnly(handler) && hasNonMbusBinding(handler); } private boolean hasNonMbusBinding(Handler<?> handler) { return handler.getServerBindings().stream().anyMatch(binding -> ! binding.startsWith("mbus")); } }
When there is no 'http' element in services.xml. It's covered in the unit test `cluster_with_handler_fails_validation_without_http_element`
/**
 * Fails validation when a container cluster in a hosted production zone exposes
 * write-capable handlers or servlets without access control enabled for writes.
 *
 * @param model       the model under validation
 * @param deployState deployment context (hosted flag, zone)
 * @throws IllegalArgumentException listing the offending cluster names
 */
public void validate(VespaModel model, DeployState deployState) {
    // Only enforced for hosted production zones.
    if (deployState.isHosted() && deployState.zone().environment().isProduction()) {
        List<String> offendingClusters = new ArrayList<>();
        for (ContainerCluster cluster : model.getContainerClusters().values()) {
            // No http element in services.xml, no access-control element, or
            // write protection disabled...
            if (cluster.getHttp() == null
                || ! cluster.getHttp().getAccessControl().isPresent()
                || ! cluster.getHttp().getAccessControl().get().writeEnabled)
                // ...combined with a write surface (handler needing protection,
                // or any servlet) makes the cluster offending.
                if (hasHandlerThatNeedsProtection(cluster) || ! cluster.getAllServlets().isEmpty())
                    offendingClusters.add(cluster.getName());
        }
        if (! offendingClusters.isEmpty())
            throw new IllegalArgumentException(
                    "Access-control must be enabled for write operations to container clusters in production zones: " +
                    mkString(offendingClusters, "[", ", ", "]."));
    }
}
if (cluster.getHttp() == null
/**
 * Fails validation when a container cluster in a hosted production zone exposes
 * write-capable handlers or servlets without access control enabled for writes.
 * Only applies to default application types, and only in the CD system.
 *
 * @param model       the model under validation
 * @param deployState deployment context (hosted flag, zone, system)
 * @throws IllegalArgumentException listing the offending cluster names
 */
public void validate(VespaModel model, DeployState deployState) {
    // Guard clauses: only hosted production zones are checked.
    if (! deployState.isHosted()) return;
    if (! deployState.zone().environment().isProduction()) return;
    // Skip non-default application types (presumably internal/infrastructure apps — TODO confirm).
    if (model.getAdmin().getApplicationType() != ApplicationType.DEFAULT) return;
    // NOTE(review): enforcement limited to the CD system for now — looks like a staged rollout; confirm.
    if (deployState.zone().system() != SystemName.cd) return;

    List<String> offendingClusters = new ArrayList<>();
    for (ContainerCluster cluster : model.getContainerClusters().values()) {
        // No http element in services.xml, no access-control element, or
        // write protection disabled...
        if (cluster.getHttp() == null
            || ! cluster.getHttp().getAccessControl().isPresent()
            || ! cluster.getHttp().getAccessControl().get().writeEnabled)
            // ...combined with a write surface (handler needing protection,
            // or any servlet) makes the cluster offending.
            if (hasHandlerThatNeedsProtection(cluster) || ! cluster.getAllServlets().isEmpty())
                offendingClusters.add(cluster.getName());
    }
    if (! offendingClusters.isEmpty())
        throw new IllegalArgumentException(
                "Access-control must be enabled for write operations to container clusters in production zones: " +
                mkString(offendingClusters, "[", ", ", "]."));
}
class AccessControlValidator extends Validator { @Override private boolean hasHandlerThatNeedsProtection(ContainerCluster cluster) { return cluster.getHandlers().stream().anyMatch(this::handlerNeedsProtection); } private boolean handlerNeedsProtection(Handler<?> handler) { return ! isBuiltinGetOnly(handler) && hasNonMbusBinding(handler); } private boolean hasNonMbusBinding(Handler<?> handler) { return handler.getServerBindings().stream().anyMatch(binding -> ! binding.startsWith("mbus")); } }
class AccessControlValidator extends Validator { @Override private boolean hasHandlerThatNeedsProtection(ContainerCluster cluster) { return cluster.getHandlers().stream().anyMatch(this::handlerNeedsProtection); } private boolean handlerNeedsProtection(Handler<?> handler) { return ! isBuiltinGetOnly(handler) && hasNonMbusBinding(handler); } private boolean hasNonMbusBinding(Handler<?> handler) { return handler.getServerBindings().stream().anyMatch(binding -> ! binding.startsWith("mbus")); } }
Feels cleaner to me if this were moved after the loop which sets the elements in `bucketSpacesJson`
/**
 * Serializes a distribution state to JSON: the baseline cluster state under
 * "baseline", plus one {name, state} object per bucket space under "bucket-spaces".
 */
private static JSONObject distributionStateToJson(DistributionState state) throws Exception {
    // Build the bucket-space array first, then attach everything to the result.
    JSONArray spaces = new JSONArray();
    for (Map.Entry<String, String> bucketSpace : state.getBucketSpaceStates().entrySet()) {
        spaces.put(new JSONObject()
                .put("name", bucketSpace.getKey())
                .put("state", bucketSpace.getValue()));
    }
    JSONObject result = new JSONObject();
    result.put("baseline", state.getBaselineState());
    result.put("bucket-spaces", spaces);
    return result;
}
result.put("bucket-spaces", bucketSpacesJson);
/**
 * Serializes a distribution state to JSON: the baseline cluster state under
 * "baseline", plus one {name, state} object per bucket space under "bucket-spaces".
 */
private static JSONObject distributionStateToJson(DistributionState state) throws Exception {
    JSONObject result = new JSONObject();
    result.put("baseline", state.getBaselineState());
    JSONArray bucketSpacesJson = new JSONArray();
    for (Map.Entry<String, String> entry : state.getBucketSpaceStates().entrySet()) {
        JSONObject bucketSpaceJson = new JSONObject();
        bucketSpaceJson.put("name", entry.getKey());
        bucketSpaceJson.put("state", entry.getValue());
        bucketSpacesJson.put(bucketSpaceJson);
    }
    // Attached after population for readability; JSONArray is mutable, so either order works.
    result.put("bucket-spaces", bucketSpacesJson);
    return result;
}
class JsonWriter { private String pathPrefix = "/"; public JsonWriter() { } public void setDefaultPathPrefix(String defaultPathPrefix) { if (defaultPathPrefix.isEmpty() || defaultPathPrefix.charAt(0) != '/') { throw new IllegalArgumentException("Path prefix must start with a slash"); } this.pathPrefix = defaultPathPrefix; } public JSONObject createJson(UnitResponse data) throws Exception { JSONObject json = new JSONObject(); fillInJson(data, json); return json; } public void fillInJson(UnitResponse data, JSONObject json) throws Exception { UnitAttributes attributes = data.getAttributes(); if (attributes != null) { fillInJson(attributes, json); } CurrentUnitState stateData = data.getCurrentState(); if (stateData != null) { fillInJson(stateData, json); } UnitMetrics metrics = data.getMetrics(); if (metrics != null) { fillInJson(metrics, json); } Map<String, SubUnitList> subUnits = data.getSubUnits(); if (subUnits != null) { fillInJson(subUnits, json); } DistributionStates distributionStates = data.getDistributionStates(); if (distributionStates != null) { fillInJson(distributionStates, json); } } public void fillInJson(CurrentUnitState stateData, JSONObject json) throws Exception { JSONObject stateJson = new JSONObject(); json.put("state", stateJson); Map<String, UnitState> state = stateData.getStatePerType(); for (Map.Entry<String, UnitState> e : state.entrySet()) { String stateType = e.getKey(); UnitState unitState = e.getValue(); JSONObject stateTypeJson = new JSONObject() .put("state", unitState.getId()) .put("reason", unitState.getReason()); stateJson.put(stateType, stateTypeJson); } } public void fillInJson(UnitMetrics metrics, JSONObject json) throws Exception { JSONObject metricsJson = new JSONObject(); for (Map.Entry<String, Number> e : metrics.getMetricMap().entrySet()) { metricsJson.put(e.getKey(), e.getValue()); } json.put("metrics", metricsJson); } public void fillInJson(UnitAttributes attributes, JSONObject json) throws Exception { JSONObject 
attributesJson = new JSONObject(); for (Map.Entry<String, String> e : attributes.getAttributeValues().entrySet()) { attributesJson.put(e.getKey(), e.getValue()); } json.put("attributes", attributesJson); } public void fillInJson(Map<String, SubUnitList> subUnitMap, JSONObject json) throws Exception { for(Map.Entry<String, SubUnitList> e : subUnitMap.entrySet()) { String subUnitType = e.getKey(); JSONObject typeJson = new JSONObject(); for (Map.Entry<String, String> f : e.getValue().getSubUnitLinks().entrySet()) { JSONObject linkJson = new JSONObject(); linkJson.put("link", pathPrefix + "/" + f.getValue()); typeJson.put(f.getKey(), linkJson); } for (Map.Entry<String, UnitResponse> f : e.getValue().getSubUnits().entrySet()) { JSONObject subJson = new JSONObject(); fillInJson(f.getValue(), subJson); typeJson.put(f.getKey(), subJson); } json.put(subUnitType, typeJson); } } private static void fillInJson(DistributionStates states, JSONObject json) throws Exception { JSONObject statesJson = new JSONObject(); statesJson.put("published", distributionStateToJson(states.getPublishedState())); json.put("distribution-states", statesJson); } public JSONObject createErrorJson(String description) { JSONObject o = new JSONObject(); try{ o.put("message", description); } catch (JSONException e) { } return o; } public JSONObject createJson(SetResponse setResponse) throws JSONException { JSONObject jsonObject = new JSONObject(); jsonObject.put("wasModified", setResponse.getWasModified()); jsonObject.put("reason", setResponse.getReason()); return jsonObject; } }
class JsonWriter { private String pathPrefix = "/"; public JsonWriter() { } public void setDefaultPathPrefix(String defaultPathPrefix) { if (defaultPathPrefix.isEmpty() || defaultPathPrefix.charAt(0) != '/') { throw new IllegalArgumentException("Path prefix must start with a slash"); } this.pathPrefix = defaultPathPrefix; } public JSONObject createJson(UnitResponse data) throws Exception { JSONObject json = new JSONObject(); fillInJson(data, json); return json; } public void fillInJson(UnitResponse data, JSONObject json) throws Exception { UnitAttributes attributes = data.getAttributes(); if (attributes != null) { fillInJson(attributes, json); } CurrentUnitState stateData = data.getCurrentState(); if (stateData != null) { fillInJson(stateData, json); } UnitMetrics metrics = data.getMetrics(); if (metrics != null) { fillInJson(metrics, json); } Map<String, SubUnitList> subUnits = data.getSubUnits(); if (subUnits != null) { fillInJson(subUnits, json); } DistributionStates distributionStates = data.getDistributionStates(); if (distributionStates != null) { fillInJson(distributionStates, json); } } public void fillInJson(CurrentUnitState stateData, JSONObject json) throws Exception { JSONObject stateJson = new JSONObject(); json.put("state", stateJson); Map<String, UnitState> state = stateData.getStatePerType(); for (Map.Entry<String, UnitState> e : state.entrySet()) { String stateType = e.getKey(); UnitState unitState = e.getValue(); JSONObject stateTypeJson = new JSONObject() .put("state", unitState.getId()) .put("reason", unitState.getReason()); stateJson.put(stateType, stateTypeJson); } } public void fillInJson(UnitMetrics metrics, JSONObject json) throws Exception { JSONObject metricsJson = new JSONObject(); for (Map.Entry<String, Number> e : metrics.getMetricMap().entrySet()) { metricsJson.put(e.getKey(), e.getValue()); } json.put("metrics", metricsJson); } public void fillInJson(UnitAttributes attributes, JSONObject json) throws Exception { JSONObject 
attributesJson = new JSONObject(); for (Map.Entry<String, String> e : attributes.getAttributeValues().entrySet()) { attributesJson.put(e.getKey(), e.getValue()); } json.put("attributes", attributesJson); } public void fillInJson(Map<String, SubUnitList> subUnitMap, JSONObject json) throws Exception { for(Map.Entry<String, SubUnitList> e : subUnitMap.entrySet()) { String subUnitType = e.getKey(); JSONObject typeJson = new JSONObject(); for (Map.Entry<String, String> f : e.getValue().getSubUnitLinks().entrySet()) { JSONObject linkJson = new JSONObject(); linkJson.put("link", pathPrefix + "/" + f.getValue()); typeJson.put(f.getKey(), linkJson); } for (Map.Entry<String, UnitResponse> f : e.getValue().getSubUnits().entrySet()) { JSONObject subJson = new JSONObject(); fillInJson(f.getValue(), subJson); typeJson.put(f.getKey(), subJson); } json.put(subUnitType, typeJson); } } private static void fillInJson(DistributionStates states, JSONObject json) throws Exception { JSONObject statesJson = new JSONObject(); statesJson.put("published", distributionStateToJson(states.getPublishedState())); json.put("distribution-states", statesJson); } public JSONObject createErrorJson(String description) { JSONObject o = new JSONObject(); try{ o.put("message", description); } catch (JSONException e) { } return o; } public JSONObject createJson(SetResponse setResponse) throws JSONException { JSONObject jsonObject = new JSONObject(); jsonObject.put("wasModified", setResponse.getWasModified()); jsonObject.put("reason", setResponse.getReason()); return jsonObject; } }
"completedAt" is misleading; call this "runningUntil" or something?
/**
 * Returns the average duration of this application's deployment jobs, measured
 * from the last trigger to the last completion — or up to {@code now} for jobs
 * that are still running. Returns {@link Duration#ZERO} when no job has ever
 * been triggered.
 */
private Duration averageDeploymentDuration(Application application, Instant now) {
    List<Duration> jobDurations = new ArrayList<>();
    for (JobStatus status : application.deploymentJobs().jobStatus().values()) {
        if ( ! status.lastTriggered().isPresent()) continue; // never run: nothing to measure
        Instant triggeredAt = status.lastTriggered().get().at();
        // A completion that predates the trigger belongs to an earlier run, so the
        // job is considered still running and is measured up to 'now'.
        Instant runningUntil = status.lastCompleted()
                                     .map(JobStatus.JobRun::at)
                                     .filter(at -> at.isAfter(triggeredAt))
                                     .orElse(now);
        jobDurations.add(Duration.between(triggeredAt, runningUntil));
    }
    if (jobDurations.isEmpty()) return Duration.ZERO;

    Duration total = Duration.ZERO;
    for (Duration duration : jobDurations)
        total = total.plus(duration);
    return total.dividedBy(jobDurations.size());
}
Instant completedAt = status.lastCompleted()
/**
 * Returns the average duration of this application's deployment jobs, measured
 * from the last trigger to the last completion — or up to {@code now} for jobs
 * that are still running. Returns {@link Duration#ZERO} when no job has ever
 * been triggered.
 */
private Duration averageDeploymentDuration(Application application, Instant now) {
    List<Duration> jobDurations = application.deploymentJobs().jobStatus().values().stream()
            .filter(status -> status.lastTriggered().isPresent()) // ignore jobs never run
            .map(status -> {
                Instant triggeredAt = status.lastTriggered().get().at();
                // A completion that predates the trigger belongs to an earlier run,
                // so the job is considered still running and measured up to 'now'.
                Instant runningUntil = status.lastCompleted()
                        .map(JobStatus.JobRun::at)
                        .filter(at -> at.isAfter(triggeredAt))
                        .orElse(now);
                return Duration.between(triggeredAt, runningUntil);
            })
            .collect(Collectors.toList());
    // reduce yields empty Optional for an empty list, so the divide-by-size is safe.
    return jobDurations.stream()
            .reduce(Duration::plus)
            .map(totalDuration -> totalDuration.dividedBy(jobDurations.size()))
            .orElse(Duration.ZERO);
}
class MetricsReporter extends Maintainer { public static final String convergeMetric = "seconds.since.last.chef.convergence"; public static final String deploymentFailMetric = "deployment.failurePercentage"; public static final String deploymentAverageDuration = "deployment.averageDuration"; public static final String remainingRotations = "remaining_rotations"; private final Metric metric; private final Chef chefClient; private final Clock clock; private final SystemName system; public MetricsReporter(Controller controller, Metric metric, Chef chefClient, JobControl jobControl, SystemName system) { this(controller, metric, chefClient, Clock.systemUTC(), jobControl, system); } public MetricsReporter(Controller controller, Metric metric, Chef chefClient, Clock clock, JobControl jobControl, SystemName system) { super(controller, Duration.ofMinutes(1), jobControl); this.metric = metric; this.chefClient = chefClient; this.clock = clock; this.system = system; } @Override public void maintain() { reportChefMetrics(); reportDeploymentMetrics(); reportRemainingRotations(); } private void reportRemainingRotations() { try (RotationLock lock = controller().applications().rotationRepository().lock()) { int availableRotations = controller().applications().rotationRepository().availableRotations(lock).size(); metric.set(remainingRotations, availableRotations, metric.createContext(Collections.emptyMap())); } } private void reportChefMetrics() { String query = "chef_environment:hosted*"; if (system == SystemName.cd) { query += " AND hosted_system:" + system; } PartialNodeResult nodeResult = chefClient.partialSearchNodes(query, Arrays.asList( AttributeMapping.simpleMapping("fqdn"), AttributeMapping.simpleMapping("ohai_time"), AttributeMapping.deepMapping("tenant", Arrays.asList("hosted", "owner", "tenant")), AttributeMapping.deepMapping("application", Arrays.asList("hosted", "owner", "application")), AttributeMapping.deepMapping("instance", Arrays.asList("hosted", "owner", 
"instance")), AttributeMapping.deepMapping("environment", Arrays.asList("hosted", "environment")), AttributeMapping.deepMapping("region", Arrays.asList("hosted", "region")), AttributeMapping.deepMapping("system", Arrays.asList("hosted", "system")) )); keepNodesWithSystem(nodeResult, system); Instant instant = clock.instant(); for (PartialNode node : nodeResult.rows) { String hostname = node.getFqdn(); long secondsSinceConverge = Duration.between(Instant.ofEpochSecond(node.getOhaiTime().longValue()), instant).getSeconds(); Map<String, String> dimensions = new HashMap<>(); dimensions.put("host", hostname); dimensions.put("system", node.getValue("system").orElse("main")); Optional<String> environment = node.getValue("environment"); Optional<String> region = node.getValue("region"); if(environment.isPresent() && region.isPresent()) { dimensions.put("zone", String.format("%s.%s", environment.get(), region.get())); } node.getValue("tenant").ifPresent(tenant -> dimensions.put("tenantName", tenant)); Optional<String> application = node.getValue("application"); if (application.isPresent()) { dimensions.put("app",String.format("%s.%s", application.get(), node.getValue("instance").orElse("default"))); } Metric.Context context = metric.createContext(dimensions); metric.set(convergeMetric, secondsSinceConverge, context); } } private void reportDeploymentMetrics() { metric.set(deploymentFailMetric, deploymentFailRatio() * 100, metric.createContext(Collections.emptyMap())); for (Map.Entry<ApplicationId, Duration> entry : averageDeploymentDurations().entrySet()) { metric.set(deploymentAverageDuration, entry.getValue().getSeconds(), metric.createContext(Collections.singletonMap("application", entry.getKey().toString()))); } } private double deploymentFailRatio() { List<Application> applications = ApplicationList.from(controller().applications().asList()) .notPullRequest() .hasProductionDeployment() .asList(); if (applications.isEmpty()) return 0; return (double) 
applications.stream().filter(a -> a.deploymentJobs().hasFailures()).count() / (double) applications.size(); } private Map<ApplicationId, Duration> averageDeploymentDurations() { Instant now = clock.instant(); return ApplicationList.from(controller().applications().asList()) .notPullRequest() .hasProductionDeployment() .asList() .stream() .collect(Collectors.toMap(Application::id, application -> averageDeploymentDuration(application, now))); } private void keepNodesWithSystem(PartialNodeResult nodeResult, SystemName system) { nodeResult.rows.removeIf(node -> !system.name().equals(node.getValue("system").orElse("main"))); } }
class MetricsReporter extends Maintainer { public static final String convergeMetric = "seconds.since.last.chef.convergence"; public static final String deploymentFailMetric = "deployment.failurePercentage"; public static final String deploymentAverageDuration = "deployment.averageDuration"; public static final String remainingRotations = "remaining_rotations"; private final Metric metric; private final Chef chefClient; private final Clock clock; private final SystemName system; public MetricsReporter(Controller controller, Metric metric, Chef chefClient, JobControl jobControl, SystemName system) { this(controller, metric, chefClient, Clock.systemUTC(), jobControl, system); } public MetricsReporter(Controller controller, Metric metric, Chef chefClient, Clock clock, JobControl jobControl, SystemName system) { super(controller, Duration.ofMinutes(1), jobControl); this.metric = metric; this.chefClient = chefClient; this.clock = clock; this.system = system; } @Override public void maintain() { reportChefMetrics(); reportDeploymentMetrics(); reportRemainingRotations(); } private void reportRemainingRotations() { try (RotationLock lock = controller().applications().rotationRepository().lock()) { int availableRotations = controller().applications().rotationRepository().availableRotations(lock).size(); metric.set(remainingRotations, availableRotations, metric.createContext(Collections.emptyMap())); } } private void reportChefMetrics() { String query = "chef_environment:hosted*"; if (system == SystemName.cd) { query += " AND hosted_system:" + system; } PartialNodeResult nodeResult = chefClient.partialSearchNodes(query, Arrays.asList( AttributeMapping.simpleMapping("fqdn"), AttributeMapping.simpleMapping("ohai_time"), AttributeMapping.deepMapping("tenant", Arrays.asList("hosted", "owner", "tenant")), AttributeMapping.deepMapping("application", Arrays.asList("hosted", "owner", "application")), AttributeMapping.deepMapping("instance", Arrays.asList("hosted", "owner", 
"instance")), AttributeMapping.deepMapping("environment", Arrays.asList("hosted", "environment")), AttributeMapping.deepMapping("region", Arrays.asList("hosted", "region")), AttributeMapping.deepMapping("system", Arrays.asList("hosted", "system")) )); keepNodesWithSystem(nodeResult, system); Instant instant = clock.instant(); for (PartialNode node : nodeResult.rows) { String hostname = node.getFqdn(); long secondsSinceConverge = Duration.between(Instant.ofEpochSecond(node.getOhaiTime().longValue()), instant).getSeconds(); Map<String, String> dimensions = new HashMap<>(); dimensions.put("host", hostname); dimensions.put("system", node.getValue("system").orElse("main")); Optional<String> environment = node.getValue("environment"); Optional<String> region = node.getValue("region"); if(environment.isPresent() && region.isPresent()) { dimensions.put("zone", String.format("%s.%s", environment.get(), region.get())); } node.getValue("tenant").ifPresent(tenant -> dimensions.put("tenantName", tenant)); Optional<String> application = node.getValue("application"); if (application.isPresent()) { dimensions.put("app",String.format("%s.%s", application.get(), node.getValue("instance").orElse("default"))); } Metric.Context context = metric.createContext(dimensions); metric.set(convergeMetric, secondsSinceConverge, context); } } private void reportDeploymentMetrics() { metric.set(deploymentFailMetric, deploymentFailRatio() * 100, metric.createContext(Collections.emptyMap())); for (Map.Entry<ApplicationId, Duration> entry : averageDeploymentDurations().entrySet()) { metric.set(deploymentAverageDuration, entry.getValue().getSeconds(), metric.createContext(Collections.singletonMap("application", entry.getKey().toString()))); } } private double deploymentFailRatio() { List<Application> applications = ApplicationList.from(controller().applications().asList()) .notPullRequest() .hasProductionDeployment() .asList(); if (applications.isEmpty()) return 0; return (double) 
applications.stream().filter(a -> a.deploymentJobs().hasFailures()).count() / (double) applications.size(); } private Map<ApplicationId, Duration> averageDeploymentDurations() { Instant now = clock.instant(); return ApplicationList.from(controller().applications().asList()) .notPullRequest() .hasProductionDeployment() .asList() .stream() .collect(Collectors.toMap(Application::id, application -> averageDeploymentDuration(application, now))); } private void keepNodesWithSystem(PartialNodeResult nodeResult, SystemName system) { nodeResult.rows.removeIf(node -> !system.name().equals(node.getValue("system").orElse("main"))); } }
Done.
/**
 * Returns the average duration of this application's deployment jobs, measured
 * from the last trigger to the last completion — or up to {@code now} for jobs
 * that are still running. Returns {@link Duration#ZERO} when no job has ever
 * been triggered.
 */
private Duration averageDeploymentDuration(Application application, Instant now) {
    List<Duration> jobDurations = new ArrayList<>();
    for (JobStatus status : application.deploymentJobs().jobStatus().values()) {
        if ( ! status.lastTriggered().isPresent()) continue; // never run: nothing to measure
        Instant triggeredAt = status.lastTriggered().get().at();
        // A completion that predates the trigger belongs to an earlier run, so the
        // job is considered still running and is measured up to 'now'.
        Instant runningUntil = status.lastCompleted()
                                     .map(JobStatus.JobRun::at)
                                     .filter(at -> at.isAfter(triggeredAt))
                                     .orElse(now);
        jobDurations.add(Duration.between(triggeredAt, runningUntil));
    }
    if (jobDurations.isEmpty()) return Duration.ZERO;

    Duration total = Duration.ZERO;
    for (Duration duration : jobDurations)
        total = total.plus(duration);
    return total.dividedBy(jobDurations.size());
}
Instant completedAt = status.lastCompleted()
/**
 * Returns the average duration of this application's deployment jobs, measured
 * from the last trigger to the last completion — or up to {@code now} for jobs
 * that are still running. Returns {@link Duration#ZERO} when no job has ever
 * been triggered.
 */
private Duration averageDeploymentDuration(Application application, Instant now) {
    List<Duration> jobDurations = application.deploymentJobs().jobStatus().values().stream()
            .filter(status -> status.lastTriggered().isPresent()) // ignore jobs never run
            .map(status -> {
                Instant triggeredAt = status.lastTriggered().get().at();
                // A completion that predates the trigger belongs to an earlier run,
                // so the job is considered still running and measured up to 'now'.
                Instant runningUntil = status.lastCompleted()
                        .map(JobStatus.JobRun::at)
                        .filter(at -> at.isAfter(triggeredAt))
                        .orElse(now);
                return Duration.between(triggeredAt, runningUntil);
            })
            .collect(Collectors.toList());
    // reduce yields empty Optional for an empty list, so the divide-by-size is safe.
    return jobDurations.stream()
            .reduce(Duration::plus)
            .map(totalDuration -> totalDuration.dividedBy(jobDurations.size()))
            .orElse(Duration.ZERO);
}
class MetricsReporter extends Maintainer { public static final String convergeMetric = "seconds.since.last.chef.convergence"; public static final String deploymentFailMetric = "deployment.failurePercentage"; public static final String deploymentAverageDuration = "deployment.averageDuration"; public static final String remainingRotations = "remaining_rotations"; private final Metric metric; private final Chef chefClient; private final Clock clock; private final SystemName system; public MetricsReporter(Controller controller, Metric metric, Chef chefClient, JobControl jobControl, SystemName system) { this(controller, metric, chefClient, Clock.systemUTC(), jobControl, system); } public MetricsReporter(Controller controller, Metric metric, Chef chefClient, Clock clock, JobControl jobControl, SystemName system) { super(controller, Duration.ofMinutes(1), jobControl); this.metric = metric; this.chefClient = chefClient; this.clock = clock; this.system = system; } @Override public void maintain() { reportChefMetrics(); reportDeploymentMetrics(); reportRemainingRotations(); } private void reportRemainingRotations() { try (RotationLock lock = controller().applications().rotationRepository().lock()) { int availableRotations = controller().applications().rotationRepository().availableRotations(lock).size(); metric.set(remainingRotations, availableRotations, metric.createContext(Collections.emptyMap())); } } private void reportChefMetrics() { String query = "chef_environment:hosted*"; if (system == SystemName.cd) { query += " AND hosted_system:" + system; } PartialNodeResult nodeResult = chefClient.partialSearchNodes(query, Arrays.asList( AttributeMapping.simpleMapping("fqdn"), AttributeMapping.simpleMapping("ohai_time"), AttributeMapping.deepMapping("tenant", Arrays.asList("hosted", "owner", "tenant")), AttributeMapping.deepMapping("application", Arrays.asList("hosted", "owner", "application")), AttributeMapping.deepMapping("instance", Arrays.asList("hosted", "owner", 
"instance")), AttributeMapping.deepMapping("environment", Arrays.asList("hosted", "environment")), AttributeMapping.deepMapping("region", Arrays.asList("hosted", "region")), AttributeMapping.deepMapping("system", Arrays.asList("hosted", "system")) )); keepNodesWithSystem(nodeResult, system); Instant instant = clock.instant(); for (PartialNode node : nodeResult.rows) { String hostname = node.getFqdn(); long secondsSinceConverge = Duration.between(Instant.ofEpochSecond(node.getOhaiTime().longValue()), instant).getSeconds(); Map<String, String> dimensions = new HashMap<>(); dimensions.put("host", hostname); dimensions.put("system", node.getValue("system").orElse("main")); Optional<String> environment = node.getValue("environment"); Optional<String> region = node.getValue("region"); if(environment.isPresent() && region.isPresent()) { dimensions.put("zone", String.format("%s.%s", environment.get(), region.get())); } node.getValue("tenant").ifPresent(tenant -> dimensions.put("tenantName", tenant)); Optional<String> application = node.getValue("application"); if (application.isPresent()) { dimensions.put("app",String.format("%s.%s", application.get(), node.getValue("instance").orElse("default"))); } Metric.Context context = metric.createContext(dimensions); metric.set(convergeMetric, secondsSinceConverge, context); } } private void reportDeploymentMetrics() { metric.set(deploymentFailMetric, deploymentFailRatio() * 100, metric.createContext(Collections.emptyMap())); for (Map.Entry<ApplicationId, Duration> entry : averageDeploymentDurations().entrySet()) { metric.set(deploymentAverageDuration, entry.getValue().getSeconds(), metric.createContext(Collections.singletonMap("application", entry.getKey().toString()))); } } private double deploymentFailRatio() { List<Application> applications = ApplicationList.from(controller().applications().asList()) .notPullRequest() .hasProductionDeployment() .asList(); if (applications.isEmpty()) return 0; return (double) 
applications.stream().filter(a -> a.deploymentJobs().hasFailures()).count() / (double) applications.size(); } private Map<ApplicationId, Duration> averageDeploymentDurations() { Instant now = clock.instant(); return ApplicationList.from(controller().applications().asList()) .notPullRequest() .hasProductionDeployment() .asList() .stream() .collect(Collectors.toMap(Application::id, application -> averageDeploymentDuration(application, now))); } private void keepNodesWithSystem(PartialNodeResult nodeResult, SystemName system) { nodeResult.rows.removeIf(node -> !system.name().equals(node.getValue("system").orElse("main"))); } }
class MetricsReporter extends Maintainer { public static final String convergeMetric = "seconds.since.last.chef.convergence"; public static final String deploymentFailMetric = "deployment.failurePercentage"; public static final String deploymentAverageDuration = "deployment.averageDuration"; public static final String remainingRotations = "remaining_rotations"; private final Metric metric; private final Chef chefClient; private final Clock clock; private final SystemName system; public MetricsReporter(Controller controller, Metric metric, Chef chefClient, JobControl jobControl, SystemName system) { this(controller, metric, chefClient, Clock.systemUTC(), jobControl, system); } public MetricsReporter(Controller controller, Metric metric, Chef chefClient, Clock clock, JobControl jobControl, SystemName system) { super(controller, Duration.ofMinutes(1), jobControl); this.metric = metric; this.chefClient = chefClient; this.clock = clock; this.system = system; } @Override public void maintain() { reportChefMetrics(); reportDeploymentMetrics(); reportRemainingRotations(); } private void reportRemainingRotations() { try (RotationLock lock = controller().applications().rotationRepository().lock()) { int availableRotations = controller().applications().rotationRepository().availableRotations(lock).size(); metric.set(remainingRotations, availableRotations, metric.createContext(Collections.emptyMap())); } } private void reportChefMetrics() { String query = "chef_environment:hosted*"; if (system == SystemName.cd) { query += " AND hosted_system:" + system; } PartialNodeResult nodeResult = chefClient.partialSearchNodes(query, Arrays.asList( AttributeMapping.simpleMapping("fqdn"), AttributeMapping.simpleMapping("ohai_time"), AttributeMapping.deepMapping("tenant", Arrays.asList("hosted", "owner", "tenant")), AttributeMapping.deepMapping("application", Arrays.asList("hosted", "owner", "application")), AttributeMapping.deepMapping("instance", Arrays.asList("hosted", "owner", 
"instance")), AttributeMapping.deepMapping("environment", Arrays.asList("hosted", "environment")), AttributeMapping.deepMapping("region", Arrays.asList("hosted", "region")), AttributeMapping.deepMapping("system", Arrays.asList("hosted", "system")) )); keepNodesWithSystem(nodeResult, system); Instant instant = clock.instant(); for (PartialNode node : nodeResult.rows) { String hostname = node.getFqdn(); long secondsSinceConverge = Duration.between(Instant.ofEpochSecond(node.getOhaiTime().longValue()), instant).getSeconds(); Map<String, String> dimensions = new HashMap<>(); dimensions.put("host", hostname); dimensions.put("system", node.getValue("system").orElse("main")); Optional<String> environment = node.getValue("environment"); Optional<String> region = node.getValue("region"); if(environment.isPresent() && region.isPresent()) { dimensions.put("zone", String.format("%s.%s", environment.get(), region.get())); } node.getValue("tenant").ifPresent(tenant -> dimensions.put("tenantName", tenant)); Optional<String> application = node.getValue("application"); if (application.isPresent()) { dimensions.put("app",String.format("%s.%s", application.get(), node.getValue("instance").orElse("default"))); } Metric.Context context = metric.createContext(dimensions); metric.set(convergeMetric, secondsSinceConverge, context); } } private void reportDeploymentMetrics() { metric.set(deploymentFailMetric, deploymentFailRatio() * 100, metric.createContext(Collections.emptyMap())); for (Map.Entry<ApplicationId, Duration> entry : averageDeploymentDurations().entrySet()) { metric.set(deploymentAverageDuration, entry.getValue().getSeconds(), metric.createContext(Collections.singletonMap("application", entry.getKey().toString()))); } } private double deploymentFailRatio() { List<Application> applications = ApplicationList.from(controller().applications().asList()) .notPullRequest() .hasProductionDeployment() .asList(); if (applications.isEmpty()) return 0; return (double) 
applications.stream().filter(a -> a.deploymentJobs().hasFailures()).count() / (double) applications.size(); } private Map<ApplicationId, Duration> averageDeploymentDurations() { Instant now = clock.instant(); return ApplicationList.from(controller().applications().asList()) .notPullRequest() .hasProductionDeployment() .asList() .stream() .collect(Collectors.toMap(Application::id, application -> averageDeploymentDuration(application, now))); } private void keepNodesWithSystem(PartialNodeResult nodeResult, SystemName system) { nodeResult.rows.removeIf(node -> !system.name().equals(node.getValue("system").orElse("main"))); } }
I can't see this attribute in the `content.rnc` RelaxNG schema currently. Can be added later, since current configs will still generate the expected thread count.
private Integer getThreads(ModelElement clusterElem) { ModelElement tuning = clusterElem.getChild("tuning"); if (tuning == null) { return null; } ModelElement threads = tuning.getChild("persistence-threads"); if (threads == null) { return null; } Integer count = threads.getIntegerAttribute("count"); if (count != null) { return count; } int numThreads = 0; for (ModelElement thread : threads.subElements("thread")) { count = thread.getIntegerAttribute("count"); numThreads += (count == null) ? 1 : count; } return numThreads; }
Integer count = threads.getIntegerAttribute("count");
private Integer getThreads(ModelElement clusterElem) { ModelElement tuning = clusterElem.getChild("tuning"); if (tuning == null) { return null; } ModelElement threads = tuning.getChild("persistence-threads"); if (threads == null) { return null; } Integer count = threads.getIntegerAttribute("count"); if (count != null) { return count; } int numThreads = 0; for (ModelElement thread : threads.subElements("thread")) { count = thread.getIntegerAttribute("count"); numThreads += (count == null) ? 1 : count; } return numThreads; }
class Builder { protected FileStorProducer build(ContentCluster parent, ModelElement clusterElem) { return new FileStorProducer(parent, getThreads(clusterElem)); } }
class Builder { protected FileStorProducer build(ContentCluster parent, ModelElement clusterElem) { return new FileStorProducer(parent, getThreads(clusterElem)); } }
Seems wrong to cancel the job if if there's still an application change rolling.
public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) { applications().lockOrThrow(applicationId, application -> { applications().store(application.withChange(application.change().application() .map(Change::of) .filter(change -> keepApplicationChange) .orElse(Change.empty()))); if ( ! applications().require(applicationId).change().isPresent()) deploymentQueue.removeJobs(application.id()); }); }
deploymentQueue.removeJobs(application.id());
public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) { applications().lockOrThrow(applicationId, application -> { applications().store(application.withChange(application.change().application() .map(Change::of) .filter(change -> keepApplicationChange) .orElse(Change.empty()))); if ( ! applications().require(applicationId).change().isPresent()) deploymentQueue.removeJobs(application.id()); }); }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final DeploymentQueue deploymentQueue; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.deploymentQueue = new DeploymentQueue(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public DeploymentQueue deploymentQueue() { return deploymentQueue; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { ApplicationVersion applicationVersion = applicationVersionFrom(report); application = application.withJobCompletion(report, applicationVersion, clock.instant(), controller); application = application.withProjectId(report.projectId()); if (report.jobType() == JobType.component) { if (report.success()) { if ( ! 
acceptNewApplicationVersionNow(application)) { applications().store(application.withOutstandingChange(Change.of(applicationVersion))); return; } application = application.withChange(application.change().with(applicationVersion)); } else { applications().store(application); return; } } else if (report.jobType().isProduction() && deploymentComplete(application)) { application = application.withChange(Change.empty()); } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); } /** Returns whether all production zones listed in deployment spec has this change (or a newer version, if upgrade) */ private boolean deploymentComplete(LockedApplication application) { return order.jobsFrom(application.deploymentSpec()).stream() .filter(JobType::isProduction) .filter(job -> job.zone(controller.system()).isPresent()) .allMatch(job -> changeDeployed(application, job)); } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.change().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! 
jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.change().platform().isPresent()) { Version target = application.change().platform().get(); if (systemTestStatus == null || ! systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (componentStatus != null && changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a application version and/or Vespa version * which is newer (different) than the one last completed successfully in next */ private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! 
application.change().isPresent()) return false; if (next == null) return true; if (next.type().isTest()) { if ( ! lastSuccessfulIs(application.change(), previous.type(), application)) return false; if (lastSuccessfulIs(application.change(), next.type(), application)) return false; } else if (next.type().isProduction()) { if ( ! lastSuccessfulIs(application.change(), JobType.stagingTest, application)) return false; if (previous.type().isProduction() && ! changeDeployed(application, previous.type())) return false; if (changeDeployed(application, next.type())) return false; } else throw new IllegalStateException("Unclassified type of next job: " + next); return true; } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.change().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.change() + " is already in progress"); application = application.withChange(change); if (change.application().isPresent()) application = application.withOutstandingChange(Change.empty()); application = trigger(JobType.systemTest, application, false, change.toString()); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. 
Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = 
triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! application.deploymentJobs().isDeployableTo(jobType.environment(), application.change())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.change().isPresent() ? 
"deploying " + application.change() : "restarted deployment", reason)); deploymentQueue.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, clock.instant(), application.deployVersionFor(jobType, controller), application.deployApplicationVersionFor(jobType, controller, false) .orElse(ApplicationVersion.unknown), reason); } /** Create application version from job report */ private ApplicationVersion applicationVersionFrom(JobReport report) { return report.sourceRevision().map(sr -> ApplicationVersion.from(sr, report.buildNumber())) .orElse(ApplicationVersion.unknown); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.change().isPresent() && application.change().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (jobType.isProduction() && changeDeployed(application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return ! JobList.from(application) .production() .running(jobTimeoutLimit()) .isEmpty(); } /** * Returns whether the given application should skip deployment of its current change to the given production job zone. * * If the currently deployed application has a newer platform or application version than the application's * current change, the method returns {@code true}, to avoid a downgrade. * Otherwise, it returns whether the current change is redundant, i.e., all its components are already deployed. */ private boolean changeDeployed(Application application, JobType job) { if ( ! 
job.isProduction()) throw new IllegalArgumentException(job + " is not a production job!"); Deployment deployment = application.deployments().get(job.zone(controller.system()).get()); if (deployment == null) return false; int applicationComparison = application.change().application() .map(version -> version.compareTo(deployment.applicationVersion())) .orElse(0); int platformComparion = application.change().platform() .map(version -> version.compareTo(deployment.version())) .orElse(0); if (applicationComparison == -1 || platformComparion == -1) return true; return applicationComparison == 0 && platformComparion == 0; } private boolean acceptNewApplicationVersionNow(LockedApplication application) { if ( ! application.change().isPresent()) return true; if (application.change().application().isPresent()) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Change change, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulRun = status.lastSuccess(); if ( ! lastSuccessfulRun.isPresent()) return false; if (change.platform().isPresent() && ! change.platform().get().equals(lastSuccessfulRun.get().version())) return false; if (change.application().isPresent() && ! change.application().get().equals(lastSuccessfulRun.get().applicationVersion())) return false; return true; } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final DeploymentQueue deploymentQueue; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.deploymentQueue = new DeploymentQueue(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public DeploymentQueue deploymentQueue() { return deploymentQueue; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { ApplicationVersion applicationVersion = applicationVersionFrom(report); application = application.withJobCompletion(report, applicationVersion, clock.instant(), controller); application = application.withProjectId(report.projectId()); if (report.jobType() == JobType.component) { if (report.success()) { if ( ! 
acceptNewApplicationVersionNow(application)) { applications().store(application.withOutstandingChange(Change.of(applicationVersion))); return; } application = application.withChange(application.change().with(applicationVersion)); } else { applications().store(application); return; } } else if (report.jobType().isProduction() && deploymentComplete(application)) { application = application.withChange(Change.empty()); } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); } /** Returns whether all production zones listed in deployment spec has this change (or a newer version, if upgrade) */ private boolean deploymentComplete(LockedApplication application) { return order.jobsFrom(application.deploymentSpec()).stream() .filter(JobType::isProduction) .filter(job -> job.zone(controller.system()).isPresent()) .allMatch(job -> changeDeployed(application, job)); } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.change().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! 
jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.change().platform().isPresent()) { Version target = application.change().platform().get(); if (systemTestStatus == null || ! systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (componentStatus != null && changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a application version and/or Vespa version * which is newer (different) than the one last completed successfully in next */ private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! 
application.change().isPresent()) return false; if (next == null) return true; if (next.type().isTest()) { if ( ! lastSuccessfulIs(application.change(), previous.type(), application)) return false; if (lastSuccessfulIs(application.change(), next.type(), application)) return false; } else if (next.type().isProduction()) { if ( ! lastSuccessfulIs(application.change(), JobType.stagingTest, application)) return false; if (previous.type().isProduction() && ! changeDeployed(application, previous.type())) return false; if (changeDeployed(application, next.type())) return false; } else throw new IllegalStateException("Unclassified type of next job: " + next); return true; } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.change().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.change() + " is already in progress"); application = application.withChange(change); if (change.application().isPresent()) application = application.withOutstandingChange(Change.empty()); application = trigger(JobType.systemTest, application, false, change.toString()); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. 
Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = 
triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! application.deploymentJobs().isDeployableTo(jobType.environment(), application.change())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.change().isPresent() ? 
"deploying " + application.change() : "restarted deployment", reason)); deploymentQueue.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, clock.instant(), application.deployVersionFor(jobType, controller), application.deployApplicationVersionFor(jobType, controller, false) .orElse(ApplicationVersion.unknown), reason); } /** Create application version from job report */ private ApplicationVersion applicationVersionFrom(JobReport report) { return report.sourceRevision().map(sr -> ApplicationVersion.from(sr, report.buildNumber())) .orElse(ApplicationVersion.unknown); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.change().isPresent() && application.change().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (jobType.isProduction() && changeDeployed(application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return ! JobList.from(application) .production() .running(jobTimeoutLimit()) .isEmpty(); } /** * Returns whether the given application should skip deployment of its current change to the given production job zone. * * If the currently deployed application has a newer platform or application version than the application's * current change, the method returns {@code true}, to avoid a downgrade. * Otherwise, it returns whether the current change is redundant, i.e., all its components are already deployed. */ private boolean changeDeployed(Application application, JobType job) { if ( ! 
job.isProduction()) throw new IllegalArgumentException(job + " is not a production job!"); Deployment deployment = application.deployments().get(job.zone(controller.system()).get()); if (deployment == null) return false; int applicationComparison = application.change().application() .map(version -> version.compareTo(deployment.applicationVersion())) .orElse(0); int platformComparion = application.change().platform() .map(version -> version.compareTo(deployment.version())) .orElse(0); if (applicationComparison == -1 || platformComparion == -1) return true; return applicationComparison == 0 && platformComparion == 0; } private boolean acceptNewApplicationVersionNow(LockedApplication application) { if ( ! application.change().isPresent()) return true; if (application.change().application().isPresent()) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Change change, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulRun = status.lastSuccess(); if ( ! lastSuccessfulRun.isPresent()) return false; if (change.platform().isPresent() && ! change.platform().get().equals(lastSuccessfulRun.get().version())) return false; if (change.application().isPresent() && ! change.application().get().equals(lastSuccessfulRun.get().applicationVersion())) return false; return true; } }
Previous implementation would allow a downgrade of application if platform was an upgrade >_<
private boolean changeDeployed(Application application, JobType job) { if ( ! job.isProduction()) throw new IllegalArgumentException(job + " is not a production job!"); Deployment deployment = application.deployments().get(job.zone(controller.system()).get()); if (deployment == null) return false; int applicationComparison = application.change().application() .map(version -> version.compareTo(deployment.applicationVersion())) .orElse(0); int platformComparion = application.change().platform() .map(version -> version.compareTo(deployment.version())) .orElse(0); if (applicationComparison == -1 || platformComparion == -1) return true; return applicationComparison == 0 && platformComparion == 0; }
return applicationComparison == 0 && platformComparion == 0;
private boolean changeDeployed(Application application, JobType job) { if ( ! job.isProduction()) throw new IllegalArgumentException(job + " is not a production job!"); Deployment deployment = application.deployments().get(job.zone(controller.system()).get()); if (deployment == null) return false; int applicationComparison = application.change().application() .map(version -> version.compareTo(deployment.applicationVersion())) .orElse(0); int platformComparion = application.change().platform() .map(version -> version.compareTo(deployment.version())) .orElse(0); if (applicationComparison == -1 || platformComparion == -1) return true; return applicationComparison == 0 && platformComparion == 0; }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final DeploymentQueue deploymentQueue; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.deploymentQueue = new DeploymentQueue(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public DeploymentQueue deploymentQueue() { return deploymentQueue; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { ApplicationVersion applicationVersion = applicationVersionFrom(report); application = application.withJobCompletion(report, applicationVersion, clock.instant(), controller); application = application.withProjectId(report.projectId()); if (report.jobType() == JobType.component) { if (report.success()) { if ( ! 
acceptNewApplicationVersionNow(application)) { applications().store(application.withOutstandingChange(Change.of(applicationVersion))); return; } application = application.withChange(application.change().with(applicationVersion)); } else { applications().store(application); return; } } else if (report.jobType().isProduction() && deploymentComplete(application)) { application = application.withChange(Change.empty()); } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); } /** Returns whether all production zones listed in deployment spec has this change (or a newer version, if upgrade) */ private boolean deploymentComplete(LockedApplication application) { return order.jobsFrom(application.deploymentSpec()).stream() .filter(JobType::isProduction) .filter(job -> job.zone(controller.system()).isPresent()) .allMatch(job -> changeDeployed(application, job)); } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.change().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! 
jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.change().platform().isPresent()) { Version target = application.change().platform().get(); if (systemTestStatus == null || ! systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (componentStatus != null && changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a application version and/or Vespa version * which is newer (different) than the one last completed successfully in next */ private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! 
application.change().isPresent()) return false; if (next == null) return true; if (next.type().isTest()) { if ( ! lastSuccessfulIs(application.change(), previous.type(), application)) return false; if (lastSuccessfulIs(application.change(), next.type(), application)) return false; } else if (next.type().isProduction()) { if ( ! lastSuccessfulIs(application.change(), JobType.stagingTest, application)) return false; if (previous.type().isProduction() && ! changeDeployed(application, previous.type())) return false; if (changeDeployed(application, next.type())) return false; } else throw new IllegalStateException("Unclassified type of next job: " + next); return true; } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.change().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.change() + " is already in progress"); application = application.withChange(change); if (change.application().isPresent()) application = application.withOutstandingChange(Change.empty()); application = trigger(JobType.systemTest, application, false, change.toString()); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) { applications().lockOrThrow(applicationId, application -> { applications().store(application.withChange(application.change().application() .map(Change::of) .filter(change -> keepApplicationChange) .orElse(Change.empty()))); if ( ! 
applications().require(applicationId).change().isPresent()) deploymentQueue.removeJobs(application.id()); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! 
jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.change())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.change().isPresent() ? "deploying " + application.change() : "restarted deployment", reason)); deploymentQueue.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, clock.instant(), application.deployVersionFor(jobType, controller), application.deployApplicationVersionFor(jobType, controller, false) .orElse(ApplicationVersion.unknown), reason); } /** Create application version from job report */ private ApplicationVersion applicationVersionFrom(JobReport report) { return report.sourceRevision().map(sr -> ApplicationVersion.from(sr, report.buildNumber())) .orElse(ApplicationVersion.unknown); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.change().isPresent() && application.change().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (jobType.isProduction() && changeDeployed(application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return ! JobList.from(application) .production() .running(jobTimeoutLimit()) .isEmpty(); } /** * Returns whether the given application should skip deployment of its current change to the given production job zone. 
* * If the currently deployed application has a newer platform or application version than the application's * current change, the method returns {@code true}, to avoid a downgrade. * Otherwise, it returns whether the current change is redundant, i.e., all its components are already deployed. */ private boolean acceptNewApplicationVersionNow(LockedApplication application) { if ( ! application.change().isPresent()) return true; if (application.change().application().isPresent()) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Change change, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulRun = status.lastSuccess(); if ( ! lastSuccessfulRun.isPresent()) return false; if (change.platform().isPresent() && ! change.platform().get().equals(lastSuccessfulRun.get().version())) return false; if (change.application().isPresent() && ! change.application().get().equals(lastSuccessfulRun.get().applicationVersion())) return false; return true; } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final DeploymentQueue deploymentQueue; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.deploymentQueue = new DeploymentQueue(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public DeploymentQueue deploymentQueue() { return deploymentQueue; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { ApplicationVersion applicationVersion = applicationVersionFrom(report); application = application.withJobCompletion(report, applicationVersion, clock.instant(), controller); application = application.withProjectId(report.projectId()); if (report.jobType() == JobType.component) { if (report.success()) { if ( ! 
acceptNewApplicationVersionNow(application)) { applications().store(application.withOutstandingChange(Change.of(applicationVersion))); return; } application = application.withChange(application.change().with(applicationVersion)); } else { applications().store(application); return; } } else if (report.jobType().isProduction() && deploymentComplete(application)) { application = application.withChange(Change.empty()); } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); } /** Returns whether all production zones listed in deployment spec has this change (or a newer version, if upgrade) */ private boolean deploymentComplete(LockedApplication application) { return order.jobsFrom(application.deploymentSpec()).stream() .filter(JobType::isProduction) .filter(job -> job.zone(controller.system()).isPresent()) .allMatch(job -> changeDeployed(application, job)); } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.change().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! 
jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.change().platform().isPresent()) { Version target = application.change().platform().get(); if (systemTestStatus == null || ! systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (componentStatus != null && changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a application version and/or Vespa version * which is newer (different) than the one last completed successfully in next */ private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! 
application.change().isPresent()) return false; if (next == null) return true; if (next.type().isTest()) { if ( ! lastSuccessfulIs(application.change(), previous.type(), application)) return false; if (lastSuccessfulIs(application.change(), next.type(), application)) return false; } else if (next.type().isProduction()) { if ( ! lastSuccessfulIs(application.change(), JobType.stagingTest, application)) return false; if (previous.type().isProduction() && ! changeDeployed(application, previous.type())) return false; if (changeDeployed(application, next.type())) return false; } else throw new IllegalStateException("Unclassified type of next job: " + next); return true; } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.change().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.change() + " is already in progress"); application = application.withChange(change); if (change.application().isPresent()) application = application.withOutstandingChange(Change.empty()); application = trigger(JobType.systemTest, application, false, change.toString()); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) { applications().lockOrThrow(applicationId, application -> { applications().store(application.withChange(application.change().application() .map(Change::of) .filter(change -> keepApplicationChange) .orElse(Change.empty()))); if ( ! 
applications().require(applicationId).change().isPresent()) deploymentQueue.removeJobs(application.id()); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! 
jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.change())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.change().isPresent() ? "deploying " + application.change() : "restarted deployment", reason)); deploymentQueue.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, clock.instant(), application.deployVersionFor(jobType, controller), application.deployApplicationVersionFor(jobType, controller, false) .orElse(ApplicationVersion.unknown), reason); } /** Create application version from job report */ private ApplicationVersion applicationVersionFrom(JobReport report) { return report.sourceRevision().map(sr -> ApplicationVersion.from(sr, report.buildNumber())) .orElse(ApplicationVersion.unknown); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.change().isPresent() && application.change().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (jobType.isProduction() && changeDeployed(application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return ! JobList.from(application) .production() .running(jobTimeoutLimit()) .isEmpty(); } /** * Returns whether the given application should skip deployment of its current change to the given production job zone. 
* * If the currently deployed application has a newer platform or application version than the application's * current change, the method returns {@code true}, to avoid a downgrade. * Otherwise, it returns whether the current change is redundant, i.e., all its components are already deployed. */ private boolean acceptNewApplicationVersionNow(LockedApplication application) { if ( ! application.change().isPresent()) return true; if (application.change().application().isPresent()) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Change change, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulRun = status.lastSuccess(); if ( ! lastSuccessfulRun.isPresent()) return false; if (change.platform().isPresent() && ! change.platform().get().equals(lastSuccessfulRun.get().version())) return false; if (change.application().isPresent() && ! change.application().get().equals(lastSuccessfulRun.get().applicationVersion())) return false; return true; } }
I suggest to delete this comment since the better comment is further down. Let's fix the issue instead in another PR. I don't really understand what you mean by 'ordering constraints' either.
public void deconstruct(Object component) { if (component instanceof AbstractComponent) { AbstractComponent abstractComponent = (AbstractComponent) component; if (abstractComponent.isDeconstructable()) { executor.schedule(new DestructComponentTask(abstractComponent), delay.getSeconds(), TimeUnit.SECONDS); } } else if (component instanceof Provider) { log.info("Starting deconstruction of provider " + component); ((Provider)component).deconstruct(); log.info("Finished deconstructing of provider " + component); } else if (component instanceof SharedResource) { log.info("Releasing container reference to resource " + component); ((SharedResource)component).release(); } }
public void deconstruct(Object component) { if (component instanceof AbstractComponent) { AbstractComponent abstractComponent = (AbstractComponent) component; if (abstractComponent.isDeconstructable()) { executor.schedule(new DestructComponentTask(abstractComponent), delay.getSeconds(), TimeUnit.SECONDS); } } else if (component instanceof Provider) { log.info("Starting deconstruction of provider " + component); ((Provider)component).deconstruct(); log.info("Finished deconstruction of provider " + component); } else if (component instanceof SharedResource) { log.info("Releasing container reference to resource " + component); ((SharedResource)component).release(); } }
class Deconstructor implements ComponentDeconstructor { private static final Logger log = Logger.getLogger(Deconstructor.class.getName()); private final ScheduledExecutorService executor = Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getThreadFactory("deconstructor")); private final Duration delay; public Deconstructor(boolean delayDeconstruction) { this.delay = delayDeconstruction ? Duration.ofSeconds(60) : Duration.ZERO; } @Override private static class DestructComponentTask implements Runnable { private final Random random = new Random(System.currentTimeMillis()); private final AbstractComponent component; DestructComponentTask(AbstractComponent component) { this.component = component; } /** * Returns a random delay betweeen 0 and 10 minutes which will be different across identical containers invoking this at the same time. * Used to randomize restart to avoid simultaneous cluster restarts. */ private Duration getRandomizedShutdownDelay() { long seconds = (long) random.nextDouble() * 60 * 10; return Duration.ofSeconds(seconds); } @Override public void run() { log.info("Starting deconstruction of component " + component); try { component.deconstruct(); log.info("Finished deconstructing of component " + component); } catch (Exception | NoClassDefFoundError e) { log.log(WARNING, "Exception thrown when deconstructing component " + component, e); } catch (Error e) { try { Duration shutdownDelay = getRandomizedShutdownDelay(); log.log(LogLevel.FATAL, "Error when deconstructing component " + component + ". Will sleep for " + shutdownDelay.getSeconds() + " seconds then restart", e); Thread.sleep(shutdownDelay.toMillis()); } catch (InterruptedException exception) { log.log(WARNING, "Randomized wait before dying disrupted. 
Dying now."); } com.yahoo.protect.Process.logAndDie("Shutting down due to error when deconstructing component " + component); } catch (Throwable e) { log.log(WARNING, "Non-error not exception throwable thrown when deconstructing component " + component, e); } } } }
class Deconstructor implements ComponentDeconstructor { private static final Logger log = Logger.getLogger(Deconstructor.class.getName()); private final ScheduledExecutorService executor = Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getThreadFactory("component-deconstructor")); private final Duration delay; public Deconstructor(boolean delayDeconstruction) { this.delay = delayDeconstruction ? Duration.ofSeconds(60) : Duration.ZERO; } @Override private static class DestructComponentTask implements Runnable { private final Random random = new Random(System.nanoTime()); private final AbstractComponent component; DestructComponentTask(AbstractComponent component) { this.component = component; } /** * Returns a random delay betweeen 0 and 10 minutes which will be different across identical containers invoking this at the same time. * Used to randomize restart to avoid simultaneous cluster restarts. */ private Duration getRandomizedShutdownDelay() { long seconds = (long) random.nextDouble() * 60 * 10; return Duration.ofSeconds(seconds); } @Override public void run() { log.info("Starting deconstruction of component " + component); try { component.deconstruct(); log.info("Finished deconstructing of component " + component); } catch (Exception | NoClassDefFoundError e) { log.log(WARNING, "Exception thrown when deconstructing component " + component, e); } catch (Error e) { try { Duration shutdownDelay = getRandomizedShutdownDelay(); log.log(LogLevel.FATAL, "Error when deconstructing component " + component + ". Will sleep for " + shutdownDelay.getSeconds() + " seconds then restart", e); Thread.sleep(shutdownDelay.toMillis()); } catch (InterruptedException exception) { log.log(WARNING, "Randomized wait before dying disrupted. 
Dying now."); } com.yahoo.protect.Process.logAndDie("Shutting down due to error when deconstructing component " + component); } catch (Throwable e) { log.log(WARNING, "Non-error not exception throwable thrown when deconstructing component " + component, e); } } } }
deconstructing -> deconstruction
public void deconstruct(Object component) { if (component instanceof AbstractComponent) { AbstractComponent abstractComponent = (AbstractComponent) component; if (abstractComponent.isDeconstructable()) { executor.schedule(new DestructComponentTask(abstractComponent), delay.getSeconds(), TimeUnit.SECONDS); } } else if (component instanceof Provider) { log.info("Starting deconstruction of provider " + component); ((Provider)component).deconstruct(); log.info("Finished deconstructing of provider " + component); } else if (component instanceof SharedResource) { log.info("Releasing container reference to resource " + component); ((SharedResource)component).release(); } }
log.info("Finished deconstructing of provider " + component);
public void deconstruct(Object component) { if (component instanceof AbstractComponent) { AbstractComponent abstractComponent = (AbstractComponent) component; if (abstractComponent.isDeconstructable()) { executor.schedule(new DestructComponentTask(abstractComponent), delay.getSeconds(), TimeUnit.SECONDS); } } else if (component instanceof Provider) { log.info("Starting deconstruction of provider " + component); ((Provider)component).deconstruct(); log.info("Finished deconstruction of provider " + component); } else if (component instanceof SharedResource) { log.info("Releasing container reference to resource " + component); ((SharedResource)component).release(); } }
class Deconstructor implements ComponentDeconstructor { private static final Logger log = Logger.getLogger(Deconstructor.class.getName()); private final ScheduledExecutorService executor = Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getThreadFactory("deconstructor")); private final Duration delay; public Deconstructor(boolean delayDeconstruction) { this.delay = delayDeconstruction ? Duration.ofSeconds(60) : Duration.ZERO; } @Override private static class DestructComponentTask implements Runnable { private final Random random = new Random(System.currentTimeMillis()); private final AbstractComponent component; DestructComponentTask(AbstractComponent component) { this.component = component; } /** * Returns a random delay betweeen 0 and 10 minutes which will be different across identical containers invoking this at the same time. * Used to randomize restart to avoid simultaneous cluster restarts. */ private Duration getRandomizedShutdownDelay() { long seconds = (long) random.nextDouble() * 60 * 10; return Duration.ofSeconds(seconds); } @Override public void run() { log.info("Starting deconstruction of component " + component); try { component.deconstruct(); log.info("Finished deconstructing of component " + component); } catch (Exception | NoClassDefFoundError e) { log.log(WARNING, "Exception thrown when deconstructing component " + component, e); } catch (Error e) { try { Duration shutdownDelay = getRandomizedShutdownDelay(); log.log(LogLevel.FATAL, "Error when deconstructing component " + component + ". Will sleep for " + shutdownDelay.getSeconds() + " seconds then restart", e); Thread.sleep(shutdownDelay.toMillis()); } catch (InterruptedException exception) { log.log(WARNING, "Randomized wait before dying disrupted. 
Dying now."); } com.yahoo.protect.Process.logAndDie("Shutting down due to error when deconstructing component " + component); } catch (Throwable e) { log.log(WARNING, "Non-error not exception throwable thrown when deconstructing component " + component, e); } } } }
class Deconstructor implements ComponentDeconstructor { private static final Logger log = Logger.getLogger(Deconstructor.class.getName()); private final ScheduledExecutorService executor = Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getThreadFactory("component-deconstructor")); private final Duration delay; public Deconstructor(boolean delayDeconstruction) { this.delay = delayDeconstruction ? Duration.ofSeconds(60) : Duration.ZERO; } @Override private static class DestructComponentTask implements Runnable { private final Random random = new Random(System.nanoTime()); private final AbstractComponent component; DestructComponentTask(AbstractComponent component) { this.component = component; } /** * Returns a random delay betweeen 0 and 10 minutes which will be different across identical containers invoking this at the same time. * Used to randomize restart to avoid simultaneous cluster restarts. */ private Duration getRandomizedShutdownDelay() { long seconds = (long) random.nextDouble() * 60 * 10; return Duration.ofSeconds(seconds); } @Override public void run() { log.info("Starting deconstruction of component " + component); try { component.deconstruct(); log.info("Finished deconstructing of component " + component); } catch (Exception | NoClassDefFoundError e) { log.log(WARNING, "Exception thrown when deconstructing component " + component, e); } catch (Error e) { try { Duration shutdownDelay = getRandomizedShutdownDelay(); log.log(LogLevel.FATAL, "Error when deconstructing component " + component + ". Will sleep for " + shutdownDelay.getSeconds() + " seconds then restart", e); Thread.sleep(shutdownDelay.toMillis()); } catch (InterruptedException exception) { log.log(WARNING, "Randomized wait before dying disrupted. 
Dying now."); } com.yahoo.protect.Process.logAndDie("Shutting down due to error when deconstructing component " + component); } catch (Throwable e) { log.log(WARNING, "Non-error not exception throwable thrown when deconstructing component " + component, e); } } } }
Components should be deconstructed in the correct order in case the destructor uses other components injected in the constructor. Anyway, I'll remove the comment.
public void deconstruct(Object component) { if (component instanceof AbstractComponent) { AbstractComponent abstractComponent = (AbstractComponent) component; if (abstractComponent.isDeconstructable()) { executor.schedule(new DestructComponentTask(abstractComponent), delay.getSeconds(), TimeUnit.SECONDS); } } else if (component instanceof Provider) { log.info("Starting deconstruction of provider " + component); ((Provider)component).deconstruct(); log.info("Finished deconstructing of provider " + component); } else if (component instanceof SharedResource) { log.info("Releasing container reference to resource " + component); ((SharedResource)component).release(); } }
public void deconstruct(Object component) { if (component instanceof AbstractComponent) { AbstractComponent abstractComponent = (AbstractComponent) component; if (abstractComponent.isDeconstructable()) { executor.schedule(new DestructComponentTask(abstractComponent), delay.getSeconds(), TimeUnit.SECONDS); } } else if (component instanceof Provider) { log.info("Starting deconstruction of provider " + component); ((Provider)component).deconstruct(); log.info("Finished deconstruction of provider " + component); } else if (component instanceof SharedResource) { log.info("Releasing container reference to resource " + component); ((SharedResource)component).release(); } }
class Deconstructor implements ComponentDeconstructor { private static final Logger log = Logger.getLogger(Deconstructor.class.getName()); private final ScheduledExecutorService executor = Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getThreadFactory("deconstructor")); private final Duration delay; public Deconstructor(boolean delayDeconstruction) { this.delay = delayDeconstruction ? Duration.ofSeconds(60) : Duration.ZERO; } @Override private static class DestructComponentTask implements Runnable { private final Random random = new Random(System.currentTimeMillis()); private final AbstractComponent component; DestructComponentTask(AbstractComponent component) { this.component = component; } /** * Returns a random delay betweeen 0 and 10 minutes which will be different across identical containers invoking this at the same time. * Used to randomize restart to avoid simultaneous cluster restarts. */ private Duration getRandomizedShutdownDelay() { long seconds = (long) random.nextDouble() * 60 * 10; return Duration.ofSeconds(seconds); } @Override public void run() { log.info("Starting deconstruction of component " + component); try { component.deconstruct(); log.info("Finished deconstructing of component " + component); } catch (Exception | NoClassDefFoundError e) { log.log(WARNING, "Exception thrown when deconstructing component " + component, e); } catch (Error e) { try { Duration shutdownDelay = getRandomizedShutdownDelay(); log.log(LogLevel.FATAL, "Error when deconstructing component " + component + ". Will sleep for " + shutdownDelay.getSeconds() + " seconds then restart", e); Thread.sleep(shutdownDelay.toMillis()); } catch (InterruptedException exception) { log.log(WARNING, "Randomized wait before dying disrupted. 
Dying now."); } com.yahoo.protect.Process.logAndDie("Shutting down due to error when deconstructing component " + component); } catch (Throwable e) { log.log(WARNING, "Non-error not exception throwable thrown when deconstructing component " + component, e); } } } }
class Deconstructor implements ComponentDeconstructor { private static final Logger log = Logger.getLogger(Deconstructor.class.getName()); private final ScheduledExecutorService executor = Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getThreadFactory("component-deconstructor")); private final Duration delay; public Deconstructor(boolean delayDeconstruction) { this.delay = delayDeconstruction ? Duration.ofSeconds(60) : Duration.ZERO; } @Override private static class DestructComponentTask implements Runnable { private final Random random = new Random(System.nanoTime()); private final AbstractComponent component; DestructComponentTask(AbstractComponent component) { this.component = component; } /** * Returns a random delay betweeen 0 and 10 minutes which will be different across identical containers invoking this at the same time. * Used to randomize restart to avoid simultaneous cluster restarts. */ private Duration getRandomizedShutdownDelay() { long seconds = (long) random.nextDouble() * 60 * 10; return Duration.ofSeconds(seconds); } @Override public void run() { log.info("Starting deconstruction of component " + component); try { component.deconstruct(); log.info("Finished deconstructing of component " + component); } catch (Exception | NoClassDefFoundError e) { log.log(WARNING, "Exception thrown when deconstructing component " + component, e); } catch (Error e) { try { Duration shutdownDelay = getRandomizedShutdownDelay(); log.log(LogLevel.FATAL, "Error when deconstructing component " + component + ". Will sleep for " + shutdownDelay.getSeconds() + " seconds then restart", e); Thread.sleep(shutdownDelay.toMillis()); } catch (InterruptedException exception) { log.log(WARNING, "Randomized wait before dying disrupted. 
Dying now."); } com.yahoo.protect.Process.logAndDie("Shutting down due to error when deconstructing component " + component); } catch (Throwable e) { log.log(WARNING, "Non-error not exception throwable thrown when deconstructing component " + component, e); } } } }
Ah, this turned out wrong, I see. `concurrentlyWith` should be all jobs here, or the force flag should cause the test to be skipped. I'll do the latter, I think.
private HttpResponse trigger(HttpRequest request, String tenantName, String applicationName) { JobType jobType = Optional.of(asString(request.getData())) .filter(s -> !s.isEmpty()) .map(JobType::fromJobName) .orElse(JobType.component); ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, "default"); controller.applications().lockOrThrow(applicationId, application -> { application = controller.applications().deploymentTrigger().triggerAllowParallel( new DeploymentTrigger.Triggering(application, jobType, true, "Triggered from screwdriver/v1"), Collections.emptySet(), true ); controller.applications().store(application); }); Slime slime = new Slime(); Cursor cursor = slime.setObject(); cursor.setString("message", "Triggered " + jobType.jobName() + " for " + applicationId); return new SlimeJsonResponse(slime); }
new DeploymentTrigger.Triggering(application, jobType, true, "Triggered from screwdriver/v1"), Collections.emptySet(), true
private HttpResponse trigger(HttpRequest request, String tenantName, String applicationName) { JobType jobType = Optional.of(asString(request.getData())) .filter(s -> !s.isEmpty()) .map(JobType::fromJobName) .orElse(JobType.component); ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, "default"); controller.applications().lockOrThrow(applicationId, application -> { application = controller.applications().deploymentTrigger().trigger( new DeploymentTrigger.Triggering(application, jobType, true, "Triggered from screwdriver/v1"), Collections.emptySet(), true ); controller.applications().store(application); }); Slime slime = new Slime(); Cursor cursor = slime.setObject(); cursor.setString("message", "Triggered " + jobType.jobName() + " for " + applicationId); return new SlimeJsonResponse(slime); }
class ScrewdriverApiHandler extends LoggingRequestHandler { private final static Logger log = Logger.getLogger(ScrewdriverApiHandler.class.getName()); private final Controller controller; public ScrewdriverApiHandler(LoggingRequestHandler.Context parentCtx, Controller controller) { super(parentCtx); this.controller = controller; } @Override public HttpResponse handle(HttpRequest request) { Method method = request.getMethod(); try { switch (method) { case GET: return get(request); case POST: return post(request); default: return ErrorResponse.methodNotAllowed("Method '" + method + "' is unsupported"); } } catch (IllegalArgumentException|IllegalStateException e) { return ErrorResponse.badRequest(Exceptions.toMessageString(e)); } catch (RuntimeException e) { log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e); return ErrorResponse.internalServerError(Exceptions.toMessageString(e)); } } private HttpResponse get(HttpRequest request) { Path path = new Path(request.getUri().getPath()); if (path.matches("/screwdriver/v1/release/vespa")) { return vespaVersion(); } if (path.matches("/screwdriver/v1/jobsToRun")) { return buildJobs(controller.applications().deploymentTrigger().deploymentQueue().jobs()); } return notFound(request); } private HttpResponse post(HttpRequest request) { Path path = new Path(request.getUri().getPath()); if (path.matches("/screwdriver/v1/trigger/tenant/{tenant}/application/{application}")) { return trigger(request, path.get("tenant"), path.get("application")); } return notFound(request); } private HttpResponse vespaVersion() { VespaVersion version = controller.versionStatus().version(controller.systemVersion()); if (version == null) return ErrorResponse.notFoundError("Information about the current system version is not available at this time"); Slime slime = new Slime(); Cursor cursor = slime.setObject(); cursor.setString("version", version.versionNumber().toString()); cursor.setString("sha", version.releaseCommit()); 
cursor.setLong("date", version.committedAt().toEpochMilli()); return new SlimeJsonResponse(slime); } private HttpResponse buildJobs(List<BuildJob> buildJobs) { Slime slime = new Slime(); Cursor buildJobArray = slime.setArray(); for (BuildJob buildJob : buildJobs) { Cursor buildJobObject = buildJobArray.addObject(); buildJobObject.setLong("projectId", buildJob.projectId()); buildJobObject.setString("jobName", buildJob.jobName()); } return new SlimeJsonResponse(slime); } private static String asString(InputStream in) { Scanner scanner = new Scanner(in).useDelimiter("\\A"); if (scanner.hasNext()) { return scanner.next(); } return ""; } private static HttpResponse notFound(HttpRequest request) { return ErrorResponse.notFoundError(String.format("No '%s' handler at '%s'", request.getMethod(), request.getUri().getPath())); } }
class ScrewdriverApiHandler extends LoggingRequestHandler { private final static Logger log = Logger.getLogger(ScrewdriverApiHandler.class.getName()); private final Controller controller; public ScrewdriverApiHandler(LoggingRequestHandler.Context parentCtx, Controller controller) { super(parentCtx); this.controller = controller; } @Override public HttpResponse handle(HttpRequest request) { Method method = request.getMethod(); try { switch (method) { case GET: return get(request); case POST: return post(request); default: return ErrorResponse.methodNotAllowed("Method '" + method + "' is unsupported"); } } catch (IllegalArgumentException|IllegalStateException e) { return ErrorResponse.badRequest(Exceptions.toMessageString(e)); } catch (RuntimeException e) { log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e); return ErrorResponse.internalServerError(Exceptions.toMessageString(e)); } } private HttpResponse get(HttpRequest request) { Path path = new Path(request.getUri().getPath()); if (path.matches("/screwdriver/v1/release/vespa")) { return vespaVersion(); } if (path.matches("/screwdriver/v1/jobsToRun")) { return buildJobs(controller.applications().deploymentTrigger().deploymentQueue().jobs()); } return notFound(request); } private HttpResponse post(HttpRequest request) { Path path = new Path(request.getUri().getPath()); if (path.matches("/screwdriver/v1/trigger/tenant/{tenant}/application/{application}")) { return trigger(request, path.get("tenant"), path.get("application")); } return notFound(request); } private HttpResponse vespaVersion() { VespaVersion version = controller.versionStatus().version(controller.systemVersion()); if (version == null) return ErrorResponse.notFoundError("Information about the current system version is not available at this time"); Slime slime = new Slime(); Cursor cursor = slime.setObject(); cursor.setString("version", version.versionNumber().toString()); cursor.setString("sha", version.releaseCommit()); 
cursor.setLong("date", version.committedAt().toEpochMilli()); return new SlimeJsonResponse(slime); } private HttpResponse buildJobs(List<BuildJob> buildJobs) { Slime slime = new Slime(); Cursor buildJobArray = slime.setArray(); for (BuildJob buildJob : buildJobs) { Cursor buildJobObject = buildJobArray.addObject(); buildJobObject.setLong("projectId", buildJob.projectId()); buildJobObject.setString("jobName", buildJob.jobName()); } return new SlimeJsonResponse(slime); } private static String asString(InputStream in) { Scanner scanner = new Scanner(in).useDelimiter("\\A"); if (scanner.hasNext()) { return scanner.next(); } return ""; } private static HttpResponse notFound(HttpRequest request) { return ErrorResponse.notFoundError(String.format("No '%s' handler at '%s'", request.getMethod(), request.getUri().getPath())); } }
This blocked getting the deployment spec through, so it had to go. It's checked again later, however. It was even checked twice, I believe, before eventual triggering ;)
public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); }
}
public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final DeploymentQueue deploymentQueue; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.deploymentQueue = new DeploymentQueue(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public DeploymentQueue deploymentQueue() { return deploymentQueue; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { ApplicationVersion applicationVersion = applicationVersionFrom(report); application = application.withJobCompletion(report, applicationVersion, clock.instant(), controller); application = application.withProjectId(report.projectId()); if (report.jobType() == JobType.component) { if (report.success()) { if ( ! 
acceptNewApplicationVersionNow(application)) { applications().store(application.withOutstandingChange(Change.of(applicationVersion))); return; } application = application.withChange(application.change().with(applicationVersion)); } else { applications().store(application); return; } } else if (report.jobType().isProduction() && deploymentComplete(application)) { application = application.withChange(Change.empty()); } if (report.success()) { triggerReadyJobs(application); return; } else if (retryBecauseOutOfCapacity(application, report.jobType())) { triggerReadyJobs(application); return; } else if (retryBecauseNewFailure(application, report.jobType())) { triggerReadyJobs(application); return; } applications().store(application); }); } /** * Find jobs that can and should run but are currently not. */ /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.change().platform().isPresent()) { Version target = application.change().platform().get(); if (systemTestStatus == null || ! systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! 
systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(new Triggering(application, JobType.systemTest, false, "Upgrade to " + target), Collections.emptySet(), false); applications().store(application); } } } for (JobType jobType : (Iterable<JobType>) Stream.concat(Stream.of(JobType.component), jobs.stream())::iterator) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; List<JobType> nextJobs = order.nextAfter(jobType, application); for (JobType nextJobType : nextJobs) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) { boolean isRetry = nextStatus != null && nextStatus.jobError().filter(JobError.outOfCapacity::equals).isPresent(); application = trigger(new Triggering(application, nextJobType, isRetry, isRetry ? "Retrying on out of capacity" : "Available change in " + jobType.jobName()), nextJobs, false); } } applications().store(application); } } /** * Trigger a job for an application, if allowed * * @param triggering the triggering to execute, i.e., application, job type and reason * @param concurrentlyWith production jobs that may run concurrently with the job to trigger * @param force true to disable checks which should normally prevent this triggering from happening * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication trigger(Triggering triggering, Collection<JobType> concurrentlyWith, boolean force) { if (triggering.jobType == null) return triggering.application; List<JobType> runningProductionJobs = JobList.from(triggering.application) .production() .running(jobTimeoutLimit()) .mapToList(JobStatus::type); if ( ! force && triggering.jobType().isProduction() && ! 
concurrentlyWith.containsAll(runningProductionJobs)) return triggering.application; if ( ! triggering.application.deploymentJobs().isDeployableTo(triggering.jobType.environment(), triggering.application.change())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", triggering.jobType, triggering.application, triggering.reason)); return triggering.application; } if ( ! force && ! allowedTriggering(triggering.jobType, triggering.application)) return triggering.application; log.info(triggering.toString()); deploymentQueue.addJob(triggering.application.id(), triggering.jobType, triggering.retry); return triggering.application.withJobTriggering(triggering.jobType, clock.instant(), triggering.application.deployVersionFor(triggering.jobType, controller), triggering.application.deployApplicationVersionFor(triggering.jobType, controller, false) .orElse(ApplicationVersion.unknown), triggering.reason); } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.change().isPresent() && ! 
application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.change() + " is already in progress"); application = application.withChange(change); if (change.application().isPresent()) application = application.withOutstandingChange(Change.empty()); application = trigger(new Triggering(application, JobType.systemTest, false, change.toString()), Collections.emptySet(), false); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) { applications().lockOrThrow(applicationId, application -> { applications().store(application.withChange(application.change().application() .map(Change::of) .filter(change -> keepApplicationChange) .orElse(Change.empty()))); if ( ! applications().require(applicationId).change().isPresent()) deploymentQueue.removeJobs(application.id()); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! 
jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** Create application version from job report */ private ApplicationVersion applicationVersionFrom(JobReport report) { return report.sourceRevision().map(sr -> ApplicationVersion.from(sr, report.buildNumber())) .orElse(ApplicationVersion.unknown); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.change().isPresent() && application.change().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (jobType.isProduction() && changeDeployed(application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } /** * Returns true if the previous job has completed successfully with a application version and/or Vespa version * which is newer (different) than the one last completed successfully in next */ private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.change().isPresent()) return false; if (next == null) return true; if (next.type().isTest()) { if ( ! lastSuccessfulIs(application.change(), previous.type(), application)) return false; if (lastSuccessfulIs(application.change(), next.type(), application)) return false; } else if (next.type().isProduction()) { if ( ! 
lastSuccessfulIs(application.change(), JobType.stagingTest, application)) return false; if (previous.type().isProduction() && ! changeDeployed(application, previous.type())) return false; if (changeDeployed(application, next.type())) return false; } else throw new IllegalStateException("Unclassified type of next job: " + next); return true; } /** Returns whether all production zones listed in deployment spec has this change (or a newer version, if upgrade) */ private boolean deploymentComplete(LockedApplication application) { return order.jobsFrom(application.deploymentSpec()).stream() .filter(JobType::isProduction) .filter(job -> job.zone(controller.system()).isPresent()) .allMatch(job -> changeDeployed(application, job)); } /** * Returns whether the given application should skip deployment of its current change to the given production job zone. * * If the currently deployed application has a newer platform or application version than the application's * current change, the method returns {@code true}, to avoid a downgrade. * Otherwise, it returns whether the current change is redundant, i.e., all its components are already deployed. */ private boolean changeDeployed(Application application, JobType job) { if ( ! job.isProduction()) throw new IllegalArgumentException(job + " is not a production job!"); Deployment deployment = application.deployments().get(job.zone(controller.system()).get()); if (deployment == null) return false; int applicationComparison = application.change().application() .map(version -> version.compareTo(deployment.applicationVersion())) .orElse(0); int platformComparion = application.change().platform() .map(version -> version.compareTo(deployment.version())) .orElse(0); if (applicationComparison == -1 || platformComparion == -1) return true; return applicationComparison == 0 && platformComparion == 0; } private boolean acceptNewApplicationVersionNow(LockedApplication application) { if ( ! 
application.change().isPresent()) return true; if (application.change().application().isPresent()) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Change change, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulRun = status.lastSuccess(); if ( ! lastSuccessfulRun.isPresent()) return false; if (change.platform().isPresent() && ! change.platform().get().equals(lastSuccessfulRun.get().version())) return false; if (change.application().isPresent() && ! change.application().get().equals(lastSuccessfulRun.get().applicationVersion())) return false; return true; } public static class Triggering { private final LockedApplication application; private final JobType jobType; private final boolean retry; private final String reason; public Triggering(LockedApplication application, JobType jobType, boolean retry, String reason) { this.application = application; this.jobType = jobType; this.retry = retry; this.reason = reason; } public LockedApplication application() { return application; } public JobType jobType() { return jobType; } public boolean isRetry() { return retry; } public String reason() { return reason; } public String toString() { return String.format("Triggering %s for %s, %s: %s", jobType, application, application.change().isPresent() ? "deploying " + application.change() : "restarted deployment", reason); } } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final DeploymentQueue deploymentQueue; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.deploymentQueue = new DeploymentQueue(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public DeploymentQueue deploymentQueue() { return deploymentQueue; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { ApplicationVersion applicationVersion = applicationVersionFrom(report); application = application.withJobCompletion(report, applicationVersion, clock.instant(), controller); application = application.withProjectId(report.projectId()); if (report.jobType() == JobType.component) { if (report.success()) { if ( ! 
acceptNewApplicationVersionNow(application)) { applications().store(application.withOutstandingChange(Change.of(applicationVersion))); return; } application = application.withChange(application.change().with(applicationVersion)); } else { applications().store(application); return; } } else if (report.jobType().isProduction() && deploymentComplete(application)) { application = application.withChange(Change.empty()); } if (report.success()) { triggerReadyJobs(application); return; } else if (retryBecauseOutOfCapacity(application, report.jobType())) { triggerReadyJobs(application); return; } else if (retryBecauseNewFailure(application, report.jobType())) { triggerReadyJobs(application); return; } applications().store(application); }); } /** * Find jobs that can and should run but are currently not. */ /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.change().platform().isPresent()) { Version target = application.change().platform().get(); if (systemTestStatus == null || ! systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! 
systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(new Triggering(application, JobType.systemTest, false, "Upgrade to " + target), Collections.emptySet(), false); applications().store(application); } } } for (JobType jobType : (Iterable<JobType>) Stream.concat(Stream.of(JobType.component), jobs.stream())::iterator) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; List<JobType> nextJobs = order.nextAfter(jobType, application); for (JobType nextJobType : nextJobs) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) { boolean isRetry = nextStatus != null && nextStatus.jobError().filter(JobError.outOfCapacity::equals).isPresent(); application = trigger(new Triggering(application, nextJobType, isRetry, isRetry ? "Retrying on out of capacity" : "Available change in " + jobType.jobName()), nextJobs, false); } } applications().store(application); } } /** * Trigger a job for an application, if allowed * * @param triggering the triggering to execute, i.e., application, job type and reason * @param concurrentlyWith production jobs that may run concurrently with the job to trigger * @param force true to disable checks which should normally prevent this triggering from happening * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication trigger(Triggering triggering, Collection<JobType> concurrentlyWith, boolean force) { if (triggering.jobType == null) return triggering.application; List<JobType> runningProductionJobs = JobList.from(triggering.application) .production() .running(jobTimeoutLimit()) .mapToList(JobStatus::type); if ( ! force && triggering.jobType().isProduction() && ! 
concurrentlyWith.containsAll(runningProductionJobs)) return triggering.application; if ( ! triggering.application.deploymentJobs().isDeployableTo(triggering.jobType.environment(), triggering.application.change())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", triggering.jobType, triggering.application, triggering.reason)); return triggering.application; } if ( ! force && ! allowedTriggering(triggering.jobType, triggering.application)) return triggering.application; log.info(triggering.toString()); deploymentQueue.addJob(triggering.application.id(), triggering.jobType, triggering.retry); return triggering.application.withJobTriggering(triggering.jobType, clock.instant(), triggering.application.deployVersionFor(triggering.jobType, controller), triggering.application.deployApplicationVersionFor(triggering.jobType, controller, false) .orElse(ApplicationVersion.unknown), triggering.reason); } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.change().isPresent() && ! 
application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.change() + " is already in progress"); application = application.withChange(change); if (change.application().isPresent()) application = application.withOutstandingChange(Change.empty()); application = trigger(new Triggering(application, JobType.systemTest, false, change.toString()), Collections.emptySet(), false); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) { applications().lockOrThrow(applicationId, application -> { applications().store(application.withChange(application.change().application() .map(Change::of) .filter(change -> keepApplicationChange) .orElse(Change.empty()))); if ( ! applications().require(applicationId).change().isPresent()) deploymentQueue.removeJobs(application.id()); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! 
jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** Create application version from job report */ private ApplicationVersion applicationVersionFrom(JobReport report) { return report.sourceRevision().map(sr -> ApplicationVersion.from(sr, report.buildNumber())) .orElse(ApplicationVersion.unknown); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.change().isPresent() && application.change().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (jobType.isProduction() && changeDeployed(application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } /** * Returns true if the previous job has completed successfully with a application version and/or Vespa version * which is newer (different) than the one last completed successfully in next */ private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.change().isPresent()) return false; if (next == null) return true; if (next.type().isTest()) { if ( ! lastSuccessfulIs(application.change(), previous.type(), application)) return false; if (lastSuccessfulIs(application.change(), next.type(), application)) return false; } else if (next.type().isProduction()) { if ( ! 
lastSuccessfulIs(application.change(), JobType.stagingTest, application)) return false; if (previous.type().isProduction() && ! changeDeployed(application, previous.type())) return false; if (changeDeployed(application, next.type())) return false; } else throw new IllegalStateException("Unclassified type of next job: " + next); return true; } /** Returns whether all production zones listed in deployment spec has this change (or a newer version, if upgrade) */ private boolean deploymentComplete(LockedApplication application) { return order.jobsFrom(application.deploymentSpec()).stream() .filter(JobType::isProduction) .filter(job -> job.zone(controller.system()).isPresent()) .allMatch(job -> changeDeployed(application, job)); } /** * Returns whether the given application should skip deployment of its current change to the given production job zone. * * If the currently deployed application has a newer platform or application version than the application's * current change, the method returns {@code true}, to avoid a downgrade. * Otherwise, it returns whether the current change is redundant, i.e., all its components are already deployed. */ private boolean changeDeployed(Application application, JobType job) { if ( ! job.isProduction()) throw new IllegalArgumentException(job + " is not a production job!"); Deployment deployment = application.deployments().get(job.zone(controller.system()).get()); if (deployment == null) return false; int applicationComparison = application.change().application() .map(version -> version.compareTo(deployment.applicationVersion())) .orElse(0); int platformComparion = application.change().platform() .map(version -> version.compareTo(deployment.version())) .orElse(0); if (applicationComparison == -1 || platformComparion == -1) return true; return applicationComparison == 0 && platformComparion == 0; } private boolean acceptNewApplicationVersionNow(LockedApplication application) { if ( ! 
application.change().isPresent()) return true; if (application.change().application().isPresent()) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Change change, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulRun = status.lastSuccess(); if ( ! lastSuccessfulRun.isPresent()) return false; if (change.platform().isPresent() && ! change.platform().get().equals(lastSuccessfulRun.get().version())) return false; if (change.application().isPresent() && ! change.application().get().equals(lastSuccessfulRun.get().applicationVersion())) return false; return true; } public static class Triggering { private final LockedApplication application; private final JobType jobType; private final boolean retry; private final String reason; public Triggering(LockedApplication application, JobType jobType, boolean retry, String reason) { this.application = application; this.jobType = jobType; this.retry = retry; this.reason = reason; } public LockedApplication application() { return application; } public JobType jobType() { return jobType; } public boolean isRetry() { return retry; } public String reason() { return reason; } public String toString() { return String.format("Triggering %s for %s, %s: %s", jobType, application, application.change().isPresent() ? "deploying " + application.change() : "restarted deployment", reason); } } }
The above blob was replaced by this :) Placeholder syntax, though -- I'll make it shine in the end.
public void triggerReadyJobs(LockedApplication application) { List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.change().platform().isPresent()) { Version target = application.change().platform().get(); if (systemTestStatus == null || ! systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(new Triggering(application, JobType.systemTest, false, "Upgrade to " + target), Collections.emptySet(), false); applications().store(application); } } } for (JobType jobType : (Iterable<JobType>) Stream.concat(Stream.of(JobType.component), jobs.stream())::iterator) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; List<JobType> nextJobs = order.nextAfter(jobType, application); for (JobType nextJobType : nextJobs) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) { boolean isRetry = nextStatus != null && nextStatus.jobError().filter(JobError.outOfCapacity::equals).isPresent(); application = trigger(new Triggering(application, nextJobType, isRetry, isRetry ? "Retrying on out of capacity" : "Available change in " + jobType.jobName()), nextJobs, false); } } applications().store(application); } }
for (JobType jobType : (Iterable<JobType>) Stream.concat(Stream.of(JobType.component), jobs.stream())::iterator) {
public void triggerReadyJobs(LockedApplication application) { List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.change().platform().isPresent()) { Version target = application.change().platform().get(); if (systemTestStatus == null || ! systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(new Triggering(application, JobType.systemTest, false, "Upgrade to " + target), Collections.emptySet(), false); applications().store(application); } } } for (JobType jobType : (Iterable<JobType>) Stream.concat(Stream.of(JobType.component), jobs.stream())::iterator) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; List<JobType> nextJobs = order.nextAfter(jobType, application); for (JobType nextJobType : nextJobs) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) { boolean isRetry = nextStatus != null && nextStatus.jobError().filter(JobError.outOfCapacity::equals).isPresent(); application = trigger(new Triggering(application, nextJobType, isRetry, isRetry ? "Retrying on out of capacity" : "Available change in " + jobType.jobName()), nextJobs, false); } } applications().store(application); } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final DeploymentQueue deploymentQueue; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.deploymentQueue = new DeploymentQueue(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public DeploymentQueue deploymentQueue() { return deploymentQueue; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { ApplicationVersion applicationVersion = applicationVersionFrom(report); application = application.withJobCompletion(report, applicationVersion, clock.instant(), controller); application = application.withProjectId(report.projectId()); if (report.jobType() == JobType.component) { if (report.success()) { if ( ! 
acceptNewApplicationVersionNow(application)) { applications().store(application.withOutstandingChange(Change.of(applicationVersion))); return; } application = application.withChange(application.change().with(applicationVersion)); } else { applications().store(application); return; } } else if (report.jobType().isProduction() && deploymentComplete(application)) { application = application.withChange(Change.empty()); } if (report.success()) { triggerReadyJobs(application); return; } else if (retryBecauseOutOfCapacity(application, report.jobType())) { triggerReadyJobs(application); return; } else if (retryBecauseNewFailure(application, report.jobType())) { triggerReadyJobs(application); return; } applications().store(application); }); } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ /** * Trigger a job for an application, if allowed * * @param triggering the triggering to execute, i.e., application, job type and reason * @param concurrentlyWith production jobs that may run concurrently with the job to trigger * @param force true to disable checks which should normally prevent this triggering from happening * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication trigger(Triggering triggering, Collection<JobType> concurrentlyWith, boolean force) { if (triggering.jobType == null) return triggering.application; List<JobType> runningProductionJobs = JobList.from(triggering.application) .production() .running(jobTimeoutLimit()) .mapToList(JobStatus::type); if ( ! force && triggering.jobType().isProduction() && ! 
concurrentlyWith.containsAll(runningProductionJobs)) return triggering.application; if ( ! triggering.application.deploymentJobs().isDeployableTo(triggering.jobType.environment(), triggering.application.change())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", triggering.jobType, triggering.application, triggering.reason)); return triggering.application; } if ( ! force && ! allowedTriggering(triggering.jobType, triggering.application)) return triggering.application; log.info(triggering.toString()); deploymentQueue.addJob(triggering.application.id(), triggering.jobType, triggering.retry); return triggering.application.withJobTriggering(triggering.jobType, clock.instant(), triggering.application.deployVersionFor(triggering.jobType, controller), triggering.application.deployApplicationVersionFor(triggering.jobType, controller, false) .orElse(ApplicationVersion.unknown), triggering.reason); } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.change().isPresent() && ! 
application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.change() + " is already in progress"); application = application.withChange(change); if (change.application().isPresent()) application = application.withOutstandingChange(Change.empty()); application = trigger(new Triggering(application, JobType.systemTest, false, change.toString()), Collections.emptySet(), false); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) { applications().lockOrThrow(applicationId, application -> { applications().store(application.withChange(application.change().application() .map(Change::of) .filter(change -> keepApplicationChange) .orElse(Change.empty()))); if ( ! applications().require(applicationId).change().isPresent()) deploymentQueue.removeJobs(application.id()); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! 
jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** Create application version from job report */ private ApplicationVersion applicationVersionFrom(JobReport report) { return report.sourceRevision().map(sr -> ApplicationVersion.from(sr, report.buildNumber())) .orElse(ApplicationVersion.unknown); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.change().isPresent() && application.change().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (jobType.isProduction() && changeDeployed(application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } /** * Returns true if the previous job has completed successfully with a application version and/or Vespa version * which is newer (different) than the one last completed successfully in next */ private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.change().isPresent()) return false; if (next == null) return true; if (next.type().isTest()) { if ( ! lastSuccessfulIs(application.change(), previous.type(), application)) return false; if (lastSuccessfulIs(application.change(), next.type(), application)) return false; } else if (next.type().isProduction()) { if ( ! 
lastSuccessfulIs(application.change(), JobType.stagingTest, application)) return false; if (previous.type().isProduction() && ! changeDeployed(application, previous.type())) return false; if (changeDeployed(application, next.type())) return false; } else throw new IllegalStateException("Unclassified type of next job: " + next); return true; } /** Returns whether all production zones listed in deployment spec has this change (or a newer version, if upgrade) */ private boolean deploymentComplete(LockedApplication application) { return order.jobsFrom(application.deploymentSpec()).stream() .filter(JobType::isProduction) .filter(job -> job.zone(controller.system()).isPresent()) .allMatch(job -> changeDeployed(application, job)); } /** * Returns whether the given application should skip deployment of its current change to the given production job zone. * * If the currently deployed application has a newer platform or application version than the application's * current change, the method returns {@code true}, to avoid a downgrade. * Otherwise, it returns whether the current change is redundant, i.e., all its components are already deployed. */ private boolean changeDeployed(Application application, JobType job) { if ( ! job.isProduction()) throw new IllegalArgumentException(job + " is not a production job!"); Deployment deployment = application.deployments().get(job.zone(controller.system()).get()); if (deployment == null) return false; int applicationComparison = application.change().application() .map(version -> version.compareTo(deployment.applicationVersion())) .orElse(0); int platformComparion = application.change().platform() .map(version -> version.compareTo(deployment.version())) .orElse(0); if (applicationComparison == -1 || platformComparion == -1) return true; return applicationComparison == 0 && platformComparion == 0; } private boolean acceptNewApplicationVersionNow(LockedApplication application) { if ( ! 
application.change().isPresent()) return true; if (application.change().application().isPresent()) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Change change, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulRun = status.lastSuccess(); if ( ! lastSuccessfulRun.isPresent()) return false; if (change.platform().isPresent() && ! change.platform().get().equals(lastSuccessfulRun.get().version())) return false; if (change.application().isPresent() && ! change.application().get().equals(lastSuccessfulRun.get().applicationVersion())) return false; return true; } public static class Triggering { private final LockedApplication application; private final JobType jobType; private final boolean retry; private final String reason; public Triggering(LockedApplication application, JobType jobType, boolean retry, String reason) { this.application = application; this.jobType = jobType; this.retry = retry; this.reason = reason; } public LockedApplication application() { return application; } public JobType jobType() { return jobType; } public boolean isRetry() { return retry; } public String reason() { return reason; } public String toString() { return String.format("Triggering %s for %s, %s: %s", jobType, application, application.change().isPresent() ? "deploying " + application.change() : "restarted deployment", reason); } } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final DeploymentQueue deploymentQueue; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.deploymentQueue = new DeploymentQueue(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public DeploymentQueue deploymentQueue() { return deploymentQueue; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { ApplicationVersion applicationVersion = applicationVersionFrom(report); application = application.withJobCompletion(report, applicationVersion, clock.instant(), controller); application = application.withProjectId(report.projectId()); if (report.jobType() == JobType.component) { if (report.success()) { if ( ! 
acceptNewApplicationVersionNow(application)) { applications().store(application.withOutstandingChange(Change.of(applicationVersion))); return; } application = application.withChange(application.change().with(applicationVersion)); } else { applications().store(application); return; } } else if (report.jobType().isProduction() && deploymentComplete(application)) { application = application.withChange(Change.empty()); } if (report.success()) { triggerReadyJobs(application); return; } else if (retryBecauseOutOfCapacity(application, report.jobType())) { triggerReadyJobs(application); return; } else if (retryBecauseNewFailure(application, report.jobType())) { triggerReadyJobs(application); return; } applications().store(application); }); } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ /** * Trigger a job for an application, if allowed * * @param triggering the triggering to execute, i.e., application, job type and reason * @param concurrentlyWith production jobs that may run concurrently with the job to trigger * @param force true to disable checks which should normally prevent this triggering from happening * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication trigger(Triggering triggering, Collection<JobType> concurrentlyWith, boolean force) { if (triggering.jobType == null) return triggering.application; List<JobType> runningProductionJobs = JobList.from(triggering.application) .production() .running(jobTimeoutLimit()) .mapToList(JobStatus::type); if ( ! force && triggering.jobType().isProduction() && ! 
concurrentlyWith.containsAll(runningProductionJobs)) return triggering.application; if ( ! triggering.application.deploymentJobs().isDeployableTo(triggering.jobType.environment(), triggering.application.change())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", triggering.jobType, triggering.application, triggering.reason)); return triggering.application; } if ( ! force && ! allowedTriggering(triggering.jobType, triggering.application)) return triggering.application; log.info(triggering.toString()); deploymentQueue.addJob(triggering.application.id(), triggering.jobType, triggering.retry); return triggering.application.withJobTriggering(triggering.jobType, clock.instant(), triggering.application.deployVersionFor(triggering.jobType, controller), triggering.application.deployApplicationVersionFor(triggering.jobType, controller, false) .orElse(ApplicationVersion.unknown), triggering.reason); } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.change().isPresent() && ! 
application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.change() + " is already in progress"); application = application.withChange(change); if (change.application().isPresent()) application = application.withOutstandingChange(Change.empty()); application = trigger(new Triggering(application, JobType.systemTest, false, change.toString()), Collections.emptySet(), false); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) { applications().lockOrThrow(applicationId, application -> { applications().store(application.withChange(application.change().application() .map(Change::of) .filter(change -> keepApplicationChange) .orElse(Change.empty()))); if ( ! applications().require(applicationId).change().isPresent()) deploymentQueue.removeJobs(application.id()); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! 
jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** Create application version from job report */ private ApplicationVersion applicationVersionFrom(JobReport report) { return report.sourceRevision().map(sr -> ApplicationVersion.from(sr, report.buildNumber())) .orElse(ApplicationVersion.unknown); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.change().isPresent() && application.change().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (jobType.isProduction() && changeDeployed(application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } /** * Returns true if the previous job has completed successfully with a application version and/or Vespa version * which is newer (different) than the one last completed successfully in next */ private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.change().isPresent()) return false; if (next == null) return true; if (next.type().isTest()) { if ( ! lastSuccessfulIs(application.change(), previous.type(), application)) return false; if (lastSuccessfulIs(application.change(), next.type(), application)) return false; } else if (next.type().isProduction()) { if ( ! 
lastSuccessfulIs(application.change(), JobType.stagingTest, application)) return false; if (previous.type().isProduction() && ! changeDeployed(application, previous.type())) return false; if (changeDeployed(application, next.type())) return false; } else throw new IllegalStateException("Unclassified type of next job: " + next); return true; } /** Returns whether all production zones listed in deployment spec has this change (or a newer version, if upgrade) */ private boolean deploymentComplete(LockedApplication application) { return order.jobsFrom(application.deploymentSpec()).stream() .filter(JobType::isProduction) .filter(job -> job.zone(controller.system()).isPresent()) .allMatch(job -> changeDeployed(application, job)); } /** * Returns whether the given application should skip deployment of its current change to the given production job zone. * * If the currently deployed application has a newer platform or application version than the application's * current change, the method returns {@code true}, to avoid a downgrade. * Otherwise, it returns whether the current change is redundant, i.e., all its components are already deployed. */ private boolean changeDeployed(Application application, JobType job) { if ( ! job.isProduction()) throw new IllegalArgumentException(job + " is not a production job!"); Deployment deployment = application.deployments().get(job.zone(controller.system()).get()); if (deployment == null) return false; int applicationComparison = application.change().application() .map(version -> version.compareTo(deployment.applicationVersion())) .orElse(0); int platformComparion = application.change().platform() .map(version -> version.compareTo(deployment.version())) .orElse(0); if (applicationComparison == -1 || platformComparion == -1) return true; return applicationComparison == 0 && platformComparion == 0; } private boolean acceptNewApplicationVersionNow(LockedApplication application) { if ( ! 
application.change().isPresent()) return true; if (application.change().application().isPresent()) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Change change, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulRun = status.lastSuccess(); if ( ! lastSuccessfulRun.isPresent()) return false; if (change.platform().isPresent() && ! change.platform().get().equals(lastSuccessfulRun.get().version())) return false; if (change.application().isPresent() && ! change.application().get().equals(lastSuccessfulRun.get().applicationVersion())) return false; return true; } public static class Triggering { private final LockedApplication application; private final JobType jobType; private final boolean retry; private final String reason; public Triggering(LockedApplication application, JobType jobType, boolean retry, String reason) { this.application = application; this.jobType = jobType; this.retry = retry; this.reason = reason; } public LockedApplication application() { return application; } public JobType jobType() { return jobType; } public boolean isRetry() { return retry; } public String reason() { return reason; } public String toString() { return String.format("Triggering %s for %s, %s: %s", jobType, application, application.change().isPresent() ? "deploying " + application.change() : "restarted deployment", reason); } } }
Any particular reason for the overloading here?
public void writeTenant(AthenzTenant tenant) { try { curator.set(tenantPath(tenant.name()), SlimeUtils.toJsonBytes(tenantSerializer.toSlime(tenant))); } catch (IOException e) { throw new UncheckedIOException("Failed to write " + tenant.toString(), e); } }
}
public void writeTenant(AthenzTenant tenant) { try { curator.set(tenantPath(tenant.name()), SlimeUtils.toJsonBytes(tenantSerializer.toSlime(tenant))); } catch (IOException e) { throw new UncheckedIOException("Failed to write " + tenant.toString(), e); } }
class CuratorDb { private static final Logger log = Logger.getLogger(CuratorDb.class.getName()); private static final Path root = Path.fromString("/controller/v1"); private static final Path lockRoot = root.append("locks"); private static final Path tenantRoot = root.append("tenants"); private static final Duration defaultLockTimeout = Duration.ofMinutes(5); private final StringSetSerializer stringSetSerializer = new StringSetSerializer(); private final JobQueueSerializer jobQueueSerializer = new JobQueueSerializer(); private final VersionStatusSerializer versionStatusSerializer = new VersionStatusSerializer(); private final ConfidenceOverrideSerializer confidenceOverrideSerializer = new ConfidenceOverrideSerializer(); private final TenantSerializer tenantSerializer = new TenantSerializer(); private final Curator curator; /** * All keys, to allow reentrancy. * This will grow forever, but this should be too slow to be a problem. */ private final ConcurrentHashMap<Path, Lock> locks = new ConcurrentHashMap<>(); @Inject public CuratorDb(Curator curator) { this.curator = curator; } public Lock lock(TenantName name, Duration timeout) { return lock(lockPath(name), timeout); } public Lock lock(ApplicationId id, Duration timeout) { return lock(lockPath(id), timeout); } public Lock lockRotations() { return lock(lockRoot.append("rotations"), defaultLockTimeout); } /** Create a reentrant lock */ private Lock lock(Path path, Duration timeout) { Lock lock = locks.computeIfAbsent(path, (pathArg) -> new Lock(pathArg.getAbsolute(), curator)); lock.acquire(timeout); return lock; } public Lock lockInactiveJobs() { return lock(lockRoot.append("inactiveJobsLock"), defaultLockTimeout); } public Lock lockJobQueues() { return lock(lockRoot.append("jobQueuesLock"), defaultLockTimeout); } public Lock lockMaintenanceJob(String jobName) { return lock(lockRoot.append("maintenanceJobLocks").append(jobName), Duration.ofSeconds(1)); } public Lock lockProvisionState(String provisionStateId) { 
return lock(lockPath(provisionStateId), Duration.ofSeconds(1)); } public Lock lockVespaServerPool() { return lock(lockRoot.append("vespaServerPoolLock"), Duration.ofSeconds(1)); } public Lock lockOpenStackServerPool() { return lock(lockRoot.append("openStackServerPoolLock"), Duration.ofSeconds(1)); } private Optional<Slime> readSlime(Path path) { return curator.getData(path).filter(data -> data.length > 0).map(SlimeUtils::jsonToSlime); } public Set<String> readInactiveJobs() { try { return readSlime(inactiveJobsPath()).map(stringSetSerializer::fromSlime).orElseGet(HashSet::new); } catch (RuntimeException e) { log.log(Level.WARNING, "Error reading inactive jobs, deleting inactive state"); writeInactiveJobs(Collections.emptySet()); return new HashSet<>(); } } public void writeInactiveJobs(Set<String> inactiveJobs) { curator.set(inactiveJobsPath(), stringSetSerializer.toJson(inactiveJobs)); } public Deque<ApplicationId> readJobQueue(DeploymentJobs.JobType jobType) { try { return readSlime(jobQueuePath(jobType)).map(jobQueueSerializer::fromSlime).orElseGet(ArrayDeque::new); } catch (RuntimeException e) { log.log(Level.WARNING, "Error reading job queue of type '" + jobType.jobName() + "'; deleting it."); writeJobQueue(jobType, Collections::emptyIterator); return new ArrayDeque<>(); } } public void writeJobQueue(DeploymentJobs.JobType jobType, Iterable<ApplicationId> queue) { curator.set(jobQueuePath(jobType), jobQueueSerializer.toJson(queue)); } public double readUpgradesPerMinute() { Optional<byte[]> n = curator.getData(upgradesPerMinutePath()); if ( ! 
n.isPresent() || n.get().length == 0) { return 0.5; } return ByteBuffer.wrap(n.get()).getDouble(); } public void writeUpgradesPerMinute(double n) { if (n < 0) { throw new IllegalArgumentException("Upgrades per minute must be >= 0"); } curator.set(upgradesPerMinutePath(), ByteBuffer.allocate(Double.BYTES).putDouble(n).array()); } public void writeVersionStatus(VersionStatus status) { try { curator.set(versionStatusPath(), SlimeUtils.toJsonBytes(versionStatusSerializer.toSlime(status))); } catch (IOException e) { throw new UncheckedIOException("Failed to serialize version status", e); } } public VersionStatus readVersionStatus() { return readSlime(versionStatusPath()).map(versionStatusSerializer::fromSlime).orElseGet(VersionStatus::empty); } public void writeConfidenceOverrides(Map<Version, VespaVersion.Confidence> overrides) { try { curator.set(confidenceOverridesPath(), SlimeUtils.toJsonBytes(confidenceOverrideSerializer.toSlime(overrides))); } catch (IOException e) { throw new UncheckedIOException("Failed to serialize confidence overrides", e); } } public Map<Version, VespaVersion.Confidence> readConfidenceOverrides() { return readSlime(confidenceOverridesPath()).map(confidenceOverrideSerializer::fromSlime) .orElseGet(Collections::emptyMap); } public void writeTenant(UserTenant tenant) { try { curator.set(tenantPath(tenant.name()), SlimeUtils.toJsonBytes(tenantSerializer.toSlime(tenant))); } catch (IOException e) { throw new UncheckedIOException("Failed to write " + tenant.toString(), e); } } public Optional<UserTenant> readUserTenant(TenantName name) { return readSlime(tenantPath(name)).map(tenantSerializer::userTenantFrom); } public Optional<AthenzTenant> readAthenzTenant(TenantName name) { return readSlime(tenantPath(name)).map(tenantSerializer::athenzTenantFrom); } public Optional<Tenant> readTenant(TenantName name) { if (name.value().startsWith(Tenant.userPrefix)) { return readUserTenant(name).map(Tenant.class::cast); } return 
readAthenzTenant(name).map(Tenant.class::cast); } public List<Tenant> readTenants() { return curator.getChildren(tenantRoot).stream() .map(TenantName::from) .map(this::readTenant) .filter(Optional::isPresent) .map(Optional::get) .collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList)); } public void removeTenant(TenantName name) { curator.delete(tenantPath(name)); } @SuppressWarnings("unused") public Optional<byte[]> readProvisionState(String provisionId) { return curator.getData(provisionStatePath(provisionId)); } @SuppressWarnings("unused") public void writeProvisionState(String provisionId, byte[] data) { curator.set(provisionStatePath(provisionId), data); } @SuppressWarnings("unused") public List<String> readProvisionStateIds() { return curator.getChildren(provisionStatePath()); } @SuppressWarnings("unused") public Optional<byte[]> readVespaServerPool() { return curator.getData(vespaServerPoolPath()); } @SuppressWarnings("unused") public void writeVespaServerPool(byte[] data) { curator.set(vespaServerPoolPath(), data); } @SuppressWarnings("unused") public Optional<byte[]> readOpenStackServerPool() { return curator.getData(openStackServerPoolPath()); } @SuppressWarnings("unused") public void writeOpenStackServerPool(byte[] data) { curator.set(openStackServerPoolPath(), data); } private Path lockPath(TenantName tenant) { Path lockPath = lockRoot .append(tenant.value()); curator.create(lockPath); return lockPath; } private Path lockPath(ApplicationId application) { Path lockPath = lockRoot .append(application.tenant().value()) .append(application.application().value()) .append(application.instance().value()); curator.create(lockPath); return lockPath; } private Path lockPath(String provisionId) { Path lockPath = lockRoot .append(provisionStatePath()) .append(provisionId); curator.create(lockPath); return lockPath; } private static Path inactiveJobsPath() { return root.append("inactiveJobs"); } private static Path 
jobQueuePath(DeploymentJobs.JobType jobType) { return root.append("jobQueues").append(jobType.name()); } private static Path upgradesPerMinutePath() { return root.append("upgrader").append("upgradesPerMinute"); } private static Path confidenceOverridesPath() { return root.append("upgrader").append("confidenceOverrides"); } private static Path versionStatusPath() { return root.append("versionStatus"); } private static Path provisionStatePath() { return root.append("provisioning").append("states"); } private static Path provisionStatePath(String provisionId) { return provisionStatePath().append(provisionId); } private static Path vespaServerPoolPath() { return root.append("vespaServerPool"); } private static Path openStackServerPoolPath() { return root.append("openStackServerPool"); } private static Path tenantPath(TenantName name) { return tenantRoot.append(name.value()); } }
class CuratorDb { private static final Logger log = Logger.getLogger(CuratorDb.class.getName()); private static final Path root = Path.fromString("/controller/v1"); private static final Path lockRoot = root.append("locks"); private static final Path tenantRoot = root.append("tenants"); private static final Duration defaultLockTimeout = Duration.ofMinutes(5); private final StringSetSerializer stringSetSerializer = new StringSetSerializer(); private final JobQueueSerializer jobQueueSerializer = new JobQueueSerializer(); private final VersionStatusSerializer versionStatusSerializer = new VersionStatusSerializer(); private final ConfidenceOverrideSerializer confidenceOverrideSerializer = new ConfidenceOverrideSerializer(); private final TenantSerializer tenantSerializer = new TenantSerializer(); private final Curator curator; /** * All keys, to allow reentrancy. * This will grow forever, but this should be too slow to be a problem. */ private final ConcurrentHashMap<Path, Lock> locks = new ConcurrentHashMap<>(); @Inject public CuratorDb(Curator curator) { this.curator = curator; } public Lock lock(TenantName name, Duration timeout) { return lock(lockPath(name), timeout); } public Lock lock(ApplicationId id, Duration timeout) { return lock(lockPath(id), timeout); } public Lock lockRotations() { return lock(lockRoot.append("rotations"), defaultLockTimeout); } /** Create a reentrant lock */ private Lock lock(Path path, Duration timeout) { Lock lock = locks.computeIfAbsent(path, (pathArg) -> new Lock(pathArg.getAbsolute(), curator)); lock.acquire(timeout); return lock; } public Lock lockInactiveJobs() { return lock(lockRoot.append("inactiveJobsLock"), defaultLockTimeout); } public Lock lockJobQueues() { return lock(lockRoot.append("jobQueuesLock"), defaultLockTimeout); } public Lock lockMaintenanceJob(String jobName) { return lock(lockRoot.append("maintenanceJobLocks").append(jobName), Duration.ofSeconds(1)); } public Lock lockProvisionState(String provisionStateId) { 
return lock(lockPath(provisionStateId), Duration.ofSeconds(1)); } public Lock lockVespaServerPool() { return lock(lockRoot.append("vespaServerPoolLock"), Duration.ofSeconds(1)); } public Lock lockOpenStackServerPool() { return lock(lockRoot.append("openStackServerPoolLock"), Duration.ofSeconds(1)); } private Optional<Slime> readSlime(Path path) { return curator.getData(path).filter(data -> data.length > 0).map(SlimeUtils::jsonToSlime); } public Set<String> readInactiveJobs() { try { return readSlime(inactiveJobsPath()).map(stringSetSerializer::fromSlime).orElseGet(HashSet::new); } catch (RuntimeException e) { log.log(Level.WARNING, "Error reading inactive jobs, deleting inactive state"); writeInactiveJobs(Collections.emptySet()); return new HashSet<>(); } } public void writeInactiveJobs(Set<String> inactiveJobs) { curator.set(inactiveJobsPath(), stringSetSerializer.toJson(inactiveJobs)); } public Deque<ApplicationId> readJobQueue(DeploymentJobs.JobType jobType) { try { return readSlime(jobQueuePath(jobType)).map(jobQueueSerializer::fromSlime).orElseGet(ArrayDeque::new); } catch (RuntimeException e) { log.log(Level.WARNING, "Error reading job queue of type '" + jobType.jobName() + "'; deleting it."); writeJobQueue(jobType, Collections::emptyIterator); return new ArrayDeque<>(); } } public void writeJobQueue(DeploymentJobs.JobType jobType, Iterable<ApplicationId> queue) { curator.set(jobQueuePath(jobType), jobQueueSerializer.toJson(queue)); } public double readUpgradesPerMinute() { Optional<byte[]> n = curator.getData(upgradesPerMinutePath()); if ( ! 
n.isPresent() || n.get().length == 0) { return 0.5; } return ByteBuffer.wrap(n.get()).getDouble(); } public void writeUpgradesPerMinute(double n) { if (n < 0) { throw new IllegalArgumentException("Upgrades per minute must be >= 0"); } curator.set(upgradesPerMinutePath(), ByteBuffer.allocate(Double.BYTES).putDouble(n).array()); } public void writeVersionStatus(VersionStatus status) { try { curator.set(versionStatusPath(), SlimeUtils.toJsonBytes(versionStatusSerializer.toSlime(status))); } catch (IOException e) { throw new UncheckedIOException("Failed to serialize version status", e); } } public VersionStatus readVersionStatus() { return readSlime(versionStatusPath()).map(versionStatusSerializer::fromSlime).orElseGet(VersionStatus::empty); } public void writeConfidenceOverrides(Map<Version, VespaVersion.Confidence> overrides) { try { curator.set(confidenceOverridesPath(), SlimeUtils.toJsonBytes(confidenceOverrideSerializer.toSlime(overrides))); } catch (IOException e) { throw new UncheckedIOException("Failed to serialize confidence overrides", e); } } public Map<Version, VespaVersion.Confidence> readConfidenceOverrides() { return readSlime(confidenceOverridesPath()).map(confidenceOverrideSerializer::fromSlime) .orElseGet(Collections::emptyMap); } public void writeTenant(UserTenant tenant) { try { curator.set(tenantPath(tenant.name()), SlimeUtils.toJsonBytes(tenantSerializer.toSlime(tenant))); } catch (IOException e) { throw new UncheckedIOException("Failed to write " + tenant.toString(), e); } } public Optional<UserTenant> readUserTenant(TenantName name) { return readSlime(tenantPath(name)).map(tenantSerializer::userTenantFrom); } public Optional<AthenzTenant> readAthenzTenant(TenantName name) { return readSlime(tenantPath(name)).map(tenantSerializer::athenzTenantFrom); } public Optional<Tenant> readTenant(TenantName name) { if (name.value().startsWith(Tenant.userPrefix)) { return readUserTenant(name).map(Tenant.class::cast); } return 
readAthenzTenant(name).map(Tenant.class::cast); } public List<Tenant> readTenants() { return curator.getChildren(tenantRoot).stream() .map(TenantName::from) .map(this::readTenant) .filter(Optional::isPresent) .map(Optional::get) .collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList)); } public void removeTenant(TenantName name) { curator.delete(tenantPath(name)); } @SuppressWarnings("unused") public Optional<byte[]> readProvisionState(String provisionId) { return curator.getData(provisionStatePath(provisionId)); } @SuppressWarnings("unused") public void writeProvisionState(String provisionId, byte[] data) { curator.set(provisionStatePath(provisionId), data); } @SuppressWarnings("unused") public List<String> readProvisionStateIds() { return curator.getChildren(provisionStatePath()); } @SuppressWarnings("unused") public Optional<byte[]> readVespaServerPool() { return curator.getData(vespaServerPoolPath()); } @SuppressWarnings("unused") public void writeVespaServerPool(byte[] data) { curator.set(vespaServerPoolPath(), data); } @SuppressWarnings("unused") public Optional<byte[]> readOpenStackServerPool() { return curator.getData(openStackServerPoolPath()); } @SuppressWarnings("unused") public void writeOpenStackServerPool(byte[] data) { curator.set(openStackServerPoolPath(), data); } private Path lockPath(TenantName tenant) { Path lockPath = lockRoot .append(tenant.value()); curator.create(lockPath); return lockPath; } private Path lockPath(ApplicationId application) { Path lockPath = lockRoot .append(application.tenant().value()) .append(application.application().value()) .append(application.instance().value()); curator.create(lockPath); return lockPath; } private Path lockPath(String provisionId) { Path lockPath = lockRoot .append(provisionStatePath()) .append(provisionId); curator.create(lockPath); return lockPath; } private static Path inactiveJobsPath() { return root.append("inactiveJobs"); } private static Path 
jobQueuePath(DeploymentJobs.JobType jobType) { return root.append("jobQueues").append(jobType.name()); } private static Path upgradesPerMinutePath() { return root.append("upgrader").append("upgradesPerMinute"); } private static Path confidenceOverridesPath() { return root.append("upgrader").append("confidenceOverrides"); } private static Path versionStatusPath() { return root.append("versionStatus"); } private static Path provisionStatePath() { return root.append("provisioning").append("states"); } private static Path provisionStatePath(String provisionId) { return provisionStatePath().append(provisionId); } private static Path vespaServerPoolPath() { return root.append("vespaServerPool"); } private static Path openStackServerPoolPath() { return root.append("openStackServerPool"); } private static Path tenantPath(TenantName name) { return tenantRoot.append(name.value()); } }
Checked again later.
public void triggerReadyJobs(LockedApplication application) { List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.change().platform().isPresent()) { Version target = application.change().platform().get(); if (systemTestStatus == null || ! systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(new Triggering(application, JobType.systemTest, false, "Upgrade to " + target), Collections.emptySet(), false); applications().store(application); } } } for (JobType jobType : (Iterable<JobType>) Stream.concat(Stream.of(JobType.component), jobs.stream())::iterator) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; List<JobType> nextJobs = order.nextAfter(jobType, application); for (JobType nextJobType : nextJobs) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) { boolean isRetry = nextStatus != null && nextStatus.jobError().filter(JobError.outOfCapacity::equals).isPresent(); application = trigger(new Triggering(application, nextJobType, isRetry, isRetry ? "Retrying on out of capacity" : "Available change in " + jobType.jobName()), nextJobs, false); } } applications().store(application); } }
for (JobType nextJobType : nextJobs) {
public void triggerReadyJobs(LockedApplication application) { List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.change().platform().isPresent()) { Version target = application.change().platform().get(); if (systemTestStatus == null || ! systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(new Triggering(application, JobType.systemTest, false, "Upgrade to " + target), Collections.emptySet(), false); applications().store(application); } } } for (JobType jobType : (Iterable<JobType>) Stream.concat(Stream.of(JobType.component), jobs.stream())::iterator) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; List<JobType> nextJobs = order.nextAfter(jobType, application); for (JobType nextJobType : nextJobs) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) { boolean isRetry = nextStatus != null && nextStatus.jobError().filter(JobError.outOfCapacity::equals).isPresent(); application = trigger(new Triggering(application, nextJobType, isRetry, isRetry ? "Retrying on out of capacity" : "Available change in " + jobType.jobName()), nextJobs, false); } } applications().store(application); } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final DeploymentQueue deploymentQueue; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.deploymentQueue = new DeploymentQueue(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public DeploymentQueue deploymentQueue() { return deploymentQueue; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { ApplicationVersion applicationVersion = applicationVersionFrom(report); application = application.withJobCompletion(report, applicationVersion, clock.instant(), controller); application = application.withProjectId(report.projectId()); if (report.jobType() == JobType.component) { if (report.success()) { if ( ! 
acceptNewApplicationVersionNow(application)) { applications().store(application.withOutstandingChange(Change.of(applicationVersion))); return; } application = application.withChange(application.change().with(applicationVersion)); } else { applications().store(application); return; } } else if (report.jobType().isProduction() && deploymentComplete(application)) { application = application.withChange(Change.empty()); } if (report.success()) { triggerReadyJobs(application); return; } else if (retryBecauseOutOfCapacity(application, report.jobType())) { triggerReadyJobs(application); return; } else if (retryBecauseNewFailure(application, report.jobType())) { triggerReadyJobs(application); return; } applications().store(application); }); } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ /** * Trigger a job for an application, if allowed * * @param triggering the triggering to execute, i.e., application, job type and reason * @param concurrentlyWith production jobs that may run concurrently with the job to trigger * @param force true to disable checks which should normally prevent this triggering from happening * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication trigger(Triggering triggering, Collection<JobType> concurrentlyWith, boolean force) { if (triggering.jobType == null) return triggering.application; List<JobType> runningProductionJobs = JobList.from(triggering.application) .production() .running(jobTimeoutLimit()) .mapToList(JobStatus::type); if ( ! force && triggering.jobType().isProduction() && ! 
concurrentlyWith.containsAll(runningProductionJobs)) return triggering.application; if ( ! triggering.application.deploymentJobs().isDeployableTo(triggering.jobType.environment(), triggering.application.change())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", triggering.jobType, triggering.application, triggering.reason)); return triggering.application; } if ( ! force && ! allowedTriggering(triggering.jobType, triggering.application)) return triggering.application; log.info(triggering.toString()); deploymentQueue.addJob(triggering.application.id(), triggering.jobType, triggering.retry); return triggering.application.withJobTriggering(triggering.jobType, clock.instant(), triggering.application.deployVersionFor(triggering.jobType, controller), triggering.application.deployApplicationVersionFor(triggering.jobType, controller, false) .orElse(ApplicationVersion.unknown), triggering.reason); } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.change().isPresent() && ! 
application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.change() + " is already in progress"); application = application.withChange(change); if (change.application().isPresent()) application = application.withOutstandingChange(Change.empty()); application = trigger(new Triggering(application, JobType.systemTest, false, change.toString()), Collections.emptySet(), false); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) { applications().lockOrThrow(applicationId, application -> { applications().store(application.withChange(application.change().application() .map(Change::of) .filter(change -> keepApplicationChange) .orElse(Change.empty()))); if ( ! applications().require(applicationId).change().isPresent()) deploymentQueue.removeJobs(application.id()); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! 
jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** Create application version from job report */ private ApplicationVersion applicationVersionFrom(JobReport report) { return report.sourceRevision().map(sr -> ApplicationVersion.from(sr, report.buildNumber())) .orElse(ApplicationVersion.unknown); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.change().isPresent() && application.change().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (jobType.isProduction() && changeDeployed(application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } /** * Returns true if the previous job has completed successfully with a application version and/or Vespa version * which is newer (different) than the one last completed successfully in next */ private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.change().isPresent()) return false; if (next == null) return true; if (next.type().isTest()) { if ( ! lastSuccessfulIs(application.change(), previous.type(), application)) return false; if (lastSuccessfulIs(application.change(), next.type(), application)) return false; } else if (next.type().isProduction()) { if ( ! 
lastSuccessfulIs(application.change(), JobType.stagingTest, application)) return false; if (previous.type().isProduction() && ! changeDeployed(application, previous.type())) return false; if (changeDeployed(application, next.type())) return false; } else throw new IllegalStateException("Unclassified type of next job: " + next); return true; } /** Returns whether all production zones listed in deployment spec has this change (or a newer version, if upgrade) */ private boolean deploymentComplete(LockedApplication application) { return order.jobsFrom(application.deploymentSpec()).stream() .filter(JobType::isProduction) .filter(job -> job.zone(controller.system()).isPresent()) .allMatch(job -> changeDeployed(application, job)); } /** * Returns whether the given application should skip deployment of its current change to the given production job zone. * * If the currently deployed application has a newer platform or application version than the application's * current change, the method returns {@code true}, to avoid a downgrade. * Otherwise, it returns whether the current change is redundant, i.e., all its components are already deployed. */ private boolean changeDeployed(Application application, JobType job) { if ( ! job.isProduction()) throw new IllegalArgumentException(job + " is not a production job!"); Deployment deployment = application.deployments().get(job.zone(controller.system()).get()); if (deployment == null) return false; int applicationComparison = application.change().application() .map(version -> version.compareTo(deployment.applicationVersion())) .orElse(0); int platformComparion = application.change().platform() .map(version -> version.compareTo(deployment.version())) .orElse(0); if (applicationComparison == -1 || platformComparion == -1) return true; return applicationComparison == 0 && platformComparion == 0; } private boolean acceptNewApplicationVersionNow(LockedApplication application) { if ( ! 
application.change().isPresent()) return true; if (application.change().application().isPresent()) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Change change, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulRun = status.lastSuccess(); if ( ! lastSuccessfulRun.isPresent()) return false; if (change.platform().isPresent() && ! change.platform().get().equals(lastSuccessfulRun.get().version())) return false; if (change.application().isPresent() && ! change.application().get().equals(lastSuccessfulRun.get().applicationVersion())) return false; return true; } public static class Triggering { private final LockedApplication application; private final JobType jobType; private final boolean retry; private final String reason; public Triggering(LockedApplication application, JobType jobType, boolean retry, String reason) { this.application = application; this.jobType = jobType; this.retry = retry; this.reason = reason; } public LockedApplication application() { return application; } public JobType jobType() { return jobType; } public boolean isRetry() { return retry; } public String reason() { return reason; } public String toString() { return String.format("Triggering %s for %s, %s: %s", jobType, application, application.change().isPresent() ? "deploying " + application.change() : "restarted deployment", reason); } } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final DeploymentQueue deploymentQueue; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.deploymentQueue = new DeploymentQueue(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public DeploymentQueue deploymentQueue() { return deploymentQueue; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { ApplicationVersion applicationVersion = applicationVersionFrom(report); application = application.withJobCompletion(report, applicationVersion, clock.instant(), controller); application = application.withProjectId(report.projectId()); if (report.jobType() == JobType.component) { if (report.success()) { if ( ! 
acceptNewApplicationVersionNow(application)) { applications().store(application.withOutstandingChange(Change.of(applicationVersion))); return; } application = application.withChange(application.change().with(applicationVersion)); } else { applications().store(application); return; } } else if (report.jobType().isProduction() && deploymentComplete(application)) { application = application.withChange(Change.empty()); } if (report.success()) { triggerReadyJobs(application); return; } else if (retryBecauseOutOfCapacity(application, report.jobType())) { triggerReadyJobs(application); return; } else if (retryBecauseNewFailure(application, report.jobType())) { triggerReadyJobs(application); return; } applications().store(application); }); } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ /** * Trigger a job for an application, if allowed * * @param triggering the triggering to execute, i.e., application, job type and reason * @param concurrentlyWith production jobs that may run concurrently with the job to trigger * @param force true to disable checks which should normally prevent this triggering from happening * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication trigger(Triggering triggering, Collection<JobType> concurrentlyWith, boolean force) { if (triggering.jobType == null) return triggering.application; List<JobType> runningProductionJobs = JobList.from(triggering.application) .production() .running(jobTimeoutLimit()) .mapToList(JobStatus::type); if ( ! force && triggering.jobType().isProduction() && ! 
concurrentlyWith.containsAll(runningProductionJobs)) return triggering.application; if ( ! triggering.application.deploymentJobs().isDeployableTo(triggering.jobType.environment(), triggering.application.change())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", triggering.jobType, triggering.application, triggering.reason)); return triggering.application; } if ( ! force && ! allowedTriggering(triggering.jobType, triggering.application)) return triggering.application; log.info(triggering.toString()); deploymentQueue.addJob(triggering.application.id(), triggering.jobType, triggering.retry); return triggering.application.withJobTriggering(triggering.jobType, clock.instant(), triggering.application.deployVersionFor(triggering.jobType, controller), triggering.application.deployApplicationVersionFor(triggering.jobType, controller, false) .orElse(ApplicationVersion.unknown), triggering.reason); } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.change().isPresent() && ! 
application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.change() + " is already in progress"); application = application.withChange(change); if (change.application().isPresent()) application = application.withOutstandingChange(Change.empty()); application = trigger(new Triggering(application, JobType.systemTest, false, change.toString()), Collections.emptySet(), false); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) { applications().lockOrThrow(applicationId, application -> { applications().store(application.withChange(application.change().application() .map(Change::of) .filter(change -> keepApplicationChange) .orElse(Change.empty()))); if ( ! applications().require(applicationId).change().isPresent()) deploymentQueue.removeJobs(application.id()); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! 
jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** Create application version from job report */ private ApplicationVersion applicationVersionFrom(JobReport report) { return report.sourceRevision().map(sr -> ApplicationVersion.from(sr, report.buildNumber())) .orElse(ApplicationVersion.unknown); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.change().isPresent() && application.change().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (jobType.isProduction() && changeDeployed(application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } /** * Returns true if the previous job has completed successfully with a application version and/or Vespa version * which is newer (different) than the one last completed successfully in next */ private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.change().isPresent()) return false; if (next == null) return true; if (next.type().isTest()) { if ( ! lastSuccessfulIs(application.change(), previous.type(), application)) return false; if (lastSuccessfulIs(application.change(), next.type(), application)) return false; } else if (next.type().isProduction()) { if ( ! 
lastSuccessfulIs(application.change(), JobType.stagingTest, application)) return false; if (previous.type().isProduction() && ! changeDeployed(application, previous.type())) return false; if (changeDeployed(application, next.type())) return false; } else throw new IllegalStateException("Unclassified type of next job: " + next); return true; } /** Returns whether all production zones listed in deployment spec has this change (or a newer version, if upgrade) */ private boolean deploymentComplete(LockedApplication application) { return order.jobsFrom(application.deploymentSpec()).stream() .filter(JobType::isProduction) .filter(job -> job.zone(controller.system()).isPresent()) .allMatch(job -> changeDeployed(application, job)); } /** * Returns whether the given application should skip deployment of its current change to the given production job zone. * * If the currently deployed application has a newer platform or application version than the application's * current change, the method returns {@code true}, to avoid a downgrade. * Otherwise, it returns whether the current change is redundant, i.e., all its components are already deployed. */ private boolean changeDeployed(Application application, JobType job) { if ( ! job.isProduction()) throw new IllegalArgumentException(job + " is not a production job!"); Deployment deployment = application.deployments().get(job.zone(controller.system()).get()); if (deployment == null) return false; int applicationComparison = application.change().application() .map(version -> version.compareTo(deployment.applicationVersion())) .orElse(0); int platformComparion = application.change().platform() .map(version -> version.compareTo(deployment.version())) .orElse(0); if (applicationComparison == -1 || platformComparion == -1) return true; return applicationComparison == 0 && platformComparion == 0; } private boolean acceptNewApplicationVersionNow(LockedApplication application) { if ( ! 
application.change().isPresent()) return true; if (application.change().application().isPresent()) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Change change, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulRun = status.lastSuccess(); if ( ! lastSuccessfulRun.isPresent()) return false; if (change.platform().isPresent() && ! change.platform().get().equals(lastSuccessfulRun.get().version())) return false; if (change.application().isPresent() && ! change.application().get().equals(lastSuccessfulRun.get().applicationVersion())) return false; return true; } public static class Triggering { private final LockedApplication application; private final JobType jobType; private final boolean retry; private final String reason; public Triggering(LockedApplication application, JobType jobType, boolean retry, String reason) { this.application = application; this.jobType = jobType; this.retry = retry; this.reason = reason; } public LockedApplication application() { return application; } public JobType jobType() { return jobType; } public boolean isRetry() { return retry; } public String reason() { return reason; } public String toString() { return String.format("Triggering %s for %s, %s: %s", jobType, application, application.change().isPresent() ? "deploying " + application.change() : "restarted deployment", reason); } } }
OK, I'm starting to like this indentation.
public LockedApplication trigger(Triggering triggering, Collection<JobType> concurrentlyWith, boolean force) { if (triggering.jobType == null) return triggering.application; List<JobType> runningProductionJobs = JobList.from(triggering.application) .production() .running(jobTimeoutLimit()) .mapToList(JobStatus::type); if ( ! force && triggering.jobType().isProduction() && ! concurrentlyWith.containsAll(runningProductionJobs)) return triggering.application; if ( ! triggering.application.deploymentJobs().isDeployableTo(triggering.jobType.environment(), triggering.application.change())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", triggering.jobType, triggering.application, triggering.reason)); return triggering.application; } if ( ! force && ! allowedTriggering(triggering.jobType, triggering.application)) return triggering.application; log.info(triggering.toString()); deploymentQueue.addJob(triggering.application.id(), triggering.jobType, triggering.retry); return triggering.application.withJobTriggering(triggering.jobType, clock.instant(), triggering.application.deployVersionFor(triggering.jobType, controller), triggering.application.deployApplicationVersionFor(triggering.jobType, controller, false) .orElse(ApplicationVersion.unknown), triggering.reason); }
triggering.reason);
public LockedApplication trigger(Triggering triggering, Collection<JobType> concurrentlyWith, boolean force) { if (triggering.jobType == null) return triggering.application; List<JobType> runningProductionJobs = JobList.from(triggering.application) .production() .running(jobTimeoutLimit()) .mapToList(JobStatus::type); if ( ! force && triggering.jobType().isProduction() && ! concurrentlyWith.containsAll(runningProductionJobs)) return triggering.application; if ( ! triggering.application.deploymentJobs().isDeployableTo(triggering.jobType.environment(), triggering.application.change())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", triggering.jobType, triggering.application, triggering.reason)); return triggering.application; } if ( ! force && ! allowedTriggering(triggering.jobType, triggering.application)) return triggering.application; log.info(triggering.toString()); deploymentQueue.addJob(triggering.application.id(), triggering.jobType, triggering.retry); return triggering.application.withJobTriggering(triggering.jobType, clock.instant(), triggering.application.deployVersionFor(triggering.jobType, controller), triggering.application.deployApplicationVersionFor(triggering.jobType, controller, false) .orElse(ApplicationVersion.unknown), triggering.reason); }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final DeploymentQueue deploymentQueue; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.deploymentQueue = new DeploymentQueue(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public DeploymentQueue deploymentQueue() { return deploymentQueue; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { ApplicationVersion applicationVersion = applicationVersionFrom(report); application = application.withJobCompletion(report, applicationVersion, clock.instant(), controller); application = application.withProjectId(report.projectId()); if (report.jobType() == JobType.component) { if (report.success()) { if ( ! 
acceptNewApplicationVersionNow(application)) { applications().store(application.withOutstandingChange(Change.of(applicationVersion))); return; } application = application.withChange(application.change().with(applicationVersion)); } else { applications().store(application); return; } } else if (report.jobType().isProduction() && deploymentComplete(application)) { application = application.withChange(Change.empty()); } if (report.success()) { triggerReadyJobs(application); return; } else if (retryBecauseOutOfCapacity(application, report.jobType())) { triggerReadyJobs(application); return; } else if (retryBecauseNewFailure(application, report.jobType())) { triggerReadyJobs(application); return; } applications().store(application); }); } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.change().platform().isPresent()) { Version target = application.change().platform().get(); if (systemTestStatus == null || ! systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! 
systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(new Triggering(application, JobType.systemTest, false, "Upgrade to " + target), Collections.emptySet(), false); applications().store(application); } } } for (JobType jobType : (Iterable<JobType>) Stream.concat(Stream.of(JobType.component), jobs.stream())::iterator) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; List<JobType> nextJobs = order.nextAfter(jobType, application); for (JobType nextJobType : nextJobs) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) { boolean isRetry = nextStatus != null && nextStatus.jobError().filter(JobError.outOfCapacity::equals).isPresent(); application = trigger(new Triggering(application, nextJobType, isRetry, isRetry ? "Retrying on out of capacity" : "Available change in " + jobType.jobName()), nextJobs, false); } } applications().store(application); } } /** * Trigger a job for an application, if allowed * * @param triggering the triggering to execute, i.e., application, job type and reason * @param concurrentlyWith production jobs that may run concurrently with the job to trigger * @param force true to disable checks which should normally prevent this triggering from happening * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.change().isPresent() && ! 
application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.change() + " is already in progress"); application = application.withChange(change); if (change.application().isPresent()) application = application.withOutstandingChange(Change.empty()); application = trigger(new Triggering(application, JobType.systemTest, false, change.toString()), Collections.emptySet(), false); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) { applications().lockOrThrow(applicationId, application -> { applications().store(application.withChange(application.change().application() .map(Change::of) .filter(change -> keepApplicationChange) .orElse(Change.empty()))); if ( ! applications().require(applicationId).change().isPresent()) deploymentQueue.removeJobs(application.id()); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! 
jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** Create application version from job report */ private ApplicationVersion applicationVersionFrom(JobReport report) { return report.sourceRevision().map(sr -> ApplicationVersion.from(sr, report.buildNumber())) .orElse(ApplicationVersion.unknown); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.change().isPresent() && application.change().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (jobType.isProduction() && changeDeployed(application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } /** * Returns true if the previous job has completed successfully with a application version and/or Vespa version * which is newer (different) than the one last completed successfully in next */ private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.change().isPresent()) return false; if (next == null) return true; if (next.type().isTest()) { if ( ! lastSuccessfulIs(application.change(), previous.type(), application)) return false; if (lastSuccessfulIs(application.change(), next.type(), application)) return false; } else if (next.type().isProduction()) { if ( ! 
lastSuccessfulIs(application.change(), JobType.stagingTest, application)) return false; if (previous.type().isProduction() && ! changeDeployed(application, previous.type())) return false; if (changeDeployed(application, next.type())) return false; } else throw new IllegalStateException("Unclassified type of next job: " + next); return true; } /** Returns whether all production zones listed in deployment spec has this change (or a newer version, if upgrade) */ private boolean deploymentComplete(LockedApplication application) { return order.jobsFrom(application.deploymentSpec()).stream() .filter(JobType::isProduction) .filter(job -> job.zone(controller.system()).isPresent()) .allMatch(job -> changeDeployed(application, job)); } /** * Returns whether the given application should skip deployment of its current change to the given production job zone. * * If the currently deployed application has a newer platform or application version than the application's * current change, the method returns {@code true}, to avoid a downgrade. * Otherwise, it returns whether the current change is redundant, i.e., all its components are already deployed. */ private boolean changeDeployed(Application application, JobType job) { if ( ! job.isProduction()) throw new IllegalArgumentException(job + " is not a production job!"); Deployment deployment = application.deployments().get(job.zone(controller.system()).get()); if (deployment == null) return false; int applicationComparison = application.change().application() .map(version -> version.compareTo(deployment.applicationVersion())) .orElse(0); int platformComparion = application.change().platform() .map(version -> version.compareTo(deployment.version())) .orElse(0); if (applicationComparison == -1 || platformComparion == -1) return true; return applicationComparison == 0 && platformComparion == 0; } private boolean acceptNewApplicationVersionNow(LockedApplication application) { if ( ! 
application.change().isPresent()) return true; if (application.change().application().isPresent()) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Change change, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulRun = status.lastSuccess(); if ( ! lastSuccessfulRun.isPresent()) return false; if (change.platform().isPresent() && ! change.platform().get().equals(lastSuccessfulRun.get().version())) return false; if (change.application().isPresent() && ! change.application().get().equals(lastSuccessfulRun.get().applicationVersion())) return false; return true; } public static class Triggering { private final LockedApplication application; private final JobType jobType; private final boolean retry; private final String reason; public Triggering(LockedApplication application, JobType jobType, boolean retry, String reason) { this.application = application; this.jobType = jobType; this.retry = retry; this.reason = reason; } public LockedApplication application() { return application; } public JobType jobType() { return jobType; } public boolean isRetry() { return retry; } public String reason() { return reason; } public String toString() { return String.format("Triggering %s for %s, %s: %s", jobType, application, application.change().isPresent() ? "deploying " + application.change() : "restarted deployment", reason); } } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final DeploymentQueue deploymentQueue; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.deploymentQueue = new DeploymentQueue(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public DeploymentQueue deploymentQueue() { return deploymentQueue; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { ApplicationVersion applicationVersion = applicationVersionFrom(report); application = application.withJobCompletion(report, applicationVersion, clock.instant(), controller); application = application.withProjectId(report.projectId()); if (report.jobType() == JobType.component) { if (report.success()) { if ( ! 
acceptNewApplicationVersionNow(application)) { applications().store(application.withOutstandingChange(Change.of(applicationVersion))); return; } application = application.withChange(application.change().with(applicationVersion)); } else { applications().store(application); return; } } else if (report.jobType().isProduction() && deploymentComplete(application)) { application = application.withChange(Change.empty()); } if (report.success()) { triggerReadyJobs(application); return; } else if (retryBecauseOutOfCapacity(application, report.jobType())) { triggerReadyJobs(application); return; } else if (retryBecauseNewFailure(application, report.jobType())) { triggerReadyJobs(application); return; } applications().store(application); }); } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.change().platform().isPresent()) { Version target = application.change().platform().get(); if (systemTestStatus == null || ! systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! 
systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(new Triggering(application, JobType.systemTest, false, "Upgrade to " + target), Collections.emptySet(), false); applications().store(application); } } } for (JobType jobType : (Iterable<JobType>) Stream.concat(Stream.of(JobType.component), jobs.stream())::iterator) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; List<JobType> nextJobs = order.nextAfter(jobType, application); for (JobType nextJobType : nextJobs) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) { boolean isRetry = nextStatus != null && nextStatus.jobError().filter(JobError.outOfCapacity::equals).isPresent(); application = trigger(new Triggering(application, nextJobType, isRetry, isRetry ? "Retrying on out of capacity" : "Available change in " + jobType.jobName()), nextJobs, false); } } applications().store(application); } } /** * Trigger a job for an application, if allowed * * @param triggering the triggering to execute, i.e., application, job type and reason * @param concurrentlyWith production jobs that may run concurrently with the job to trigger * @param force true to disable checks which should normally prevent this triggering from happening * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.change().isPresent() && ! 
application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.change() + " is already in progress"); application = application.withChange(change); if (change.application().isPresent()) application = application.withOutstandingChange(Change.empty()); application = trigger(new Triggering(application, JobType.systemTest, false, change.toString()), Collections.emptySet(), false); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) { applications().lockOrThrow(applicationId, application -> { applications().store(application.withChange(application.change().application() .map(Change::of) .filter(change -> keepApplicationChange) .orElse(Change.empty()))); if ( ! applications().require(applicationId).change().isPresent()) deploymentQueue.removeJobs(application.id()); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! 
jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** Create application version from job report */ private ApplicationVersion applicationVersionFrom(JobReport report) { return report.sourceRevision().map(sr -> ApplicationVersion.from(sr, report.buildNumber())) .orElse(ApplicationVersion.unknown); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.change().isPresent() && application.change().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (jobType.isProduction() && changeDeployed(application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } /** * Returns true if the previous job has completed successfully with a application version and/or Vespa version * which is newer (different) than the one last completed successfully in next */ private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.change().isPresent()) return false; if (next == null) return true; if (next.type().isTest()) { if ( ! lastSuccessfulIs(application.change(), previous.type(), application)) return false; if (lastSuccessfulIs(application.change(), next.type(), application)) return false; } else if (next.type().isProduction()) { if ( ! 
lastSuccessfulIs(application.change(), JobType.stagingTest, application)) return false; if (previous.type().isProduction() && ! changeDeployed(application, previous.type())) return false; if (changeDeployed(application, next.type())) return false; } else throw new IllegalStateException("Unclassified type of next job: " + next); return true; } /** Returns whether all production zones listed in deployment spec has this change (or a newer version, if upgrade) */ private boolean deploymentComplete(LockedApplication application) { return order.jobsFrom(application.deploymentSpec()).stream() .filter(JobType::isProduction) .filter(job -> job.zone(controller.system()).isPresent()) .allMatch(job -> changeDeployed(application, job)); } /** * Returns whether the given application should skip deployment of its current change to the given production job zone. * * If the currently deployed application has a newer platform or application version than the application's * current change, the method returns {@code true}, to avoid a downgrade. * Otherwise, it returns whether the current change is redundant, i.e., all its components are already deployed. */ private boolean changeDeployed(Application application, JobType job) { if ( ! job.isProduction()) throw new IllegalArgumentException(job + " is not a production job!"); Deployment deployment = application.deployments().get(job.zone(controller.system()).get()); if (deployment == null) return false; int applicationComparison = application.change().application() .map(version -> version.compareTo(deployment.applicationVersion())) .orElse(0); int platformComparion = application.change().platform() .map(version -> version.compareTo(deployment.version())) .orElse(0); if (applicationComparison == -1 || platformComparion == -1) return true; return applicationComparison == 0 && platformComparion == 0; } private boolean acceptNewApplicationVersionNow(LockedApplication application) { if ( ! 
application.change().isPresent()) return true; if (application.change().application().isPresent()) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Change change, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulRun = status.lastSuccess(); if ( ! lastSuccessfulRun.isPresent()) return false; if (change.platform().isPresent() && ! change.platform().get().equals(lastSuccessfulRun.get().version())) return false; if (change.application().isPresent() && ! change.application().get().equals(lastSuccessfulRun.get().applicationVersion())) return false; return true; } public static class Triggering { private final LockedApplication application; private final JobType jobType; private final boolean retry; private final String reason; public Triggering(LockedApplication application, JobType jobType, boolean retry, String reason) { this.application = application; this.jobType = jobType; this.retry = retry; this.reason = reason; } public LockedApplication application() { return application; } public JobType jobType() { return jobType; } public boolean isRetry() { return retry; } public String reason() { return reason; } public String toString() { return String.format("Triggering %s for %s, %s: %s", jobType, application, application.change().isPresent() ? "deploying " + application.change() : "restarted deployment", reason); } } }
It would be great if we could store in only one place...
public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { ApplicationVersion applicationVersion = applicationVersionFrom(report); application = application.withJobCompletion(report, applicationVersion, clock.instant(), controller); application = application.withProjectId(report.projectId()); if (report.jobType() == JobType.component) { if (report.success()) { if ( ! acceptNewApplicationVersionNow(application)) { applications().store(application.withOutstandingChange(Change.of(applicationVersion))); return; } application = application.withChange(application.change().with(applicationVersion)); } else { applications().store(application); return; } } else if (report.jobType().isProduction() && deploymentComplete(application)) { application = application.withChange(Change.empty()); } if (report.success()) { triggerReadyJobs(application); return; } else if (retryBecauseOutOfCapacity(application, report.jobType())) { triggerReadyJobs(application); return; } else if (retryBecauseNewFailure(application, report.jobType())) { triggerReadyJobs(application); return; } applications().store(application); }); }
return;
public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { ApplicationVersion applicationVersion = applicationVersionFrom(report); application = application.withJobCompletion(report, applicationVersion, clock.instant(), controller); application = application.withProjectId(report.projectId()); if (report.jobType() == JobType.component) { if (report.success()) { if ( ! acceptNewApplicationVersionNow(application)) { applications().store(application.withOutstandingChange(Change.of(applicationVersion))); return; } application = application.withChange(application.change().with(applicationVersion)); } else { applications().store(application); return; } } else if (report.jobType().isProduction() && deploymentComplete(application)) { application = application.withChange(Change.empty()); } if (report.success()) { triggerReadyJobs(application); return; } else if (retryBecauseOutOfCapacity(application, report.jobType())) { triggerReadyJobs(application); return; } else if (retryBecauseNewFailure(application, report.jobType())) { triggerReadyJobs(application); return; } applications().store(application); }); }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final DeploymentQueue deploymentQueue; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.deploymentQueue = new DeploymentQueue(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public DeploymentQueue deploymentQueue() { return deploymentQueue; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! 
jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.change().platform().isPresent()) { Version target = application.change().platform().get(); if (systemTestStatus == null || ! systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(new Triggering(application, JobType.systemTest, false, "Upgrade to " + target), Collections.emptySet(), false); applications().store(application); } } } for (JobType jobType : (Iterable<JobType>) Stream.concat(Stream.of(JobType.component), jobs.stream())::iterator) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; List<JobType> nextJobs = order.nextAfter(jobType, application); for (JobType nextJobType : nextJobs) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) { boolean isRetry = nextStatus != null && nextStatus.jobError().filter(JobError.outOfCapacity::equals).isPresent(); application = trigger(new Triggering(application, nextJobType, isRetry, isRetry ? "Retrying on out of capacity" : "Available change in " + jobType.jobName()), nextJobs, false); } } applications().store(application); } } /** * Trigger a job for an application, if allowed * * @param triggering the triggering to execute, i.e., application, job type and reason * @param concurrentlyWith production jobs that may run concurrently with the job to trigger * @param force true to disable checks which should normally prevent this triggering from happening * @return the application in the triggered state, if actually triggered. 
This *must* be stored by the caller */ public LockedApplication trigger(Triggering triggering, Collection<JobType> concurrentlyWith, boolean force) { if (triggering.jobType == null) return triggering.application; List<JobType> runningProductionJobs = JobList.from(triggering.application) .production() .running(jobTimeoutLimit()) .mapToList(JobStatus::type); if ( ! force && triggering.jobType().isProduction() && ! concurrentlyWith.containsAll(runningProductionJobs)) return triggering.application; if ( ! triggering.application.deploymentJobs().isDeployableTo(triggering.jobType.environment(), triggering.application.change())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", triggering.jobType, triggering.application, triggering.reason)); return triggering.application; } if ( ! force && ! allowedTriggering(triggering.jobType, triggering.application)) return triggering.application; log.info(triggering.toString()); deploymentQueue.addJob(triggering.application.id(), triggering.jobType, triggering.retry); return triggering.application.withJobTriggering(triggering.jobType, clock.instant(), triggering.application.deployVersionFor(triggering.jobType, controller), triggering.application.deployApplicationVersionFor(triggering.jobType, controller, false) .orElse(ApplicationVersion.unknown), triggering.reason); } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.change().isPresent() && ! 
application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.change() + " is already in progress"); application = application.withChange(change); if (change.application().isPresent()) application = application.withOutstandingChange(Change.empty()); application = trigger(new Triggering(application, JobType.systemTest, false, change.toString()), Collections.emptySet(), false); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) { applications().lockOrThrow(applicationId, application -> { applications().store(application.withChange(application.change().application() .map(Change::of) .filter(change -> keepApplicationChange) .orElse(Change.empty()))); if ( ! applications().require(applicationId).change().isPresent()) deploymentQueue.removeJobs(application.id()); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! 
jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** Create application version from job report */ private ApplicationVersion applicationVersionFrom(JobReport report) { return report.sourceRevision().map(sr -> ApplicationVersion.from(sr, report.buildNumber())) .orElse(ApplicationVersion.unknown); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.change().isPresent() && application.change().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (jobType.isProduction() && changeDeployed(application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } /** * Returns true if the previous job has completed successfully with a application version and/or Vespa version * which is newer (different) than the one last completed successfully in next */ private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.change().isPresent()) return false; if (next == null) return true; if (next.type().isTest()) { if ( ! lastSuccessfulIs(application.change(), previous.type(), application)) return false; if (lastSuccessfulIs(application.change(), next.type(), application)) return false; } else if (next.type().isProduction()) { if ( ! 
lastSuccessfulIs(application.change(), JobType.stagingTest, application)) return false; if (previous.type().isProduction() && ! changeDeployed(application, previous.type())) return false; if (changeDeployed(application, next.type())) return false; } else throw new IllegalStateException("Unclassified type of next job: " + next); return true; } /** Returns whether all production zones listed in deployment spec has this change (or a newer version, if upgrade) */ private boolean deploymentComplete(LockedApplication application) { return order.jobsFrom(application.deploymentSpec()).stream() .filter(JobType::isProduction) .filter(job -> job.zone(controller.system()).isPresent()) .allMatch(job -> changeDeployed(application, job)); } /** * Returns whether the given application should skip deployment of its current change to the given production job zone. * * If the currently deployed application has a newer platform or application version than the application's * current change, the method returns {@code true}, to avoid a downgrade. * Otherwise, it returns whether the current change is redundant, i.e., all its components are already deployed. */ private boolean changeDeployed(Application application, JobType job) { if ( ! job.isProduction()) throw new IllegalArgumentException(job + " is not a production job!"); Deployment deployment = application.deployments().get(job.zone(controller.system()).get()); if (deployment == null) return false; int applicationComparison = application.change().application() .map(version -> version.compareTo(deployment.applicationVersion())) .orElse(0); int platformComparion = application.change().platform() .map(version -> version.compareTo(deployment.version())) .orElse(0); if (applicationComparison == -1 || platformComparion == -1) return true; return applicationComparison == 0 && platformComparion == 0; } private boolean acceptNewApplicationVersionNow(LockedApplication application) { if ( ! 
application.change().isPresent()) return true; if (application.change().application().isPresent()) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Change change, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulRun = status.lastSuccess(); if ( ! lastSuccessfulRun.isPresent()) return false; if (change.platform().isPresent() && ! change.platform().get().equals(lastSuccessfulRun.get().version())) return false; if (change.application().isPresent() && ! change.application().get().equals(lastSuccessfulRun.get().applicationVersion())) return false; return true; } public static class Triggering { private final LockedApplication application; private final JobType jobType; private final boolean retry; private final String reason; public Triggering(LockedApplication application, JobType jobType, boolean retry, String reason) { this.application = application; this.jobType = jobType; this.retry = retry; this.reason = reason; } public LockedApplication application() { return application; } public JobType jobType() { return jobType; } public boolean isRetry() { return retry; } public String reason() { return reason; } public String toString() { return String.format("Triggering %s for %s, %s: %s", jobType, application, application.change().isPresent() ? "deploying " + application.change() : "restarted deployment", reason); } } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final DeploymentQueue deploymentQueue; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.deploymentQueue = new DeploymentQueue(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public DeploymentQueue deploymentQueue() { return deploymentQueue; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! 
jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.change().platform().isPresent()) { Version target = application.change().platform().get(); if (systemTestStatus == null || ! systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(new Triggering(application, JobType.systemTest, false, "Upgrade to " + target), Collections.emptySet(), false); applications().store(application); } } } for (JobType jobType : (Iterable<JobType>) Stream.concat(Stream.of(JobType.component), jobs.stream())::iterator) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; List<JobType> nextJobs = order.nextAfter(jobType, application); for (JobType nextJobType : nextJobs) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) { boolean isRetry = nextStatus != null && nextStatus.jobError().filter(JobError.outOfCapacity::equals).isPresent(); application = trigger(new Triggering(application, nextJobType, isRetry, isRetry ? "Retrying on out of capacity" : "Available change in " + jobType.jobName()), nextJobs, false); } } applications().store(application); } } /** * Trigger a job for an application, if allowed * * @param triggering the triggering to execute, i.e., application, job type and reason * @param concurrentlyWith production jobs that may run concurrently with the job to trigger * @param force true to disable checks which should normally prevent this triggering from happening * @return the application in the triggered state, if actually triggered. 
This *must* be stored by the caller */ public LockedApplication trigger(Triggering triggering, Collection<JobType> concurrentlyWith, boolean force) { if (triggering.jobType == null) return triggering.application; List<JobType> runningProductionJobs = JobList.from(triggering.application) .production() .running(jobTimeoutLimit()) .mapToList(JobStatus::type); if ( ! force && triggering.jobType().isProduction() && ! concurrentlyWith.containsAll(runningProductionJobs)) return triggering.application; if ( ! triggering.application.deploymentJobs().isDeployableTo(triggering.jobType.environment(), triggering.application.change())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", triggering.jobType, triggering.application, triggering.reason)); return triggering.application; } if ( ! force && ! allowedTriggering(triggering.jobType, triggering.application)) return triggering.application; log.info(triggering.toString()); deploymentQueue.addJob(triggering.application.id(), triggering.jobType, triggering.retry); return triggering.application.withJobTriggering(triggering.jobType, clock.instant(), triggering.application.deployVersionFor(triggering.jobType, controller), triggering.application.deployApplicationVersionFor(triggering.jobType, controller, false) .orElse(ApplicationVersion.unknown), triggering.reason); } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.change().isPresent() && ! 
application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.change() + " is already in progress"); application = application.withChange(change); if (change.application().isPresent()) application = application.withOutstandingChange(Change.empty()); application = trigger(new Triggering(application, JobType.systemTest, false, change.toString()), Collections.emptySet(), false); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) { applications().lockOrThrow(applicationId, application -> { applications().store(application.withChange(application.change().application() .map(Change::of) .filter(change -> keepApplicationChange) .orElse(Change.empty()))); if ( ! applications().require(applicationId).change().isPresent()) deploymentQueue.removeJobs(application.id()); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! 
jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** Create application version from job report */ private ApplicationVersion applicationVersionFrom(JobReport report) { return report.sourceRevision().map(sr -> ApplicationVersion.from(sr, report.buildNumber())) .orElse(ApplicationVersion.unknown); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.change().isPresent() && application.change().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (jobType.isProduction() && changeDeployed(application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } /** * Returns true if the previous job has completed successfully with a application version and/or Vespa version * which is newer (different) than the one last completed successfully in next */ private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.change().isPresent()) return false; if (next == null) return true; if (next.type().isTest()) { if ( ! lastSuccessfulIs(application.change(), previous.type(), application)) return false; if (lastSuccessfulIs(application.change(), next.type(), application)) return false; } else if (next.type().isProduction()) { if ( ! 
lastSuccessfulIs(application.change(), JobType.stagingTest, application)) return false; if (previous.type().isProduction() && ! changeDeployed(application, previous.type())) return false; if (changeDeployed(application, next.type())) return false; } else throw new IllegalStateException("Unclassified type of next job: " + next); return true; } /** Returns whether all production zones listed in deployment spec has this change (or a newer version, if upgrade) */ private boolean deploymentComplete(LockedApplication application) { return order.jobsFrom(application.deploymentSpec()).stream() .filter(JobType::isProduction) .filter(job -> job.zone(controller.system()).isPresent()) .allMatch(job -> changeDeployed(application, job)); } /** * Returns whether the given application should skip deployment of its current change to the given production job zone. * * If the currently deployed application has a newer platform or application version than the application's * current change, the method returns {@code true}, to avoid a downgrade. * Otherwise, it returns whether the current change is redundant, i.e., all its components are already deployed. */ private boolean changeDeployed(Application application, JobType job) { if ( ! job.isProduction()) throw new IllegalArgumentException(job + " is not a production job!"); Deployment deployment = application.deployments().get(job.zone(controller.system()).get()); if (deployment == null) return false; int applicationComparison = application.change().application() .map(version -> version.compareTo(deployment.applicationVersion())) .orElse(0); int platformComparion = application.change().platform() .map(version -> version.compareTo(deployment.version())) .orElse(0); if (applicationComparison == -1 || platformComparion == -1) return true; return applicationComparison == 0 && platformComparion == 0; } private boolean acceptNewApplicationVersionNow(LockedApplication application) { if ( ! 
application.change().isPresent()) return true; if (application.change().application().isPresent()) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Change change, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulRun = status.lastSuccess(); if ( ! lastSuccessfulRun.isPresent()) return false; if (change.platform().isPresent() && ! change.platform().get().equals(lastSuccessfulRun.get().version())) return false; if (change.application().isPresent() && ! change.application().get().equals(lastSuccessfulRun.get().applicationVersion())) return false; return true; } public static class Triggering { private final LockedApplication application; private final JobType jobType; private final boolean retry; private final String reason; public Triggering(LockedApplication application, JobType jobType, boolean retry, String reason) { this.application = application; this.jobType = jobType; this.retry = retry; this.reason = reason; } public LockedApplication application() { return application; } public JobType jobType() { return jobType; } public boolean isRetry() { return retry; } public String reason() { return reason; } public String toString() { return String.format("Triggering %s for %s, %s: %s", jobType, application, application.change().isPresent() ? "deploying " + application.change() : "restarted deployment", reason); } } }
Yes! I'll work towards that.
public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { ApplicationVersion applicationVersion = applicationVersionFrom(report); application = application.withJobCompletion(report, applicationVersion, clock.instant(), controller); application = application.withProjectId(report.projectId()); if (report.jobType() == JobType.component) { if (report.success()) { if ( ! acceptNewApplicationVersionNow(application)) { applications().store(application.withOutstandingChange(Change.of(applicationVersion))); return; } application = application.withChange(application.change().with(applicationVersion)); } else { applications().store(application); return; } } else if (report.jobType().isProduction() && deploymentComplete(application)) { application = application.withChange(Change.empty()); } if (report.success()) { triggerReadyJobs(application); return; } else if (retryBecauseOutOfCapacity(application, report.jobType())) { triggerReadyJobs(application); return; } else if (retryBecauseNewFailure(application, report.jobType())) { triggerReadyJobs(application); return; } applications().store(application); }); }
return;
public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { ApplicationVersion applicationVersion = applicationVersionFrom(report); application = application.withJobCompletion(report, applicationVersion, clock.instant(), controller); application = application.withProjectId(report.projectId()); if (report.jobType() == JobType.component) { if (report.success()) { if ( ! acceptNewApplicationVersionNow(application)) { applications().store(application.withOutstandingChange(Change.of(applicationVersion))); return; } application = application.withChange(application.change().with(applicationVersion)); } else { applications().store(application); return; } } else if (report.jobType().isProduction() && deploymentComplete(application)) { application = application.withChange(Change.empty()); } if (report.success()) { triggerReadyJobs(application); return; } else if (retryBecauseOutOfCapacity(application, report.jobType())) { triggerReadyJobs(application); return; } else if (retryBecauseNewFailure(application, report.jobType())) { triggerReadyJobs(application); return; } applications().store(application); }); }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final DeploymentQueue deploymentQueue; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.deploymentQueue = new DeploymentQueue(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public DeploymentQueue deploymentQueue() { return deploymentQueue; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! 
jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.change().platform().isPresent()) { Version target = application.change().platform().get(); if (systemTestStatus == null || ! systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(new Triggering(application, JobType.systemTest, false, "Upgrade to " + target), Collections.emptySet(), false); applications().store(application); } } } for (JobType jobType : (Iterable<JobType>) Stream.concat(Stream.of(JobType.component), jobs.stream())::iterator) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; List<JobType> nextJobs = order.nextAfter(jobType, application); for (JobType nextJobType : nextJobs) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) { boolean isRetry = nextStatus != null && nextStatus.jobError().filter(JobError.outOfCapacity::equals).isPresent(); application = trigger(new Triggering(application, nextJobType, isRetry, isRetry ? "Retrying on out of capacity" : "Available change in " + jobType.jobName()), nextJobs, false); } } applications().store(application); } } /** * Trigger a job for an application, if allowed * * @param triggering the triggering to execute, i.e., application, job type and reason * @param concurrentlyWith production jobs that may run concurrently with the job to trigger * @param force true to disable checks which should normally prevent this triggering from happening * @return the application in the triggered state, if actually triggered. 
This *must* be stored by the caller */ public LockedApplication trigger(Triggering triggering, Collection<JobType> concurrentlyWith, boolean force) { if (triggering.jobType == null) return triggering.application; List<JobType> runningProductionJobs = JobList.from(triggering.application) .production() .running(jobTimeoutLimit()) .mapToList(JobStatus::type); if ( ! force && triggering.jobType().isProduction() && ! concurrentlyWith.containsAll(runningProductionJobs)) return triggering.application; if ( ! triggering.application.deploymentJobs().isDeployableTo(triggering.jobType.environment(), triggering.application.change())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", triggering.jobType, triggering.application, triggering.reason)); return triggering.application; } if ( ! force && ! allowedTriggering(triggering.jobType, triggering.application)) return triggering.application; log.info(triggering.toString()); deploymentQueue.addJob(triggering.application.id(), triggering.jobType, triggering.retry); return triggering.application.withJobTriggering(triggering.jobType, clock.instant(), triggering.application.deployVersionFor(triggering.jobType, controller), triggering.application.deployApplicationVersionFor(triggering.jobType, controller, false) .orElse(ApplicationVersion.unknown), triggering.reason); } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.change().isPresent() && ! 
application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.change() + " is already in progress"); application = application.withChange(change); if (change.application().isPresent()) application = application.withOutstandingChange(Change.empty()); application = trigger(new Triggering(application, JobType.systemTest, false, change.toString()), Collections.emptySet(), false); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) { applications().lockOrThrow(applicationId, application -> { applications().store(application.withChange(application.change().application() .map(Change::of) .filter(change -> keepApplicationChange) .orElse(Change.empty()))); if ( ! applications().require(applicationId).change().isPresent()) deploymentQueue.removeJobs(application.id()); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! 
jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** Create application version from job report */ private ApplicationVersion applicationVersionFrom(JobReport report) { return report.sourceRevision().map(sr -> ApplicationVersion.from(sr, report.buildNumber())) .orElse(ApplicationVersion.unknown); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.change().isPresent() && application.change().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (jobType.isProduction() && changeDeployed(application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } /** * Returns true if the previous job has completed successfully with a application version and/or Vespa version * which is newer (different) than the one last completed successfully in next */ private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.change().isPresent()) return false; if (next == null) return true; if (next.type().isTest()) { if ( ! lastSuccessfulIs(application.change(), previous.type(), application)) return false; if (lastSuccessfulIs(application.change(), next.type(), application)) return false; } else if (next.type().isProduction()) { if ( ! 
lastSuccessfulIs(application.change(), JobType.stagingTest, application)) return false; if (previous.type().isProduction() && ! changeDeployed(application, previous.type())) return false; if (changeDeployed(application, next.type())) return false; } else throw new IllegalStateException("Unclassified type of next job: " + next); return true; } /** Returns whether all production zones listed in deployment spec has this change (or a newer version, if upgrade) */ private boolean deploymentComplete(LockedApplication application) { return order.jobsFrom(application.deploymentSpec()).stream() .filter(JobType::isProduction) .filter(job -> job.zone(controller.system()).isPresent()) .allMatch(job -> changeDeployed(application, job)); } /** * Returns whether the given application should skip deployment of its current change to the given production job zone. * * If the currently deployed application has a newer platform or application version than the application's * current change, the method returns {@code true}, to avoid a downgrade. * Otherwise, it returns whether the current change is redundant, i.e., all its components are already deployed. */ private boolean changeDeployed(Application application, JobType job) { if ( ! job.isProduction()) throw new IllegalArgumentException(job + " is not a production job!"); Deployment deployment = application.deployments().get(job.zone(controller.system()).get()); if (deployment == null) return false; int applicationComparison = application.change().application() .map(version -> version.compareTo(deployment.applicationVersion())) .orElse(0); int platformComparion = application.change().platform() .map(version -> version.compareTo(deployment.version())) .orElse(0); if (applicationComparison == -1 || platformComparion == -1) return true; return applicationComparison == 0 && platformComparion == 0; } private boolean acceptNewApplicationVersionNow(LockedApplication application) { if ( ! 
application.change().isPresent()) return true; if (application.change().application().isPresent()) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Change change, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulRun = status.lastSuccess(); if ( ! lastSuccessfulRun.isPresent()) return false; if (change.platform().isPresent() && ! change.platform().get().equals(lastSuccessfulRun.get().version())) return false; if (change.application().isPresent() && ! change.application().get().equals(lastSuccessfulRun.get().applicationVersion())) return false; return true; } public static class Triggering { private final LockedApplication application; private final JobType jobType; private final boolean retry; private final String reason; public Triggering(LockedApplication application, JobType jobType, boolean retry, String reason) { this.application = application; this.jobType = jobType; this.retry = retry; this.reason = reason; } public LockedApplication application() { return application; } public JobType jobType() { return jobType; } public boolean isRetry() { return retry; } public String reason() { return reason; } public String toString() { return String.format("Triggering %s for %s, %s: %s", jobType, application, application.change().isPresent() ? "deploying " + application.change() : "restarted deployment", reason); } } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final DeploymentQueue deploymentQueue; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.deploymentQueue = new DeploymentQueue(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public DeploymentQueue deploymentQueue() { return deploymentQueue; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! 
jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.change().platform().isPresent()) { Version target = application.change().platform().get(); if (systemTestStatus == null || ! systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(new Triggering(application, JobType.systemTest, false, "Upgrade to " + target), Collections.emptySet(), false); applications().store(application); } } } for (JobType jobType : (Iterable<JobType>) Stream.concat(Stream.of(JobType.component), jobs.stream())::iterator) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; List<JobType> nextJobs = order.nextAfter(jobType, application); for (JobType nextJobType : nextJobs) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) { boolean isRetry = nextStatus != null && nextStatus.jobError().filter(JobError.outOfCapacity::equals).isPresent(); application = trigger(new Triggering(application, nextJobType, isRetry, isRetry ? "Retrying on out of capacity" : "Available change in " + jobType.jobName()), nextJobs, false); } } applications().store(application); } } /** * Trigger a job for an application, if allowed * * @param triggering the triggering to execute, i.e., application, job type and reason * @param concurrentlyWith production jobs that may run concurrently with the job to trigger * @param force true to disable checks which should normally prevent this triggering from happening * @return the application in the triggered state, if actually triggered. 
This *must* be stored by the caller */ public LockedApplication trigger(Triggering triggering, Collection<JobType> concurrentlyWith, boolean force) { if (triggering.jobType == null) return triggering.application; List<JobType> runningProductionJobs = JobList.from(triggering.application) .production() .running(jobTimeoutLimit()) .mapToList(JobStatus::type); if ( ! force && triggering.jobType().isProduction() && ! concurrentlyWith.containsAll(runningProductionJobs)) return triggering.application; if ( ! triggering.application.deploymentJobs().isDeployableTo(triggering.jobType.environment(), triggering.application.change())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", triggering.jobType, triggering.application, triggering.reason)); return triggering.application; } if ( ! force && ! allowedTriggering(triggering.jobType, triggering.application)) return triggering.application; log.info(triggering.toString()); deploymentQueue.addJob(triggering.application.id(), triggering.jobType, triggering.retry); return triggering.application.withJobTriggering(triggering.jobType, clock.instant(), triggering.application.deployVersionFor(triggering.jobType, controller), triggering.application.deployApplicationVersionFor(triggering.jobType, controller, false) .orElse(ApplicationVersion.unknown), triggering.reason); } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.change().isPresent() && ! 
application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.change() + " is already in progress"); application = application.withChange(change); if (change.application().isPresent()) application = application.withOutstandingChange(Change.empty()); application = trigger(new Triggering(application, JobType.systemTest, false, change.toString()), Collections.emptySet(), false); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) { applications().lockOrThrow(applicationId, application -> { applications().store(application.withChange(application.change().application() .map(Change::of) .filter(change -> keepApplicationChange) .orElse(Change.empty()))); if ( ! applications().require(applicationId).change().isPresent()) deploymentQueue.removeJobs(application.id()); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! 
jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** Create application version from job report */ private ApplicationVersion applicationVersionFrom(JobReport report) { return report.sourceRevision().map(sr -> ApplicationVersion.from(sr, report.buildNumber())) .orElse(ApplicationVersion.unknown); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.change().isPresent() && application.change().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (jobType.isProduction() && changeDeployed(application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } /** * Returns true if the previous job has completed successfully with a application version and/or Vespa version * which is newer (different) than the one last completed successfully in next */ private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.change().isPresent()) return false; if (next == null) return true; if (next.type().isTest()) { if ( ! lastSuccessfulIs(application.change(), previous.type(), application)) return false; if (lastSuccessfulIs(application.change(), next.type(), application)) return false; } else if (next.type().isProduction()) { if ( ! 
lastSuccessfulIs(application.change(), JobType.stagingTest, application)) return false; if (previous.type().isProduction() && ! changeDeployed(application, previous.type())) return false; if (changeDeployed(application, next.type())) return false; } else throw new IllegalStateException("Unclassified type of next job: " + next); return true; } /** Returns whether all production zones listed in deployment spec has this change (or a newer version, if upgrade) */ private boolean deploymentComplete(LockedApplication application) { return order.jobsFrom(application.deploymentSpec()).stream() .filter(JobType::isProduction) .filter(job -> job.zone(controller.system()).isPresent()) .allMatch(job -> changeDeployed(application, job)); } /** * Returns whether the given application should skip deployment of its current change to the given production job zone. * * If the currently deployed application has a newer platform or application version than the application's * current change, the method returns {@code true}, to avoid a downgrade. * Otherwise, it returns whether the current change is redundant, i.e., all its components are already deployed. */ private boolean changeDeployed(Application application, JobType job) { if ( ! job.isProduction()) throw new IllegalArgumentException(job + " is not a production job!"); Deployment deployment = application.deployments().get(job.zone(controller.system()).get()); if (deployment == null) return false; int applicationComparison = application.change().application() .map(version -> version.compareTo(deployment.applicationVersion())) .orElse(0); int platformComparion = application.change().platform() .map(version -> version.compareTo(deployment.version())) .orElse(0); if (applicationComparison == -1 || platformComparion == -1) return true; return applicationComparison == 0 && platformComparion == 0; } private boolean acceptNewApplicationVersionNow(LockedApplication application) { if ( ! 
application.change().isPresent()) return true; if (application.change().application().isPresent()) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Change change, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulRun = status.lastSuccess(); if ( ! lastSuccessfulRun.isPresent()) return false; if (change.platform().isPresent() && ! change.platform().get().equals(lastSuccessfulRun.get().version())) return false; if (change.application().isPresent() && ! change.application().get().equals(lastSuccessfulRun.get().applicationVersion())) return false; return true; } public static class Triggering { private final LockedApplication application; private final JobType jobType; private final boolean retry; private final String reason; public Triggering(LockedApplication application, JobType jobType, boolean retry, String reason) { this.application = application; this.jobType = jobType; this.retry = retry; this.reason = reason; } public LockedApplication application() { return application; } public JobType jobType() { return jobType; } public boolean isRetry() { return retry; } public String reason() { return reason; } public String toString() { return String.format("Triggering %s for %s, %s: %s", jobType, application, application.change().isPresent() ? "deploying " + application.change() : "restarted deployment", reason); } } }
Oh, IntelliJ ...
public DeploymentOrder deploymentOrder() { return order; }
}
public DeploymentOrder deploymentOrder() { return order; }
class DeploymentTrigger { /** * The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final DeploymentQueue deploymentQueue; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller, "controller cannot be null"); Objects.requireNonNull(curator, "curator cannot be null"); Objects.requireNonNull(clock, "clock cannot be null"); this.controller = controller; this.clock = clock; this.deploymentQueue = new DeploymentQueue(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** * Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public DeploymentQueue deploymentQueue() { return deploymentQueue; } /** * Called each time a job completes (successfully or not) to record information used when deciding what to trigger. * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { ApplicationVersion applicationVersion = report.sourceRevision().map(sr -> ApplicationVersion.from(sr, report.buildNumber())) .orElse(ApplicationVersion.unknown); application = application.withJobCompletion(report, applicationVersion, clock.instant(), controller); application = application.withProjectId(report.projectId()); if (report.jobType() == JobType.component && report.success()) { if ( ! 
acceptNewApplicationVersionNow(application)) application = application.withOutstandingChange(Change.of(applicationVersion)); else application = application.withChange(application.change().with(applicationVersion)); } applications().store(application); }); } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest() .withProjectId() .deploying(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** * Trigger a job for an application, if allowed * * @param triggering the triggering to execute, i.e., application, job type and reason * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication trigger(Triggering triggering, LockedApplication application) { if ( ! application.deploymentJobs().isDeployableTo(triggering.jobType.environment(), application.change())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", triggering.jobType, application, triggering.reason)); return application; } log.info(triggering.toString()); deploymentQueue.addJob(application.id(), triggering.jobType, triggering.retry); return application.withJobTriggering(triggering.jobType, clock.instant(), application.deployVersionFor(triggering.jobType, controller), application.deployApplicationVersionFor(triggering.jobType, controller, false) .orElse(ApplicationVersion.unknown), triggering.reason); } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.change().isPresent() 
&& !application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.change() + " is already in progress"); application = application.withChange(change); if (change.application().isPresent()) application = application.withOutstandingChange(Change.empty()); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) { applications().lockOrThrow(applicationId, application -> { applications().store(application.withChange(application.change().application() .map(Change::of) .filter(change -> keepApplicationChange) .orElse(Change.empty()))); if ( ! applications().require(applicationId).change().isPresent()) deploymentQueue.removeJobs(application.id()); }); } /** * Finds the next step to trigger for the given application, if any, and triggers it */ private void triggerReadyJobs(LockedApplication application) { List<Triggering> triggerings = new ArrayList<>(); Change change = application.change(); List<DeploymentSpec.Step> steps = application.deploymentSpec().steps(); if (steps.isEmpty()) steps = Collections.singletonList(new DeploymentSpec.DeclaredZone(Environment.test)); Optional<Instant> completedAt = Optional.of(clock.instant()); String reason = "Deploying " + change.toString(); for (DeploymentSpec.Step step : steps) { LockedApplication app = application; Collection<JobType> stepJobs = step.zones().stream().map(order::toJob).collect(toSet()); Collection<JobType> remainingJobs = stepJobs.stream().filter(job -> ! completedAt(app, job).isPresent()).collect(toList()); if (remainingJobs.isEmpty()) { if (stepJobs.isEmpty()) { Duration delay = ((DeploymentSpec.Delay) step).duration(); completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> ! 
at.isAfter(clock.instant())); reason += " after a delay of " + delay; } else { completedAt = stepJobs.stream().map(job -> completedAt(app, job).get()).max(naturalOrder()); reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", ")); } } else if (completedAt.isPresent()) { for (JobType job : remainingJobs) triggerings.add(new Triggering(app, job, reason, stepJobs)); completedAt = Optional.empty(); } } if (completedAt.isPresent()) application = application.withChange(Change.empty()); for (Triggering triggering : triggerings) if (application.deploymentJobs().isDeployableTo(triggering.jobType.environment(), change) && allowedToTriggerNow(triggering, application)) application = trigger(triggering, application); applications().store(application); } private Optional<Instant> completedAt(Application application, JobType jobType) { return jobType.isProduction() ? changeCompletedAt(application, jobType) : application.deploymentJobs().successAt(application.change(), jobType); } private boolean allowedToTriggerNow(Triggering triggering, Application application) { if (application.deploymentJobs().isRunning(triggering.jobType, jobTimeoutLimit())) return false; if ( ! triggering.jobType.isProduction()) return true; if ( ! triggering.concurrentlyWith.containsAll(JobList.from(application) .production() .running(jobTimeoutLimit()) .mapToList(JobStatus::type))) return false; if (application.change().blockedBy(application.deploymentSpec(), clock.instant())) return false; return true; } private ApplicationController applications() { return controller.applications(); } /** Returns the instant when the given application's current change was completed for the given job. */ private Optional<Instant> changeCompletedAt(Application application, JobType job) { if ( ! 
job.isProduction()) throw new IllegalArgumentException(job + " is not a production job!"); Deployment deployment = application.deployments().get(job.zone(controller.system()).get()); if (deployment == null) return Optional.empty(); int applicationComparison = application.change().application() .map(version -> version.compareTo(deployment.applicationVersion())) .orElse(0); int platformComparison = application.change().platform() .map(version -> version.compareTo(deployment.version())) .orElse(0); if (applicationComparison == -1 || platformComparison == -1) return Optional.of(deployment.at()); return applicationComparison == 0 && platformComparison == 0 ? Optional.of(deployment.at()) : Optional.empty(); } private boolean acceptNewApplicationVersionNow(LockedApplication application) { if ( ! application.change().isPresent()) return true; if (application.change().application().isPresent()) return true; if (application.deploymentJobs().hasFailures()) return true; if ( ! application.deploymentSpec().canUpgradeAt(clock.instant()) || ! application.deploymentSpec().canChangeRevisionAt(clock.instant())) return true; return false; } public static class Triggering { private final LockedApplication application; private final JobType jobType; private final boolean retry; private final String reason; private final Collection<JobType> concurrentlyWith; public Triggering(LockedApplication application, JobType jobType, String reason, Collection<JobType> concurrentlyWith) { this.application = application; this.jobType = jobType; this.concurrentlyWith = concurrentlyWith; JobStatus status = application.deploymentJobs().jobStatus().get(jobType); this.retry = status != null && status.jobError().filter(JobError.outOfCapacity::equals).isPresent(); this.reason = retry ? 
"Retrying on out of capacity" : reason; } public Triggering(LockedApplication application, JobType jobType, String reason) { this(application, jobType, reason, Collections.emptySet()); } public String toString() { return String.format("Triggering %s for %s, deploying %s: %s", jobType, application, application.change(), reason); } } }
class DeploymentTrigger { /** * The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final DeploymentQueue deploymentQueue; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller, "controller cannot be null"); Objects.requireNonNull(curator, "curator cannot be null"); Objects.requireNonNull(clock, "clock cannot be null"); this.controller = controller; this.clock = clock; this.deploymentQueue = new DeploymentQueue(controller, curator); this.order = new DeploymentOrder(controller::system); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** * Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public DeploymentQueue deploymentQueue() { return deploymentQueue; } /** * Called each time a job completes (successfully or not) to record information used when deciding what to trigger. * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { ApplicationVersion applicationVersion = report.sourceRevision().map(sr -> ApplicationVersion.from(sr, report.buildNumber())) .orElse(ApplicationVersion.unknown); application = application.withJobCompletion(report, applicationVersion, clock.instant(), controller); application = application.withProjectId(report.projectId()); if (report.jobType() == JobType.component && report.success()) { if ( ! 
acceptNewApplicationVersionNow(application)) application = application.withOutstandingChange(Change.of(applicationVersion)); else application = application.withChange(application.change().with(applicationVersion)); } applications().store(application); }); } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest() .withProjectId() .deploying(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** * Trigger a job for an application, if allowed * * @param triggering the triggering to execute, i.e., application, job type and reason * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication trigger(Triggering triggering, LockedApplication application) { if ( ! application.deploymentJobs().isDeployableTo(triggering.jobType.environment(), application.change())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", triggering.jobType, application, triggering.reason)); return application; } log.info(triggering.toString()); deploymentQueue.addJob(application.id(), triggering.jobType, triggering.retry); return application.withJobTriggering(triggering.jobType, clock.instant(), application.deployVersionFor(triggering.jobType, controller), application.deployApplicationVersionFor(triggering.jobType, controller, false) .orElse(ApplicationVersion.unknown), triggering.reason); } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.change().isPresent() 
&& !application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.change() + " is already in progress"); application = application.withChange(change); if (change.application().isPresent()) application = application.withOutstandingChange(Change.empty()); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) { applications().lockOrThrow(applicationId, application -> { applications().store(application.withChange(application.change().application() .map(Change::of) .filter(change -> keepApplicationChange) .orElse(Change.empty()))); if ( ! applications().require(applicationId).change().isPresent()) deploymentQueue.removeJobs(application.id()); }); } /** * Finds the next step to trigger for the given application, if any, and triggers it */ private void triggerReadyJobs(LockedApplication application) { List<Triggering> triggerings = new ArrayList<>(); Change change = application.change(); List<DeploymentSpec.Step> steps = application.deploymentSpec().steps(); if (steps.isEmpty()) steps = Collections.singletonList(new DeploymentSpec.DeclaredZone(Environment.test)); Optional<Instant> completedAt = Optional.of(clock.instant()); String reason = "Deploying " + change.toString(); for (DeploymentSpec.Step step : steps) { LockedApplication app = application; Set<JobType> stepJobs = step.zones().stream().map(order::toJob).collect(toSet()); Set<JobType> remainingJobs = stepJobs.stream().filter(job -> ! completedAt(app, job).isPresent()).collect(toSet()); if (remainingJobs.isEmpty()) { if (stepJobs.isEmpty()) { Duration delay = ((DeploymentSpec.Delay) step).duration(); completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> ! 
at.isAfter(clock.instant())); reason += " after a delay of " + delay; } else { completedAt = stepJobs.stream().map(job -> completedAt(app, job).get()).max(naturalOrder()); reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", ")); } } else if (completedAt.isPresent()) { for (JobType job : remainingJobs) triggerings.add(new Triggering(app, job, reason, stepJobs)); completedAt = Optional.empty(); } } if (completedAt.isPresent()) application = application.withChange(Change.empty()); for (Triggering triggering : triggerings) if (application.deploymentJobs().isDeployableTo(triggering.jobType.environment(), change) && allowedToTriggerNow(triggering, application)) application = trigger(triggering, application); applications().store(application); } private Optional<Instant> completedAt(Application application, JobType jobType) { return jobType.isProduction() ? changeCompletedAt(application, jobType) : application.deploymentJobs().successAt(application.change(), jobType); } private boolean allowedToTriggerNow(Triggering triggering, Application application) { if (application.deploymentJobs().isRunning(triggering.jobType, jobTimeoutLimit())) return false; if ( ! triggering.jobType.isProduction()) return true; if ( ! triggering.concurrentlyWith.containsAll(JobList.from(application) .production() .running(jobTimeoutLimit()) .mapToList(JobStatus::type))) return false; if (application.change().blockedBy(application.deploymentSpec(), clock.instant())) return false; return true; } private ApplicationController applications() { return controller.applications(); } /** Returns the instant when the given application's current change was completed for the given job. */ private Optional<Instant> changeCompletedAt(Application application, JobType job) { if ( ! 
job.isProduction()) throw new IllegalArgumentException(job + " is not a production job!"); Deployment deployment = application.deployments().get(job.zone(controller.system()).get()); if (deployment == null) return Optional.empty(); int applicationComparison = application.change().application() .map(version -> version.compareTo(deployment.applicationVersion())) .orElse(0); int platformComparison = application.change().platform() .map(version -> version.compareTo(deployment.version())) .orElse(0); return Optional.of(deployment.at()) .filter(ignored -> applicationComparison == -1 || platformComparison == -1 || (applicationComparison == 0 && platformComparison == 0)); } private boolean acceptNewApplicationVersionNow(LockedApplication application) { if ( ! application.change().isPresent()) return true; if (application.change().application().isPresent()) return true; if (application.deploymentJobs().hasFailures()) return true; if ( ! application.deploymentSpec().canUpgradeAt(clock.instant()) || ! application.deploymentSpec().canChangeRevisionAt(clock.instant())) return true; return false; } public static class Triggering { private final LockedApplication application; private final JobType jobType; private final boolean retry; private final String reason; private final Collection<JobType> concurrentlyWith; public Triggering(LockedApplication application, JobType jobType, String reason, Collection<JobType> concurrentlyWith) { this.application = application; this.jobType = jobType; this.concurrentlyWith = concurrentlyWith; JobStatus status = application.deploymentJobs().jobStatus().get(jobType); this.retry = status != null && status.jobError().filter(JobError.outOfCapacity::equals).isPresent(); this.reason = retry ? 
"Retrying on out of capacity" : reason; } public Triggering(LockedApplication application, JobType jobType, String reason) { this(application, jobType, reason, Collections.emptySet()); } public String toString() { return String.format("Triggering %s for %s, deploying %s: %s", jobType, application, application.change(), reason); } } }
Ready to move this out, next.
private void triggerReadyJobs(LockedApplication application) { List<Triggering> triggerings = new ArrayList<>(); Change change = application.change(); List<DeploymentSpec.Step> steps = application.deploymentSpec().steps(); if (steps.isEmpty()) steps = Collections.singletonList(new DeploymentSpec.DeclaredZone(Environment.test)); Optional<Instant> completedAt = Optional.of(clock.instant()); String reason = "Deploying " + change.toString(); for (DeploymentSpec.Step step : steps) { LockedApplication app = application; Collection<JobType> stepJobs = step.zones().stream().map(order::toJob).collect(toSet()); Collection<JobType> remainingJobs = stepJobs.stream().filter(job -> ! completedAt(app, job).isPresent()).collect(toList()); if (remainingJobs.isEmpty()) { if (stepJobs.isEmpty()) { Duration delay = ((DeploymentSpec.Delay) step).duration(); completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> ! at.isAfter(clock.instant())); reason += " after a delay of " + delay; } else { completedAt = stepJobs.stream().map(job -> completedAt(app, job).get()).max(naturalOrder()); reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", ")); } } else if (completedAt.isPresent()) { for (JobType job : remainingJobs) triggerings.add(new Triggering(app, job, reason, stepJobs)); completedAt = Optional.empty(); } } if (completedAt.isPresent()) application = application.withChange(Change.empty()); for (Triggering triggering : triggerings) if (application.deploymentJobs().isDeployableTo(triggering.jobType.environment(), change) && allowedToTriggerNow(triggering, application)) application = trigger(triggering, application); applications().store(application); }
application = trigger(triggering, application);
private void triggerReadyJobs(LockedApplication application) { List<Triggering> triggerings = new ArrayList<>(); Change change = application.change(); List<DeploymentSpec.Step> steps = application.deploymentSpec().steps(); if (steps.isEmpty()) steps = Collections.singletonList(new DeploymentSpec.DeclaredZone(Environment.test)); Optional<Instant> completedAt = Optional.of(clock.instant()); String reason = "Deploying " + change.toString(); for (DeploymentSpec.Step step : steps) { LockedApplication app = application; Set<JobType> stepJobs = step.zones().stream().map(order::toJob).collect(toSet()); Set<JobType> remainingJobs = stepJobs.stream().filter(job -> ! completedAt(app, job).isPresent()).collect(toSet()); if (remainingJobs.isEmpty()) { if (stepJobs.isEmpty()) { Duration delay = ((DeploymentSpec.Delay) step).duration(); completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> ! at.isAfter(clock.instant())); reason += " after a delay of " + delay; } else { completedAt = stepJobs.stream().map(job -> completedAt(app, job).get()).max(naturalOrder()); reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", ")); } } else if (completedAt.isPresent()) { for (JobType job : remainingJobs) triggerings.add(new Triggering(app, job, reason, stepJobs)); completedAt = Optional.empty(); } } if (completedAt.isPresent()) application = application.withChange(Change.empty()); for (Triggering triggering : triggerings) if (application.deploymentJobs().isDeployableTo(triggering.jobType.environment(), change) && allowedToTriggerNow(triggering, application)) application = trigger(triggering, application); applications().store(application); }
class DeploymentTrigger { /** * The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final DeploymentQueue deploymentQueue; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller, "controller cannot be null"); Objects.requireNonNull(curator, "curator cannot be null"); Objects.requireNonNull(clock, "clock cannot be null"); this.controller = controller; this.clock = clock; this.deploymentQueue = new DeploymentQueue(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** * Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public DeploymentQueue deploymentQueue() { return deploymentQueue; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to record information used when deciding what to trigger. * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { ApplicationVersion applicationVersion = report.sourceRevision().map(sr -> ApplicationVersion.from(sr, report.buildNumber())) .orElse(ApplicationVersion.unknown); application = application.withJobCompletion(report, applicationVersion, clock.instant(), controller); application = application.withProjectId(report.projectId()); if (report.jobType() == JobType.component && report.success()) { if ( ! 
acceptNewApplicationVersionNow(application)) application = application.withOutstandingChange(Change.of(applicationVersion)); else application = application.withChange(application.change().with(applicationVersion)); } applications().store(application); }); } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest() .withProjectId() .deploying(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** * Trigger a job for an application, if allowed * * @param triggering the triggering to execute, i.e., application, job type and reason * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication trigger(Triggering triggering, LockedApplication application) { if ( ! application.deploymentJobs().isDeployableTo(triggering.jobType.environment(), application.change())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", triggering.jobType, application, triggering.reason)); return application; } log.info(triggering.toString()); deploymentQueue.addJob(application.id(), triggering.jobType, triggering.retry); return application.withJobTriggering(triggering.jobType, clock.instant(), application.deployVersionFor(triggering.jobType, controller), application.deployApplicationVersionFor(triggering.jobType, controller, false) .orElse(ApplicationVersion.unknown), triggering.reason); } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.change().isPresent() 
&& !application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.change() + " is already in progress"); application = application.withChange(change); if (change.application().isPresent()) application = application.withOutstandingChange(Change.empty()); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) { applications().lockOrThrow(applicationId, application -> { applications().store(application.withChange(application.change().application() .map(Change::of) .filter(change -> keepApplicationChange) .orElse(Change.empty()))); if ( ! applications().require(applicationId).change().isPresent()) deploymentQueue.removeJobs(application.id()); }); } /** * Finds the next step to trigger for the given application, if any, and triggers it */ private Optional<Instant> completedAt(Application application, JobType jobType) { return jobType.isProduction() ? changeCompletedAt(application, jobType) : application.deploymentJobs().successAt(application.change(), jobType); } private boolean allowedToTriggerNow(Triggering triggering, Application application) { if (application.deploymentJobs().isRunning(triggering.jobType, jobTimeoutLimit())) return false; if ( ! triggering.jobType.isProduction()) return true; if ( ! triggering.concurrentlyWith.containsAll(JobList.from(application) .production() .running(jobTimeoutLimit()) .mapToList(JobStatus::type))) return false; if (application.change().blockedBy(application.deploymentSpec(), clock.instant())) return false; return true; } private ApplicationController applications() { return controller.applications(); } /** Returns the instant when the given application's current change was completed for the given job. 
*/ private Optional<Instant> changeCompletedAt(Application application, JobType job) { if ( ! job.isProduction()) throw new IllegalArgumentException(job + " is not a production job!"); Deployment deployment = application.deployments().get(job.zone(controller.system()).get()); if (deployment == null) return Optional.empty(); int applicationComparison = application.change().application() .map(version -> version.compareTo(deployment.applicationVersion())) .orElse(0); int platformComparison = application.change().platform() .map(version -> version.compareTo(deployment.version())) .orElse(0); if (applicationComparison == -1 || platformComparison == -1) return Optional.of(deployment.at()); return applicationComparison == 0 && platformComparison == 0 ? Optional.of(deployment.at()) : Optional.empty(); } private boolean acceptNewApplicationVersionNow(LockedApplication application) { if ( ! application.change().isPresent()) return true; if (application.change().application().isPresent()) return true; if (application.deploymentJobs().hasFailures()) return true; if ( ! application.deploymentSpec().canUpgradeAt(clock.instant()) || ! application.deploymentSpec().canChangeRevisionAt(clock.instant())) return true; return false; } public static class Triggering { private final LockedApplication application; private final JobType jobType; private final boolean retry; private final String reason; private final Collection<JobType> concurrentlyWith; public Triggering(LockedApplication application, JobType jobType, String reason, Collection<JobType> concurrentlyWith) { this.application = application; this.jobType = jobType; this.concurrentlyWith = concurrentlyWith; JobStatus status = application.deploymentJobs().jobStatus().get(jobType); this.retry = status != null && status.jobError().filter(JobError.outOfCapacity::equals).isPresent(); this.reason = retry ? 
"Retrying on out of capacity" : reason; } public Triggering(LockedApplication application, JobType jobType, String reason) { this(application, jobType, reason, Collections.emptySet()); } public String toString() { return String.format("Triggering %s for %s, deploying %s: %s", jobType, application, application.change(), reason); } } }
class DeploymentTrigger { /** * The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final DeploymentQueue deploymentQueue; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller, "controller cannot be null"); Objects.requireNonNull(curator, "curator cannot be null"); Objects.requireNonNull(clock, "clock cannot be null"); this.controller = controller; this.clock = clock; this.deploymentQueue = new DeploymentQueue(controller, curator); this.order = new DeploymentOrder(controller::system); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** * Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public DeploymentQueue deploymentQueue() { return deploymentQueue; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to record information used when deciding what to trigger. * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { ApplicationVersion applicationVersion = report.sourceRevision().map(sr -> ApplicationVersion.from(sr, report.buildNumber())) .orElse(ApplicationVersion.unknown); application = application.withJobCompletion(report, applicationVersion, clock.instant(), controller); application = application.withProjectId(report.projectId()); if (report.jobType() == JobType.component && report.success()) { if ( ! 
acceptNewApplicationVersionNow(application)) application = application.withOutstandingChange(Change.of(applicationVersion)); else application = application.withChange(application.change().with(applicationVersion)); } applications().store(application); }); } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest() .withProjectId() .deploying(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** * Trigger a job for an application, if allowed * * @param triggering the triggering to execute, i.e., application, job type and reason * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication trigger(Triggering triggering, LockedApplication application) { if ( ! application.deploymentJobs().isDeployableTo(triggering.jobType.environment(), application.change())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", triggering.jobType, application, triggering.reason)); return application; } log.info(triggering.toString()); deploymentQueue.addJob(application.id(), triggering.jobType, triggering.retry); return application.withJobTriggering(triggering.jobType, clock.instant(), application.deployVersionFor(triggering.jobType, controller), application.deployApplicationVersionFor(triggering.jobType, controller, false) .orElse(ApplicationVersion.unknown), triggering.reason); } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.change().isPresent() 
&& !application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.change() + " is already in progress"); application = application.withChange(change); if (change.application().isPresent()) application = application.withOutstandingChange(Change.empty()); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) { applications().lockOrThrow(applicationId, application -> { applications().store(application.withChange(application.change().application() .map(Change::of) .filter(change -> keepApplicationChange) .orElse(Change.empty()))); if ( ! applications().require(applicationId).change().isPresent()) deploymentQueue.removeJobs(application.id()); }); } /** * Finds the next step to trigger for the given application, if any, and triggers it */ private Optional<Instant> completedAt(Application application, JobType jobType) { return jobType.isProduction() ? changeCompletedAt(application, jobType) : application.deploymentJobs().successAt(application.change(), jobType); } private boolean allowedToTriggerNow(Triggering triggering, Application application) { if (application.deploymentJobs().isRunning(triggering.jobType, jobTimeoutLimit())) return false; if ( ! triggering.jobType.isProduction()) return true; if ( ! triggering.concurrentlyWith.containsAll(JobList.from(application) .production() .running(jobTimeoutLimit()) .mapToList(JobStatus::type))) return false; if (application.change().blockedBy(application.deploymentSpec(), clock.instant())) return false; return true; } private ApplicationController applications() { return controller.applications(); } /** Returns the instant when the given application's current change was completed for the given job. 
*/ private Optional<Instant> changeCompletedAt(Application application, JobType job) { if ( ! job.isProduction()) throw new IllegalArgumentException(job + " is not a production job!"); Deployment deployment = application.deployments().get(job.zone(controller.system()).get()); if (deployment == null) return Optional.empty(); int applicationComparison = application.change().application() .map(version -> version.compareTo(deployment.applicationVersion())) .orElse(0); int platformComparison = application.change().platform() .map(version -> version.compareTo(deployment.version())) .orElse(0); return Optional.of(deployment.at()) .filter(ignored -> applicationComparison == -1 || platformComparison == -1 || (applicationComparison == 0 && platformComparison == 0)); } private boolean acceptNewApplicationVersionNow(LockedApplication application) { if ( ! application.change().isPresent()) return true; if (application.change().application().isPresent()) return true; if (application.deploymentJobs().hasFailures()) return true; if ( ! application.deploymentSpec().canUpgradeAt(clock.instant()) || ! application.deploymentSpec().canChangeRevisionAt(clock.instant())) return true; return false; } public static class Triggering { private final LockedApplication application; private final JobType jobType; private final boolean retry; private final String reason; private final Collection<JobType> concurrentlyWith; public Triggering(LockedApplication application, JobType jobType, String reason, Collection<JobType> concurrentlyWith) { this.application = application; this.jobType = jobType; this.concurrentlyWith = concurrentlyWith; JobStatus status = application.deploymentJobs().jobStatus().get(jobType); this.retry = status != null && status.jobError().filter(JobError.outOfCapacity::equals).isPresent(); this.reason = retry ? 
"Retrying on out of capacity" : reason; } public Triggering(LockedApplication application, JobType jobType, String reason) { this(application, jobType, reason, Collections.emptySet()); } public String toString() { return String.format("Triggering %s for %s, deploying %s: %s", jobType, application, application.change(), reason); } } }
Ah, overloaded in the serialiser as well.
public void writeTenant(AthenzTenant tenant) { try { curator.set(tenantPath(tenant.name()), SlimeUtils.toJsonBytes(tenantSerializer.toSlime(tenant))); } catch (IOException e) { throw new UncheckedIOException("Failed to write " + tenant.toString(), e); } }
}
public void writeTenant(AthenzTenant tenant) { try { curator.set(tenantPath(tenant.name()), SlimeUtils.toJsonBytes(tenantSerializer.toSlime(tenant))); } catch (IOException e) { throw new UncheckedIOException("Failed to write " + tenant.toString(), e); } }
/**
 * Curator (ZooKeeper) backed persistence for the controller: stores tenants, job queues,
 * version status, upgrade settings and provisioning state under a fixed path layout
 * rooted at {@code /controller/v1}, and hands out reentrant distributed locks.
 */
class CuratorDb {

    private static final Logger log = Logger.getLogger(CuratorDb.class.getName());

    // Root of everything this class stores, plus sub-roots for locks and tenants.
    private static final Path root = Path.fromString("/controller/v1");
    private static final Path lockRoot = root.append("locks");
    private static final Path tenantRoot = root.append("tenants");
    private static final Duration defaultLockTimeout = Duration.ofMinutes(5);

    // One serializer per stored value type.
    private final StringSetSerializer stringSetSerializer = new StringSetSerializer();
    private final JobQueueSerializer jobQueueSerializer = new JobQueueSerializer();
    private final VersionStatusSerializer versionStatusSerializer = new VersionStatusSerializer();
    private final ConfidenceOverrideSerializer confidenceOverrideSerializer = new ConfidenceOverrideSerializer();
    private final TenantSerializer tenantSerializer = new TenantSerializer();

    private final Curator curator;

    /**
     * All keys, to allow reentrancy.
     * This will grow forever, but this should be too slow to be a problem.
     */
    private final ConcurrentHashMap<Path, Lock> locks = new ConcurrentHashMap<>();

    @Inject
    public CuratorDb(Curator curator) {
        this.curator = curator;
    }

    // -------------- Locks ---------------------------------------------------

    /** Acquires the lock for the given tenant, waiting at most the given timeout. */
    public Lock lock(TenantName name, Duration timeout) {
        return lock(lockPath(name), timeout);
    }

    /** Acquires the lock for the given application, waiting at most the given timeout. */
    public Lock lock(ApplicationId id, Duration timeout) {
        return lock(lockPath(id), timeout);
    }

    public Lock lockRotations() {
        return lock(lockRoot.append("rotations"), defaultLockTimeout);
    }

    /** Create a reentrant lock */
    private Lock lock(Path path, Duration timeout) {
        // Lock objects are cached per path so re-acquisition by the same holder is reentrant.
        Lock lock = locks.computeIfAbsent(path, (pathArg) -> new Lock(pathArg.getAbsolute(), curator));
        lock.acquire(timeout);
        return lock;
    }

    public Lock lockInactiveJobs() {
        return lock(lockRoot.append("inactiveJobsLock"), defaultLockTimeout);
    }

    public Lock lockJobQueues() {
        return lock(lockRoot.append("jobQueuesLock"), defaultLockTimeout);
    }

    public Lock lockMaintenanceJob(String jobName) {
        // Short timeout: a maintenance job which cannot get its lock quickly simply skips this run.
        return lock(lockRoot.append("maintenanceJobLocks").append(jobName), Duration.ofSeconds(1));
    }

    public Lock lockProvisionState(String provisionStateId) {
        return lock(lockPath(provisionStateId), Duration.ofSeconds(1));
    }

    public Lock lockVespaServerPool() {
        return lock(lockRoot.append("vespaServerPoolLock"), Duration.ofSeconds(1));
    }

    public Lock lockOpenStackServerPool() {
        return lock(lockRoot.append("openStackServerPoolLock"), Duration.ofSeconds(1));
    }

    // -------------- Read and write ------------------------------------------

    /** Reads the data at the given path as JSON, or empty if the node is missing or empty. */
    private Optional<Slime> readSlime(Path path) {
        return curator.getData(path).filter(data -> data.length > 0).map(SlimeUtils::jsonToSlime);
    }

    /**
     * Returns the set of inactive job names.
     * On a deserialization error the stored state is reset to empty and an empty set returned.
     */
    public Set<String> readInactiveJobs() {
        try {
            return readSlime(inactiveJobsPath()).map(stringSetSerializer::fromSlime).orElseGet(HashSet::new);
        } catch (RuntimeException e) {
            log.log(Level.WARNING, "Error reading inactive jobs, deleting inactive state");
            writeInactiveJobs(Collections.emptySet());
            return new HashSet<>();
        }
    }

    public void writeInactiveJobs(Set<String> inactiveJobs) {
        curator.set(inactiveJobsPath(), stringSetSerializer.toJson(inactiveJobs));
    }

    /**
     * Returns the queue of applications awaiting the given job type.
     * On a deserialization error the stored queue is reset to empty and an empty deque returned.
     */
    public Deque<ApplicationId> readJobQueue(DeploymentJobs.JobType jobType) {
        try {
            return readSlime(jobQueuePath(jobType)).map(jobQueueSerializer::fromSlime).orElseGet(ArrayDeque::new);
        } catch (RuntimeException e) {
            log.log(Level.WARNING, "Error reading job queue of type '" + jobType.jobName() + "'; deleting it.");
            writeJobQueue(jobType, Collections::emptyIterator);
            return new ArrayDeque<>();
        }
    }

    public void writeJobQueue(DeploymentJobs.JobType jobType, Iterable<ApplicationId> queue) {
        curator.set(jobQueuePath(jobType), jobQueueSerializer.toJson(queue));
    }

    /** Returns the configured upgrade rate; defaults to 0.5 when unset or empty. */
    public double readUpgradesPerMinute() {
        Optional<byte[]> n = curator.getData(upgradesPerMinutePath());
        if ( ! n.isPresent() || n.get().length == 0) {
            return 0.5;
        }
        return ByteBuffer.wrap(n.get()).getDouble();
    }

    /**
     * Stores the upgrade rate as a raw big-endian double.
     *
     * @throws IllegalArgumentException if n is negative
     */
    public void writeUpgradesPerMinute(double n) {
        if (n < 0) {
            throw new IllegalArgumentException("Upgrades per minute must be >= 0");
        }
        curator.set(upgradesPerMinutePath(), ByteBuffer.allocate(Double.BYTES).putDouble(n).array());
    }

    /** @throws UncheckedIOException if JSON serialization fails */
    public void writeVersionStatus(VersionStatus status) {
        try {
            curator.set(versionStatusPath(), SlimeUtils.toJsonBytes(versionStatusSerializer.toSlime(status)));
        } catch (IOException e) {
            throw new UncheckedIOException("Failed to serialize version status", e);
        }
    }

    public VersionStatus readVersionStatus() {
        return readSlime(versionStatusPath()).map(versionStatusSerializer::fromSlime).orElseGet(VersionStatus::empty);
    }

    /** @throws UncheckedIOException if JSON serialization fails */
    public void writeConfidenceOverrides(Map<Version, VespaVersion.Confidence> overrides) {
        try {
            curator.set(confidenceOverridesPath(), SlimeUtils.toJsonBytes(confidenceOverrideSerializer.toSlime(overrides)));
        } catch (IOException e) {
            throw new UncheckedIOException("Failed to serialize confidence overrides", e);
        }
    }

    public Map<Version, VespaVersion.Confidence> readConfidenceOverrides() {
        return readSlime(confidenceOverridesPath()).map(confidenceOverrideSerializer::fromSlime)
                                                   .orElseGet(Collections::emptyMap);
    }

    /**
     * Persists the given user tenant, serialized to JSON, at its tenant path.
     *
     * @throws UncheckedIOException if JSON serialization fails
     */
    public void writeTenant(UserTenant tenant) {
        try {
            curator.set(tenantPath(tenant.name()), SlimeUtils.toJsonBytes(tenantSerializer.toSlime(tenant)));
        } catch (IOException e) {
            throw new UncheckedIOException("Failed to write " + tenant.toString(), e);
        }
    }

    public Optional<UserTenant> readUserTenant(TenantName name) {
        return readSlime(tenantPath(name)).map(tenantSerializer::userTenantFrom);
    }

    public Optional<AthenzTenant> readAthenzTenant(TenantName name) {
        return readSlime(tenantPath(name)).map(tenantSerializer::athenzTenantFrom);
    }

    /** Reads the tenant as a user tenant when its name carries the user prefix, otherwise as an Athenz tenant. */
    public Optional<Tenant> readTenant(TenantName name) {
        if (name.value().startsWith(Tenant.userPrefix)) {
            return readUserTenant(name).map(Tenant.class::cast);
        }
        return readAthenzTenant(name).map(Tenant.class::cast);
    }

    /** Returns an unmodifiable list of all readable tenants; unreadable children are silently skipped. */
    public List<Tenant> readTenants() {
        return curator.getChildren(tenantRoot).stream()
                      .map(TenantName::from)
                      .map(this::readTenant)
                      .filter(Optional::isPresent)
                      .map(Optional::get)
                      .collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
    }

    public void removeTenant(TenantName name) {
        curator.delete(tenantPath(name));
    }

    // -------------- Provisioning (called by internal code) ------------------

    @SuppressWarnings("unused")
    public Optional<byte[]> readProvisionState(String provisionId) {
        return curator.getData(provisionStatePath(provisionId));
    }

    @SuppressWarnings("unused")
    public void writeProvisionState(String provisionId, byte[] data) {
        curator.set(provisionStatePath(provisionId), data);
    }

    @SuppressWarnings("unused")
    public List<String> readProvisionStateIds() {
        return curator.getChildren(provisionStatePath());
    }

    @SuppressWarnings("unused")
    public Optional<byte[]> readVespaServerPool() {
        return curator.getData(vespaServerPoolPath());
    }

    @SuppressWarnings("unused")
    public void writeVespaServerPool(byte[] data) {
        curator.set(vespaServerPoolPath(), data);
    }

    @SuppressWarnings("unused")
    public Optional<byte[]> readOpenStackServerPool() {
        return curator.getData(openStackServerPoolPath());
    }

    @SuppressWarnings("unused")
    public void writeOpenStackServerPool(byte[] data) {
        curator.set(openStackServerPoolPath(), data);
    }

    // -------------- Paths ---------------------------------------------------

    private Path lockPath(TenantName tenant) {
        Path lockPath = lockRoot
                .append(tenant.value());
        curator.create(lockPath);
        return lockPath;
    }

    private Path lockPath(ApplicationId application) {
        Path lockPath = lockRoot
                .append(application.tenant().value())
                .append(application.application().value())
                .append(application.instance().value());
        curator.create(lockPath);
        return lockPath;
    }

    private Path lockPath(String provisionId) {
        // NOTE(review): this appends the full provision-state path (rooted at /controller/v1)
        // under lockRoot — verify that nesting one absolute path under another is intended here.
        Path lockPath = lockRoot
                .append(provisionStatePath())
                .append(provisionId);
        curator.create(lockPath);
        return lockPath;
    }

    private static Path inactiveJobsPath() {
        return root.append("inactiveJobs");
    }

    private static Path jobQueuePath(DeploymentJobs.JobType jobType) {
        return root.append("jobQueues").append(jobType.name());
    }

    private static Path upgradesPerMinutePath() {
        return root.append("upgrader").append("upgradesPerMinute");
    }

    private static Path confidenceOverridesPath() {
        return root.append("upgrader").append("confidenceOverrides");
    }

    private static Path versionStatusPath() {
        return root.append("versionStatus");
    }

    private static Path provisionStatePath() {
        return root.append("provisioning").append("states");
    }

    private static Path provisionStatePath(String provisionId) {
        return provisionStatePath().append(provisionId);
    }

    private static Path vespaServerPoolPath() {
        return root.append("vespaServerPool");
    }

    private static Path openStackServerPoolPath() {
        return root.append("openStackServerPool");
    }

    private static Path tenantPath(TenantName name) {
        return tenantRoot.append(name.value());
    }

}
/**
 * Curator (ZooKeeper) backed persistence for the controller: stores tenants, job queues,
 * version status, upgrade settings and provisioning state under a fixed path layout
 * rooted at {@code /controller/v1}, and hands out reentrant distributed locks.
 */
class CuratorDb {

    private static final Logger log = Logger.getLogger(CuratorDb.class.getName());

    // Root of everything this class stores, plus sub-roots for locks and tenants.
    private static final Path root = Path.fromString("/controller/v1");
    private static final Path lockRoot = root.append("locks");
    private static final Path tenantRoot = root.append("tenants");
    private static final Duration defaultLockTimeout = Duration.ofMinutes(5);

    // One serializer per stored value type.
    private final StringSetSerializer stringSetSerializer = new StringSetSerializer();
    private final JobQueueSerializer jobQueueSerializer = new JobQueueSerializer();
    private final VersionStatusSerializer versionStatusSerializer = new VersionStatusSerializer();
    private final ConfidenceOverrideSerializer confidenceOverrideSerializer = new ConfidenceOverrideSerializer();
    private final TenantSerializer tenantSerializer = new TenantSerializer();

    private final Curator curator;

    /**
     * All keys, to allow reentrancy.
     * This will grow forever, but this should be too slow to be a problem.
     */
    private final ConcurrentHashMap<Path, Lock> locks = new ConcurrentHashMap<>();

    @Inject
    public CuratorDb(Curator curator) {
        this.curator = curator;
    }

    // -------------- Locks ---------------------------------------------------

    /** Acquires the lock for the given tenant, waiting at most the given timeout. */
    public Lock lock(TenantName name, Duration timeout) {
        return lock(lockPath(name), timeout);
    }

    /** Acquires the lock for the given application, waiting at most the given timeout. */
    public Lock lock(ApplicationId id, Duration timeout) {
        return lock(lockPath(id), timeout);
    }

    public Lock lockRotations() {
        return lock(lockRoot.append("rotations"), defaultLockTimeout);
    }

    /** Create a reentrant lock */
    private Lock lock(Path path, Duration timeout) {
        // Lock objects are cached per path so re-acquisition by the same holder is reentrant.
        Lock lock = locks.computeIfAbsent(path, (pathArg) -> new Lock(pathArg.getAbsolute(), curator));
        lock.acquire(timeout);
        return lock;
    }

    public Lock lockInactiveJobs() {
        return lock(lockRoot.append("inactiveJobsLock"), defaultLockTimeout);
    }

    public Lock lockJobQueues() {
        return lock(lockRoot.append("jobQueuesLock"), defaultLockTimeout);
    }

    public Lock lockMaintenanceJob(String jobName) {
        // Short timeout: a maintenance job which cannot get its lock quickly simply skips this run.
        return lock(lockRoot.append("maintenanceJobLocks").append(jobName), Duration.ofSeconds(1));
    }

    public Lock lockProvisionState(String provisionStateId) {
        return lock(lockPath(provisionStateId), Duration.ofSeconds(1));
    }

    public Lock lockVespaServerPool() {
        return lock(lockRoot.append("vespaServerPoolLock"), Duration.ofSeconds(1));
    }

    public Lock lockOpenStackServerPool() {
        return lock(lockRoot.append("openStackServerPoolLock"), Duration.ofSeconds(1));
    }

    // -------------- Read and write ------------------------------------------

    /** Reads the data at the given path as JSON, or empty if the node is missing or empty. */
    private Optional<Slime> readSlime(Path path) {
        return curator.getData(path).filter(data -> data.length > 0).map(SlimeUtils::jsonToSlime);
    }

    /**
     * Returns the set of inactive job names.
     * On a deserialization error the stored state is reset to empty and an empty set returned.
     */
    public Set<String> readInactiveJobs() {
        try {
            return readSlime(inactiveJobsPath()).map(stringSetSerializer::fromSlime).orElseGet(HashSet::new);
        } catch (RuntimeException e) {
            log.log(Level.WARNING, "Error reading inactive jobs, deleting inactive state");
            writeInactiveJobs(Collections.emptySet());
            return new HashSet<>();
        }
    }

    public void writeInactiveJobs(Set<String> inactiveJobs) {
        curator.set(inactiveJobsPath(), stringSetSerializer.toJson(inactiveJobs));
    }

    /**
     * Returns the queue of applications awaiting the given job type.
     * On a deserialization error the stored queue is reset to empty and an empty deque returned.
     */
    public Deque<ApplicationId> readJobQueue(DeploymentJobs.JobType jobType) {
        try {
            return readSlime(jobQueuePath(jobType)).map(jobQueueSerializer::fromSlime).orElseGet(ArrayDeque::new);
        } catch (RuntimeException e) {
            log.log(Level.WARNING, "Error reading job queue of type '" + jobType.jobName() + "'; deleting it.");
            writeJobQueue(jobType, Collections::emptyIterator);
            return new ArrayDeque<>();
        }
    }

    public void writeJobQueue(DeploymentJobs.JobType jobType, Iterable<ApplicationId> queue) {
        curator.set(jobQueuePath(jobType), jobQueueSerializer.toJson(queue));
    }

    /** Returns the configured upgrade rate; defaults to 0.5 when unset or empty. */
    public double readUpgradesPerMinute() {
        Optional<byte[]> n = curator.getData(upgradesPerMinutePath());
        if ( ! n.isPresent() || n.get().length == 0) {
            return 0.5;
        }
        return ByteBuffer.wrap(n.get()).getDouble();
    }

    /**
     * Stores the upgrade rate as a raw big-endian double.
     *
     * @throws IllegalArgumentException if n is negative
     */
    public void writeUpgradesPerMinute(double n) {
        if (n < 0) {
            throw new IllegalArgumentException("Upgrades per minute must be >= 0");
        }
        curator.set(upgradesPerMinutePath(), ByteBuffer.allocate(Double.BYTES).putDouble(n).array());
    }

    /** @throws UncheckedIOException if JSON serialization fails */
    public void writeVersionStatus(VersionStatus status) {
        try {
            curator.set(versionStatusPath(), SlimeUtils.toJsonBytes(versionStatusSerializer.toSlime(status)));
        } catch (IOException e) {
            throw new UncheckedIOException("Failed to serialize version status", e);
        }
    }

    public VersionStatus readVersionStatus() {
        return readSlime(versionStatusPath()).map(versionStatusSerializer::fromSlime).orElseGet(VersionStatus::empty);
    }

    /** @throws UncheckedIOException if JSON serialization fails */
    public void writeConfidenceOverrides(Map<Version, VespaVersion.Confidence> overrides) {
        try {
            curator.set(confidenceOverridesPath(), SlimeUtils.toJsonBytes(confidenceOverrideSerializer.toSlime(overrides)));
        } catch (IOException e) {
            throw new UncheckedIOException("Failed to serialize confidence overrides", e);
        }
    }

    public Map<Version, VespaVersion.Confidence> readConfidenceOverrides() {
        return readSlime(confidenceOverridesPath()).map(confidenceOverrideSerializer::fromSlime)
                                                   .orElseGet(Collections::emptyMap);
    }

    /**
     * Persists the given user tenant, serialized to JSON, at its tenant path.
     *
     * @throws UncheckedIOException if JSON serialization fails
     */
    public void writeTenant(UserTenant tenant) {
        try {
            curator.set(tenantPath(tenant.name()), SlimeUtils.toJsonBytes(tenantSerializer.toSlime(tenant)));
        } catch (IOException e) {
            throw new UncheckedIOException("Failed to write " + tenant.toString(), e);
        }
    }

    public Optional<UserTenant> readUserTenant(TenantName name) {
        return readSlime(tenantPath(name)).map(tenantSerializer::userTenantFrom);
    }

    public Optional<AthenzTenant> readAthenzTenant(TenantName name) {
        return readSlime(tenantPath(name)).map(tenantSerializer::athenzTenantFrom);
    }

    /** Reads the tenant as a user tenant when its name carries the user prefix, otherwise as an Athenz tenant. */
    public Optional<Tenant> readTenant(TenantName name) {
        if (name.value().startsWith(Tenant.userPrefix)) {
            return readUserTenant(name).map(Tenant.class::cast);
        }
        return readAthenzTenant(name).map(Tenant.class::cast);
    }

    /** Returns an unmodifiable list of all readable tenants; unreadable children are silently skipped. */
    public List<Tenant> readTenants() {
        return curator.getChildren(tenantRoot).stream()
                      .map(TenantName::from)
                      .map(this::readTenant)
                      .filter(Optional::isPresent)
                      .map(Optional::get)
                      .collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
    }

    public void removeTenant(TenantName name) {
        curator.delete(tenantPath(name));
    }

    // -------------- Provisioning (called by internal code) ------------------

    @SuppressWarnings("unused")
    public Optional<byte[]> readProvisionState(String provisionId) {
        return curator.getData(provisionStatePath(provisionId));
    }

    @SuppressWarnings("unused")
    public void writeProvisionState(String provisionId, byte[] data) {
        curator.set(provisionStatePath(provisionId), data);
    }

    @SuppressWarnings("unused")
    public List<String> readProvisionStateIds() {
        return curator.getChildren(provisionStatePath());
    }

    @SuppressWarnings("unused")
    public Optional<byte[]> readVespaServerPool() {
        return curator.getData(vespaServerPoolPath());
    }

    @SuppressWarnings("unused")
    public void writeVespaServerPool(byte[] data) {
        curator.set(vespaServerPoolPath(), data);
    }

    @SuppressWarnings("unused")
    public Optional<byte[]> readOpenStackServerPool() {
        return curator.getData(openStackServerPoolPath());
    }

    @SuppressWarnings("unused")
    public void writeOpenStackServerPool(byte[] data) {
        curator.set(openStackServerPoolPath(), data);
    }

    // -------------- Paths ---------------------------------------------------

    private Path lockPath(TenantName tenant) {
        Path lockPath = lockRoot
                .append(tenant.value());
        curator.create(lockPath);
        return lockPath;
    }

    private Path lockPath(ApplicationId application) {
        Path lockPath = lockRoot
                .append(application.tenant().value())
                .append(application.application().value())
                .append(application.instance().value());
        curator.create(lockPath);
        return lockPath;
    }

    private Path lockPath(String provisionId) {
        // NOTE(review): this appends the full provision-state path (rooted at /controller/v1)
        // under lockRoot — verify that nesting one absolute path under another is intended here.
        Path lockPath = lockRoot
                .append(provisionStatePath())
                .append(provisionId);
        curator.create(lockPath);
        return lockPath;
    }

    private static Path inactiveJobsPath() {
        return root.append("inactiveJobs");
    }

    private static Path jobQueuePath(DeploymentJobs.JobType jobType) {
        return root.append("jobQueues").append(jobType.name());
    }

    private static Path upgradesPerMinutePath() {
        return root.append("upgrader").append("upgradesPerMinute");
    }

    private static Path confidenceOverridesPath() {
        return root.append("upgrader").append("confidenceOverrides");
    }

    private static Path versionStatusPath() {
        return root.append("versionStatus");
    }

    private static Path provisionStatePath() {
        return root.append("provisioning").append("states");
    }

    private static Path provisionStatePath(String provisionId) {
        return provisionStatePath().append(provisionId);
    }

    private static Path vespaServerPoolPath() {
        return root.append("vespaServerPool");
    }

    private static Path openStackServerPoolPath() {
        return root.append("openStackServerPool");
    }

    private static Path tenantPath(TenantName name) {
        return tenantRoot.append(name.value());
    }

}
Note: `allowedToTriggerNow` has been moved up to the enclosing (upper) level.
private boolean allowedToTriggerNow(Triggering triggering, Application application) { if (application.deploymentJobs().isRunning(triggering.jobType, jobTimeoutLimit())) return false; if ( ! triggering.jobType.isProduction()) return true; if ( ! triggering.concurrentlyWith.containsAll(JobList.from(application) .production() .running(jobTimeoutLimit()) .mapToList(JobStatus::type))) return false; if (application.change().blockedBy(application.deploymentSpec(), clock.instant())) return false; return true; }
}
private boolean allowedToTriggerNow(Triggering triggering, Application application) { if (application.deploymentJobs().isRunning(triggering.jobType, jobTimeoutLimit())) return false; if ( ! triggering.jobType.isProduction()) return true; if ( ! triggering.concurrentlyWith.containsAll(JobList.from(application) .production() .running(jobTimeoutLimit()) .mapToList(JobStatus::type))) return false; if (application.change().blockedBy(application.deploymentSpec(), clock.instant())) return false; return true; }
/**
 * Decides which deployment jobs to run for each application and triggers them:
 * records job completions, walks the deployment spec to find jobs which are ready
 * to run, and starts or cancels changes.
 */
class DeploymentTrigger {

    /** The max duration a job may run before we consider it dead/hanging */
    private final Duration jobTimeout;

    private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());

    private final Controller controller;
    private final Clock clock;
    private final DeploymentQueue deploymentQueue;
    private final DeploymentOrder order;

    public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) {
        Objects.requireNonNull(controller, "controller cannot be null");
        Objects.requireNonNull(curator, "curator cannot be null");
        Objects.requireNonNull(clock, "clock cannot be null");
        this.controller = controller;
        this.clock = clock;
        this.deploymentQueue = new DeploymentQueue(controller, curator);
        this.order = new DeploymentOrder(controller);
        // Jobs get a longer grace period in the main system than elsewhere.
        this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1);
    }

    /** Returns the time in the past before which jobs are at this moment considered unresponsive */
    public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); }

    public DeploymentQueue deploymentQueue() { return deploymentQueue; }

    public DeploymentOrder deploymentOrder() { return order; }

    /**
     * Called each time a job completes (successfully or not) to record information used when deciding what to trigger.
     *
     * @param report information about the job that just completed
     */
    public void triggerFromCompletion(JobReport report) {
        applications().lockOrThrow(report.applicationId(), application -> {
            ApplicationVersion applicationVersion = report.sourceRevision().map(sr -> ApplicationVersion.from(sr, report.buildNumber()))
                                                          .orElse(ApplicationVersion.unknown);
            application = application.withJobCompletion(report, applicationVersion, clock.instant(), controller);
            application = application.withProjectId(report.projectId());
            // A component build succeeded: either start deploying the new application version now,
            // or park it as an outstanding change for later.
            if (report.jobType() == JobType.component && report.success()) {
                if ( ! acceptNewApplicationVersionNow(application))
                    application = application.withOutstandingChange(Change.of(applicationVersion));
                else
                    application = application.withChange(application.change().with(applicationVersion));
            }
            applications().store(application);
        });
    }

    /**
     * Find jobs that can and should run but are currently not.
     */
    public void triggerReadyJobs() {
        ApplicationList applications = ApplicationList.from(applications().asList());
        applications = applications.notPullRequest()
                                   .withProjectId()
                                   .deploying();
        for (Application application : applications.asList())
            applications().lockIfPresent(application.id(), this::triggerReadyJobs);
    }

    /**
     * Trigger a job for an application, if allowed
     *
     * @param triggering the triggering to execute, i.e., application, job type and reason
     * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller
     */
    public LockedApplication trigger(Triggering triggering, LockedApplication application) {
        if ( ! application.deploymentJobs().isDeployableTo(triggering.jobType.environment(), application.change())) {
            log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", triggering.jobType, application, triggering.reason));
            return application;
        }
        log.info(triggering.toString());
        deploymentQueue.addJob(application.id(), triggering.jobType, triggering.retry);
        return application.withJobTriggering(triggering.jobType,
                                             clock.instant(),
                                             application.deployVersionFor(triggering.jobType, controller),
                                             application.deployApplicationVersionFor(triggering.jobType, controller, false)
                                                        .orElse(ApplicationVersion.unknown),
                                             triggering.reason);
    }

    /**
     * Triggers a change of this application
     *
     * @param applicationId the application to trigger
     * @param change the change to start deploying
     * @throws IllegalArgumentException if this application already have an ongoing change
     */
    public void triggerChange(ApplicationId applicationId, Change change) {
        applications().lockOrThrow(applicationId, application -> {
            if (application.change().isPresent() && !application.deploymentJobs().hasFailures())
                throw new IllegalArgumentException("Could not start " + change + " on " + application + ": "
                                                   + application.change() + " is already in progress");
            application = application.withChange(change);
            // Starting an application change consumes any outstanding one.
            if (change.application().isPresent())
                application = application.withOutstandingChange(Change.empty());
            applications().store(application);
        });
    }

    /**
     * Cancels any ongoing upgrade of the given application
     *
     * @param applicationId the application to trigger
     * @param keepApplicationChange whether an ongoing application change should be retained
     */
    public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) {
        applications().lockOrThrow(applicationId, application -> {
            applications().store(application.withChange(application.change().application()
                                                                   .map(Change::of)
                                                                   .filter(change -> keepApplicationChange)
                                                                   .orElse(Change.empty())));
            if ( ! applications().require(applicationId).change().isPresent())
                deploymentQueue.removeJobs(application.id());
        });
    }

    /**
     * Finds the next step to trigger for the given application, if any, and triggers it
     */
    private void triggerReadyJobs(LockedApplication application) {
        List<Triggering> triggerings = new ArrayList<>();
        Change change = application.change();
        List<DeploymentSpec.Step> steps = application.deploymentSpec().steps();
        if (steps.isEmpty()) steps = Collections.singletonList(new DeploymentSpec.DeclaredZone(Environment.test));
        // Walk the spec's steps in order, tracking when the previous step completed;
        // a step's remaining jobs may only trigger once the previous step is done.
        Optional<Instant> completedAt = Optional.of(clock.instant());
        String reason = "Deploying " + change.toString();
        for (DeploymentSpec.Step step : steps) {
            LockedApplication app = application;
            Collection<JobType> stepJobs = step.zones().stream().map(order::toJob).collect(toSet());
            Collection<JobType> remainingJobs = stepJobs.stream().filter(job -> ! completedAt(app, job).isPresent()).collect(toList());
            if (remainingJobs.isEmpty()) { // All jobs of this step are complete.
                if (stepJobs.isEmpty()) { // No jobs: this is a delay step.
                    Duration delay = ((DeploymentSpec.Delay) step).duration();
                    completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> ! at.isAfter(clock.instant()));
                    reason += " after a delay of " + delay;
                }
                else { // Step completion time is that of its last-finished job.
                    completedAt = stepJobs.stream().map(job -> completedAt(app, job).get()).max(naturalOrder());
                    reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", "));
                }
            }
            else if (completedAt.isPresent()) { // Previous step done; trigger the remaining jobs of this one.
                for (JobType job : remainingJobs)
                    triggerings.add(new Triggering(app, job, reason, stepJobs));
                completedAt = Optional.empty();
            }
        }
        // Every step completed: the change is done.
        if (completedAt.isPresent())
            application = application.withChange(Change.empty());
        // NOTE(review): allowedToTriggerNow is not defined within this class as shown here —
        // confirm it is available in scope.
        for (Triggering triggering : triggerings)
            if (application.deploymentJobs().isDeployableTo(triggering.jobType.environment(), change)
                && allowedToTriggerNow(triggering, application))
                application = trigger(triggering, application);
        applications().store(application);
    }

    /** When the given job last completed for the current change: deployment time for production jobs, success time otherwise. */
    private Optional<Instant> completedAt(Application application, JobType jobType) {
        return jobType.isProduction() ? changeCompletedAt(application, jobType)
                                      : application.deploymentJobs().successAt(application.change(), jobType);
    }

    private ApplicationController applications() { return controller.applications(); }

    /** Returns the instant when the given application's current change was completed for the given job. */
    private Optional<Instant> changeCompletedAt(Application application, JobType job) {
        if ( ! job.isProduction()) throw new IllegalArgumentException(job + " is not a production job!");
        // No deployment in this job's zone yet: the change cannot be complete there.
        Deployment deployment = application.deployments().get(job.zone(controller.system()).get());
        if (deployment == null) return Optional.empty();
        // 0 when the change does not carry that component; negative when the deployment is already ahead.
        int applicationComparison = application.change().application()
                                               .map(version -> version.compareTo(deployment.applicationVersion()))
                                               .orElse(0);
        int platformComparison = application.change().platform()
                                            .map(version -> version.compareTo(deployment.version()))
                                            .orElse(0);
        if (applicationComparison == -1 || platformComparison == -1)
            return Optional.of(deployment.at()); // Deployment is ahead of the change on some component.
        return applicationComparison == 0 && platformComparison == 0 ? Optional.of(deployment.at()) : Optional.empty();
    }

    /** Whether a newly built application version should be applied as a change right away. */
    private boolean acceptNewApplicationVersionNow(LockedApplication application) {
        if ( ! application.change().isPresent()) return true;
        if (application.change().application().isPresent()) return true; // Already deploying an application change; the new one replaces it.
        if (application.deploymentJobs().hasFailures()) return true; // NOTE(review): presumably accepted so the new version can fix the failure — confirm.
        if ( ! application.deploymentSpec().canUpgradeAt(clock.instant())
             || ! application.deploymentSpec().canChangeRevisionAt(clock.instant())) return true; // NOTE(review): accepts while the spec blocks changes — verify this is intended.
        return false;
    }

    /** A description of one job triggering: the job, the application, and why it is triggered. */
    public static class Triggering {

        private final LockedApplication application;
        private final JobType jobType;
        private final boolean retry;  // true when re-running a job which previously failed with out-of-capacity
        private final String reason;  // human-readable explanation, used for logging
        private final Collection<JobType> concurrentlyWith; // jobs which may run concurrently with this one

        public Triggering(LockedApplication application, JobType jobType, String reason, Collection<JobType> concurrentlyWith) {
            this.application = application;
            this.jobType = jobType;
            this.concurrentlyWith = concurrentlyWith;
            // A previous out-of-capacity failure turns this triggering into a retry with a fixed reason.
            JobStatus status = application.deploymentJobs().jobStatus().get(jobType);
            this.retry = status != null && status.jobError().filter(JobError.outOfCapacity::equals).isPresent();
            this.reason = retry ? "Retrying on out of capacity" : reason;
        }

        /** Convenience constructor for a triggering which allows no concurrent jobs. */
        public Triggering(LockedApplication application, JobType jobType, String reason) {
            this(application, jobType, reason, Collections.emptySet());
        }

        public String toString() {
            return String.format("Triggering %s for %s, deploying %s: %s", jobType, application, application.change(), reason);
        }

    }

}
class DeploymentTrigger { /** * The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final DeploymentQueue deploymentQueue; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller, "controller cannot be null"); Objects.requireNonNull(curator, "curator cannot be null"); Objects.requireNonNull(clock, "clock cannot be null"); this.controller = controller; this.clock = clock; this.deploymentQueue = new DeploymentQueue(controller, curator); this.order = new DeploymentOrder(controller::system); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** * Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public DeploymentQueue deploymentQueue() { return deploymentQueue; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to record information used when deciding what to trigger. * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { ApplicationVersion applicationVersion = report.sourceRevision().map(sr -> ApplicationVersion.from(sr, report.buildNumber())) .orElse(ApplicationVersion.unknown); application = application.withJobCompletion(report, applicationVersion, clock.instant(), controller); application = application.withProjectId(report.projectId()); if (report.jobType() == JobType.component && report.success()) { if ( ! 
acceptNewApplicationVersionNow(application)) application = application.withOutstandingChange(Change.of(applicationVersion)); else application = application.withChange(application.change().with(applicationVersion)); } applications().store(application); }); } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest() .withProjectId() .deploying(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** * Trigger a job for an application, if allowed * * @param triggering the triggering to execute, i.e., application, job type and reason * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication trigger(Triggering triggering, LockedApplication application) { if ( ! application.deploymentJobs().isDeployableTo(triggering.jobType.environment(), application.change())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", triggering.jobType, application, triggering.reason)); return application; } log.info(triggering.toString()); deploymentQueue.addJob(application.id(), triggering.jobType, triggering.retry); return application.withJobTriggering(triggering.jobType, clock.instant(), application.deployVersionFor(triggering.jobType, controller), application.deployApplicationVersionFor(triggering.jobType, controller, false) .orElse(ApplicationVersion.unknown), triggering.reason); } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.change().isPresent() 
&& !application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.change() + " is already in progress"); application = application.withChange(change); if (change.application().isPresent()) application = application.withOutstandingChange(Change.empty()); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) { applications().lockOrThrow(applicationId, application -> { applications().store(application.withChange(application.change().application() .map(Change::of) .filter(change -> keepApplicationChange) .orElse(Change.empty()))); if ( ! applications().require(applicationId).change().isPresent()) deploymentQueue.removeJobs(application.id()); }); } /** * Finds the next step to trigger for the given application, if any, and triggers it */ private void triggerReadyJobs(LockedApplication application) { List<Triggering> triggerings = new ArrayList<>(); Change change = application.change(); List<DeploymentSpec.Step> steps = application.deploymentSpec().steps(); if (steps.isEmpty()) steps = Collections.singletonList(new DeploymentSpec.DeclaredZone(Environment.test)); Optional<Instant> completedAt = Optional.of(clock.instant()); String reason = "Deploying " + change.toString(); for (DeploymentSpec.Step step : steps) { LockedApplication app = application; Set<JobType> stepJobs = step.zones().stream().map(order::toJob).collect(toSet()); Set<JobType> remainingJobs = stepJobs.stream().filter(job -> ! completedAt(app, job).isPresent()).collect(toSet()); if (remainingJobs.isEmpty()) { if (stepJobs.isEmpty()) { Duration delay = ((DeploymentSpec.Delay) step).duration(); completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> ! 
at.isAfter(clock.instant())); reason += " after a delay of " + delay; } else { completedAt = stepJobs.stream().map(job -> completedAt(app, job).get()).max(naturalOrder()); reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", ")); } } else if (completedAt.isPresent()) { for (JobType job : remainingJobs) triggerings.add(new Triggering(app, job, reason, stepJobs)); completedAt = Optional.empty(); } } if (completedAt.isPresent()) application = application.withChange(Change.empty()); for (Triggering triggering : triggerings) if (application.deploymentJobs().isDeployableTo(triggering.jobType.environment(), change) && allowedToTriggerNow(triggering, application)) application = trigger(triggering, application); applications().store(application); } private Optional<Instant> completedAt(Application application, JobType jobType) { return jobType.isProduction() ? changeCompletedAt(application, jobType) : application.deploymentJobs().successAt(application.change(), jobType); } private ApplicationController applications() { return controller.applications(); } /** Returns the instant when the given application's current change was completed for the given job. */ private Optional<Instant> changeCompletedAt(Application application, JobType job) { if ( ! 
job.isProduction()) throw new IllegalArgumentException(job + " is not a production job!"); Deployment deployment = application.deployments().get(job.zone(controller.system()).get()); if (deployment == null) return Optional.empty(); int applicationComparison = application.change().application() .map(version -> version.compareTo(deployment.applicationVersion())) .orElse(0); int platformComparison = application.change().platform() .map(version -> version.compareTo(deployment.version())) .orElse(0); return Optional.of(deployment.at()) .filter(ignored -> applicationComparison == -1 || platformComparison == -1 || (applicationComparison == 0 && platformComparison == 0)); } private boolean acceptNewApplicationVersionNow(LockedApplication application) { if ( ! application.change().isPresent()) return true; if (application.change().application().isPresent()) return true; if (application.deploymentJobs().hasFailures()) return true; if ( ! application.deploymentSpec().canUpgradeAt(clock.instant()) || ! application.deploymentSpec().canChangeRevisionAt(clock.instant())) return true; return false; } public static class Triggering { private final LockedApplication application; private final JobType jobType; private final boolean retry; private final String reason; private final Collection<JobType> concurrentlyWith; public Triggering(LockedApplication application, JobType jobType, String reason, Collection<JobType> concurrentlyWith) { this.application = application; this.jobType = jobType; this.concurrentlyWith = concurrentlyWith; JobStatus status = application.deploymentJobs().jobStatus().get(jobType); this.retry = status != null && status.jobError().filter(JobError.outOfCapacity::equals).isPresent(); this.reason = retry ? 
"Retrying on out of capacity" : reason; } public Triggering(LockedApplication application, JobType jobType, String reason) { this(application, jobType, reason, Collections.emptySet()); } public String toString() { return String.format("Triggering %s for %s, deploying %s: %s", jobType, application, application.change(), reason); } } }
Can't see this is relevant any more.
private void triggerReadyJobs(LockedApplication application) { List<Triggering> triggerings = new ArrayList<>(); Change change = application.change(); List<DeploymentSpec.Step> steps = application.deploymentSpec().steps(); if (steps.isEmpty()) steps = Collections.singletonList(new DeploymentSpec.DeclaredZone(Environment.test)); Optional<Instant> completedAt = Optional.of(clock.instant()); String reason = "Deploying " + change.toString(); for (DeploymentSpec.Step step : steps) { LockedApplication app = application; Collection<JobType> stepJobs = step.zones().stream().map(order::toJob).collect(toSet()); Collection<JobType> remainingJobs = stepJobs.stream().filter(job -> ! completedAt(app, job).isPresent()).collect(toList()); if (remainingJobs.isEmpty()) { if (stepJobs.isEmpty()) { Duration delay = ((DeploymentSpec.Delay) step).duration(); completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> ! at.isAfter(clock.instant())); reason += " after a delay of " + delay; } else { completedAt = stepJobs.stream().map(job -> completedAt(app, job).get()).max(naturalOrder()); reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", ")); } } else if (completedAt.isPresent()) { for (JobType job : remainingJobs) triggerings.add(new Triggering(app, job, reason, stepJobs)); completedAt = Optional.empty(); } } if (completedAt.isPresent()) application = application.withChange(Change.empty()); for (Triggering triggering : triggerings) if (application.deploymentJobs().isDeployableTo(triggering.jobType.environment(), change) && allowedToTriggerNow(triggering, application)) application = trigger(triggering, application); applications().store(application); }
LockedApplication app = application;
private void triggerReadyJobs(LockedApplication application) { List<Triggering> triggerings = new ArrayList<>(); Change change = application.change(); List<DeploymentSpec.Step> steps = application.deploymentSpec().steps(); if (steps.isEmpty()) steps = Collections.singletonList(new DeploymentSpec.DeclaredZone(Environment.test)); Optional<Instant> completedAt = Optional.of(clock.instant()); String reason = "Deploying " + change.toString(); for (DeploymentSpec.Step step : steps) { LockedApplication app = application; Set<JobType> stepJobs = step.zones().stream().map(order::toJob).collect(toSet()); Set<JobType> remainingJobs = stepJobs.stream().filter(job -> ! completedAt(app, job).isPresent()).collect(toSet()); if (remainingJobs.isEmpty()) { if (stepJobs.isEmpty()) { Duration delay = ((DeploymentSpec.Delay) step).duration(); completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> ! at.isAfter(clock.instant())); reason += " after a delay of " + delay; } else { completedAt = stepJobs.stream().map(job -> completedAt(app, job).get()).max(naturalOrder()); reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", ")); } } else if (completedAt.isPresent()) { for (JobType job : remainingJobs) triggerings.add(new Triggering(app, job, reason, stepJobs)); completedAt = Optional.empty(); } } if (completedAt.isPresent()) application = application.withChange(Change.empty()); for (Triggering triggering : triggerings) if (application.deploymentJobs().isDeployableTo(triggering.jobType.environment(), change) && allowedToTriggerNow(triggering, application)) application = trigger(triggering, application); applications().store(application); }
class DeploymentTrigger { /** * The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final DeploymentQueue deploymentQueue; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller, "controller cannot be null"); Objects.requireNonNull(curator, "curator cannot be null"); Objects.requireNonNull(clock, "clock cannot be null"); this.controller = controller; this.clock = clock; this.deploymentQueue = new DeploymentQueue(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** * Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public DeploymentQueue deploymentQueue() { return deploymentQueue; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to record information used when deciding what to trigger. * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { ApplicationVersion applicationVersion = report.sourceRevision().map(sr -> ApplicationVersion.from(sr, report.buildNumber())) .orElse(ApplicationVersion.unknown); application = application.withJobCompletion(report, applicationVersion, clock.instant(), controller); application = application.withProjectId(report.projectId()); if (report.jobType() == JobType.component && report.success()) { if ( ! 
acceptNewApplicationVersionNow(application)) application = application.withOutstandingChange(Change.of(applicationVersion)); else application = application.withChange(application.change().with(applicationVersion)); } applications().store(application); }); } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest() .withProjectId() .deploying(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** * Trigger a job for an application, if allowed * * @param triggering the triggering to execute, i.e., application, job type and reason * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication trigger(Triggering triggering, LockedApplication application) { if ( ! application.deploymentJobs().isDeployableTo(triggering.jobType.environment(), application.change())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", triggering.jobType, application, triggering.reason)); return application; } log.info(triggering.toString()); deploymentQueue.addJob(application.id(), triggering.jobType, triggering.retry); return application.withJobTriggering(triggering.jobType, clock.instant(), application.deployVersionFor(triggering.jobType, controller), application.deployApplicationVersionFor(triggering.jobType, controller, false) .orElse(ApplicationVersion.unknown), triggering.reason); } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.change().isPresent() 
&& !application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.change() + " is already in progress"); application = application.withChange(change); if (change.application().isPresent()) application = application.withOutstandingChange(Change.empty()); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) { applications().lockOrThrow(applicationId, application -> { applications().store(application.withChange(application.change().application() .map(Change::of) .filter(change -> keepApplicationChange) .orElse(Change.empty()))); if ( ! applications().require(applicationId).change().isPresent()) deploymentQueue.removeJobs(application.id()); }); } /** * Finds the next step to trigger for the given application, if any, and triggers it */ private Optional<Instant> completedAt(Application application, JobType jobType) { return jobType.isProduction() ? changeCompletedAt(application, jobType) : application.deploymentJobs().successAt(application.change(), jobType); } private boolean allowedToTriggerNow(Triggering triggering, Application application) { if (application.deploymentJobs().isRunning(triggering.jobType, jobTimeoutLimit())) return false; if ( ! triggering.jobType.isProduction()) return true; if ( ! triggering.concurrentlyWith.containsAll(JobList.from(application) .production() .running(jobTimeoutLimit()) .mapToList(JobStatus::type))) return false; if (application.change().blockedBy(application.deploymentSpec(), clock.instant())) return false; return true; } private ApplicationController applications() { return controller.applications(); } /** Returns the instant when the given application's current change was completed for the given job. 
*/ private Optional<Instant> changeCompletedAt(Application application, JobType job) { if ( ! job.isProduction()) throw new IllegalArgumentException(job + " is not a production job!"); Deployment deployment = application.deployments().get(job.zone(controller.system()).get()); if (deployment == null) return Optional.empty(); int applicationComparison = application.change().application() .map(version -> version.compareTo(deployment.applicationVersion())) .orElse(0); int platformComparison = application.change().platform() .map(version -> version.compareTo(deployment.version())) .orElse(0); if (applicationComparison == -1 || platformComparison == -1) return Optional.of(deployment.at()); return applicationComparison == 0 && platformComparison == 0 ? Optional.of(deployment.at()) : Optional.empty(); } private boolean acceptNewApplicationVersionNow(LockedApplication application) { if ( ! application.change().isPresent()) return true; if (application.change().application().isPresent()) return true; if (application.deploymentJobs().hasFailures()) return true; if ( ! application.deploymentSpec().canUpgradeAt(clock.instant()) || ! application.deploymentSpec().canChangeRevisionAt(clock.instant())) return true; return false; } public static class Triggering { private final LockedApplication application; private final JobType jobType; private final boolean retry; private final String reason; private final Collection<JobType> concurrentlyWith; public Triggering(LockedApplication application, JobType jobType, String reason, Collection<JobType> concurrentlyWith) { this.application = application; this.jobType = jobType; this.concurrentlyWith = concurrentlyWith; JobStatus status = application.deploymentJobs().jobStatus().get(jobType); this.retry = status != null && status.jobError().filter(JobError.outOfCapacity::equals).isPresent(); this.reason = retry ? 
"Retrying on out of capacity" : reason; } public Triggering(LockedApplication application, JobType jobType, String reason) { this(application, jobType, reason, Collections.emptySet()); } public String toString() { return String.format("Triggering %s for %s, deploying %s: %s", jobType, application, application.change(), reason); } } }
class DeploymentTrigger { /** * The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final DeploymentQueue deploymentQueue; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller, "controller cannot be null"); Objects.requireNonNull(curator, "curator cannot be null"); Objects.requireNonNull(clock, "clock cannot be null"); this.controller = controller; this.clock = clock; this.deploymentQueue = new DeploymentQueue(controller, curator); this.order = new DeploymentOrder(controller::system); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** * Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public DeploymentQueue deploymentQueue() { return deploymentQueue; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to record information used when deciding what to trigger. * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { ApplicationVersion applicationVersion = report.sourceRevision().map(sr -> ApplicationVersion.from(sr, report.buildNumber())) .orElse(ApplicationVersion.unknown); application = application.withJobCompletion(report, applicationVersion, clock.instant(), controller); application = application.withProjectId(report.projectId()); if (report.jobType() == JobType.component && report.success()) { if ( ! 
acceptNewApplicationVersionNow(application)) application = application.withOutstandingChange(Change.of(applicationVersion)); else application = application.withChange(application.change().with(applicationVersion)); } applications().store(application); }); } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest() .withProjectId() .deploying(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** * Trigger a job for an application, if allowed * * @param triggering the triggering to execute, i.e., application, job type and reason * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication trigger(Triggering triggering, LockedApplication application) { if ( ! application.deploymentJobs().isDeployableTo(triggering.jobType.environment(), application.change())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", triggering.jobType, application, triggering.reason)); return application; } log.info(triggering.toString()); deploymentQueue.addJob(application.id(), triggering.jobType, triggering.retry); return application.withJobTriggering(triggering.jobType, clock.instant(), application.deployVersionFor(triggering.jobType, controller), application.deployApplicationVersionFor(triggering.jobType, controller, false) .orElse(ApplicationVersion.unknown), triggering.reason); } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.change().isPresent() 
&& !application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.change() + " is already in progress"); application = application.withChange(change); if (change.application().isPresent()) application = application.withOutstandingChange(Change.empty()); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) { applications().lockOrThrow(applicationId, application -> { applications().store(application.withChange(application.change().application() .map(Change::of) .filter(change -> keepApplicationChange) .orElse(Change.empty()))); if ( ! applications().require(applicationId).change().isPresent()) deploymentQueue.removeJobs(application.id()); }); } /** * Finds the next step to trigger for the given application, if any, and triggers it */ private Optional<Instant> completedAt(Application application, JobType jobType) { return jobType.isProduction() ? changeCompletedAt(application, jobType) : application.deploymentJobs().successAt(application.change(), jobType); } private boolean allowedToTriggerNow(Triggering triggering, Application application) { if (application.deploymentJobs().isRunning(triggering.jobType, jobTimeoutLimit())) return false; if ( ! triggering.jobType.isProduction()) return true; if ( ! triggering.concurrentlyWith.containsAll(JobList.from(application) .production() .running(jobTimeoutLimit()) .mapToList(JobStatus::type))) return false; if (application.change().blockedBy(application.deploymentSpec(), clock.instant())) return false; return true; } private ApplicationController applications() { return controller.applications(); } /** Returns the instant when the given application's current change was completed for the given job. 
*/ private Optional<Instant> changeCompletedAt(Application application, JobType job) { if ( ! job.isProduction()) throw new IllegalArgumentException(job + " is not a production job!"); Deployment deployment = application.deployments().get(job.zone(controller.system()).get()); if (deployment == null) return Optional.empty(); int applicationComparison = application.change().application() .map(version -> version.compareTo(deployment.applicationVersion())) .orElse(0); int platformComparison = application.change().platform() .map(version -> version.compareTo(deployment.version())) .orElse(0); return Optional.of(deployment.at()) .filter(ignored -> applicationComparison == -1 || platformComparison == -1 || (applicationComparison == 0 && platformComparison == 0)); } private boolean acceptNewApplicationVersionNow(LockedApplication application) { if ( ! application.change().isPresent()) return true; if (application.change().application().isPresent()) return true; if (application.deploymentJobs().hasFailures()) return true; if ( ! application.deploymentSpec().canUpgradeAt(clock.instant()) || ! application.deploymentSpec().canChangeRevisionAt(clock.instant())) return true; return false; } public static class Triggering { private final LockedApplication application; private final JobType jobType; private final boolean retry; private final String reason; private final Collection<JobType> concurrentlyWith; public Triggering(LockedApplication application, JobType jobType, String reason, Collection<JobType> concurrentlyWith) { this.application = application; this.jobType = jobType; this.concurrentlyWith = concurrentlyWith; JobStatus status = application.deploymentJobs().jobStatus().get(jobType); this.retry = status != null && status.jobError().filter(JobError.outOfCapacity::equals).isPresent(); this.reason = retry ? 
"Retrying on out of capacity" : reason; } public Triggering(LockedApplication application, JobType jobType, String reason) { this(application, jobType, reason, Collections.emptySet()); } public String toString() { return String.format("Triggering %s for %s, deploying %s: %s", jobType, application, application.change(), reason); } } }