index
int64
0
0
repo_id
stringlengths
9
205
file_path
stringlengths
31
246
content
stringlengths
1
12.2M
__index_level_0__
int64
0
10k
0
Create_ds/gobblin/gobblin-service/src/test/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/test/java/org/apache/gobblin/service/modules/orchestration/TestServiceMetrics.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.service.modules.orchestration; import com.codahale.metrics.Metric; import org.apache.gobblin.metrics.metric.filter.MetricNameRegexFilter; import org.testng.Assert; import org.testng.annotations.Test; import static org.mockito.Mockito.*; @Test public class TestServiceMetrics { @Test public void matchesTest() { MetricNameRegexFilter metricNameRegexForDagManager = DagManagerMetrics.getMetricsFilterForDagManager(); Assert.assertTrue(metricNameRegexForDagManager.matches("GobblinService.testGroup.testFlow.RunningStatus", mock(Metric.class))); Assert.assertTrue(metricNameRegexForDagManager.matches("GobblinService.test..RunningStatus", mock(Metric.class))); Assert.assertFalse(metricNameRegexForDagManager.matches("test3.RunningStatus", mock(Metric.class))); Assert.assertFalse(metricNameRegexForDagManager.matches("GobblinService.test4RunningStatus", mock(Metric.class))); Assert.assertFalse(metricNameRegexForDagManager.matches("GobblinServicetest5.RunningStatus", mock(Metric.class))); } }
3,800
0
Create_ds/gobblin/gobblin-service/src/test/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/test/java/org/apache/gobblin/service/modules/orchestration/InMemoryUserQuotaManagerTest.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.service.modules.orchestration; import com.typesafe.config.Config; import com.typesafe.config.ConfigFactory; import com.typesafe.config.ConfigValueFactory; import java.io.IOException; import java.util.Collections; import java.util.List; import org.apache.gobblin.configuration.ConfigurationKeys; import org.apache.gobblin.service.modules.flowgraph.Dag; import org.apache.gobblin.service.modules.spec.JobExecutionPlan; import org.testng.Assert; import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; public class InMemoryUserQuotaManagerTest { InMemoryUserQuotaManager _quotaManager; @BeforeClass public void setUp() { Config quotaConfig = ConfigFactory.empty() .withValue(AbstractUserQuotaManager.PER_USER_QUOTA, ConfigValueFactory.fromAnyRef("user:1,user2:1,user3:1,user6:1")) .withValue(AbstractUserQuotaManager.PER_FLOWGROUP_QUOTA, ConfigValueFactory.fromAnyRef("group1:1,group2:2")); this._quotaManager = new InMemoryUserQuotaManager(quotaConfig); } // Tests that if exceeding the quota on startup, do not throw an exception and do not decrement the counter @Test public void testExceedsQuotaOnStartup() throws Exception { List<Dag<JobExecutionPlan>> dags = 
DagManagerTest.buildDagList(2, "user", ConfigFactory.empty()); // Ensure that the current attempt is 1, normally done by DagManager dags.get(0).getNodes().get(0).getValue().setCurrentAttempts(1); dags.get(1).getNodes().get(0).getValue().setCurrentAttempts(1); // Should not be throwing the exception this._quotaManager.init(dags); } @Test public void testExceedsUserQuotaThrowsException() throws Exception { List<Dag<JobExecutionPlan>> dags = DagManagerTest.buildDagList(2, "user2", ConfigFactory.empty()); // Ensure that the current attempt is 1, normally done by DagManager dags.get(0).getNodes().get(0).getValue().setCurrentAttempts(1); dags.get(1).getNodes().get(0).getValue().setCurrentAttempts(1); this._quotaManager.checkQuota(Collections.singleton(dags.get(0).getNodes().get(0))); Assert.assertThrows(IOException.class, () -> { this._quotaManager.checkQuota(Collections.singleton(dags.get(1).getNodes().get(0))); }); } @Test public void testMultipleRemoveQuotasIdempotent() throws Exception { // Test that multiple decrements cannot cause the number to decrease by more than 1 List<Dag<JobExecutionPlan>> dags = DagManagerTest.buildDagList(2, "user3", ConfigFactory.empty()); // Ensure that the current attempt is 1, normally done by DagManager dags.get(0).getNodes().get(0).getValue().setCurrentAttempts(1); dags.get(1).getNodes().get(0).getValue().setCurrentAttempts(1); this._quotaManager.checkQuota(Collections.singleton(dags.get(0).getNodes().get(0))); Assert.assertTrue(this._quotaManager.releaseQuota(dags.get(0).getNodes().get(0))); Assert.assertFalse(this._quotaManager.releaseQuota(dags.get(0).getNodes().get(0))); } @Test public void testExceedsFlowGroupQuotaThrowsException() throws Exception { // Test flowgroup quotas List<Dag<JobExecutionPlan>> dags = DagManagerTest.buildDagList(2, "user4", ConfigFactory.empty().withValue( ConfigurationKeys.FLOW_GROUP_KEY, ConfigValueFactory.fromAnyRef("group1"))); // Ensure that the current attempt is 1, normally done by DagManager 
dags.get(0).getNodes().get(0).getValue().setCurrentAttempts(1); dags.get(1).getNodes().get(0).getValue().setCurrentAttempts(1); this._quotaManager.checkQuota(Collections.singleton(dags.get(0).getNodes().get(0))); Assert.assertThrows(IOException.class, () -> { this._quotaManager.checkQuota(Collections.singleton(dags.get(1).getNodes().get(0))); }); } @Test public void testUserAndFlowGroupQuotaMultipleUsersAdd() throws Exception { // Test that user quota and group quotas can both be exceeded, and that decrementing one flow will change both quotas Dag<JobExecutionPlan> dag1 = DagManagerTest.buildDag("1", System.currentTimeMillis(),DagManager.FailureOption.FINISH_ALL_POSSIBLE.name(), 1, "user5", ConfigFactory.empty().withValue(ConfigurationKeys.FLOW_GROUP_KEY, ConfigValueFactory.fromAnyRef("group2"))); Dag<JobExecutionPlan> dag2 = DagManagerTest.buildDag("2", System.currentTimeMillis(),DagManager.FailureOption.FINISH_ALL_POSSIBLE.name(), 1, "user6", ConfigFactory.empty().withValue(ConfigurationKeys.FLOW_GROUP_KEY, ConfigValueFactory.fromAnyRef("group2"))); Dag<JobExecutionPlan> dag3 = DagManagerTest.buildDag("3", System.currentTimeMillis(),DagManager.FailureOption.FINISH_ALL_POSSIBLE.name(), 1, "user6", ConfigFactory.empty().withValue(ConfigurationKeys.FLOW_GROUP_KEY, ConfigValueFactory.fromAnyRef("group3"))); Dag<JobExecutionPlan> dag4 = DagManagerTest.buildDag("4", System.currentTimeMillis(),DagManager.FailureOption.FINISH_ALL_POSSIBLE.name(), 1, "user5", ConfigFactory.empty().withValue(ConfigurationKeys.FLOW_GROUP_KEY, ConfigValueFactory.fromAnyRef("group2"))); // Ensure that the current attempt is 1, normally done by DagManager dag1.getNodes().get(0).getValue().setCurrentAttempts(1); dag2.getNodes().get(0).getValue().setCurrentAttempts(1); dag3.getNodes().get(0).getValue().setCurrentAttempts(1); dag4.getNodes().get(0).getValue().setCurrentAttempts(1); this._quotaManager.checkQuota(Collections.singleton(dag1.getNodes().get(0))); 
this._quotaManager.checkQuota(Collections.singleton(dag2.getNodes().get(0))); // Should fail due to user quota Assert.assertThrows(IOException.class, () -> { this._quotaManager.checkQuota(Collections.singleton(dag3.getNodes().get(0))); }); // Should fail due to flowgroup quota Assert.assertThrows(IOException.class, () -> { this._quotaManager.checkQuota(Collections.singleton(dag4.getNodes().get(0))); }); // should pass due to quota being released this._quotaManager.releaseQuota(dag2.getNodes().get(0)); this._quotaManager.checkQuota(Collections.singleton(dag3.getNodes().get(0))); this._quotaManager.checkQuota(Collections.singleton(dag4.getNodes().get(0))); } }
3,801
0
Create_ds/gobblin/gobblin-service/src/test/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/test/java/org/apache/gobblin/service/modules/orchestration/OrchestratorTest.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.service.modules.orchestration; import com.codahale.metrics.MetricRegistry; import com.google.common.base.Optional; import com.google.gson.Gson; import com.google.gson.GsonBuilder; import com.typesafe.config.Config; import java.io.File; import java.net.URI; import java.net.URISyntaxException; import java.util.Collection; import java.util.List; import java.util.Properties; import org.apache.commons.io.FileUtils; import org.apache.gobblin.configuration.ConfigurationKeys; import org.apache.gobblin.metrics.MetricContext; import org.apache.gobblin.metrics.ServiceMetricNames; import org.apache.gobblin.runtime.api.FlowSpec; import org.apache.gobblin.runtime.api.Spec; import org.apache.gobblin.runtime.api.SpecCatalogListener; import org.apache.gobblin.runtime.api.SpecExecutor; import org.apache.gobblin.runtime.api.TopologySpec; import org.apache.gobblin.runtime.app.ServiceBasedAppLauncher; import org.apache.gobblin.runtime.spec_catalog.FlowCatalog; import org.apache.gobblin.runtime.spec_catalog.TopologyCatalog; import org.apache.gobblin.runtime.spec_executorInstance.InMemorySpecExecutor; import org.apache.gobblin.service.modules.flow.IdentityFlowToJobSpecCompiler; import 
org.apache.gobblin.service.modules.utils.SharedFlowMetricsSingleton; import org.apache.gobblin.service.monitoring.FlowStatusGenerator; import org.apache.gobblin.util.ConfigUtils; import org.apache.gobblin.util.PathUtils; import org.apache.hadoop.fs.Path; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.testng.Assert; import org.testng.annotations.AfterClass; import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; import static org.mockito.Mockito.*; public class OrchestratorTest { private static final Logger logger = LoggerFactory.getLogger(TopologyCatalog.class); private static Gson gson = new GsonBuilder().setPrettyPrinting().create(); private static final String SPEC_STORE_PARENT_DIR = "/tmp/orchestrator/"; private static final String SPEC_DESCRIPTION = "Test Orchestrator"; private static final String SPEC_VERSION = FlowSpec.Builder.DEFAULT_VERSION; private static final String TOPOLOGY_SPEC_STORE_DIR = "/tmp/orchestrator/topologyTestSpecStore"; private static final String FLOW_SPEC_STORE_DIR = "/tmp/orchestrator/flowTestSpecStore"; private static final String FLOW_SPEC_GROUP_DIR = "/tmp/orchestrator/flowTestSpecStore/flowTestGroupDir"; private ServiceBasedAppLauncher serviceLauncher; private TopologyCatalog topologyCatalog; private TopologySpec topologySpec; private FlowCatalog flowCatalog; private SpecCatalogListener mockListener; private FlowSpec flowSpec; private FlowStatusGenerator mockStatusGenerator; private FlowTriggerHandler _mockFlowTriggerHandler; private Orchestrator orchestrator; @BeforeClass public void setup() throws Exception { cleanUpDir(TOPOLOGY_SPEC_STORE_DIR); cleanUpDir(FLOW_SPEC_STORE_DIR); Properties orchestratorProperties = new Properties(); Properties topologyProperties = new Properties(); topologyProperties.put("specStore.fs.dir", TOPOLOGY_SPEC_STORE_DIR); Properties flowProperties = new Properties(); flowProperties.put("specStore.fs.dir", FLOW_SPEC_STORE_DIR); this.serviceLauncher = new 
ServiceBasedAppLauncher(orchestratorProperties, "OrchestratorCatalogTest"); this.topologyCatalog = new TopologyCatalog(ConfigUtils.propertiesToConfig(topologyProperties), Optional.of(logger)); this.serviceLauncher.addService(topologyCatalog); // Test warm standby flow catalog, which has orchestrator as listener this.flowCatalog = new FlowCatalog(ConfigUtils.propertiesToConfig(flowProperties), Optional.of(logger), Optional.<MetricContext>absent(), true, true); this.serviceLauncher.addService(flowCatalog); this.mockStatusGenerator = mock(FlowStatusGenerator.class); this._mockFlowTriggerHandler = mock(FlowTriggerHandler.class); this.orchestrator = new Orchestrator(ConfigUtils.propertiesToConfig(orchestratorProperties), this.mockStatusGenerator, Optional.of(this.topologyCatalog), Optional.<DagManager>absent(), Optional.of(logger), Optional.of(this._mockFlowTriggerHandler), new SharedFlowMetricsSingleton( ConfigUtils.propertiesToConfig(orchestratorProperties))); this.topologyCatalog.addListener(orchestrator); this.flowCatalog.addListener(orchestrator); // Start application this.serviceLauncher.start(); // Create Spec to play with this.topologySpec = initTopologySpec(); this.flowSpec = initFlowSpec(); } private void cleanUpDir(String dir) throws Exception { File specStoreDir = new File(dir); if (specStoreDir.exists()) { FileUtils.deleteDirectory(specStoreDir); } } private TopologySpec initTopologySpec() { Properties properties = new Properties(); properties.put("specStore.fs.dir", TOPOLOGY_SPEC_STORE_DIR); properties.put("specExecInstance.capabilities", "source:destination"); Config config = ConfigUtils.propertiesToConfig(properties); SpecExecutor specExecutorInstance = new InMemorySpecExecutor(config); TopologySpec.Builder topologySpecBuilder = TopologySpec.builder(computeTopologySpecURI(SPEC_STORE_PARENT_DIR, TOPOLOGY_SPEC_STORE_DIR)) .withConfig(config) .withDescription(SPEC_DESCRIPTION) .withVersion(SPEC_VERSION) .withSpecExecutor(specExecutorInstance); return 
topologySpecBuilder.build(); } private FlowSpec initFlowSpec() { Properties properties = new Properties(); String flowName = "test_flowName"; String flowGroup = "test_flowGroup"; properties.put(ConfigurationKeys.FLOW_NAME_KEY, flowName); properties.put(ConfigurationKeys.FLOW_GROUP_KEY, flowGroup); properties.put("job.name", flowName); properties.put("job.group", flowGroup); properties.put("specStore.fs.dir", FLOW_SPEC_STORE_DIR); properties.put("specExecInstance.capabilities", "source:destination"); properties.put("job.schedule", "0 0 0 ? * * 2050"); ; properties.put("gobblin.flow.sourceIdentifier", "source"); properties.put("gobblin.flow.destinationIdentifier", "destination"); Config config = ConfigUtils.propertiesToConfig(properties); FlowSpec.Builder flowSpecBuilder = null; flowSpecBuilder = FlowSpec.builder(computeTopologySpecURI(SPEC_STORE_PARENT_DIR, FLOW_SPEC_GROUP_DIR)) .withConfig(config) .withDescription(SPEC_DESCRIPTION) .withVersion(SPEC_VERSION) .withTemplate(URI.create("templateURI")); return flowSpecBuilder.build(); } private FlowSpec initBadFlowSpec() { // Bad Flow Spec as we don't set the job name, and will fail the compilation Properties properties = new Properties(); properties.put("specStore.fs.dir", FLOW_SPEC_STORE_DIR); properties.put("specExecInstance.capabilities", "source:destination"); properties.put("gobblin.flow.sourceIdentifier", "source"); properties.put("gobblin.flow.destinationIdentifier", "destination"); Config config = ConfigUtils.propertiesToConfig(properties); FlowSpec.Builder flowSpecBuilder = null; try { flowSpecBuilder = FlowSpec.builder(computeTopologySpecURI(SPEC_STORE_PARENT_DIR, FLOW_SPEC_GROUP_DIR)) .withConfig(config) .withDescription(SPEC_DESCRIPTION) .withVersion(SPEC_VERSION) .withTemplate(new URI("templateURI")); } catch (URISyntaxException e) { throw new RuntimeException(e); } return flowSpecBuilder.build(); } public URI computeTopologySpecURI(String parent, String current) { // Make sure this is relative URI uri = 
PathUtils.relativizePath(new Path(current), new Path(parent)).toUri(); return uri; } @AfterClass public void cleanUp() throws Exception { // Shutdown Catalog this.serviceLauncher.stop(); File specStoreDir = new File(SPEC_STORE_PARENT_DIR); if (specStoreDir.exists()) { FileUtils.deleteDirectory(specStoreDir); } } @Test public void createTopologySpec() { IdentityFlowToJobSpecCompiler specCompiler = (IdentityFlowToJobSpecCompiler) this.orchestrator.getSpecCompiler(); // List Current Specs Collection<Spec> specs = topologyCatalog.getSpecs(); logger.info("[Before Create] Number of specs: " + specs.size()); int i=0; for (Spec spec : specs) { TopologySpec topologySpec = (TopologySpec) spec; logger.info("[Before Create] Spec " + i++ + ": " + gson.toJson(topologySpec)); } // Make sure TopologyCatalog is empty Assert.assertTrue(specs.size() == 0, "Spec store should be empty before addition"); // Make sure TopologyCatalog Listener is empty Assert.assertTrue(specCompiler.getTopologySpecMap().size() == 0, "SpecCompiler should not know about any Topology " + "before addition"); // Create and add Spec this.topologyCatalog.put(topologySpec); // List Specs after adding specs = topologyCatalog.getSpecs(); logger.info("[After Create] Number of specs: " + specs.size()); i = 0; for (Spec spec : specs) { topologySpec = (TopologySpec) spec; logger.info("[After Create] Spec " + i++ + ": " + gson.toJson(topologySpec)); } // Make sure TopologyCatalog has the added Topology Assert.assertTrue(specs.size() == 1, "Spec store should contain 1 Spec after addition"); // Make sure TopologyCatalog Listener knows about added Topology Assert.assertTrue(specCompiler.getTopologySpecMap().size() == 1, "SpecCompiler should contain 1 Spec after addition"); } @Test (dependsOnMethods = "createTopologySpec") public void createFlowSpec() throws Throwable { // Since only 1 Topology with 1 SpecProducer has been added in previous test // .. 
it should be available and responsible for our new FlowSpec IdentityFlowToJobSpecCompiler specCompiler = (IdentityFlowToJobSpecCompiler) this.orchestrator.getSpecCompiler(); SpecExecutor sei = specCompiler.getTopologySpecMap().values().iterator().next().getSpecExecutor(); // List Current Specs Collection<Spec> specs = flowCatalog.getSpecs(); logger.info("[Before Create] Number of specs: " + specs.size()); int i=0; for (Spec spec : specs) { FlowSpec flowSpec = (FlowSpec) spec; logger.info("[Before Create] Spec " + i++ + ": " + gson.toJson(flowSpec)); } // Make sure FlowCatalog is empty Assert.assertTrue(specs.size() == 0, "Spec store should be empty before addition"); // Make sure FlowCatalog Listener is empty Assert.assertTrue(((List)(sei.getProducer().get().listSpecs().get())).size() == 0, "SpecProducer should not know about " + "any Flow before addition"); // Make sure we cannot add flow to specCatalog it flowSpec cannot compile Assert.expectThrows(Exception.class,() -> this.flowCatalog.put(initBadFlowSpec())); Assert.assertTrue(specs.size() == 0, "Spec store should be empty after adding bad flow spec"); // Create and add Spec this.flowCatalog.put(flowSpec); // List Specs after adding specs = flowCatalog.getSpecs(); logger.info("[After Create] Number of specs: " + specs.size()); i = 0; for (Spec spec : specs) { flowSpec = (FlowSpec) spec; logger.info("[After Create] Spec " + i++ + ": " + gson.toJson(flowSpec)); } // Make sure FlowCatalog has the added Flow Assert.assertTrue(specs.size() == 1, "Spec store should contain 1 Spec after addition"); // Orchestrator is a no-op listener for any new FlowSpecs Assert.assertTrue(((List)(sei.getProducer().get().listSpecs().get())).size() == 0, "SpecProducer should contain 0 " + "Spec after addition"); } @Test (dependsOnMethods = "createFlowSpec") public void deleteFlowSpec() throws Exception { // Since only 1 Flow has been added in previous test it should be available IdentityFlowToJobSpecCompiler specCompiler = 
(IdentityFlowToJobSpecCompiler) this.orchestrator.getSpecCompiler(); SpecExecutor sei = specCompiler.getTopologySpecMap().values().iterator().next().getSpecExecutor(); // List Current Specs Collection<Spec> specs = flowCatalog.getSpecs(); logger.info("[Before Delete] Number of specs: " + specs.size()); int i=0; for (Spec spec : specs) { FlowSpec flowSpec = (FlowSpec) spec; logger.info("[Before Delete] Spec " + i++ + ": " + gson.toJson(flowSpec)); } // Make sure FlowCatalog has the previously added Flow Assert.assertTrue(specs.size() == 1, "Spec store should contain 1 Flow that was added in last test"); // Orchestrator is a no-op listener for any new FlowSpecs, so no FlowSpecs should be around int specsInSEI = ((List)(sei.getProducer().get().listSpecs().get())).size(); Assert.assertTrue(specsInSEI == 0, "SpecProducer should contain 0 " + "Spec after addition because Orchestrator is a no-op listener for any new FlowSpecs"); // Remove the flow this.flowCatalog.remove(flowSpec.getUri()); // List Specs after adding specs = flowCatalog.getSpecs(); logger.info("[After Delete] Number of specs: " + specs.size()); i = 0; for (Spec spec : specs) { flowSpec = (FlowSpec) spec; logger.info("[After Delete] Spec " + i++ + ": " + gson.toJson(flowSpec)); } // Make sure FlowCatalog has the Flow removed Assert.assertTrue(specs.size() == 0, "Spec store should not contain Spec after deletion"); // Make sure FlowCatalog Listener knows about the deletion specsInSEI = ((List)(sei.getProducer().get().listSpecs().get())).size(); Assert.assertTrue(specsInSEI == 0, "SpecProducer should not contain " + "Spec after deletion"); } @Test (dependsOnMethods = "deleteFlowSpec") public void doNotRegisterMetricsAdhocFlows() throws Exception { MetricContext metricContext = this.orchestrator.getSharedFlowMetricsSingleton().getMetricContext(); this.topologyCatalog.getInitComplete().countDown(); // unblock orchestration Properties flowProps = new Properties(); 
flowProps.setProperty(ConfigurationKeys.FLOW_NAME_KEY, "flow0"); flowProps.setProperty(ConfigurationKeys.FLOW_GROUP_KEY, "group0"); flowProps.put("specStore.fs.dir", FLOW_SPEC_STORE_DIR); flowProps.put("specExecInstance.capabilities", "source:destination"); flowProps.put("gobblin.flow.sourceIdentifier", "source"); flowProps.put("gobblin.flow.destinationIdentifier", "destination"); flowProps.put("flow.allowConcurrentExecution", false); FlowSpec adhocSpec = new FlowSpec(URI.create("flow0/group0"), "1", "", ConfigUtils.propertiesToConfig(flowProps) , flowProps, Optional.absent(), Optional.absent()); this.orchestrator.orchestrate(adhocSpec, flowProps, 0, false); String metricName = MetricRegistry.name(ServiceMetricNames.GOBBLIN_SERVICE_PREFIX, "group0", "flow0", ServiceMetricNames.COMPILED); Assert.assertNull(metricContext.getParent().get().getGauges().get(metricName)); flowProps.setProperty("job.schedule", "0/2 * * * * ?"); FlowSpec scheduledSpec = new FlowSpec(URI.create("flow0/group0"), "1", "", ConfigUtils.propertiesToConfig(flowProps) , flowProps, Optional.absent(), Optional.absent()); this.orchestrator.orchestrate(scheduledSpec, flowProps, 0, false); Assert.assertNotNull(metricContext.getParent().get().getGauges().get(metricName)); } }
3,802
0
Create_ds/gobblin/gobblin-service/src/test/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/test/java/org/apache/gobblin/service/modules/orchestration/MysqlDagStateStoreTest.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.service.modules.orchestration; import java.net.URI; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; import org.testng.Assert; import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; import com.typesafe.config.Config; import com.zaxxer.hikari.HikariDataSource; import org.apache.gobblin.config.ConfigBuilder; import org.apache.gobblin.configuration.ConfigurationKeys; import org.apache.gobblin.configuration.State; import org.apache.gobblin.metastore.MysqlStateStore; import org.apache.gobblin.metastore.StateStore; import org.apache.gobblin.metastore.testing.ITestMetastoreDatabase; import org.apache.gobblin.metastore.testing.TestMetastoreDatabaseFactory; import org.apache.gobblin.runtime.api.TopologySpec; import org.apache.gobblin.service.ExecutionStatus; import org.apache.gobblin.service.modules.flowgraph.Dag; import org.apache.gobblin.service.modules.spec.JobExecutionPlan; /** * Mainly testing functionalities related to DagStateStore but not Mysql-related components. 
*/ public class MysqlDagStateStoreTest { private DagStateStore _dagStateStore; private Map<URI, TopologySpec> topologySpecMap; private static final String TEST_USER = "testUser"; private static final String TEST_PASSWORD = "testPassword"; private static final String TEST_DAG_STATE_STORE = "TestDagStateStore"; @BeforeClass public void setUp() throws Exception { ConfigBuilder configBuilder = ConfigBuilder.create(); // Constructing TopologySpecMap. this.topologySpecMap = new HashMap<>(); String specExecInstance = "mySpecExecutor"; TopologySpec topologySpec = DagTestUtils.buildNaiveTopologySpec(specExecInstance); URI specExecURI = new URI(specExecInstance); this.topologySpecMap.put(specExecURI, topologySpec); this._dagStateStore = new TestMysqlDagStateStore(configBuilder.build(), this.topologySpecMap); } @Test public void testWriteCheckpointAndGet() throws Exception{ Dag<JobExecutionPlan> dag_0 = DagTestUtils.buildDag("random_0", 123L); Dag<JobExecutionPlan> dag_1 = DagTestUtils.buildDag("random_1", 456L); _dagStateStore.writeCheckpoint(dag_0); _dagStateStore.writeCheckpoint(dag_1); // Verify get one dag Dag<JobExecutionPlan> dag = _dagStateStore.getDag(DagManagerUtils.generateDagId(dag_0).toString()); Assert.assertEquals(dag.getNodes().get(0), dag_0.getNodes().get(0)); Assert.assertEquals(dag.getNodes().get(1), dag_0.getNodes().get(1)); // Verify get dagIds Set<String> dagIds = _dagStateStore.getDagIds(); Assert.assertEquals(dagIds.size(), 2); Assert.assertTrue(dagIds.contains(DagManagerUtils.generateDagId(dag_0).toString())); Assert.assertTrue(dagIds.contains(DagManagerUtils.generateDagId(dag_1).toString())); // Verify get all dags List<Dag<JobExecutionPlan>> dags = _dagStateStore.getDags(); Assert.assertEquals(dags.size(), 2); // Verify dag contents Dag<JobExecutionPlan> dagDeserialized = dags.get(0); Assert.assertEquals(dagDeserialized.getNodes().size(), 2); Assert.assertEquals(dagDeserialized.getStartNodes().size(), 1); 
Assert.assertEquals(dagDeserialized.getEndNodes().size(), 1); Dag.DagNode<JobExecutionPlan> child = dagDeserialized.getEndNodes().get(0); Dag.DagNode<JobExecutionPlan> parent = dagDeserialized.getStartNodes().get(0); Assert.assertEquals(dagDeserialized.getParentChildMap().size(), 1); Assert.assertTrue(dagDeserialized.getParentChildMap().get(parent).contains(child)); for (int i = 0; i < 2; i++) { JobExecutionPlan plan = dagDeserialized.getNodes().get(i).getValue(); Config jobConfig = plan.getJobSpec().getConfig(); Assert.assertEquals(jobConfig.getString(ConfigurationKeys.FLOW_GROUP_KEY), "group" + "random_0"); Assert.assertEquals(jobConfig.getString(ConfigurationKeys.FLOW_NAME_KEY), "flow" + "random_0"); Assert.assertEquals(jobConfig.getLong(ConfigurationKeys.FLOW_EXECUTION_ID_KEY), 123L); Assert.assertEquals(plan.getExecutionStatus(), ExecutionStatus.RUNNING); Assert.assertTrue(Boolean.parseBoolean(plan.getJobFuture().get().get().toString())); Assert.assertTrue(Boolean.parseBoolean(plan.getJobFuture().get().get().toString())); } dagDeserialized = dags.get(1); Assert.assertEquals(dagDeserialized.getNodes().size(), 2); Assert.assertEquals(dagDeserialized.getStartNodes().size(), 1); Assert.assertEquals(dagDeserialized.getEndNodes().size(), 1); child = dagDeserialized.getEndNodes().get(0); parent = dagDeserialized.getStartNodes().get(0); Assert.assertEquals(dagDeserialized.getParentChildMap().size(), 1); Assert.assertTrue(dagDeserialized.getParentChildMap().get(parent).contains(child)); for (int i = 0; i < 2; i++) { JobExecutionPlan plan = dagDeserialized.getNodes().get(i).getValue(); Config jobConfig = plan.getJobSpec().getConfig(); Assert.assertEquals(jobConfig.getString(ConfigurationKeys.FLOW_GROUP_KEY), "group" + "random_1"); Assert.assertEquals(jobConfig.getString(ConfigurationKeys.FLOW_NAME_KEY), "flow" + "random_1"); Assert.assertEquals(jobConfig.getLong(ConfigurationKeys.FLOW_EXECUTION_ID_KEY), 456L); Assert.assertEquals(plan.getExecutionStatus(), 
ExecutionStatus.RUNNING); } } @Test (dependsOnMethods = "testWriteCheckpointAndGet") public void testCleanUp() throws Exception { Dag<JobExecutionPlan> dag_0 = DagTestUtils.buildDag("random_0", 123L); Dag<JobExecutionPlan> dag_1 = DagTestUtils.buildDag("random_1", 456L); _dagStateStore.writeCheckpoint(dag_0); _dagStateStore.writeCheckpoint(dag_1); List<Dag<JobExecutionPlan>> dags = _dagStateStore.getDags(); Assert.assertEquals(dags.size(), 2); _dagStateStore.cleanUp(dags.get(0)); _dagStateStore.cleanUp(DagManagerUtils.generateDagId(dags.get(1)).toString()); dags = _dagStateStore.getDags(); Assert.assertEquals(dags.size(), 0); } /** * Only overwrite {@link #createStateStore(Config)} method to directly return a mysqlStateStore * backed by mocked db. */ public class TestMysqlDagStateStore extends MysqlDagStateStore { public TestMysqlDagStateStore(Config config, Map<URI, TopologySpec> topologySpecMap) { super(config, topologySpecMap); } @Override protected StateStore<State> createStateStore(Config config) { try { // Setting up mock DB ITestMetastoreDatabase testMetastoreDatabase = TestMetastoreDatabaseFactory.get(); String jdbcUrl = testMetastoreDatabase.getJdbcUrl(); HikariDataSource dataSource = new HikariDataSource(); dataSource.setDriverClassName(ConfigurationKeys.DEFAULT_STATE_STORE_DB_JDBC_DRIVER); dataSource.setAutoCommit(false); dataSource.setJdbcUrl(jdbcUrl); dataSource.setUsername(TEST_USER); dataSource.setPassword(TEST_PASSWORD); return new MysqlStateStore<>(dataSource, TEST_DAG_STATE_STORE, false, State.class); } catch (Exception e) { throw new RuntimeException(e); } } } }
3,803
0
Create_ds/gobblin/gobblin-service/src/test/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/test/java/org/apache/gobblin/service/modules/orchestration/FlowTriggerHandlerTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.modules.orchestration;

import java.util.Properties;

import org.quartz.JobDataMap;
import org.testng.Assert;
import org.testng.annotations.Test;

import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.runtime.api.DagActionStore;
import org.apache.gobblin.runtime.api.MultiActiveLeaseArbiter;
import org.apache.gobblin.service.modules.scheduler.GobblinServiceJobScheduler;


/**
 * Tests for the static helpers of {@link FlowTriggerHandler} that build reminder-trigger cron
 * expressions and propagate reminder metadata through a Quartz {@link JobDataMap}.
 *
 * Note: uses {@link org.testng.Assert} (not JUnit) to stay consistent with the TestNG
 * {@code @Test} annotations used throughout this module; TestNG's argument order is
 * {@code (actual, expected)}.
 */
public class FlowTriggerHandlerTest {
  // Consensus event timestamp (millis) that a reminder trigger should revisit.
  private final long eventToRevisit = 123000L;
  // Minimum time (millis) a reminder must linger before firing again.
  private final long minimumLingerDurationMillis = 2000L;
  // Cron expression generated for the linger period by the class under test.
  private final String cronExpression = FlowTriggerHandler.createCronFromDelayPeriod(minimumLingerDurationMillis);
  // Same cron expression minus the seconds and minutes fields, used as a stable comparison
  // suffix because the handler adds a small random offset to those two fields.
  private final String cronExpressionSuffix = truncateFirstTwoFieldsOfCronExpression(cronExpression);
  private final int schedulerBackOffMillis = 10;
  private final DagActionStore.DagAction flowAction = new DagActionStore.DagAction("flowName", "flowGroup",
      String.valueOf(eventToRevisit), DagActionStore.FlowActionType.LAUNCH);
  private final MultiActiveLeaseArbiter.LeasedToAnotherStatus leasedToAnotherStatus =
      new MultiActiveLeaseArbiter.LeasedToAnotherStatus(flowAction, minimumLingerDurationMillis);

  /**
   * Removes the first two fields (seconds and minutes) from a cron expression so the remainder can
   * be compared against an expression generated from the original cron plus a small random offset
   * in milliseconds (the offset only perturbs the seconds/minutes fields).
   *
   * @param cronExpression full Quartz cron expression
   * @return the cron expression with its seconds and minutes fields stripped
   */
  private static String truncateFirstTwoFieldsOfCronExpression(String cronExpression) {
    String cronWithoutSeconds = cronExpression.substring(cronExpression.indexOf(" ") + 1);
    String cronWithoutMinutes = cronWithoutSeconds.substring(cronWithoutSeconds.indexOf(" ") + 1);
    return cronWithoutMinutes;
  }

  /**
   * Provides an input with all three values (cronExpression, reminderTimestamp, originalEventTime)
   * set in the map's Properties and checks that they are updated properly.
   */
  @Test
  public void testUpdatePropsInJobDataMap() {
    JobDataMap oldJobDataMap = new JobDataMap();
    Properties originalProperties = new Properties();
    originalProperties.setProperty(ConfigurationKeys.JOB_SCHEDULE_KEY, "0 0 0 ? * * 2050");
    originalProperties.setProperty(ConfigurationKeys.SCHEDULER_EXPECTED_REMINDER_TIME_MILLIS_KEY, "0");
    originalProperties.setProperty(ConfigurationKeys.SCHEDULER_PRESERVED_CONSENSUS_EVENT_TIME_MILLIS_KEY, "1");
    oldJobDataMap.put(GobblinServiceJobScheduler.PROPERTIES_KEY, originalProperties);

    JobDataMap newJobDataMap = FlowTriggerHandler.updatePropsInJobDataMap(oldJobDataMap, leasedToAnotherStatus,
        schedulerBackOffMillis);
    Properties newProperties = (Properties) newJobDataMap.get(GobblinServiceJobScheduler.PROPERTIES_KEY);
    // The seconds/minutes fields are randomized, so only the truncated suffix is deterministic.
    Assert.assertTrue(newProperties.getProperty(ConfigurationKeys.JOB_SCHEDULE_KEY).endsWith(cronExpressionSuffix));
    // The reminder time must have been overwritten from its original "0" placeholder.
    Assert.assertNotEquals(newProperties.getProperty(ConfigurationKeys.SCHEDULER_EXPECTED_REMINDER_TIME_MILLIS_KEY),
        "0");
    Assert.assertEquals(newProperties.getProperty(ConfigurationKeys.SCHEDULER_PRESERVED_CONSENSUS_EVENT_TIME_MILLIS_KEY),
        String.valueOf(leasedToAnotherStatus.getEventTimeMillis()));
    Assert.assertTrue(Boolean.parseBoolean(newProperties.getProperty(ConfigurationKeys.FLOW_IS_REMINDER_EVENT_KEY)));
  }

  /**
   * Provides input with an empty Properties object and checks that the three values in question are
   * set.
   */
  @Test
  public void testSetPropsInJobDataMap() {
    JobDataMap oldJobDataMap = new JobDataMap();
    Properties originalProperties = new Properties();
    oldJobDataMap.put(GobblinServiceJobScheduler.PROPERTIES_KEY, originalProperties);

    JobDataMap newJobDataMap = FlowTriggerHandler.updatePropsInJobDataMap(oldJobDataMap, leasedToAnotherStatus,
        schedulerBackOffMillis);
    Properties newProperties = (Properties) newJobDataMap.get(GobblinServiceJobScheduler.PROPERTIES_KEY);
    Assert.assertTrue(newProperties.getProperty(ConfigurationKeys.JOB_SCHEDULE_KEY).endsWith(cronExpressionSuffix));
    Assert.assertTrue(newProperties.containsKey(ConfigurationKeys.SCHEDULER_EXPECTED_REMINDER_TIME_MILLIS_KEY));
    Assert.assertEquals(newProperties.getProperty(ConfigurationKeys.SCHEDULER_PRESERVED_CONSENSUS_EVENT_TIME_MILLIS_KEY),
        String.valueOf(leasedToAnotherStatus.getEventTimeMillis()));
    Assert.assertTrue(Boolean.parseBoolean(newProperties.getProperty(ConfigurationKeys.FLOW_IS_REMINDER_EVENT_KEY)));
  }

  /**
   * Tests the `createSuffixForJobTrigger` helper function to ensure the suffix is constructed as we
   * expect.
   */
  @Test
  public void testCreateSuffixForJobTrigger() {
    String suffix = FlowTriggerHandler.createSuffixForJobTrigger(leasedToAnotherStatus);
    // assertEquals (rather than assertTrue on equals) so a failure reports both values.
    Assert.assertEquals(suffix, "reminder_for_" + eventToRevisit);
  }
}
3,804
0
Create_ds/gobblin/gobblin-service/src/test/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/test/java/org/apache/gobblin/service/modules/orchestration/DagManagerTest.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.service.modules.orchestration; import java.io.File; import java.io.IOException; import java.lang.reflect.Field; import java.net.URI; import java.net.URISyntaxException; import java.time.Duration; import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Set; import java.util.SortedMap; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.LinkedBlockingQueue; import org.apache.commons.io.FileUtils; import org.mockito.Mockito; import org.testng.Assert; import org.testng.annotations.AfterClass; import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; import com.codahale.metrics.Counter; import com.codahale.metrics.MetricRegistry; import com.google.common.base.Optional; import com.google.common.collect.Iterators; import com.google.common.collect.Lists; import com.typesafe.config.Config; import com.typesafe.config.ConfigFactory; import com.typesafe.config.ConfigValueFactory; import org.apache.gobblin.config.ConfigBuilder; import org.apache.gobblin.configuration.ConfigurationKeys; import 
org.apache.gobblin.instrumented.Instrumented; import org.apache.gobblin.metrics.MetricContext; import org.apache.gobblin.metrics.ServiceMetricNames; import org.apache.gobblin.runtime.api.JobSpec; import org.apache.gobblin.runtime.api.SpecExecutor; import org.apache.gobblin.runtime.spec_executorInstance.MockedSpecExecutor; import org.apache.gobblin.service.ExecutionStatus; import org.apache.gobblin.service.modules.flowgraph.Dag; import org.apache.gobblin.service.modules.flowgraph.Dag.DagNode; import org.apache.gobblin.service.modules.spec.JobExecutionPlan; import org.apache.gobblin.service.modules.spec.JobExecutionPlanDagFactory; import org.apache.gobblin.service.monitoring.JobStatus; import org.apache.gobblin.service.monitoring.JobStatusRetriever; import org.apache.gobblin.util.ConfigUtils; public class DagManagerTest { private final String dagStateStoreDir = "/tmp/dagManagerTest/dagStateStore"; private DagStateStore _dagStateStore; private DagStateStore _failedDagStateStore; private JobStatusRetriever _jobStatusRetriever; private DagManagerMetrics _dagManagerMetrics; private DagManager.DagManagerThread _dagManagerThread; private LinkedBlockingQueue<Dag<JobExecutionPlan>> queue; private LinkedBlockingQueue<DagManager.DagId> cancelQueue; private LinkedBlockingQueue<DagManager.DagId> resumeQueue; private Map<DagNode<JobExecutionPlan>, Dag<JobExecutionPlan>> jobToDag; private Map<String, LinkedList<DagNode<JobExecutionPlan>>> dagToJobs; private Map<String, Dag<JobExecutionPlan>> dags; private UserQuotaManager _gobblinServiceQuotaManager; private Set<String> failedDagIds; private static long START_SLA_DEFAULT = 15 * 60 * 1000; private MetricContext metricContext; @BeforeClass public void setUp() throws Exception { FileUtils.deleteDirectory(new File(this.dagStateStoreDir)); Config config = ConfigFactory.empty() .withValue(FSDagStateStore.DAG_STATESTORE_DIR, ConfigValueFactory.fromAnyRef(this.dagStateStoreDir)); this._dagStateStore = new FSDagStateStore(config, new 
HashMap<>()); this._failedDagStateStore = new InMemoryDagStateStore(); this._jobStatusRetriever = Mockito.mock(JobStatusRetriever.class); this.queue = new LinkedBlockingQueue<>(); this.cancelQueue = new LinkedBlockingQueue<>(); this.resumeQueue = new LinkedBlockingQueue<>(); this.metricContext = Instrumented.getMetricContext(ConfigUtils.configToState(ConfigFactory.empty()), getClass()); Config quotaConfig = ConfigFactory.empty() .withValue(AbstractUserQuotaManager.PER_USER_QUOTA, ConfigValueFactory.fromAnyRef("user:1")); this._gobblinServiceQuotaManager = new InMemoryUserQuotaManager(quotaConfig); this._dagManagerMetrics = new DagManagerMetrics(metricContext); this._dagManagerMetrics.activate(); this._dagManagerThread = new DagManager.DagManagerThread(_jobStatusRetriever, _dagStateStore, _failedDagStateStore, Optional.absent(), queue, cancelQueue, resumeQueue, true, new HashSet<>(), this._dagManagerMetrics, START_SLA_DEFAULT, _gobblinServiceQuotaManager, 0); Field jobToDagField = DagManager.DagManagerThread.class.getDeclaredField("jobToDag"); jobToDagField.setAccessible(true); this.jobToDag = (Map<DagNode<JobExecutionPlan>, Dag<JobExecutionPlan>>) jobToDagField.get(this._dagManagerThread); Field dagToJobsField = DagManager.DagManagerThread.class.getDeclaredField("dagToJobs"); dagToJobsField.setAccessible(true); this.dagToJobs = (Map<String, LinkedList<DagNode<JobExecutionPlan>>>) dagToJobsField.get(this._dagManagerThread); Field dagsField = DagManager.DagManagerThread.class.getDeclaredField("dags"); dagsField.setAccessible(true); this.dags = (Map<String, Dag<JobExecutionPlan>>) dagsField.get(this._dagManagerThread); Field failedDagIdsField = DagManager.DagManagerThread.class.getDeclaredField("failedDagIds"); failedDagIdsField.setAccessible(true); this.failedDagIds = (Set<String>) failedDagIdsField.get(this._dagManagerThread); } /** * Create a list of dags with only one node each * @return a Dag. 
*/ static List<Dag<JobExecutionPlan>> buildDagList(int numDags, String proxyUser, Config additionalConfig) throws URISyntaxException{ List<Dag<JobExecutionPlan>> dagList = new ArrayList<>(); for (int i = 0; i < numDags; i++) { dagList.add(buildDag(Integer.toString(i), System.currentTimeMillis(), DagManager.FailureOption.FINISH_ALL_POSSIBLE.name(), 1, proxyUser, additionalConfig)); } return dagList; } /** * Create a {@link Dag <JobExecutionPlan>}. * @return a Dag. */ static Dag<JobExecutionPlan> buildDag(String id, Long flowExecutionId, String flowFailureOption, boolean flag) throws URISyntaxException { int numNodes = (flag) ? 3 : 5; return buildDag(id, flowExecutionId, flowFailureOption, numNodes); } static Dag<JobExecutionPlan> buildDag(String id, Long flowExecutionId, String flowFailureOption, int numNodes) throws URISyntaxException { return buildDag(id, flowExecutionId, flowFailureOption, numNodes, "testUser", ConfigFactory.empty()); } static Dag<JobExecutionPlan> buildDag(String id, Long flowExecutionId, String flowFailureOption, int numNodes, String proxyUser, Config additionalConfig) throws URISyntaxException { List<JobExecutionPlan> jobExecutionPlans = new ArrayList<>(); for (int i = 0; i < numNodes; i++) { String suffix = Integer.toString(i); Config jobConfig = ConfigBuilder.create(). addPrimitive(ConfigurationKeys.FLOW_GROUP_KEY, "group" + id). addPrimitive(ConfigurationKeys.FLOW_NAME_KEY, "flow" + id). addPrimitive(ConfigurationKeys.FLOW_EXECUTION_ID_KEY, flowExecutionId). addPrimitive(ConfigurationKeys.JOB_GROUP_KEY, "group" + id). addPrimitive(ConfigurationKeys.JOB_NAME_KEY, "job" + suffix). addPrimitive(ConfigurationKeys.FLOW_FAILURE_OPTION, flowFailureOption). 
addPrimitive(AzkabanProjectConfig.USER_TO_PROXY, proxyUser).build(); jobConfig = additionalConfig.withFallback(jobConfig); if ((i == 1) || (i == 2)) { jobConfig = jobConfig.withValue(ConfigurationKeys.JOB_DEPENDENCIES, ConfigValueFactory.fromAnyRef("job0")); } else if (i == 3) { jobConfig = jobConfig.withValue(ConfigurationKeys.JOB_DEPENDENCIES, ConfigValueFactory.fromAnyRef("job1")); } else if (i == 4) { jobConfig = jobConfig.withValue(ConfigurationKeys.JOB_DEPENDENCIES, ConfigValueFactory.fromAnyRef("job2")); } JobSpec js = JobSpec.builder("test_job" + suffix).withVersion(suffix).withConfig(jobConfig). withTemplate(new URI("job" + suffix)).build(); SpecExecutor specExecutor = MockedSpecExecutor.createDummySpecExecutor(new URI( ConfigUtils.getString(additionalConfig, ConfigurationKeys.SPECEXECUTOR_INSTANCE_URI_KEY,"job" + i))); JobExecutionPlan jobExecutionPlan = new JobExecutionPlan(js, specExecutor); jobExecutionPlans.add(jobExecutionPlan); } return new JobExecutionPlanDagFactory().createDag(jobExecutionPlans); } static Iterator<JobStatus> getMockFlowStatus(String flowName, String flowGroup, Long flowExecutionId, String eventName) { return getMockJobStatus(flowName, flowGroup, flowExecutionId, JobStatusRetriever.NA_KEY, JobStatusRetriever.NA_KEY, eventName); } static Iterator<JobStatus> getMockJobStatus(String flowName, String flowGroup, Long flowExecutionId, String jobGroup, String jobName, String eventName) { return getMockJobStatus(flowName, flowGroup, flowExecutionId, jobGroup, jobName, eventName, false, flowExecutionId + 10); } private static Iterator<JobStatus> getMockJobStatus(String flowName, String flowGroup, Long flowExecutionId, String jobGroup, String jobName, String eventName, boolean shouldRetry) { return getMockJobStatus(flowName, flowGroup, flowExecutionId, jobGroup, jobName, eventName, shouldRetry, flowExecutionId + 10); } private static Iterator<JobStatus> getMockJobStatus(String flowName, String flowGroup, Long flowExecutionId, String 
jobGroup, String jobName, String eventName, boolean shouldRetry, Long orchestratedTime) { return Iterators.singletonIterator(JobStatus.builder().flowName(flowName).flowGroup(flowGroup).jobGroup(jobGroup).jobName(jobName).flowExecutionId(flowExecutionId). message("Test message").eventName(eventName).startTime(flowExecutionId + 10).shouldRetry(shouldRetry).orchestratedTime(orchestratedTime).build()); } @Test public void testSuccessfulDag() throws URISyntaxException, IOException { long flowExecutionId = System.currentTimeMillis(); String flowGroupId = "0"; String flowGroup = "group" + flowGroupId; String flowName = "flow" + flowGroupId; String jobName0 = "job0"; String jobName1 = "job1"; String jobName2 = "job2"; Dag<JobExecutionPlan> dag = buildDag(flowGroupId, flowExecutionId, "FINISH_RUNNING", true); String dagId = DagManagerUtils.generateDagId(dag).toString(); //Add a dag to the queue of dags this.queue.offer(dag); Iterator<JobStatus> jobStatusIterator1 = getMockJobStatus(flowName, flowGroup, flowExecutionId, jobName0, flowGroup, String.valueOf(ExecutionStatus.RUNNING)); Iterator<JobStatus> jobStatusIterator2 = getMockJobStatus(flowName, flowGroup, flowExecutionId, jobName0, flowGroup, String.valueOf(ExecutionStatus.COMPLETE)); Iterator<JobStatus> jobStatusIterator3 = getMockJobStatus(flowName, flowGroup, flowExecutionId, jobName1, flowGroup, String.valueOf(ExecutionStatus.RUNNING)); Iterator<JobStatus> jobStatusIterator4 = getMockJobStatus(flowName, flowGroup, flowExecutionId, jobName2, flowGroup, String.valueOf(ExecutionStatus.RUNNING)); Iterator<JobStatus> jobStatusIterator5 = getMockJobStatus(flowName, flowGroup, flowExecutionId, jobName1, flowGroup, String.valueOf(ExecutionStatus.RUNNING)); Iterator<JobStatus> jobStatusIterator6 = getMockJobStatus(flowName, flowGroup, flowExecutionId, jobName2, flowGroup, String.valueOf(ExecutionStatus.COMPLETE)); Iterator<JobStatus> jobStatusIterator7 = getMockJobStatus(flowName, flowGroup, flowExecutionId, jobName1, 
flowGroup, String.valueOf(ExecutionStatus.COMPLETE)); Iterator<JobStatus> jobStatusIterator8 = getMockFlowStatus(flowName, flowGroup, flowExecutionId, String.valueOf(ExecutionStatus.COMPLETE)); Mockito.when(_jobStatusRetriever.getJobStatusesForFlowExecution(Mockito.anyString(), Mockito.anyString(), Mockito.anyLong(), Mockito.anyString(), Mockito.anyString())). thenReturn(jobStatusIterator1). thenReturn(jobStatusIterator2). thenReturn(jobStatusIterator3). thenReturn(jobStatusIterator4). thenReturn(jobStatusIterator5). thenReturn(jobStatusIterator6). thenReturn(jobStatusIterator7). thenReturn(jobStatusIterator8); //Run the thread once. Ensure the first job is running this._dagManagerThread.run(); Assert.assertEquals(this.dags.size(), 1); Assert.assertTrue(this.dags.containsKey(dagId)); Assert.assertEquals(this.jobToDag.size(), 1); Assert.assertTrue(this.jobToDag.containsKey(dag.getStartNodes().get(0))); Assert.assertEquals(this.dagToJobs.get(dagId).size(), 1); Assert.assertTrue(this.dagToJobs.get(dagId).contains(dag.getStartNodes().get(0))); Assert.assertEquals(this.dags.get(dagId).getNodes().get(0).getValue().getCurrentAttempts(), 1); //Run the thread 2nd time. Ensure the job0 is complete and job1 and job2 are submitted. this._dagManagerThread.run(); Assert.assertEquals(this.dags.size(), 1); Assert.assertTrue(this.dags.containsKey(dagId)); Assert.assertEquals(this.jobToDag.size(), 2); Assert.assertTrue(this.jobToDag.containsKey(dag.getEndNodes().get(0))); Assert.assertTrue(this.jobToDag.containsKey(dag.getEndNodes().get(1))); Assert.assertEquals(this.dagToJobs.get(dagId).size(), 2); Assert.assertTrue(this.dagToJobs.get(dagId).contains(dag.getEndNodes().get(0))); Assert.assertTrue(this.dagToJobs.get(dagId).contains(dag.getEndNodes().get(1))); //Run the thread 3rd time. Ensure job1 and job2 are running. 
this._dagManagerThread.run(); Assert.assertEquals(this.dags.size(), 1); Assert.assertTrue(this.dags.containsKey(dagId)); Assert.assertEquals(this.jobToDag.size(), 2); Assert.assertTrue(this.jobToDag.containsKey(dag.getEndNodes().get(0))); Assert.assertTrue(this.jobToDag.containsKey(dag.getEndNodes().get(1))); Assert.assertEquals(this.dagToJobs.get(dagId).size(), 2); Assert.assertTrue(this.dagToJobs.get(dagId).contains(dag.getEndNodes().get(0))); Assert.assertTrue(this.dagToJobs.get(dagId).contains(dag.getEndNodes().get(1))); //Run the thread 4th time. One of the jobs is completed. this._dagManagerThread.run(); Assert.assertEquals(this.dags.size(), 1); Assert.assertTrue(this.dags.containsKey(dagId)); Assert.assertEquals(this.jobToDag.size(), 1); Assert.assertEquals(this.dagToJobs.get(dagId).size(), 1); //Run the thread again. Ensure all jobs completed and dag is cleaned up. this._dagManagerThread.run(); Assert.assertEquals(this.dags.size(), 0); Assert.assertEquals(this.jobToDag.size(), 0); Assert.assertEquals(this.dagToJobs.size(), 0); Assert.assertEquals(this._dagStateStore.getDags().size(), 0); } @Test (dependsOnMethods = "testSuccessfulDag") public void testFailedDag() throws URISyntaxException, IOException { for (String failureOption: Lists.newArrayList("FINISH_RUNNING", "FINISH_ALL_POSSIBLE")) { long flowExecutionId = System.currentTimeMillis(); String flowGroupId = "0"; String flowGroup = "group" + flowGroupId; String flowName = "flow" + flowGroupId; String jobName0 = "job0"; String jobName1 = "job1"; String jobName2 = "job2"; Dag<JobExecutionPlan> dag = buildDag(flowGroupId, flowExecutionId, failureOption, false); String dagId = DagManagerUtils.generateDagId(dag).toString(); //Add a dag to the queue of dags this.queue.offer(dag); Iterator<JobStatus> jobStatusIterator1 = getMockJobStatus(flowName, flowGroup, flowExecutionId, jobName0, flowGroup, String.valueOf(ExecutionStatus.RUNNING)); Iterator<JobStatus> jobStatusIterator2 = getMockJobStatus(flowName, 
flowGroup, flowExecutionId, jobName0, flowGroup, String.valueOf(ExecutionStatus.COMPLETE)); Iterator<JobStatus> jobStatusIterator3 = getMockJobStatus(flowName, flowGroup, flowExecutionId, jobName1, flowGroup, String.valueOf(ExecutionStatus.RUNNING)); Iterator<JobStatus> jobStatusIterator4 = getMockJobStatus(flowName, flowGroup, flowExecutionId, jobName2, flowGroup, String.valueOf(ExecutionStatus.RUNNING)); Iterator<JobStatus> jobStatusIterator5 = getMockJobStatus(flowName, flowGroup, flowExecutionId, jobName1, flowGroup, String.valueOf(ExecutionStatus.RUNNING)); Iterator<JobStatus> jobStatusIterator6 = getMockJobStatus(flowName, flowGroup, flowExecutionId, jobName2, flowGroup, String.valueOf(ExecutionStatus.FAILED)); Iterator<JobStatus> jobStatusIterator7 = getMockJobStatus(flowName, flowGroup, flowExecutionId, jobName1, flowGroup, String.valueOf(ExecutionStatus.COMPLETE)); Iterator<JobStatus> jobStatusIterator8 = getMockJobStatus(flowName, flowGroup, flowExecutionId, jobName1, flowGroup, String.valueOf(ExecutionStatus.RUNNING)); Iterator<JobStatus> jobStatusIterator9 = getMockJobStatus(flowName, flowGroup, flowExecutionId, jobName1, flowGroup, String.valueOf(ExecutionStatus.COMPLETE)); Iterator<JobStatus> jobStatusIterator10 = getMockFlowStatus(flowName, flowGroup, flowExecutionId, String.valueOf(ExecutionStatus.FAILED)); Mockito.when(_jobStatusRetriever .getJobStatusesForFlowExecution(Mockito.anyString(), Mockito.anyString(), Mockito.anyLong(), Mockito.anyString(), Mockito.anyString())). thenReturn(jobStatusIterator1). thenReturn(jobStatusIterator2). thenReturn(jobStatusIterator3). thenReturn(jobStatusIterator4). thenReturn(jobStatusIterator5). thenReturn(jobStatusIterator6). thenReturn(jobStatusIterator7). thenReturn(jobStatusIterator8). thenReturn(jobStatusIterator9). thenReturn(jobStatusIterator10); //Run the thread once. 
Ensure the first job is running this._dagManagerThread.run(); Assert.assertEquals(this.dags.size(), 1); Assert.assertTrue(this.dags.containsKey(dagId)); Assert.assertEquals(this.jobToDag.size(), 1); Assert.assertTrue(this.jobToDag.containsKey(dag.getStartNodes().get(0))); Assert.assertEquals(this.dagToJobs.get(dagId).size(), 1); Assert.assertTrue(this.dagToJobs.get(dagId).contains(dag.getStartNodes().get(0))); //Run the thread 2nd time. Ensure the job0 is complete and job1 and job2 are submitted. this._dagManagerThread.run(); Assert.assertEquals(this.dags.size(), 1); Assert.assertTrue(this.dags.containsKey(dagId)); Assert.assertEquals(this.jobToDag.size(), 2); Assert.assertTrue(this.jobToDag.containsKey(dag.getNodes().get(1))); Assert.assertTrue(this.jobToDag.containsKey(dag.getNodes().get(2))); Assert.assertEquals(this.dagToJobs.get(dagId).size(), 2); Assert.assertTrue(this.dagToJobs.get(dagId).contains(dag.getNodes().get(1))); Assert.assertTrue(this.dagToJobs.get(dagId).contains(dag.getNodes().get(2))); //Run the thread 3rd time. Ensure the job0 is complete and job1 and job2 are running. this._dagManagerThread.run(); Assert.assertEquals(this.dags.size(), 1); Assert.assertTrue(this.dags.containsKey(dagId)); Assert.assertEquals(this.jobToDag.size(), 2); Assert.assertTrue(this.jobToDag.containsKey(dag.getNodes().get(1))); Assert.assertTrue(this.jobToDag.containsKey(dag.getNodes().get(2))); Assert.assertEquals(this.dagToJobs.get(dagId).size(), 2); Assert.assertTrue(this.dagToJobs.get(dagId).contains(dag.getNodes().get(1))); Assert.assertTrue(this.dagToJobs.get(dagId).contains(dag.getNodes().get(2))); //Run the thread 4th time. this._dagManagerThread.run(); if ("FINISH_RUNNING".equals(failureOption)) { //One of the jobs is failed; so the dag is failed and all state is cleaned up. 
Assert.assertEquals(this.dags.size(), 0); Assert.assertEquals(this.jobToDag.size(), 0); Assert.assertEquals(this.dagToJobs.size(), 0); Assert.assertEquals(this._dagStateStore.getDags().size(), 0); } else { //One of the jobs is failed; but with finish_all_possible, some jobs can continue running. for (int i = 0; i < 3; i++) { Assert.assertEquals(this.dags.size(), 1); Assert.assertEquals(this.jobToDag.size(), 1); Assert.assertEquals(this.dagToJobs.get(dagId).size(), 1); this._dagManagerThread.run(); } //Ensure the state is cleaned up. Assert.assertEquals(this.dags.size(), 0); Assert.assertEquals(this.jobToDag.size(), 0); Assert.assertEquals(this.dagToJobs.size(), 0); Assert.assertEquals(this._dagStateStore.getDags().size(), 0); } } } @Test (dependsOnMethods = "testFailedDag") public void testResumeDag() throws URISyntaxException { long flowExecutionId = System.currentTimeMillis(); String flowGroupId = "0"; String flowGroup = "group" + flowGroupId; String flowName = "flow" + flowGroupId; String jobName0 = "job0"; String jobName1 = "job1"; String jobName2 = "job2"; Dag<JobExecutionPlan> dag = buildDag(flowGroupId, flowExecutionId, "FINISH_RUNNING", true); String dagId = DagManagerUtils.generateDagId(dag).toString(); //Add a dag to the queue of dags this.queue.offer(dag); Iterator<JobStatus> jobStatusIterator1 = getMockJobStatus(flowName, flowGroup, flowExecutionId, jobName0, flowGroup, String.valueOf(ExecutionStatus.RUNNING)); Iterator<JobStatus> jobStatusIterator2 = getMockJobStatus(flowName, flowGroup, flowExecutionId, jobName0, flowGroup, String.valueOf(ExecutionStatus.COMPLETE)); Iterator<JobStatus> jobStatusIterator3 = getMockJobStatus(flowName, flowGroup, flowExecutionId, jobName1, flowGroup, String.valueOf(ExecutionStatus.RUNNING)); Iterator<JobStatus> jobStatusIterator4 = getMockJobStatus(flowName, flowGroup, flowExecutionId, jobName2, flowGroup, String.valueOf(ExecutionStatus.RUNNING)); Iterator<JobStatus> jobStatusIterator5 = getMockJobStatus(flowName, 
flowGroup, flowExecutionId, jobName1, flowGroup, String.valueOf(ExecutionStatus.COMPLETE)); Iterator<JobStatus> jobStatusIterator6 = getMockJobStatus(flowName, flowGroup, flowExecutionId, jobName2, flowGroup, String.valueOf(ExecutionStatus.FAILED)); Iterator<JobStatus> jobStatusIterator7 = getMockFlowStatus(flowName, flowGroup, flowExecutionId, String.valueOf(ExecutionStatus.FAILED)); Iterator<JobStatus> jobStatusIterator8 = getMockFlowStatus(flowName, flowGroup, flowExecutionId, String.valueOf(ExecutionStatus.PENDING_RESUME)); Iterator<JobStatus> jobStatusIterator9 = getMockJobStatus(flowName, flowGroup, flowExecutionId, jobName0, flowGroup, String.valueOf(ExecutionStatus.COMPLETE)); Iterator<JobStatus> jobStatusIterator10 = getMockJobStatus(flowName, flowGroup, flowExecutionId, jobName1, flowGroup, String.valueOf(ExecutionStatus.COMPLETE)); Iterator<JobStatus> jobStatusIterator11 = getMockJobStatus(flowName, flowGroup, flowExecutionId, jobName2, flowGroup, String.valueOf(ExecutionStatus.PENDING_RESUME)); Iterator<JobStatus> jobStatusIterator12 = getMockJobStatus(flowName, flowGroup, flowExecutionId, jobName2, flowGroup, String.valueOf(ExecutionStatus.RUNNING)); Iterator<JobStatus> jobStatusIterator13 = getMockJobStatus(flowName, flowGroup, flowExecutionId, jobName2, flowGroup, String.valueOf(ExecutionStatus.COMPLETE)); Iterator<JobStatus> jobStatusIterator14 = getMockFlowStatus(flowName, flowGroup, flowExecutionId, String.valueOf(ExecutionStatus.COMPLETE)); Mockito.when(_jobStatusRetriever .getJobStatusesForFlowExecution(Mockito.anyString(), Mockito.anyString(), Mockito.anyLong(), Mockito.anyString(), Mockito.anyString())). thenReturn(jobStatusIterator1). thenReturn(jobStatusIterator2). thenReturn(jobStatusIterator3). thenReturn(jobStatusIterator4). thenReturn(jobStatusIterator5). thenReturn(jobStatusIterator6). thenReturn(jobStatusIterator7). thenReturn(jobStatusIterator8). thenReturn(jobStatusIterator9). thenReturn(jobStatusIterator10). 
thenReturn(jobStatusIterator11). thenReturn(jobStatusIterator12). thenReturn(jobStatusIterator13). thenReturn(jobStatusIterator14); // Run thread until job2 fails for (int i = 0; i < 4; i++) { this._dagManagerThread.run(); } Assert.assertTrue(this.failedDagIds.contains(dagId)); // Resume dag this.resumeQueue.offer(DagManagerUtils.generateDagId(dag)); // Job2 rerunning this._dagManagerThread.run(); Assert.assertFalse(this.failedDagIds.contains(dagId)); Assert.assertTrue(this.dags.containsKey(dagId)); // Verify the current attempt number Assert.assertEquals(dag.getNodes().get(2).getValue().getCurrentAttempts(), 1); // Job2 complete this._dagManagerThread.run(); Assert.assertFalse(this.failedDagIds.contains(dagId)); Assert.assertFalse(this.dags.containsKey(dagId)); } @Test (dependsOnMethods = "testResumeDag") public void testSucceedAfterRetry() throws Exception { long flowExecutionId = System.currentTimeMillis(); String flowGroupId = "0"; String flowGroup = "group" + flowGroupId; String flowName = "flow" + flowGroupId; String jobName0 = "job0"; String jobName1 = "job1"; String jobName2 = "job2"; Dag<JobExecutionPlan> dag = buildDag(flowGroupId, flowExecutionId, "FINISH_RUNNING", true); String dagId = DagManagerUtils.generateDagId(dag).toString(); //Add a dag to the queue of dags this.queue.offer(dag); Iterator<JobStatus> jobStatusIterator1 = getMockJobStatus(flowName, flowGroup, flowExecutionId, jobName0, flowGroup, String.valueOf(ExecutionStatus.RUNNING)); Iterator<JobStatus> jobStatusIterator2 = getMockJobStatus(flowName, flowGroup, flowExecutionId, jobName0, flowGroup, String.valueOf(ExecutionStatus.RUNNING), true); Iterator<JobStatus> jobStatusIterator3 = getMockJobStatus(flowName, flowGroup, flowExecutionId, jobName0, flowGroup, String.valueOf(ExecutionStatus.COMPLETE)); Iterator<JobStatus> jobStatusIterator4 = getMockJobStatus(flowName, flowGroup, flowExecutionId, jobName1, flowGroup, String.valueOf(ExecutionStatus.RUNNING)); Iterator<JobStatus> 
jobStatusIterator5 = getMockJobStatus(flowName, flowGroup, flowExecutionId, jobName2, flowGroup, String.valueOf(ExecutionStatus.RUNNING)); Iterator<JobStatus> jobStatusIterator6 = getMockJobStatus(flowName, flowGroup, flowExecutionId, jobName1, flowGroup, String.valueOf(ExecutionStatus.COMPLETE)); Iterator<JobStatus> jobStatusIterator7 = getMockJobStatus(flowName, flowGroup, flowExecutionId, jobName2, flowGroup, String.valueOf(ExecutionStatus.COMPLETE)); Iterator<JobStatus> jobStatusIterator8 = getMockFlowStatus(flowName, flowGroup, flowExecutionId, String.valueOf(ExecutionStatus.COMPLETE)); Mockito.when(_jobStatusRetriever.getJobStatusesForFlowExecution(Mockito.anyString(), Mockito.anyString(), Mockito.anyLong(), Mockito.anyString(), Mockito.anyString())). thenReturn(jobStatusIterator1). thenReturn(jobStatusIterator2). thenReturn(jobStatusIterator3). thenReturn(jobStatusIterator4). thenReturn(jobStatusIterator5). thenReturn(jobStatusIterator6). thenReturn(jobStatusIterator7). thenReturn(jobStatusIterator8); //Run the thread once. 
Ensure the first job is running this._dagManagerThread.run(); Assert.assertEquals(this.dags.size(), 1); Assert.assertTrue(this.dags.containsKey(dagId)); Assert.assertEquals(this.jobToDag.size(), 1); Assert.assertTrue(this.jobToDag.containsKey(dag.getStartNodes().get(0))); Assert.assertEquals(this.dagToJobs.get(dagId).size(), 1); Assert.assertTrue(this.dagToJobs.get(dagId).contains(dag.getStartNodes().get(0))); // Second run: check that first job failed and is running again after retry this._dagManagerThread.run(); Assert.assertEquals(this.dags.size(), 1); Assert.assertTrue(this.dags.containsKey(dagId)); Assert.assertEquals(this.jobToDag.size(), 1); Assert.assertTrue(this.jobToDag.containsKey(dag.getStartNodes().get(0))); Assert.assertEquals(this.dagToJobs.get(dagId).size(), 1); Assert.assertTrue(this.dagToJobs.get(dagId).contains(dag.getStartNodes().get(0))); // Third run: check that first job completed successfully and now second and third job are submitted this._dagManagerThread.run(); Assert.assertEquals(this.dags.size(), 1); Assert.assertTrue(this.dags.containsKey(dagId)); Assert.assertEquals(this.jobToDag.size(), 2); Assert.assertTrue(this.jobToDag.containsKey(dag.getNodes().get(1))); Assert.assertTrue(this.jobToDag.containsKey(dag.getNodes().get(2))); Assert.assertEquals(this.dagToJobs.get(dagId).size(), 2); Assert.assertTrue(this.dagToJobs.get(dagId).contains(dag.getNodes().get(1))); Assert.assertTrue(this.dagToJobs.get(dagId).contains(dag.getNodes().get(2))); // Fourth run: second and third job are running this._dagManagerThread.run(); Assert.assertEquals(this.dags.size(), 1); Assert.assertTrue(this.dags.containsKey(dagId)); Assert.assertEquals(this.jobToDag.size(), 2); Assert.assertTrue(this.jobToDag.containsKey(dag.getNodes().get(1))); Assert.assertTrue(this.jobToDag.containsKey(dag.getNodes().get(2))); Assert.assertEquals(this.dagToJobs.get(dagId).size(), 2); Assert.assertTrue(this.dagToJobs.get(dagId).contains(dag.getNodes().get(1))); 
Assert.assertTrue(this.dagToJobs.get(dagId).contains(dag.getNodes().get(2)));

    // Fifth run: second and third job complete and dag is cleaned up
    this._dagManagerThread.run();
    Assert.assertEquals(this.dags.size(), 0);
    Assert.assertEquals(this.jobToDag.size(), 0);
    Assert.assertEquals(this.dagToJobs.size(), 0);
    Assert.assertEquals(this._dagStateStore.getDags().size(), 0);
  }

  /**
   * Single-job dag whose job keeps failing with retry enabled: each dag-manager pass must keep the
   * job tracked and bump its attempt counter, then survive a PENDING_RETRY pass, and finally clean
   * the dag up (including from the state store) once a terminal FAILED status arrives.
   */
  @Test (dependsOnMethods = "testSucceedAfterRetry")
  public void testFailAfterRetry() throws Exception {
    long flowExecutionId = System.currentTimeMillis();
    String flowGroupId = "0";
    String flowGroup = "group" + flowGroupId;
    String flowName = "flow" + flowGroupId;
    String jobName0 = "job0";
    Dag<JobExecutionPlan> dag = buildDag(flowGroupId, flowExecutionId, "FINISH_RUNNING", true);
    String dagId = DagManagerUtils.generateDagId(dag).toString();

    //Add a dag to the queue of dags
    this.queue.offer(dag);

    // Status sequence for job0: RUNNING, three RUNNING reports flagged shouldRetry=true, a
    // PENDING_RETRY (also shouldRetry=true), one more RUNNING, then terminal FAILED job status
    // followed by a FAILED flow-level status.
    Iterator<JobStatus> jobStatusIterator1 =
        getMockJobStatus(flowName, flowGroup, flowExecutionId, jobName0, flowGroup, String.valueOf(ExecutionStatus.RUNNING));
    Iterator<JobStatus> jobStatusIterator2 =
        getMockJobStatus(flowName, flowGroup, flowExecutionId, jobName0, flowGroup, String.valueOf(ExecutionStatus.RUNNING), true);
    Iterator<JobStatus> jobStatusIterator3 =
        getMockJobStatus(flowName, flowGroup, flowExecutionId, jobName0, flowGroup, String.valueOf(ExecutionStatus.RUNNING), true);
    Iterator<JobStatus> jobStatusIterator4 =
        getMockJobStatus(flowName, flowGroup, flowExecutionId, jobName0, flowGroup, String.valueOf(ExecutionStatus.RUNNING), true);
    Iterator<JobStatus> jobStatusIterator5 =
        getMockJobStatus(flowName, flowGroup, flowExecutionId, jobName0, flowGroup, String.valueOf(ExecutionStatus.PENDING_RETRY), true);
    Iterator<JobStatus> jobStatusIterator6 =
        getMockJobStatus(flowName, flowGroup, flowExecutionId, jobName0, flowGroup, String.valueOf(ExecutionStatus.RUNNING));
    Iterator<JobStatus> jobStatusIterator7 = getMockJobStatus(flowName, flowGroup, flowExecutionId, jobName0, flowGroup,
        String.valueOf(ExecutionStatus.FAILED));
    Iterator<JobStatus> jobStatusIterator8 =
        getMockFlowStatus(flowName, flowGroup, flowExecutionId, String.valueOf(ExecutionStatus.FAILED));
    Mockito.when(_jobStatusRetriever.getJobStatusesForFlowExecution(Mockito.anyString(), Mockito.anyString(),
        Mockito.anyLong(), Mockito.anyString(), Mockito.anyString())).
        thenReturn(jobStatusIterator1).
        thenReturn(jobStatusIterator2).
        thenReturn(jobStatusIterator3).
        thenReturn(jobStatusIterator4).
        thenReturn(jobStatusIterator5).
        thenReturn(jobStatusIterator6).
        thenReturn(jobStatusIterator7).
        thenReturn(jobStatusIterator8);

    // Run 4 times, first job fails every time and is retried; the attempt count must grow by one per pass.
    for (int i = 0; i < 4; i++) {
      this._dagManagerThread.run();
      Assert.assertEquals(this.dags.size(), 1);
      Assert.assertTrue(this.dags.containsKey(dagId));
      Assert.assertEquals(this.jobToDag.size(), 1);
      Assert.assertTrue(this.jobToDag.containsKey(dag.getStartNodes().get(0)));
      Assert.assertEquals(this.dagToJobs.get(dagId).size(), 1);
      Assert.assertTrue(this.dagToJobs.get(dagId).contains(dag.getStartNodes().get(0)));
      Assert.assertEquals(dag.getStartNodes().get(0).getValue().getCurrentAttempts(), i + 1);
    }

    // Got a PENDING_RETRY state: the dag stays tracked while the retry is pending.
    this._dagManagerThread.run();
    Assert.assertEquals(this.dags.size(), 1);
    Assert.assertEquals(this.jobToDag.size(), 1);
    Assert.assertEquals(this.dagToJobs.size(), 1);

    // Retried job is RUNNING again; still tracked.
    this._dagManagerThread.run();
    Assert.assertEquals(this.dags.size(), 1);
    Assert.assertEquals(this.jobToDag.size(), 1);
    Assert.assertEquals(this.dagToJobs.size(), 1);

    // Last run fails and dag is cleaned up
    this._dagManagerThread.run();
    Assert.assertEquals(this.dags.size(), 0);
    Assert.assertEquals(this.jobToDag.size(), 0);
    Assert.assertEquals(this.dagToJobs.size(), 0);
    Assert.assertEquals(this._dagStateStore.getDags().size(), 0);
  }

  /**
   * Cancels a running dag (via {@code cancelQueue}), verifies it lands in {@code failedDagIds},
   * then resumes it (via {@code resumeQueue}) and verifies the cancelled job reruns to completion
   * and the dag leaves both the failed set and the tracked-dag map.
   */
  @Test (dependsOnMethods = "testFailAfterRetry")
  public void testResumeCancelledDag() throws URISyntaxException, IOException {
    long flowExecutionId = System.currentTimeMillis();
    String flowGroupId = "0";
String flowGroup = "group" + flowGroupId;
    String flowName = "flow" + flowGroupId;
    String jobName0 = "job0";
    String jobName1 = "job1";
    String jobName2 = "job2";
    Dag<JobExecutionPlan> dag = buildDag(flowGroupId, flowExecutionId, "FINISH_RUNNING", true);
    String dagId = DagManagerUtils.generateDagId(dag).toString();

    //Add a dag to the queue of dags
    this.queue.offer(dag);

    // Status sequence: job0 runs and completes; job1/job2 run; job1 completes but job2 is
    // CANCELLED and the flow is reported CANCELLED. After the resume request the flow reports
    // PENDING_RESUME, the already-finished jobs re-report COMPLETE, and job2 goes
    // PENDING_RESUME -> RUNNING -> COMPLETE, ending with a COMPLETE flow status.
    Iterator<JobStatus> jobStatusIterator1 =
        getMockJobStatus(flowName, flowGroup, flowExecutionId, jobName0, flowGroup, String.valueOf(ExecutionStatus.RUNNING));
    Iterator<JobStatus> jobStatusIterator2 =
        getMockJobStatus(flowName, flowGroup, flowExecutionId, jobName0, flowGroup, String.valueOf(ExecutionStatus.COMPLETE));
    Iterator<JobStatus> jobStatusIterator3 =
        getMockJobStatus(flowName, flowGroup, flowExecutionId, jobName1, flowGroup, String.valueOf(ExecutionStatus.RUNNING));
    Iterator<JobStatus> jobStatusIterator4 =
        getMockJobStatus(flowName, flowGroup, flowExecutionId, jobName2, flowGroup, String.valueOf(ExecutionStatus.RUNNING));
    Iterator<JobStatus> jobStatusIterator5 =
        getMockJobStatus(flowName, flowGroup, flowExecutionId, jobName1, flowGroup, String.valueOf(ExecutionStatus.COMPLETE));
    Iterator<JobStatus> jobStatusIterator6 =
        getMockJobStatus(flowName, flowGroup, flowExecutionId, jobName2, flowGroup, String.valueOf(ExecutionStatus.CANCELLED));
    Iterator<JobStatus> jobStatusIterator7 =
        getMockFlowStatus(flowName, flowGroup, flowExecutionId, String.valueOf(ExecutionStatus.CANCELLED));
    Iterator<JobStatus> jobStatusIterator8 =
        getMockFlowStatus(flowName, flowGroup, flowExecutionId, String.valueOf(ExecutionStatus.PENDING_RESUME));
    Iterator<JobStatus> jobStatusIterator9 =
        getMockJobStatus(flowName, flowGroup, flowExecutionId, jobName0, flowGroup, String.valueOf(ExecutionStatus.COMPLETE));
    Iterator<JobStatus> jobStatusIterator10 =
        getMockJobStatus(flowName, flowGroup, flowExecutionId, jobName1, flowGroup, String.valueOf(ExecutionStatus.COMPLETE));
    Iterator<JobStatus> jobStatusIterator11 =
        getMockJobStatus(flowName, flowGroup, flowExecutionId, jobName2, flowGroup, String.valueOf(ExecutionStatus.PENDING_RESUME));
    Iterator<JobStatus> jobStatusIterator12 =
        getMockJobStatus(flowName, flowGroup, flowExecutionId, jobName2, flowGroup, String.valueOf(ExecutionStatus.RUNNING));
    Iterator<JobStatus> jobStatusIterator13 =
        getMockJobStatus(flowName, flowGroup, flowExecutionId, jobName2, flowGroup, String.valueOf(ExecutionStatus.COMPLETE));
    Iterator<JobStatus> jobStatusIterator14 =
        getMockFlowStatus(flowName, flowGroup, flowExecutionId, String.valueOf(ExecutionStatus.COMPLETE));
    Mockito.when(_jobStatusRetriever
        .getJobStatusesForFlowExecution(Mockito.anyString(), Mockito.anyString(), Mockito.anyLong(),
            Mockito.anyString(), Mockito.anyString())).
        thenReturn(jobStatusIterator1).
        thenReturn(jobStatusIterator2).
        thenReturn(jobStatusIterator3).
        thenReturn(jobStatusIterator4).
        thenReturn(jobStatusIterator5).
        thenReturn(jobStatusIterator6).
        thenReturn(jobStatusIterator7).
        thenReturn(jobStatusIterator8).
        thenReturn(jobStatusIterator9).
        thenReturn(jobStatusIterator10).
        thenReturn(jobStatusIterator11).
        thenReturn(jobStatusIterator12).
        thenReturn(jobStatusIterator13).
        thenReturn(jobStatusIterator14);

    // Run until job2 cancelled
    for (int i = 0; i < 3; i++) {
      this._dagManagerThread.run();
    }

    // Cancel job2
    this.cancelQueue.offer(DagManagerUtils.generateDagId(dag));
    this._dagManagerThread.run();
    Assert.assertTrue(this.failedDagIds.contains(dagId));

    // Resume dag
    this.resumeQueue.offer(DagManagerUtils.generateDagId(dag));

    // Job2 rerunning: the dag must leave the failed set and be tracked again.
    this._dagManagerThread.run();
    Assert.assertFalse(this.failedDagIds.contains(dagId));
    Assert.assertTrue(this.dags.containsKey(dagId));

    // Job2 complete: the dag is cleaned up entirely.
    this._dagManagerThread.run();
    Assert.assertFalse(this.failedDagIds.contains(dagId));
    Assert.assertFalse(this.dags.containsKey(dagId));
  }

  /**
   * A dag orchestrated 16 minutes before its flow-execution time exceeds the job-start SLA and
   * must be killed (its dag id recorded in {@code failedDagIds}); a second dag orchestrated only
   * 10 minutes back is within the SLA and must be scheduled normally. Also verifies the per-job
   * start-SLA-exceeded meter is incremented exactly once.
   */
  @Test (dependsOnMethods = "testResumeCancelledDag")
  public void testJobStartSLAKilledDag() throws URISyntaxException, IOException {
    String slakilledMeterName = MetricRegistry.name(ServiceMetricNames.GOBBLIN_SERVICE_PREFIX, "job0",
        ServiceMetricNames.START_SLA_EXCEEDED_FLOWS_METER);
    // Meters persist across tests in this class, so snapshot the current count (0 if not yet registered).
    long slaKilledMeterCount = metricContext.getParent().get().getMeters().get(slakilledMeterName) == null ?
0 : metricContext.getParent().get().getMeters().get(slakilledMeterName).getCount();
    long flowExecutionId = System.currentTimeMillis();
    String flowGroupId = "0";
    String flowGroup = "group" + flowGroupId;
    String flowName = "flow" + flowGroupId;
    String jobName0 = "job0";
    String flowGroupId1 = "1";
    String flowGroup1 = "group" + flowGroupId1;
    String flowName1 = "flow" + flowGroupId1;
    Dag<JobExecutionPlan> dag = buildDag(flowGroupId, flowExecutionId, "FINISH_RUNNING", false);
    Dag<JobExecutionPlan> dag1 = buildDag(flowGroupId1, flowExecutionId+1, "FINISH_RUNNING", false);
    String dagId = DagManagerUtils.generateDagId(dag).toString();
    String dagId1 = DagManagerUtils.generateDagId(dag1).toString();

    //Add a dag to the queue of dags
    this.queue.offer(dag);

    // The start time should be 16 minutes ago, which is past the start SLA so the job should be cancelled
    Iterator<JobStatus> jobStatusIteratorFlow0_0 =
        getMockJobStatus(flowName, flowGroup, flowExecutionId, jobName0, flowGroup,
            String.valueOf(ExecutionStatus.ORCHESTRATED), false, flowExecutionId - Duration.ofMinutes(16).toMillis());
    // This is for the second Dag that does not match the SLA so should schedule normally
    Iterator<JobStatus> jobStatusIteratorFlow1_0 =
        getMockJobStatus(flowName1, flowGroup1, flowExecutionId+1, jobName0, flowGroup1,
            String.valueOf(ExecutionStatus.ORCHESTRATED), false, flowExecutionId - Duration.ofMinutes(10).toMillis());
    // Let the first job get reported as cancel due to SLA kill on start and clean up
    Iterator<JobStatus> jobStatusIteratorFlow0_1 =
        getMockJobStatus(flowName, flowGroup, flowExecutionId, jobName0, flowGroup,
            String.valueOf(ExecutionStatus.CANCELLED), false, flowExecutionId - Duration.ofMinutes(16).toMillis());
    Iterator<JobStatus> jobStatusIteratorFlow0_2 =
        getMockFlowStatus(flowName, flowGroup, flowExecutionId, String.valueOf(ExecutionStatus.CANCELLED));
    // Cleanup the running job that is scheduled normally
    Iterator<JobStatus> jobStatusIteratorFlow1_1 = getMockJobStatus(flowName1,
        flowGroup1, flowExecutionId+1, jobName0, flowGroup1, String.valueOf(ExecutionStatus.COMPLETE));
    Iterator<JobStatus> jobStatusIteratorFlow1_2 =
        getMockJobStatus(flowName1, flowGroup1, flowExecutionId+1, "job1", flowGroup1, String.valueOf(ExecutionStatus.COMPLETE));
    Iterator<JobStatus> jobStatusIteratorFlow1_3 =
        getMockJobStatus(flowName1, flowGroup1, flowExecutionId+1, "job2", flowGroup1, String.valueOf(ExecutionStatus.COMPLETE));
    Iterator<JobStatus> jobStatusIteratorFlow1_4 =
        getMockJobStatus(flowName1, flowGroup1, flowExecutionId+1, "job2", flowGroup1, String.valueOf(ExecutionStatus.COMPLETE));
    Iterator<JobStatus> jobStatusIteratorFlow1_5 =
        getMockFlowStatus(flowName1, flowGroup1, flowExecutionId+1, String.valueOf(ExecutionStatus.COMPLETE));
    // Stubs are keyed per flow here (eq matchers) rather than anyString, so the two dags can be
    // polled independently without interleaving the iterator order.
    Mockito.when(_jobStatusRetriever
        .getJobStatusesForFlowExecution(Mockito.eq("flow0"), Mockito.eq("group0"), Mockito.anyLong(),
            Mockito.anyString(), Mockito.anyString())).
        thenReturn(jobStatusIteratorFlow0_0).
        thenReturn(jobStatusIteratorFlow0_1).
        thenReturn(jobStatusIteratorFlow0_2);
    Mockito.when(_jobStatusRetriever
        .getJobStatusesForFlowExecution(Mockito.eq("flow1"), Mockito.eq("group1"), Mockito.anyLong(),
            Mockito.anyString(), Mockito.anyString())).
        thenReturn(jobStatusIteratorFlow1_0).
        thenReturn(jobStatusIteratorFlow1_1).
        thenReturn(jobStatusIteratorFlow1_2).
        thenReturn(jobStatusIteratorFlow1_3).thenReturn(jobStatusIteratorFlow1_4).thenReturn(jobStatusIteratorFlow1_5);

    // Run the thread once: the first dag exceeds the start SLA, so it is dropped rather than tracked.
    this._dagManagerThread.run();
    Assert.assertEquals(this.dags.size(), 0);
    // Job should be marked as failed
    Assert.assertTrue(this.failedDagIds.contains(dagId));

    // Next job should succeed as it doesn't exceed SLA
    this.queue.offer(dag1);
    this._dagManagerThread.run();
    Assert.assertEquals(this.dags.size(), 1);
    Assert.assertEquals(this.jobToDag.size(), 1);
    Assert.assertEquals(this.dagToJobs.size(), 1);
    Assert.assertTrue(this.dags.containsKey(dagId1));
    Assert.assertEquals(metricContext.getParent().get().getMeters().get(slakilledMeterName).getCount(), slaKilledMeterCount + 1);

    // Cleanup
    this._dagManagerThread.run();
    this._dagManagerThread.run();
    this._dagManagerThread.run();
    Assert.assertEquals(this.dags.size(), 0);
    Assert.assertEquals(this.jobToDag.size(), 0);
    Assert.assertEquals(this.dagToJobs.size(), 0);
  }

  /**
   * Three dags on two spec executors all exceed the start SLA; the start-SLA-exceeded meter must be
   * incremented under each executor's own metric name (2 for executorOne, 1 for executorTwo) as
   * well as on the aggregate meter (+3).
   */
  @Test (dependsOnMethods = "testJobStartSLAKilledDag")
  public void testJobKilledSLAMetricsArePerExecutor() throws URISyntaxException, IOException {
    long flowExecutionId = System.currentTimeMillis();
    // The start time should be 16 minutes ago, which is past the start SLA so the job should be cancelled
    long startOrchestrationTime = flowExecutionId - Duration.ofMinutes(16).toMillis();
    Config executorOneConfig = ConfigFactory.empty()
        .withValue(ConfigurationKeys.SPECEXECUTOR_INSTANCE_URI_KEY, ConfigValueFactory.fromAnyRef("executorOne"))
        .withValue(ConfigurationKeys.FLOW_EXECUTION_ID_KEY, ConfigValueFactory.fromAnyRef(flowExecutionId));
    Config executorTwoConfig = ConfigFactory.empty()
        .withValue(ConfigurationKeys.SPECEXECUTOR_INSTANCE_URI_KEY, ConfigValueFactory.fromAnyRef("executorTwo"));
    List<Dag<JobExecutionPlan>> dagList = buildDagList(2, "user", executorOneConfig);
    dagList.add(buildDag("2", flowExecutionId, "FINISH_RUNNING", 1, "user", executorTwoConfig));
    String allSlaKilledMeterName = MetricRegistry.name(ServiceMetricNames.GOBBLIN_SERVICE_PREFIX,
        ServiceMetricNames.START_SLA_EXCEEDED_FLOWS_METER);
    long previousSlaKilledCount =
metricContext.getParent().get().getMeters().get(allSlaKilledMeterName) == null ?
        0 : metricContext.getParent().get().getMeters().get(allSlaKilledMeterName).getCount();

    //Add a dag to the queue of dags
    this.queue.offer(dagList.get(0));
    this.queue.offer(dagList.get(1));
    this.queue.offer(dagList.get(2));; // NOTE(review): stray empty statement (double semicolon)

    // All three jobs report ORCHESTRATED with a start time past the start SLA, then a CANCELLED
    // flow status three times so each dag can be cleaned up.
    // NOTE(review): iterators 2 and 3 pass "flow1"/"flow2" as the flow-group argument (second
    // parameter) while the job group is "group1"/"group2" — looks like a copy-paste slip, but the
    // anyString() stubbing below makes it inert; confirm against getMockJobStatus's signature.
    Iterator<JobStatus> jobStatusIterator1 = getMockJobStatus("flow0", "group0", flowExecutionId, "job0", "group0",
        String.valueOf(ExecutionStatus.ORCHESTRATED), false, startOrchestrationTime);
    Iterator<JobStatus> jobStatusIterator2 = getMockJobStatus("flow1", "flow1", flowExecutionId+1, "job0", "group1",
        String.valueOf(ExecutionStatus.ORCHESTRATED), false, startOrchestrationTime);
    Iterator<JobStatus> jobStatusIterator3 = getMockJobStatus("flow2", "flow2", flowExecutionId+1, "job0", "group2",
        String.valueOf(ExecutionStatus.ORCHESTRATED), false, startOrchestrationTime);
    Iterator<JobStatus> jobStatusIterator4 =
        getMockFlowStatus("flow2", "flow2", flowExecutionId+1, String.valueOf(ExecutionStatus.CANCELLED));
    Iterator<JobStatus> jobStatusIterator5 =
        getMockFlowStatus("flow2", "flow2", flowExecutionId+1, String.valueOf(ExecutionStatus.CANCELLED));
    Iterator<JobStatus> jobStatusIterator6 =
        getMockFlowStatus("flow2", "flow2", flowExecutionId+1, String.valueOf(ExecutionStatus.CANCELLED));
    Mockito.when(_jobStatusRetriever
        .getJobStatusesForFlowExecution(Mockito.anyString(), Mockito.anyString(), Mockito.anyLong(),
            Mockito.anyString(), Mockito.anyString())).
        thenReturn(jobStatusIterator1).
        thenReturn(jobStatusIterator2).
        thenReturn(jobStatusIterator3).
        thenReturn(jobStatusIterator4).
        thenReturn(jobStatusIterator5).
        thenReturn(jobStatusIterator6);

    // Run the thread once. All 3 jobs should be emitted an SLA exceeded event.
    this._dagManagerThread.run();
    String slakilledMeterName1 = MetricRegistry.name(ServiceMetricNames.GOBBLIN_SERVICE_PREFIX, "executorOne",
        ServiceMetricNames.START_SLA_EXCEEDED_FLOWS_METER);
    String slakilledMeterName2 = MetricRegistry.name(ServiceMetricNames.GOBBLIN_SERVICE_PREFIX, "executorTwo",
        ServiceMetricNames.START_SLA_EXCEEDED_FLOWS_METER);
    Assert.assertEquals(metricContext.getParent().get().getMeters().get(slakilledMeterName1).getCount(), 2);
    Assert.assertEquals(metricContext.getParent().get().getMeters().get(slakilledMeterName2).getCount(), 1);

    // Cleanup
    this._dagManagerThread.run();
    Assert.assertEquals(metricContext.getParent().get().getMeters().get(allSlaKilledMeterName).getCount(),
        previousSlaKilledCount + 3);
    Assert.assertEquals(this.dags.size(), 0);
    Assert.assertEquals(this.jobToDag.size(), 0);
    Assert.assertEquals(this.dagToJobs.size(), 0);
  }

  /**
   * A flow whose SLA config value is malformed ("1h" instead of a number) must not crash the
   * dag-manager thread; the job runs and is cleaned up normally.
   */
  @Test (dependsOnMethods = "testJobKilledSLAMetricsArePerExecutor")
  public void testDagManagerWithBadFlowSLAConfig() throws URISyntaxException, IOException {
    long flowExecutionId = System.currentTimeMillis();
    String flowGroup = "group0";
    String flowName = "flow0";
    String jobName = "job0";

    // Create a config with an improperly formatted Flow SLA time e.g. "1h"
    // NOTE(review): flowGroup/flowName already carry the "group"/"flow" prefixes, so these keys end
    // up "groupgroup0"/"flowflow0"; harmless here because the stub below matches anyString, but
    // confirm that is intentional.
    Config jobConfig = ConfigBuilder.create().
        addPrimitive(ConfigurationKeys.FLOW_GROUP_KEY, "group" + flowGroup).
        addPrimitive(ConfigurationKeys.FLOW_NAME_KEY, "flow" + flowName).
        addPrimitive(ConfigurationKeys.FLOW_EXECUTION_ID_KEY, flowExecutionId).
        addPrimitive(ConfigurationKeys.JOB_GROUP_KEY, flowGroup).
        addPrimitive(ConfigurationKeys.JOB_NAME_KEY, jobName).
        addPrimitive(ConfigurationKeys.FLOW_FAILURE_OPTION, "FINISH_RUNNING").
        addPrimitive(ConfigurationKeys.GOBBLIN_FLOW_SLA_TIME, "1h").build();
    List<JobExecutionPlan> jobExecutionPlans = new ArrayList<>();
    JobSpec js = JobSpec.builder("test_job" + jobName).withVersion("0").withConfig(jobConfig).
withTemplate(new URI(jobName)).build();
    SpecExecutor specExecutor = MockedSpecExecutor.createDummySpecExecutor(new URI(jobName));
    JobExecutionPlan jobExecutionPlan = new JobExecutionPlan(js, specExecutor);
    jobExecutionPlans.add(jobExecutionPlan);
    Dag<JobExecutionPlan> dag = (new JobExecutionPlanDagFactory()).createDag(jobExecutionPlans);

    //Add a dag to the queue of dags
    this.queue.offer(dag);

    // Job should have been run normally without breaking on SLA check, so we can just mark as completed for status
    // NOTE(review): statuses use flowExecutionId+1 while the job config carries flowExecutionId;
    // the anyLong() matcher below makes the mismatch inert, but confirm it is intentional.
    Iterator<JobStatus> jobStatusIterator1 =
        getMockJobStatus(flowName, flowGroup, flowExecutionId+1, jobName, flowGroup, String.valueOf(ExecutionStatus.COMPLETE));
    Iterator<JobStatus> jobStatusIterator2 =
        getMockFlowStatus(flowName, flowGroup, flowExecutionId+1, String.valueOf(ExecutionStatus.COMPLETE));
    Mockito.when(_jobStatusRetriever
        .getJobStatusesForFlowExecution(Mockito.anyString(), Mockito.anyString(), Mockito.anyLong(),
            Mockito.anyString(), Mockito.anyString())).
        thenReturn(jobStatusIterator1).
        thenReturn(jobStatusIterator2);

    // Run the thread once. Job should run without crashing thread on SLA check and cleanup.
    this._dagManagerThread.run();
    Assert.assertEquals(this.dags.size(), 0);
    Assert.assertEquals(this.jobToDag.size(), 0);
    Assert.assertEquals(this.dagToJobs.size(), 0);
  }

  /**
   * With a per-user quota of one running dag, the second dag offered by the same user must be
   * rejected (not tracked), while the per-user running-dag counter reflects only the admitted dag
   * and returns to a clean state after cleanup.
   */
  @Test (dependsOnMethods = "testDagManagerWithBadFlowSLAConfig")
  public void testDagManagerQuotaExceeded() throws URISyntaxException, IOException {
    List<Dag<JobExecutionPlan>> dagList = buildDagList(2, "user", ConfigFactory.empty());

    //Add a dag to the queue of dags
    this.queue.offer(dagList.get(0));
    Config jobConfig0 = dagList.get(0).getNodes().get(0).getValue().getJobSpec().getConfig();
    Config jobConfig1 = dagList.get(1).getNodes().get(0).getValue().getJobSpec().getConfig();

    Iterator<JobStatus> jobStatusIteratorFlow0_0 = getMockJobStatus("flow0", "group0",
        Long.valueOf(jobConfig0.getString(ConfigurationKeys.FLOW_EXECUTION_ID_KEY)), "job0", "group0",
        String.valueOf(ExecutionStatus.RUNNING));
    Iterator<JobStatus> jobStatusIteratorFlow1_0 = getMockJobStatus("flow1", "group1",
        Long.valueOf(jobConfig1.getString(ConfigurationKeys.FLOW_EXECUTION_ID_KEY)), "job0", "group1",
        String.valueOf(ExecutionStatus.FAILED));
    Iterator<JobStatus> jobStatusIteratorFlow1_1 = getMockFlowStatus("flow1", "group1",
        Long.valueOf(jobConfig1.getString(ConfigurationKeys.FLOW_EXECUTION_ID_KEY)), String.valueOf(ExecutionStatus.FAILED));
    // Cleanup the running job that is scheduled normally
    Iterator<JobStatus> jobStatusIteratorFlow0_1 = getMockJobStatus("flow0", "group0",
        Long.valueOf(jobConfig0.getString(ConfigurationKeys.FLOW_EXECUTION_ID_KEY)), "job0", "group0",
        String.valueOf(ExecutionStatus.RUNNING));
    Iterator<JobStatus> jobStatusIteratorFlow0_2 = getMockJobStatus("flow0", "group0",
        Long.valueOf(jobConfig0.getString(ConfigurationKeys.FLOW_EXECUTION_ID_KEY)), "job0", "group0",
        String.valueOf(ExecutionStatus.COMPLETE));
    Iterator<JobStatus> jobStatusIteratorFlow0_3 = getMockFlowStatus("flow0", "group0",
Long.valueOf(jobConfig0.getString(ConfigurationKeys.FLOW_EXECUTION_ID_KEY)), String.valueOf(ExecutionStatus.COMPLETE));
    Mockito.when(_jobStatusRetriever
        .getJobStatusesForFlowExecution(Mockito.eq("flow0"), Mockito.eq("group0"), Mockito.anyLong(),
            Mockito.anyString(), Mockito.anyString()))
        .thenReturn(jobStatusIteratorFlow0_0)
        .thenReturn(jobStatusIteratorFlow0_1)
        .thenReturn(jobStatusIteratorFlow0_2)
        .thenReturn(jobStatusIteratorFlow0_3);
    Mockito.when(_jobStatusRetriever
        .getJobStatusesForFlowExecution(Mockito.eq("flow1"), Mockito.eq("group1"), Mockito.anyLong(),
            Mockito.anyString(), Mockito.anyString()))
        .thenReturn(jobStatusIteratorFlow1_0)
        .thenReturn(jobStatusIteratorFlow1_1);

    this._dagManagerThread.run();
    // dag will not be processed due to exceeding the quota, will log a message and exit out without adding it to dags
    this.queue.offer(dagList.get(1));
    this._dagManagerThread.run();
    SortedMap<String, Counter> allCounters = metricContext.getParent().get().getCounters();
    // Only the admitted dag counts against the user's running-dag counter.
    Assert.assertEquals(allCounters.get(MetricRegistry.name(
        ServiceMetricNames.GOBBLIN_SERVICE_PREFIX,
        ServiceMetricNames.SERVICE_USERS, "user")).getCount(), 1);
    this._dagManagerThread.run(); // cleanup
    Assert.assertEquals(this.dags.size(), 0);
    Assert.assertEquals(this.jobToDag.size(), 0);
    Assert.assertEquals(this.dagToJobs.size(), 0);
  }

  /**
   * Regression test: a dag rejected for exceeding the quota must decrement the per-user counter
   * exactly once, so a later dag by the same user can still be admitted, and the counter drops to
   * zero after everything is cleaned up.
   */
  @Test (dependsOnMethods = "testDagManagerQuotaExceeded")
  public void testQuotaDecrement() throws URISyntaxException, IOException {
    List<Dag<JobExecutionPlan>> dagList = buildDagList(3, "user", ConfigFactory.empty());

    //Add a dag to the queue of dags
    this.queue.offer(dagList.get(0));
    this.queue.offer(dagList.get(1));
    Config jobConfig0 = dagList.get(0).getNodes().get(0).getValue().getJobSpec().getConfig();
    Config jobConfig1 = dagList.get(1).getNodes().get(0).getValue().getJobSpec().getConfig();
    // NOTE(review): reads dagList.get(1) again — likely intended dagList.get(2). The execution ids
    // of the two dags may coincide, masking this; confirm against buildDagList.
    Config jobConfig2 = dagList.get(1).getNodes().get(0).getValue().getJobSpec().getConfig();

    Iterator<JobStatus> jobStatusIteratorFlow0_0 = getMockJobStatus("flow0", "group0",
        Long.valueOf(jobConfig0.getString(ConfigurationKeys.FLOW_EXECUTION_ID_KEY)), "job0", "group0",
        String.valueOf(ExecutionStatus.RUNNING));
    Iterator<JobStatus> jobStatusIteratorFlow1_0 = getMockJobStatus("flow1", "group1",
        Long.valueOf(jobConfig1.getString(ConfigurationKeys.FLOW_EXECUTION_ID_KEY)), "job0", "group1",
        String.valueOf(ExecutionStatus.FAILED));
    Iterator<JobStatus> jobStatusIteratorFlow0_1 = getMockJobStatus("flow0", "group0",
        Long.valueOf(jobConfig0.getString(ConfigurationKeys.FLOW_EXECUTION_ID_KEY)), "job0", "group0",
        String.valueOf(ExecutionStatus.RUNNING));
    Iterator<JobStatus> jobStatusIteratorFlow0_2 = getMockJobStatus("flow0", "group0",
        Long.valueOf(jobConfig0.getString(ConfigurationKeys.FLOW_EXECUTION_ID_KEY)), "job0", "group0",
        String.valueOf(ExecutionStatus.COMPLETE));
    Iterator<JobStatus> jobStatusIteratorFlow2_0 = getMockJobStatus("flow2", "group2",
        Long.valueOf(jobConfig2.getString(ConfigurationKeys.FLOW_EXECUTION_ID_KEY)), "job0", "group2",
        String.valueOf(ExecutionStatus.FAILED));
    Iterator<JobStatus> jobStatusIteratorFlow0_3 = getMockFlowStatus("flow0", "group0",
        Long.valueOf(jobConfig0.getString(ConfigurationKeys.FLOW_EXECUTION_ID_KEY)), String.valueOf(ExecutionStatus.COMPLETE));
    // NOTE(review): the next two flow statuses both use ("flow1", "group2") and jobConfig0's
    // execution id — mismatched with the flow1/group1 and flow2/group2 stubs they feed below.
    // The eq() matchers key on the retriever call's arguments, not the status contents, so this
    // may be inert; verify.
    Iterator<JobStatus> jobStatusIteratorFlow1_1 = getMockFlowStatus("flow1", "group2",
        Long.valueOf(jobConfig0.getString(ConfigurationKeys.FLOW_EXECUTION_ID_KEY)), String.valueOf(ExecutionStatus.FAILED));
    Iterator<JobStatus> jobStatusIteratorFlow2_1 = getMockFlowStatus("flow1", "group2",
        Long.valueOf(jobConfig0.getString(ConfigurationKeys.FLOW_EXECUTION_ID_KEY)), String.valueOf(ExecutionStatus.FAILED));
    Mockito.when(_jobStatusRetriever
        .getJobStatusesForFlowExecution(Mockito.eq("flow0"), Mockito.eq("group0"), Mockito.anyLong(),
            Mockito.anyString(), Mockito.anyString()))
        .thenReturn(jobStatusIteratorFlow0_0)
        .thenReturn(jobStatusIteratorFlow0_1)
        .thenReturn(jobStatusIteratorFlow0_2)
        .thenReturn(jobStatusIteratorFlow0_3);
    Mockito.when(_jobStatusRetriever
        .getJobStatusesForFlowExecution(Mockito.eq("flow1"), Mockito.eq("group1"), Mockito.anyLong(),
            Mockito.anyString(), Mockito.anyString()))
        .thenReturn(jobStatusIteratorFlow1_0)
        .thenReturn(jobStatusIteratorFlow1_1);
    Mockito.when(_jobStatusRetriever
        .getJobStatusesForFlowExecution(Mockito.eq("flow2"), Mockito.eq("group2"), Mockito.anyLong(),
            Mockito.anyString(), Mockito.anyString()))
        .thenReturn(jobStatusIteratorFlow2_0)
        .thenReturn(jobStatusIteratorFlow2_1);

    this._dagManagerThread.run();
    SortedMap<String, Counter> allCounters = metricContext.getParent().get().getCounters();
    Assert.assertEquals(allCounters.get(MetricRegistry.name(
        ServiceMetricNames.GOBBLIN_SERVICE_PREFIX,
        ServiceMetricNames.SERVICE_USERS, "user")).getCount(), 1);

    // Test case where a job that exceeded a quota would cause a double decrement after fixing the proxy user name, allowing for more jobs to run
    this.queue.offer(dagList.get(2));
    this._dagManagerThread.run();
    // Assert that running dag metrics are only counted once
    Assert.assertEquals(allCounters.get(MetricRegistry.name(
        ServiceMetricNames.GOBBLIN_SERVICE_PREFIX,
        ServiceMetricNames.SERVICE_USERS, "user")).getCount(), 1);

    this._dagManagerThread.run(); // cleanup
    Assert.assertEquals(allCounters.get(MetricRegistry.name(
        ServiceMetricNames.GOBBLIN_SERVICE_PREFIX,
        ServiceMetricNames.SERVICE_USERS, "user")).getCount(), 0);
    Assert.assertEquals(this.dags.size(), 0);
    Assert.assertEquals(this.jobToDag.size(), 0);
    Assert.assertEquals(this.dagToJobs.size(), 0);
  }

  /**
   * A dag whose job is retried must hold the user's quota slot exactly once across the retry, and
   * release it on completion so a subsequent dag by the same user is admitted.
   */
  @Test (dependsOnMethods = "testQuotaDecrement")
  public void testQuotasRetryFlow() throws URISyntaxException, IOException {
    List<Dag<JobExecutionPlan>> dagList = buildDagList(2, "user", ConfigFactory.empty());

    //Add a dag to the queue of dags
    this.queue.offer(dagList.get(0));
    Config jobConfig0 = dagList.get(0).getNodes().get(0).getValue().getJobSpec().getConfig();
    Config jobConfig1 = dagList.get(1).getNodes().get(0).getValue().getJobSpec().getConfig();
    Iterator<JobStatus> jobStatusIteratorFlow0_0 =
getMockJobStatus("flow0", "group0",
        Long.valueOf(jobConfig0.getString(ConfigurationKeys.FLOW_EXECUTION_ID_KEY)), "job0", "group0",
        String.valueOf(ExecutionStatus.ORCHESTRATED), true);
    // Cleanup the running job that is scheduled normally
    // flow0's sequence: ORCHESTRATED (retry), RUNNING (retry), re-ORCHESTRATED, COMPLETE, then a
    // COMPLETE flow status; flow1 simply orchestrates and completes.
    Iterator<JobStatus> jobStatusIteratorFlow0_1 = getMockJobStatus("flow0", "group0",
        Long.valueOf(jobConfig0.getString(ConfigurationKeys.FLOW_EXECUTION_ID_KEY)), "job0", "group0",
        String.valueOf(ExecutionStatus.RUNNING), true);
    Iterator<JobStatus> jobStatusIteratorFlow0_2 = getMockJobStatus("flow0", "group0",
        Long.valueOf(jobConfig0.getString(ConfigurationKeys.FLOW_EXECUTION_ID_KEY)), "job0", "group0",
        String.valueOf(ExecutionStatus.ORCHESTRATED));
    Iterator<JobStatus> jobStatusIteratorFlow0_3 = getMockJobStatus("flow0", "group0",
        Long.valueOf(jobConfig0.getString(ConfigurationKeys.FLOW_EXECUTION_ID_KEY)), "job0", "group0",
        String.valueOf(ExecutionStatus.COMPLETE));
    Iterator<JobStatus> jobStatusIteratorFlow0_4 = getMockFlowStatus("flow0", "group0",
        Long.valueOf(jobConfig0.getString(ConfigurationKeys.FLOW_EXECUTION_ID_KEY)), String.valueOf(ExecutionStatus.COMPLETE));
    Iterator<JobStatus> jobStatusIteratorFlow1_0 = getMockJobStatus("flow1", "group1",
        Long.valueOf(jobConfig1.getString(ConfigurationKeys.FLOW_EXECUTION_ID_KEY)), "job0", "group1",
        String.valueOf(ExecutionStatus.ORCHESTRATED));
    Iterator<JobStatus> jobStatusIteratorFlow1_1 = getMockJobStatus("flow1", "group1",
        Long.valueOf(jobConfig1.getString(ConfigurationKeys.FLOW_EXECUTION_ID_KEY)), "job0", "group1",
        String.valueOf(ExecutionStatus.COMPLETE));
    Iterator<JobStatus> jobStatusIteratorFlow1_2 = getMockFlowStatus("flow1", "group1",
        Long.valueOf(jobConfig1.getString(ConfigurationKeys.FLOW_EXECUTION_ID_KEY)), String.valueOf(ExecutionStatus.COMPLETE));
    Mockito.when(_jobStatusRetriever
        .getJobStatusesForFlowExecution(Mockito.eq("flow0"), Mockito.eq("group0"), Mockito.anyLong(),
            Mockito.anyString(), Mockito.anyString()))
        .thenReturn(jobStatusIteratorFlow0_0)
        .thenReturn(jobStatusIteratorFlow0_1)
        .thenReturn(jobStatusIteratorFlow0_2)
        .thenReturn(jobStatusIteratorFlow0_3)
        .thenReturn(jobStatusIteratorFlow0_4);
    Mockito.when(_jobStatusRetriever
        .getJobStatusesForFlowExecution(Mockito.eq("flow1"), Mockito.eq("group1"), Mockito.anyLong(),
            Mockito.anyString(), Mockito.anyString()))
        .thenReturn(jobStatusIteratorFlow1_0)
        .thenReturn(jobStatusIteratorFlow1_1)
        .thenReturn(jobStatusIteratorFlow1_2);

    // Dag1 is running
    this._dagManagerThread.run();
    SortedMap<String, Counter> allCounters = metricContext.getParent().get().getCounters();
    Assert.assertEquals(allCounters.get(MetricRegistry.name(
        ServiceMetricNames.GOBBLIN_SERVICE_PREFIX,
        ServiceMetricNames.SERVICE_USERS, "user")).getCount(), 1);

    // Dag1 fails and is orchestrated again
    this._dagManagerThread.run();
    // Dag1 is running again; the user's counter must still be exactly 1 (no double-increment on retry).
    this._dagManagerThread.run();
    Assert.assertEquals(allCounters.get(MetricRegistry.name(
        ServiceMetricNames.GOBBLIN_SERVICE_PREFIX,
        ServiceMetricNames.SERVICE_USERS, "user")).getCount(), 1);

    // Dag1 is marked as complete, should be able to run the next Dag without hitting the quota limit
    this._dagManagerThread.run();
    this.queue.offer(dagList.get(1));
    this._dagManagerThread.run();
    this._dagManagerThread.run(); // cleanup
    Assert.assertEquals(allCounters.get(MetricRegistry.name(
        ServiceMetricNames.GOBBLIN_SERVICE_PREFIX,
        ServiceMetricNames.SERVICE_USERS, "user")).getCount(), 0);
    Assert.assertEquals(this.dags.size(), 0);
    Assert.assertEquals(this.jobToDag.size(), 0);
    Assert.assertEquals(this.dagToJobs.size(), 0);
  }

  /**
   * Flow-state gauges must be registered only for scheduled flows (job-level metrics enabled),
   * never for adhoc flows (GOBBLIN_OUTPUT_JOB_LEVEL_METRICS=false); the scheduled flow's gauge
   * must read SUCCESSFUL after completion.
   */
  @Test (dependsOnMethods = "testQuotasRetryFlow")
  public void testEmitFlowMetricOnlyIfNotAdhoc() throws URISyntaxException, IOException {
    Long flowId = System.currentTimeMillis();
    // Adhoc dag: job-level metric output disabled, so no flow-state gauge should ever appear for it.
    Dag<JobExecutionPlan> adhocDag = buildDag(String.valueOf(flowId), flowId, "FINISH_RUNNING", 1, "proxyUser",
        ConfigBuilder.create().addPrimitive(ConfigurationKeys.GOBBLIN_OUTPUT_JOB_LEVEL_METRICS, false).build());

    //Add a dag to the queue of dags
    this.queue.offer(adhocDag);
Iterator<JobStatus> jobStatusIteratorFlow0_0 = getMockJobStatus("flow" + flowId, "group" + flowId, flowId, "job0", "group0", String.valueOf(ExecutionStatus.COMPLETE)); Iterator<JobStatus> jobStatusIteratorFlow0_1 = getMockFlowStatus("flow" + flowId, "group" + flowId, flowId, String.valueOf(ExecutionStatus.COMPLETE)); Iterator<JobStatus> jobStatusIteratorFlow1_0 = getMockJobStatus("flow" + flowId+1, "group" + flowId+1, flowId+1, "job0", "group0", String.valueOf(ExecutionStatus.COMPLETE)); Iterator<JobStatus> jobStatusIteratorFlow1_1 = getMockFlowStatus("flow" + flowId+1, "group" + flowId+1, flowId+1, String.valueOf(ExecutionStatus.COMPLETE)); Mockito.when(_jobStatusRetriever .getJobStatusesForFlowExecution(Mockito.eq("flow" + flowId), Mockito.eq("group" + flowId), Mockito.anyLong(), Mockito.anyString(), Mockito.anyString())) .thenReturn(jobStatusIteratorFlow0_0) .thenReturn(jobStatusIteratorFlow0_1); Mockito.when(_jobStatusRetriever .getJobStatusesForFlowExecution(Mockito.eq("flow" + (flowId+1)), Mockito.eq("group" + (flowId+1)), Mockito.anyLong(), Mockito.anyString(), Mockito.anyString())) .thenReturn(jobStatusIteratorFlow1_0) .thenReturn(jobStatusIteratorFlow1_1); String flowStateGaugeName0 = MetricRegistry.name(ServiceMetricNames.GOBBLIN_SERVICE_PREFIX, "group"+flowId, "flow"+flowId, ServiceMetricNames.RUNNING_STATUS); Assert.assertNull(metricContext.getParent().get().getGauges().get(flowStateGaugeName0)); Dag<JobExecutionPlan> scheduledDag = buildDag(String.valueOf(flowId+1), flowId+1, "FINISH_RUNNING", 1, "proxyUser", ConfigBuilder.create().addPrimitive(ConfigurationKeys.GOBBLIN_OUTPUT_JOB_LEVEL_METRICS, true).build()); this.queue.offer(scheduledDag); this._dagManagerThread.run(); String flowStateGaugeName1 = MetricRegistry.name(ServiceMetricNames.GOBBLIN_SERVICE_PREFIX, "group"+(flowId+1), "flow"+(flowId+1), ServiceMetricNames.RUNNING_STATUS); Assert.assertNotNull(metricContext.getParent().get().getGauges().get(flowStateGaugeName1)); // cleanup 
this._dagManagerThread.run(); // should be successful since it should be cleaned up with status complete Assert.assertEquals(metricContext.getParent().get().getGauges().get(flowStateGaugeName1).getValue(), DagManager.FlowState.SUCCESSFUL.value); Assert.assertEquals(this.dags.size(), 0); Assert.assertEquals(this.jobToDag.size(), 0); Assert.assertEquals(this.dagToJobs.size(), 0); } @Test (dependsOnMethods = "testEmitFlowMetricOnlyIfNotAdhoc") public void testJobSlaKilledMetrics() throws URISyntaxException, IOException { long flowExecutionId = System.currentTimeMillis() - Duration.ofMinutes(20).toMillis(); Config executorOneConfig = ConfigFactory.empty() .withValue(ConfigurationKeys.SPECEXECUTOR_INSTANCE_URI_KEY, ConfigValueFactory.fromAnyRef("executorOne")) .withValue(ConfigurationKeys.FLOW_EXECUTION_ID_KEY, ConfigValueFactory.fromAnyRef(flowExecutionId)) .withValue(ConfigurationKeys.GOBBLIN_FLOW_SLA_TIME, ConfigValueFactory.fromAnyRef(10)) .withValue(ConfigurationKeys.GOBBLIN_OUTPUT_JOB_LEVEL_METRICS, ConfigValueFactory.fromAnyRef(true)); Config executorTwoConfig = ConfigFactory.empty() .withValue(ConfigurationKeys.SPECEXECUTOR_INSTANCE_URI_KEY, ConfigValueFactory.fromAnyRef("executorTwo")) .withValue(ConfigurationKeys.GOBBLIN_FLOW_SLA_TIME, ConfigValueFactory.fromAnyRef(10)) .withValue(ConfigurationKeys.GOBBLIN_OUTPUT_JOB_LEVEL_METRICS, ConfigValueFactory.fromAnyRef(true)); List<Dag<JobExecutionPlan>> dagList = buildDagList(2, "newUser", executorOneConfig); dagList.add(buildDag("2", flowExecutionId, "FINISH_RUNNING", 1, "newUser", executorTwoConfig)); String allSlaKilledMeterName = MetricRegistry.name(ServiceMetricNames.GOBBLIN_SERVICE_PREFIX, ServiceMetricNames.SLA_EXCEEDED_FLOWS_METER); long previousSlaKilledCount = metricContext.getParent().get().getMeters().get(allSlaKilledMeterName) == null ? 
0 : metricContext.getParent().get().getMeters().get(allSlaKilledMeterName).getCount();
//Add a dag to the queue of dags
this.queue.offer(dagList.get(0));
this.queue.offer(dagList.get(1));
this.queue.offer(dagList.get(2));; // NOTE(review): stray second ';' (harmless empty statement)
// Set orchestration time to be 20 minutes in the past, the job should be marked as SLA killed
// First poll: every job reports RUNNING.
Iterator<JobStatus> jobStatusIteratorFlow0_0 =
    getMockJobStatus("flow0", "group0", flowExecutionId, "job0", "group0",
        String.valueOf(ExecutionStatus.RUNNING), false, flowExecutionId);
Iterator<JobStatus> jobStatusIteratorFlow1_0 =
    getMockJobStatus("flow1", "group1", flowExecutionId, "job0", "group1",
        String.valueOf(ExecutionStatus.RUNNING), false, flowExecutionId);
Iterator<JobStatus> jobStatusIteratorFlow2_0 =
    getMockJobStatus("flow2", "group2", flowExecutionId, "job0", "group2",
        String.valueOf(ExecutionStatus.RUNNING), false, flowExecutionId);
// Second poll: every job reports CANCELLED (the SLA kill has taken effect).
Iterator<JobStatus> jobStatusIteratorFlow0_1 =
    getMockJobStatus("flow0", "group0", flowExecutionId, "job0", "group0",
        String.valueOf(ExecutionStatus.CANCELLED), false, flowExecutionId);
Iterator<JobStatus> jobStatusIteratorFlow1_1 =
    getMockJobStatus("flow1", "group1", flowExecutionId, "job0", "group1",
        String.valueOf(ExecutionStatus.CANCELLED), false, flowExecutionId);
Iterator<JobStatus> jobStatusIteratorFlow2_1 =
    getMockJobStatus("flow2", "group2", flowExecutionId, "job0", "group2",
        String.valueOf(ExecutionStatus.CANCELLED), false, flowExecutionId);
// Third poll: flow-level status is CANCELLED (terminal).
Iterator<JobStatus> jobStatusIteratorFlow0_2 =
    getMockFlowStatus("flow0", "group0", flowExecutionId, String.valueOf(ExecutionStatus.CANCELLED));
Iterator<JobStatus> jobStatusIteratorFlow1_2 =
    getMockFlowStatus("flow1", "group1", flowExecutionId, String.valueOf(ExecutionStatus.CANCELLED));
Iterator<JobStatus> jobStatusIteratorFlow2_2 =
    getMockFlowStatus("flow2", "group2", flowExecutionId, String.valueOf(ExecutionStatus.CANCELLED));
// Consecutive stubbing: each DagManagerThread.run() consumes the next iterator in order.
Mockito.when(_jobStatusRetriever
    .getJobStatusesForFlowExecution(Mockito.eq("flow0"), Mockito.eq("group0"), Mockito.anyLong(),
        Mockito.anyString(), Mockito.anyString())).
    thenReturn(jobStatusIteratorFlow0_0).
    thenReturn(jobStatusIteratorFlow0_1).
    thenReturn(jobStatusIteratorFlow0_2);
Mockito.when(_jobStatusRetriever
    .getJobStatusesForFlowExecution(Mockito.eq("flow1"), Mockito.eq("group1"), Mockito.anyLong(),
        Mockito.anyString(), Mockito.anyString())).
    thenReturn(jobStatusIteratorFlow1_0).
    thenReturn(jobStatusIteratorFlow1_1).
    thenReturn(jobStatusIteratorFlow1_2);
Mockito.when(_jobStatusRetriever
    .getJobStatusesForFlowExecution(Mockito.eq("flow2"), Mockito.eq("group2"), Mockito.anyLong(),
        Mockito.anyString(), Mockito.anyString())).
    thenReturn(jobStatusIteratorFlow2_0).
    thenReturn(jobStatusIteratorFlow2_1).
    thenReturn(jobStatusIteratorFlow2_2);
// Run the thread once. All 3 jobs should be emitted an SLA exceeded event
this._dagManagerThread.run();
this._dagManagerThread.run();
String slakilledMeterName1 = MetricRegistry.name(ServiceMetricNames.GOBBLIN_SERVICE_PREFIX, "executorOne",
    ServiceMetricNames.SLA_EXCEEDED_FLOWS_METER);
String slakilledMeterName2 = MetricRegistry.name(ServiceMetricNames.GOBBLIN_SERVICE_PREFIX, "executorTwo",
    ServiceMetricNames.SLA_EXCEEDED_FLOWS_METER);
String failedFlowGauge = MetricRegistry.name(ServiceMetricNames.GOBBLIN_SERVICE_PREFIX, "group1","flow1",
    ServiceMetricNames.RUNNING_STATUS);
// Two SLA kills on executorOne, one on executorTwo.
Assert.assertEquals(metricContext.getParent().get().getMeters().get(slakilledMeterName1).getCount(), 2);
Assert.assertEquals(metricContext.getParent().get().getMeters().get(slakilledMeterName2).getCount(), 1);
// Cleanup
this._dagManagerThread.run();
// Global meter grew by exactly the 3 SLA-killed flows; gauge reads -1 for the killed flow.
Assert.assertEquals(metricContext.getParent().get().getMeters().get(allSlaKilledMeterName).getCount(),
    previousSlaKilledCount + 3);
Assert.assertEquals(metricContext.getParent().get().getGauges().get(failedFlowGauge).getValue(), -1);
Assert.assertEquals(this.dags.size(), 0);
Assert.assertEquals(this.jobToDag.size(), 0);
Assert.assertEquals(this.dagToJobs.size(), 0);
}

/**
 * Verifies per-executor success/failure meters, the global success meter, the
 * jobs-sent-to-executor meter, and the failed-flow gauge after a run where flow1 fails
 * and flow0/flow2 complete. (Method name continues on the next source line.)
 */
@Test (dependsOnMethods = "testJobSlaKilledMetrics")
public void
testPerExecutorMetricsSuccessFails() throws URISyntaxException, IOException {
  long flowExecutionId = System.currentTimeMillis();
  Config executorOneConfig = ConfigFactory.empty()
      .withValue(ConfigurationKeys.SPECEXECUTOR_INSTANCE_URI_KEY, ConfigValueFactory.fromAnyRef("executorOne"))
      .withValue(ConfigurationKeys.FLOW_EXECUTION_ID_KEY, ConfigValueFactory.fromAnyRef(flowExecutionId));
  Config executorTwoConfig = ConfigFactory.empty()
      .withValue(ConfigurationKeys.SPECEXECUTOR_INSTANCE_URI_KEY, ConfigValueFactory.fromAnyRef("executorTwo"));
  // Two flows on executorOne plus a third on executorTwo.
  List<Dag<JobExecutionPlan>> dagList = buildDagList(2, "newUser", executorOneConfig);
  dagList.add(buildDag("2", flowExecutionId, "FINISH_RUNNING", 1, "newUser", executorTwoConfig));
  // Get global metric count before any changes are applied
  String allSuccessfulFlowsMeterName = MetricRegistry.name(ServiceMetricNames.GOBBLIN_SERVICE_PREFIX,
      ServiceMetricNames.SUCCESSFUL_FLOW_METER);
  long previousSuccessCount = metricContext.getParent().get().getMeters().get(allSuccessfulFlowsMeterName) == null ?
      0 : metricContext.getParent().get().getMeters().get(allSuccessfulFlowsMeterName).getCount();
  String previousJobSentToExecutorMeterName = MetricRegistry.name(ServiceMetricNames.GOBBLIN_SERVICE_PREFIX,
      "executorOne", ServiceMetricNames.JOBS_SENT_TO_SPEC_EXECUTOR);
  long previousJobSentToExecutorCount =
      metricContext.getParent().get().getMeters().get(previousJobSentToExecutorMeterName) == null ?
      0 : metricContext.getParent().get().getMeters().get(previousJobSentToExecutorMeterName).getCount();
  //Add a dag to the queue of dags
  this.queue.offer(dagList.get(0));
  this.queue.offer(dagList.get(1));
  this.queue.offer(dagList.get(2));; // NOTE(review): stray second ';' (harmless empty statement)
  // The start time should be 16 minutes ago, which is past the start SLA so the job should be cancelled
  // NOTE(review): the comment above looks stale -- flowExecutionId here is "now" and the
  // statuses below progress to COMPLETE/FAILED, not a start-SLA cancellation; verify intent.
  Iterator<JobStatus> jobStatusIteratorFlow0_0 = getMockJobStatus(
      "flow0", "group0", flowExecutionId, "job0", "group0",
      String.valueOf(ExecutionStatus.ORCHESTRATED), false, flowExecutionId);
  Iterator<JobStatus> jobStatusIteratorFlow1_0 = getMockJobStatus("flow1", "group1", flowExecutionId+1,
      "job0", "group1", String.valueOf(ExecutionStatus.ORCHESTRATED), false, flowExecutionId);
  Iterator<JobStatus> jobStatusIteratorFlow2_0 = getMockJobStatus("flow2", "group2", flowExecutionId+1,
      "job0", "group2", String.valueOf(ExecutionStatus.ORCHESTRATED), false, flowExecutionId);
  // Second poll: flow1's job FAILS while flow0 and flow2 COMPLETE.
  Iterator<JobStatus> jobStatusIteratorFlow0_1 = getMockJobStatus(
      "flow0", "group0", flowExecutionId+1, "job0", "group0",
      String.valueOf(ExecutionStatus.COMPLETE), false, flowExecutionId);
  Iterator<JobStatus> jobStatusIteratorFlow1_1 = getMockJobStatus("flow1", "group1", flowExecutionId+1,
      "job0", "group1", String.valueOf(ExecutionStatus.FAILED), false, flowExecutionId);
  Iterator<JobStatus> jobStatusIteratorFlow2_1 = getMockJobStatus("flow2", "group2", flowExecutionId+1,
      "job0", "group2", String.valueOf(ExecutionStatus.COMPLETE), false, flowExecutionId);
  // Third poll: terminal flow-level statuses matching the job outcomes.
  Iterator<JobStatus> jobStatusIteratorFlow0_2 = getMockFlowStatus(
      "flow0", "group0", flowExecutionId+1, String.valueOf(ExecutionStatus.COMPLETE));
  Iterator<JobStatus> jobStatusIteratorFlow1_2 = getMockFlowStatus("flow1", "group1", flowExecutionId+1,
      String.valueOf(ExecutionStatus.FAILED));
  Iterator<JobStatus> jobStatusIteratorFlow2_2 = getMockFlowStatus("flow2", "group2", flowExecutionId+1,
      String.valueOf(ExecutionStatus.COMPLETE));
  Mockito.when(_jobStatusRetriever
      .getJobStatusesForFlowExecution(Mockito.eq("flow0"), Mockito.eq("group0"), Mockito.anyLong(),
          Mockito.anyString(), Mockito.anyString())).
      thenReturn(jobStatusIteratorFlow0_0).
      thenReturn(jobStatusIteratorFlow0_1).
      thenReturn(jobStatusIteratorFlow0_2);
  Mockito.when(_jobStatusRetriever
      .getJobStatusesForFlowExecution(Mockito.eq("flow1"), Mockito.eq("group1"), Mockito.anyLong(),
          Mockito.anyString(), Mockito.anyString())).
      thenReturn(jobStatusIteratorFlow1_0).
      thenReturn(jobStatusIteratorFlow1_1).
      thenReturn(jobStatusIteratorFlow1_2);
  Mockito.when(_jobStatusRetriever
      .getJobStatusesForFlowExecution(Mockito.eq("flow2"), Mockito.eq("group2"), Mockito.anyLong(),
          Mockito.anyString(), Mockito.anyString())).
      thenReturn(jobStatusIteratorFlow2_0).
      thenReturn(jobStatusIteratorFlow2_1).
      thenReturn(jobStatusIteratorFlow2_2);
  this._dagManagerThread.run();
  String slaSuccessfulFlowsExecutorOneMeterName = MetricRegistry.name(ServiceMetricNames.GOBBLIN_SERVICE_PREFIX,
      "executorOne", ServiceMetricNames.SUCCESSFUL_FLOW_METER);
  String slaFailedFlowsExecutorOneMeterName = MetricRegistry.name(ServiceMetricNames.GOBBLIN_SERVICE_PREFIX,
      "executorOne", ServiceMetricNames.FAILED_FLOW_METER);
  String failedFlowGauge = MetricRegistry.name(ServiceMetricNames.GOBBLIN_SERVICE_PREFIX, "group1", "flow1",
      ServiceMetricNames.RUNNING_STATUS);
  this._dagManagerThread.run();
  // executorOne saw one success (flow0) and one failure (flow1); flow2 succeeded on executorTwo,
  // so the global success meter grows by 2 and executorOne was sent 2 jobs in total.
  Assert.assertEquals(metricContext.getParent().get().getMeters().get(slaSuccessfulFlowsExecutorOneMeterName).getCount(), 1);
  Assert.assertEquals(metricContext.getParent().get().getMeters().get(slaFailedFlowsExecutorOneMeterName).getCount(), 1);
  Assert.assertEquals(metricContext.getParent().get().getMeters().get(allSuccessfulFlowsMeterName).getCount(),
      previousSuccessCount + 2);
  Assert.assertEquals(metricContext.getParent().get().getMeters().get(previousJobSentToExecutorMeterName).getCount(),
      previousJobSentToExecutorCount + 2);
  Assert.assertEquals(metricContext.getParent().get().getGauges().get(failedFlowGauge).getValue(), -1);
  // Cleanup
  this._dagManagerThread.run();
  Assert.assertEquals(this.dags.size(), 0);
  Assert.assertEquals(this.jobToDag.size(), 0);
  Assert.assertEquals(this.dagToJobs.size(), 0);
}

/** Deletes the on-disk dag state store directory created for this test class. */
@AfterClass
public void cleanUp() throws Exception {
  FileUtils.deleteDirectory(new File(this.dagStateStoreDir));
}

/**
 * Minimal in-memory {@link DagStateStore} used in place of a persistent store; backed by a
 * {@link ConcurrentHashMap} keyed on the string form of the dag id.
 */
public static class InMemoryDagStateStore implements DagStateStore {
  private final Map<String, Dag<JobExecutionPlan>> dags = new ConcurrentHashMap<>();

  public void writeCheckpoint(Dag<JobExecutionPlan> dag) {
    dags.put(DagManagerUtils.generateDagId(dag).toString(), dag);
  }

  public void cleanUp(Dag<JobExecutionPlan> dag) {
    cleanUp(DagManagerUtils.generateDagId(dag).toString());
  }

  public void cleanUp(String dagId) {
    dags.remove(dagId);
  }

  public List<Dag<JobExecutionPlan>> getDags() {
    // Defensive copy so callers cannot mutate the store's internal map.
    return new ArrayList<>(dags.values());
  }

  public Dag<JobExecutionPlan> getDag(String dagId) {
    return dags.get(dagId);
  }

  public Set<String> getDagIds() {
    return dags.keySet();
  }
}
}
3,805
0
Create_ds/gobblin/gobblin-service/src/test/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/test/java/org/apache/gobblin/service/modules/orchestration/DagManagerFlowTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.modules.orchestration;

import java.io.IOException;
import java.net.URI;
import java.util.Collections;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

import org.apache.gobblin.runtime.spec_catalog.FlowCatalog;
import org.apache.gobblin.service.modules.utils.SharedFlowMetricsSingleton;
import org.apache.gobblin.service.monitoring.FlowStatusGenerator;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;

import com.google.common.base.Optional;
import com.google.common.base.Predicate;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigValueFactory;

import javax.annotation.Nullable;

import org.apache.gobblin.config.ConfigBuilder;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.metastore.testing.ITestMetastoreDatabase;
import org.apache.gobblin.metastore.testing.TestMetastoreDatabaseFactory;
import org.apache.gobblin.runtime.api.DagActionStore;
import org.apache.gobblin.runtime.api.FlowSpec;
import org.apache.gobblin.runtime.api.TopologySpec;
import org.apache.gobblin.runtime.dag_action_store.MysqlDagActionStore;
import org.apache.gobblin.service.ExecutionStatus;
import org.apache.gobblin.service.FlowId;
import org.apache.gobblin.service.modules.flowgraph.Dag;
import org.apache.gobblin.service.modules.spec.JobExecutionPlan;
import org.apache.gobblin.service.monitoring.JobStatusRetriever;
import org.apache.gobblin.testing.AssertWithBackoff;
import org.apache.gobblin.util.ConfigUtils;

import static org.mockito.Mockito.*;

/**
 * End-to-end tests of {@link DagManager} dag lifecycle handling (add, cancel, SLA kill)
 * using a {@link MockedDagManager} and a MySQL-backed {@link DagActionStore} on a test
 * metastore database.
 */
public class DagManagerFlowTest {
  MockedDagManager dagManager;
  int dagNumThreads;
  static final String ERROR_MESSAGE = "Waiting for the map to update";
  private static final String USER = "testUser";
  private static final String PASSWORD = "testPassword";
  private static final String TABLE = "dag_action_store";
  private static final String flowGroup = "testFlowGroup";
  private static final String flowName = "testFlowName";
  private static final String flowExecutionId = "12345677";
  private static final String flowExecutionId_2 = "12345678";
  private DagActionStore dagActionStore;

  /**
   * Seeds the dag action store with a KILL and a RESUME action, then activates the
   * DagManager and verifies both pre-loaded actions get processed and removed.
   */
  @BeforeClass
  public void setUp() throws Exception {
    Properties props = new Properties();
    props.put(DagManager.JOB_STATUS_POLLING_INTERVAL_KEY, 1);
    ITestMetastoreDatabase testDb = TestMetastoreDatabaseFactory.get();
    Config config = ConfigBuilder.create()
        .addPrimitive("MysqlDagActionStore." + ConfigurationKeys.STATE_STORE_DB_URL_KEY, testDb.getJdbcUrl())
        .addPrimitive("MysqlDagActionStore." + ConfigurationKeys.STATE_STORE_DB_USER_KEY, USER)
        .addPrimitive("MysqlDagActionStore." + ConfigurationKeys.STATE_STORE_DB_PASSWORD_KEY, PASSWORD)
        .addPrimitive("MysqlDagActionStore." + ConfigurationKeys.STATE_STORE_DB_TABLE_KEY, TABLE)
        .build();
    dagActionStore = new MysqlDagActionStore(config);
    dagActionStore.addDagAction(flowGroup, flowName, flowExecutionId, DagActionStore.FlowActionType.KILL);
    dagActionStore.addDagAction(flowGroup, flowName, flowExecutionId_2, DagActionStore.FlowActionType.RESUME);
    dagManager = new MockedDagManager(ConfigUtils.propertiesToConfig(props), false);
    dagManager.dagActionStore = Optional.of(dagActionStore);
    dagManager.setActive(true);
    this.dagNumThreads = dagManager.getNumThreads();
    // Give the now-active DagManager time to process the pre-loaded actions.
    Thread.sleep(30000);
    // On active, should proceed request and delete action entry
    Assert.assertEquals(dagActionStore.getDagActions().size(), 0);
  }

  /** Deactivates the DagManager and checks its housekeeping pool shuts down. */
  @AfterClass
  public void cleanUp() throws Exception {
    dagManager.setActive(false);
    Assert.assertEquals(dagManager.getHouseKeepingThreadPool().isShutdown(), true);
  }

  /**
   * Adds three dags, verifies they are tracked by their per-queue DagManagerThreads,
   * then stops each flow and verifies cancellation plus eventual removal from tracking.
   */
  @Test
  void testAddDeleteSpec() throws Exception {
    long flowExecutionId1 = System.currentTimeMillis();
    long flowExecutionId2 = flowExecutionId1 + 1;
    long flowExecutionId3 = flowExecutionId1 + 2;
    Dag<JobExecutionPlan> dag1 = DagManagerTest.buildDag("0", flowExecutionId1, "FINISH_RUNNING", 1);
    Dag<JobExecutionPlan> dag2 = DagManagerTest.buildDag("1", flowExecutionId2, "FINISH_RUNNING", 1);
    Dag<JobExecutionPlan> dag3 = DagManagerTest.buildDag("2", flowExecutionId3, "FINISH_RUNNING", 1);
    String dagId1 = DagManagerUtils.generateDagId(dag1).toString();
    String dagId2 = DagManagerUtils.generateDagId(dag2).toString();
    String dagId3 = DagManagerUtils.generateDagId(dag3).toString();
    // Each dag is hashed onto one of the DagManager's worker queues.
    int queue1 = DagManagerUtils.getDagQueueId(dag1, dagNumThreads);
    int queue2 = DagManagerUtils.getDagQueueId(dag2, dagNumThreads);
    int queue3 = DagManagerUtils.getDagQueueId(dag3, dagNumThreads);
    when(this.dagManager.getJobStatusRetriever().getLatestExecutionIdsForFlow(eq("flow0"), eq("group0"), anyInt()))
        .thenReturn(Collections.singletonList(flowExecutionId1));
    when(this.dagManager.getJobStatusRetriever().getLatestExecutionIdsForFlow(eq("flow1"), eq("group1"),
        anyInt()))
        .thenReturn(Collections.singletonList(flowExecutionId2));
    when(this.dagManager.getJobStatusRetriever().getLatestExecutionIdsForFlow(eq("flow2"), eq("group2"), anyInt()))
        .thenReturn(Collections.singletonList(flowExecutionId3));
    // mock add spec
    dagManager.addDag(dag1, true, true);
    dagManager.addDag(dag2, true, true);
    dagManager.addDag(dag3, true, true);
    // check existence of dag in dagToJobs map (polls with backoff -- addition is async)
    AssertWithBackoff.create().maxSleepMs(5000).backoffFactor(1).
        assertTrue(input -> dagManager.dagManagerThreads[queue1].dagToJobs.containsKey(dagId1), ERROR_MESSAGE);
    AssertWithBackoff.create().maxSleepMs(1000).backoffFactor(1).
        assertTrue(input -> dagManager.dagManagerThreads[queue2].dagToJobs.containsKey(dagId2), ERROR_MESSAGE);
    AssertWithBackoff.create().maxSleepMs(1000).backoffFactor(1).
        assertTrue(input -> dagManager.dagManagerThreads[queue3].dagToJobs.containsKey(dagId3), ERROR_MESSAGE);
    // mock cancel job
    dagManager.stopDag(FlowSpec.Utils.createFlowSpecUri(new FlowId().setFlowGroup("group0").setFlowName("flow0")));
    dagManager.stopDag(FlowSpec.Utils.createFlowSpecUri(new FlowId().setFlowGroup("group1").setFlowName("flow1")));
    dagManager.stopDag(FlowSpec.Utils.createFlowSpecUri(new FlowId().setFlowGroup("group2").setFlowName("flow2")));
    // verify cancelJob() of specProducer is called once
    AssertWithBackoff.create().maxSleepMs(5000).backoffFactor(1).assertTrue(new CancelPredicate(dag1), ERROR_MESSAGE);
    AssertWithBackoff.create().maxSleepMs(1000).backoffFactor(1).assertTrue(new CancelPredicate(dag2), ERROR_MESSAGE);
    AssertWithBackoff.create().maxSleepMs(1000).backoffFactor(1).assertTrue(new CancelPredicate(dag3), ERROR_MESSAGE);
    // mock flow cancellation tracking event: job-level CANCELLED statuses...
    Mockito.doReturn(DagManagerTest.getMockJobStatus("flow0", "group0", flowExecutionId1, "group0", "job0",
        String.valueOf(ExecutionStatus.CANCELLED)))
        .when(dagManager.getJobStatusRetriever()).getJobStatusesForFlowExecution("flow0", "group0",
        flowExecutionId1, "job0", "group0");
    Mockito.doReturn(DagManagerTest.getMockJobStatus("flow1", "group1", flowExecutionId2, "group1", "job0",
        String.valueOf(ExecutionStatus.CANCELLED)))
        .when(dagManager.getJobStatusRetriever()).getJobStatusesForFlowExecution("flow1", "group1",
        flowExecutionId2, "job0", "group1");
    Mockito.doReturn(DagManagerTest.getMockJobStatus("flow2", "group2", flowExecutionId3, "group2", "job0",
        String.valueOf(ExecutionStatus.CANCELLED)))
        .when(dagManager.getJobStatusRetriever()).getJobStatusesForFlowExecution("flow2", "group2",
        flowExecutionId3, "job0", "group2");
    // ...and flow-level CANCELLED statuses (queried with NA_KEY placeholders).
    Mockito.doReturn(DagManagerTest.getMockFlowStatus("flow0", "group0", flowExecutionId1,
        String.valueOf(ExecutionStatus.CANCELLED)))
        .when(dagManager.getJobStatusRetriever()).getJobStatusesForFlowExecution("flow0", "group0",
        flowExecutionId1, JobStatusRetriever.NA_KEY, JobStatusRetriever.NA_KEY);
    Mockito.doReturn(DagManagerTest.getMockFlowStatus("flow1", "group1", flowExecutionId2,
        String.valueOf(ExecutionStatus.CANCELLED)))
        .when(dagManager.getJobStatusRetriever()).getJobStatusesForFlowExecution("flow1", "group1",
        flowExecutionId2, JobStatusRetriever.NA_KEY, JobStatusRetriever.NA_KEY);
    Mockito.doReturn(DagManagerTest.getMockFlowStatus("flow2", "group2", flowExecutionId3,
        String.valueOf(ExecutionStatus.CANCELLED)))
        .when(dagManager.getJobStatusRetriever()).getJobStatusesForFlowExecution("flow2", "group2",
        flowExecutionId3, JobStatusRetriever.NA_KEY, JobStatusRetriever.NA_KEY);
    // check removal of dag in dagToJobs map
    AssertWithBackoff.create().maxSleepMs(5000).backoffFactor(1).
        assertTrue(input -> !dagManager.dagManagerThreads[queue1].dagToJobs.containsKey(dagId1), ERROR_MESSAGE);
    AssertWithBackoff.create().maxSleepMs(1000).backoffFactor(1).
        assertTrue(input -> !dagManager.dagManagerThreads[queue2].dagToJobs.containsKey(dagId2), ERROR_MESSAGE);
    AssertWithBackoff.create().maxSleepMs(1000).backoffFactor(1).
        assertTrue(input -> !dagManager.dagManagerThreads[queue3].dagToJobs.containsKey(dagId3), ERROR_MESSAGE);
  }

  /**
   * With no SLA configured, the dag gets the default flow SLA and must NOT be cancelled;
   * the expected outcome is a TimeoutException from the cancel-verification predicate.
   */
  @Test
  void testFlowSlaWithoutConfig() throws Exception {
    long flowExecutionId = System.currentTimeMillis();
    Dag<JobExecutionPlan> dag = DagManagerTest.buildDag("3", flowExecutionId, "FINISH_RUNNING", 1);
    String dagId = DagManagerUtils.generateDagId(dag).toString();
    int queue = DagManagerUtils.getDagQueueId(dag, dagNumThreads);
    when(this.dagManager.getJobStatusRetriever().getLatestExecutionIdsForFlow(eq("flow3"), eq("group3"), anyInt()))
        .thenReturn(Collections.singletonList(flowExecutionId));
    // mock add spec
    dagManager.addDag(dag, true, true);
    // check existence of dag in dagToJobs map
    AssertWithBackoff.create().maxSleepMs(5000).backoffFactor(1).
        assertTrue(input -> dagManager.dagManagerThreads[queue].dagToJobs.containsKey(dagId), ERROR_MESSAGE);
    // check existence of dag in dagToSLA map
    AssertWithBackoff.create().maxSleepMs(5000).backoffFactor(1).
        assertTrue(input -> dagManager.dagManagerThreads[queue].dagToSLA.containsKey(dagId), ERROR_MESSAGE);
    // check the SLA value: defaults apply when no per-flow SLA is configured.
    Assert.assertEquals(dagManager.dagManagerThreads[queue].dagToSLA.get(dagId).longValue(),
        DagManagerUtils.DEFAULT_FLOW_SLA_MILLIS);
    // verify cancelJob() of the specProducer is not called once
    // which means job cancellation was triggered
    try {
      AssertWithBackoff.create().maxSleepMs(5000).backoffFactor(1).assertTrue(new CancelPredicate(dag), ERROR_MESSAGE);
    } catch (TimeoutException e) {
      // Expected path: no cancellation happened, dag is still tracked.
      AssertWithBackoff.create().maxSleepMs(5000).backoffFactor(1).
          assertTrue(input -> dagManager.dagManagerThreads[queue].dagToJobs.containsKey(dagId), ERROR_MESSAGE);
      return;
    }
    // NOTE(review): this message reads inverted -- reaching here means cancellation WAS
    // triggered even though the default SLA should not have expired; consider rewording.
    Assert.fail("Job cancellation was not triggered.");
  }

  /**
   * With a short (7s) flow SLA configured on the start node, the dag must be SLA-killed:
   * cancelJob() fires on the spec producer and the dag leaves the dagToSLA map.
   */
  @Test()
  void testFlowSlaWithConfig() throws Exception {
    long flowExecutionId = System.currentTimeMillis();
    Dag<JobExecutionPlan> dag = DagManagerTest.buildDag("4", flowExecutionId, "FINISH_RUNNING", 1);
    String dagId = DagManagerUtils.generateDagId(dag).toString();
    int queue = DagManagerUtils.getDagQueueId(dag, dagNumThreads);
    when(this.dagManager.getJobStatusRetriever().getLatestExecutionIdsForFlow(eq("flow4"), eq("group4"), anyInt()))
        .thenReturn(Collections.singletonList(flowExecutionId));
    // change config to set a small sla
    Config jobConfig = dag.getStartNodes().get(0).getValue().getJobSpec().getConfig();
    jobConfig = jobConfig
        .withValue(ConfigurationKeys.GOBBLIN_FLOW_SLA_TIME, ConfigValueFactory.fromAnyRef("7"))
        .withValue(ConfigurationKeys.GOBBLIN_FLOW_SLA_TIME_UNIT, ConfigValueFactory.fromAnyRef(TimeUnit.SECONDS.name()));
    dag.getStartNodes().get(0).getValue().getJobSpec().setConfig(jobConfig);
    // mock add spec
    dagManager.addDag(dag, true, true);
    // check existence of dag in dagToSLA map
    AssertWithBackoff.create().maxSleepMs(5000).backoffFactor(1).
        assertTrue(input -> dagManager.dagManagerThreads[queue].dagToSLA.containsKey(dagId), ERROR_MESSAGE);
    // check the SLA value
    Assert.assertEquals(dagManager.dagManagerThreads[queue].dagToSLA.get(dagId).longValue(),
        TimeUnit.SECONDS.toMillis(7L));
    // check existence of dag in dagToJobs map
    AssertWithBackoff.create().maxSleepMs(5000).backoffFactor(1).
        assertTrue(input -> dagManager.dagManagerThreads[queue].dagToJobs.containsKey(dagId), ERROR_MESSAGE);
    // verify cancelJob() of specProducer is called once
    // which means job cancellation was triggered
    AssertWithBackoff.create().maxSleepMs(5000).backoffFactor(1).assertTrue(new CancelPredicate(dag), ERROR_MESSAGE);
    // check removal of dag from dagToSLA map
    AssertWithBackoff.create().maxSleepMs(5000).backoffFactor(1).
        assertTrue(input -> !dagManager.dagManagerThreads[queue].dagToSLA.containsKey(dagId), ERROR_MESSAGE);
  }

  /**
   * A job stuck in ORCHESTRATED past its (7s) start SLA is treated as orphaned and killed:
   * cancelJob() fires and the dag is removed from the dagToSLA map.
   */
  @Test()
  void testOrphanFlowKill() throws Exception {
    // Start 10 seconds in the past so the 7-second start SLA is already exceeded.
    Long flowExecutionId = System.currentTimeMillis() - TimeUnit.SECONDS.toMillis(10);
    Dag<JobExecutionPlan> dag = DagManagerTest.buildDag("6", flowExecutionId, "FINISH_RUNNING", 1);
    String dagId = DagManagerUtils.generateDagId(dag).toString();
    int queue = DagManagerUtils.getDagQueueId(dag, dagNumThreads);
    // change config to set a small sla
    Config jobConfig = dag.getStartNodes().get(0).getValue().getJobSpec().getConfig();
    jobConfig = jobConfig
        .withValue(ConfigurationKeys.GOBBLIN_JOB_START_SLA_TIME, ConfigValueFactory.fromAnyRef("7"))
        .withValue(ConfigurationKeys.GOBBLIN_JOB_START_SLA_TIME_UNIT, ConfigValueFactory.fromAnyRef(TimeUnit.SECONDS.name()));
    dag.getStartNodes().get(0).getValue().getJobSpec().setConfig(jobConfig);
    // mock add spec
    dagManager.addDag(dag, true, true);
    // check existence of dag in dagToSLA map
    AssertWithBackoff.create().maxSleepMs(5000).backoffFactor(1).
        assertTrue(input -> dagManager.dagManagerThreads[queue].dagToSLA.containsKey(dagId), ERROR_MESSAGE);
    // Job reports ORCHESTRATED forever -- i.e. it never actually starts.
    Mockito.doReturn(DagManagerTest.getMockJobStatus("flow6", "group6", flowExecutionId, "group6", "job0",
        String.valueOf(ExecutionStatus.ORCHESTRATED)))
        .when(dagManager.getJobStatusRetriever()).getJobStatusesForFlowExecution("flow6", "group6",
        flowExecutionId, "job0", "group6");
    // check existence of dag in dagToJobs map
    AssertWithBackoff.create().maxSleepMs(5000).backoffFactor(1).
        assertTrue(input -> dagManager.dagManagerThreads[queue].dagToJobs.containsKey(dagId), ERROR_MESSAGE);
    // verify cancelJob() of specProducer is called once
    // which means job cancellation was triggered
    AssertWithBackoff.create().maxSleepMs(5000).backoffFactor(1).assertTrue(new CancelPredicate(dag), ERROR_MESSAGE);
    // check removal of dag from dagToSLA map
    AssertWithBackoff.create().maxSleepMs(5000).backoffFactor(1).
        assertTrue(input -> !dagManager.dagManagerThreads[queue].dagToSLA.containsKey(dagId), ERROR_MESSAGE);
  }

  /** Unit check of {@link DagManagerUtils#getFlowSLA}: default, then 7s, then 8min overrides. */
  @Test
  void slaConfigCheck() throws Exception {
    Dag<JobExecutionPlan> dag = DagManagerTest.buildDag("5", 123456783L, "FINISH_RUNNING", 1);
    Assert.assertEquals(DagManagerUtils.getFlowSLA(dag.getStartNodes().get(0)),
        DagManagerUtils.DEFAULT_FLOW_SLA_MILLIS);
    Config jobConfig = dag.getStartNodes().get(0).getValue().getJobSpec().getConfig();
    jobConfig = jobConfig
        .withValue(ConfigurationKeys.GOBBLIN_FLOW_SLA_TIME, ConfigValueFactory.fromAnyRef("7"))
        .withValue(ConfigurationKeys.GOBBLIN_FLOW_SLA_TIME_UNIT, ConfigValueFactory.fromAnyRef(TimeUnit.SECONDS.name()));
    dag.getStartNodes().get(0).getValue().getJobSpec().setConfig(jobConfig);
    Assert.assertEquals(DagManagerUtils.getFlowSLA(dag.getStartNodes().get(0)), TimeUnit.SECONDS.toMillis(7L));
    jobConfig = jobConfig
        .withValue(ConfigurationKeys.GOBBLIN_FLOW_SLA_TIME, ConfigValueFactory.fromAnyRef("8"))
        .withValue(ConfigurationKeys.GOBBLIN_FLOW_SLA_TIME_UNIT, ConfigValueFactory.fromAnyRef(TimeUnit.MINUTES.name()));
    dag.getStartNodes().get(0).getValue().getJobSpec().setConfig(jobConfig);
    Assert.assertEquals(DagManagerUtils.getFlowSLA(dag.getStartNodes().get(0)), TimeUnit.MINUTES.toMillis(8L));
  }
}

/**
 * Predicate that succeeds once Mockito can verify the dag's first node's spec-producer
 * received exactly one cancelJob() call; any verification failure maps to {@code false}
 * so it can be polled via {@link AssertWithBackoff}.
 */
class CancelPredicate implements Predicate<Void> {
  private final Dag<JobExecutionPlan> dag;

  public CancelPredicate(Dag<JobExecutionPlan> dag) {
    this.dag = dag;
  }

  @Override
  public boolean apply(@Nullable Void input) {
    try {
      verify(dag.getNodes().get(0).getValue().getSpecExecutor().getProducer().get()).cancelJob(any(), any());
    } catch (Throwable e) {
      // Verification not (yet) satisfied -- report false so the caller can retry.
      return false;
    }
    return true;
  }
}

/**
 * {@link DagManager} wired with mocked collaborators: a JobStatusRetriever that returns no
 * statuses by default and a no-op mocked DagStateStore.
 */
class MockedDagManager extends DagManager {
  public MockedDagManager(Config config, boolean instrumentationEnabled) {
    super(config, createJobStatusRetriever(), Mockito.mock(SharedFlowMetricsSingleton.class),
        Mockito.mock(FlowStatusGenerator.class), Mockito.mock(FlowCatalog.class), instrumentationEnabled);
  }

  /** Builds a mock retriever whose status queries yield an empty iterator unless re-stubbed. */
  private static JobStatusRetriever createJobStatusRetriever() {
    JobStatusRetriever mockedJbStatusRetriever = Mockito.mock(JobStatusRetriever.class);
    Mockito.doReturn(Collections.emptyIterator()).when(mockedJbStatusRetriever).
        getJobStatusesForFlowExecution(anyString(), anyString(), anyLong(), anyString(), anyString());
    return mockedJbStatusRetriever;
  }

  @Override
  DagStateStore createDagStateStore(Config config, Map<URI, TopologySpec> topologySpecMap) {
    DagStateStore mockedDagStateStore = Mockito.mock(DagStateStore.class);
    try {
      // Checkpointing is a no-op in tests.
      doNothing().when(mockedDagStateStore).writeCheckpoint(any());
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
    return mockedDagStateStore;
  }
}
3,806
0
Create_ds/gobblin/gobblin-service/src/test/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/test/java/org/apache/gobblin/service/modules/restli/FlowConfigUtilsTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.modules.restli;

import java.io.IOException;
import java.util.Properties;

import org.testng.Assert;
import org.testng.annotations.Test;

import com.google.common.collect.Maps;
import com.linkedin.data.template.RequiredFieldNotPresentException;
import com.linkedin.data.template.StringMap;

import org.apache.gobblin.service.FlowConfig;
import org.apache.gobblin.service.FlowConfigLoggedException;
import org.apache.gobblin.service.FlowConfigResourceLocalHandler;
import org.apache.gobblin.service.FlowId;
import org.apache.gobblin.service.Schedule;


/**
 * Tests for {@link FlowConfigUtils}: verifies that a {@link FlowConfig} survives a
 * serialize/deserialize round trip, and that {@link FlowConfigResourceLocalHandler}
 * accepts (or rejects) the config when building a flow spec.
 */
@Test
public class FlowConfigUtilsTest {

  /**
   * Builds the {@link FlowConfig} skeleton shared by every test case: a fixed
   * {@link FlowId} plus the source/destination identifier properties.
   * Schedule and template URIs are added per test as needed.
   */
  private FlowConfig buildBaseFlowConfig() {
    FlowConfig flowConfig = new FlowConfig().setId(new FlowId()
        .setFlowName("SN_CRMSYNC")
        .setFlowGroup("DYNAMICS-USER-123456789"));

    Properties properties = new Properties();
    properties.put("gobblin.flow.sourceIdentifier", "dynamicsCrm");
    properties.put("gobblin.flow.destinationIdentifier", "espresso");
    flowConfig.setProperties(new StringMap(Maps.fromProperties(properties)));

    return flowConfig;
  }

  /** Asserts that a flow spec can be created from the given config without error. */
  private void testFlowSpec(FlowConfig flowConfig) {
    try {
      FlowConfigResourceLocalHandler.createFlowSpecForConfig(flowConfig);
    } catch (FlowConfigLoggedException e) {
      // Propagate the cause so a failure shows why spec creation was rejected.
      Assert.fail("Should not get to here", e);
    }
  }

  /** Asserts that serializing then deserializing the config yields an equivalent config. */
  private void testSerDer(FlowConfig flowConfig) {
    try {
      String serialized = FlowConfigUtils.serializeFlowConfig(flowConfig);
      FlowConfig newFlowConfig = FlowConfigUtils.deserializeFlowConfig(serialized);
      Assert.assertTrue(testEqual(flowConfig, newFlowConfig));
    } catch (IOException e) {
      Assert.fail("Should not get to here", e);
    }
  }

  /**
   * Due to default value setting, the flow config after deserialization might contain
   * default values the original did not, so checking {@code f1.equals(f2)} alone is not
   * enough. When the configs are not directly equal, compare them field by field.
   * assertEquals is used (rather than assertTrue on a boolean expression) so that a
   * mismatch reports the differing values.
   */
  private boolean testEqual(FlowConfig f1, FlowConfig f2) {
    if (f1.equals(f2)) {
      return true;
    }

    // Check Id
    Assert.assertEquals(f1.hasId(), f2.hasId());
    Assert.assertEquals(f1.getId(), f2.getId());

    // Check Schedule
    Assert.assertEquals(f1.hasSchedule(), f2.hasSchedule());
    if (f1.hasSchedule()) {
      Schedule s1 = f1.getSchedule();
      Schedule s2 = f2.getSchedule();
      Assert.assertEquals(s1.getCronSchedule(), s2.getCronSchedule());
      Assert.assertEquals(s1.isRunImmediately(), s2.isRunImmediately());
    }

    // Check Template URI
    Assert.assertEquals(f1.hasTemplateUris(), f2.hasTemplateUris());
    if (f1.hasTemplateUris()) {
      Assert.assertEquals(f1.getTemplateUris(), f2.getTemplateUris());
    }

    // Check Properties
    Assert.assertEquals(f1.hasProperties(), f2.hasProperties());
    if (f1.hasProperties()) {
      Assert.assertEquals(f1.getProperties(), f2.getProperties());
    }

    return true;
  }

  /** Config with schedule (runImmediately=true), template URI, and properties. */
  public void testFullFlowConfig() {
    FlowConfig flowConfig = buildBaseFlowConfig();
    flowConfig.setSchedule(new Schedule()
        .setCronSchedule("0 58 2/12 ? * * *")
        .setRunImmediately(Boolean.TRUE));
    flowConfig.setTemplateUris("FS:///my.template");

    testFlowSpec(flowConfig);
    testSerDer(flowConfig);
  }

  /** Config without any schedule: still a valid spec and round-trips intact. */
  public void testFlowConfigWithoutSchedule() {
    FlowConfig flowConfig = buildBaseFlowConfig();
    flowConfig.setTemplateUris("FS:///my.template");

    testFlowSpec(flowConfig);
    testSerDer(flowConfig);
  }

  /** Config with a schedule that leaves runImmediately at its default value. */
  public void testFlowConfigWithDefaultRunImmediately() {
    FlowConfig flowConfig = buildBaseFlowConfig();
    flowConfig.setSchedule(new Schedule()
        .setCronSchedule("0 58 2/12 ? * * *"));
    flowConfig.setTemplateUris("FS:///my.template");

    testFlowSpec(flowConfig);
    testSerDer(flowConfig);
  }

  /**
   * A config missing its template URI must be rejected by spec creation
   * (templateUris is a required field), but must still serialize round-trip cleanly.
   */
  public void testFlowConfigWithoutTemplateUri() {
    FlowConfig flowConfig = buildBaseFlowConfig();
    flowConfig.setSchedule(new Schedule()
        .setCronSchedule("0 58 2/12 ? * * *"));

    try {
      FlowConfigResourceLocalHandler.createFlowSpecForConfig(flowConfig);
      Assert.fail("Expected RequiredFieldNotPresentException because templateUris is missing");
    } catch (RequiredFieldNotPresentException e) {
      // Expected: templateUris cannot be empty when building a flow spec.
    }

    testSerDer(flowConfig);
  }
}
3,807
0
Create_ds/gobblin/gobblin-service/src/test/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/test/java/org/apache/gobblin/service/modules/flow/MultiHopFlowCompilerTest.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.service.modules.flow; import java.io.File; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.net.URI; import java.net.URISyntaxException; import java.nio.charset.Charset; import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Properties; import java.util.Set; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.SystemUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; import org.testng.Assert; import org.testng.annotations.AfterClass; import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; import com.google.common.base.Charsets; import com.google.common.base.Joiner; import com.google.common.collect.Lists; import com.google.common.io.Files; import com.typesafe.config.Config; import com.typesafe.config.ConfigFactory; 
import com.typesafe.config.ConfigParseOptions; import com.typesafe.config.ConfigSyntax; import lombok.extern.slf4j.Slf4j; import org.apache.gobblin.configuration.ConfigurationKeys; import org.apache.gobblin.data.management.retention.profile.ConfigurableGlobDatasetFinder; import org.apache.gobblin.runtime.api.FlowSpec; import org.apache.gobblin.runtime.api.JobSpec; import org.apache.gobblin.runtime.api.Spec; import org.apache.gobblin.runtime.api.SpecExecutor; import org.apache.gobblin.runtime.api.SpecProducer; import org.apache.gobblin.runtime.api.TopologySpec; import org.apache.gobblin.runtime.spec_executorInstance.AbstractSpecExecutor; import org.apache.gobblin.service.ServiceConfigKeys; import org.apache.gobblin.service.modules.flowgraph.BaseFlowGraph; import org.apache.gobblin.service.modules.flowgraph.Dag; import org.apache.gobblin.service.modules.flowgraph.Dag.DagNode; import org.apache.gobblin.service.modules.flowgraph.DataNode; import org.apache.gobblin.service.modules.flowgraph.FlowEdge; import org.apache.gobblin.service.modules.flowgraph.FlowEdgeFactory; import org.apache.gobblin.service.modules.flowgraph.FlowGraph; import org.apache.gobblin.service.modules.flowgraph.FlowGraphConfigurationKeys; import org.apache.gobblin.service.modules.orchestration.AzkabanProjectConfig; import org.apache.gobblin.service.modules.spec.JobExecutionPlan; import org.apache.gobblin.service.modules.template_catalog.FSFlowTemplateCatalog; import org.apache.gobblin.util.CompletedFuture; import org.apache.gobblin.util.ConfigUtils; import org.apache.gobblin.util.PathUtils; import org.apache.gobblin.util.reflection.GobblinConstructorUtils; @Slf4j public class MultiHopFlowCompilerTest { private AtomicReference<FlowGraph> flowGraph; private MultiHopFlowCompiler specCompiler; private final String TESTDIR = "/tmp/mhCompiler/gitFlowGraphTestDir"; @BeforeClass public void setUp() throws URISyntaxException, IOException, ReflectiveOperationException, FlowEdgeFactory.FlowEdgeCreationException 
{ //Create a FlowGraph this.flowGraph = new AtomicReference<>(new BaseFlowGraph()); //Add DataNodes to the graph from the node properties files URI dataNodesUri = MultiHopFlowCompilerTest.class.getClassLoader().getResource("flowgraph/datanodes").toURI(); FileSystem fs = FileSystem.get(dataNodesUri, new Configuration()); Path dataNodesPath = new Path(dataNodesUri); ConfigParseOptions options = ConfigParseOptions.defaults() .setSyntax(ConfigSyntax.PROPERTIES) .setAllowMissing(false); for (FileStatus fileStatus: fs.listStatus(dataNodesPath)) { try (InputStream is = fs.open(fileStatus.getPath())) { Config nodeConfig = ConfigFactory.parseReader(new InputStreamReader(is, Charsets.UTF_8), options); Class dataNodeClass = Class.forName(ConfigUtils .getString(nodeConfig, FlowGraphConfigurationKeys.DATA_NODE_CLASS, FlowGraphConfigurationKeys.DEFAULT_DATA_NODE_CLASS)); DataNode dataNode = (DataNode) GobblinConstructorUtils.invokeLongestConstructor(dataNodeClass, nodeConfig); this.flowGraph.get().addDataNode(dataNode); } } URI specExecutorCatalogUri = this.getClass().getClassLoader().getResource("topologyspec_catalog").toURI(); Map<URI, TopologySpec> topologySpecMap = buildTopologySpecMap(specExecutorCatalogUri); //Create a FSFlowTemplateCatalog instance URI flowTemplateCatalogUri = this.getClass().getClassLoader().getResource("template_catalog").toURI(); Properties properties = new Properties(); properties.put(ServiceConfigKeys.TEMPLATE_CATALOGS_FULLY_QUALIFIED_PATH_KEY, flowTemplateCatalogUri.toString()); Config config = ConfigFactory.parseProperties(properties); Config templateCatalogCfg = config .withValue(ConfigurationKeys.JOB_CONFIG_FILE_GENERAL_PATH_KEY, config.getValue(ServiceConfigKeys.TEMPLATE_CATALOGS_FULLY_QUALIFIED_PATH_KEY)); FSFlowTemplateCatalog flowCatalog = new FSFlowTemplateCatalog(templateCatalogCfg); //Add FlowEdges from the edge properties files URI flowEdgesURI = MultiHopFlowCompilerTest.class.getClassLoader().getResource("flowgraph/flowedges").toURI(); 
fs = FileSystem.get(flowEdgesURI, new Configuration()); Path flowEdgesPath = new Path(flowEdgesURI); for (FileStatus fileStatus: fs.listStatus(flowEdgesPath)) { log.warn(fileStatus.getPath().toString()); try (InputStream is = fs.open(fileStatus.getPath())) { Config flowEdgeConfig = ConfigFactory.parseReader(new InputStreamReader(is, Charsets.UTF_8), options); Class flowEdgeFactoryClass = Class.forName(ConfigUtils.getString(flowEdgeConfig, FlowGraphConfigurationKeys.FLOW_EDGE_FACTORY_CLASS, FlowGraphConfigurationKeys.DEFAULT_FLOW_EDGE_FACTORY_CLASS)); FlowEdgeFactory flowEdgeFactory = (FlowEdgeFactory) GobblinConstructorUtils.invokeLongestConstructor(flowEdgeFactoryClass, config); List<String> specExecutorNames = ConfigUtils.getStringList(flowEdgeConfig, FlowGraphConfigurationKeys.FLOW_EDGE_SPEC_EXECUTORS_KEY); List<SpecExecutor> specExecutors = new ArrayList<>(); for (String specExecutorName: specExecutorNames) { specExecutors.add(topologySpecMap.get(new URI(specExecutorName)).getSpecExecutor()); } FlowEdge edge = flowEdgeFactory.createFlowEdge(flowEdgeConfig, flowCatalog, specExecutors); this.flowGraph.get().addFlowEdge(edge); } } this.specCompiler = new MultiHopFlowCompiler(config, this.flowGraph); } /** * A helper method to return a {@link TopologySpec} map, given a {@link org.apache.gobblin.runtime.spec_catalog.TopologyCatalog}. * @param topologyCatalogUri pointing to the location of the {@link org.apache.gobblin.runtime.spec_catalog.TopologyCatalog} * @return a {@link TopologySpec} map. 
*/ public static Map<URI, TopologySpec> buildTopologySpecMap(URI topologyCatalogUri) throws IOException, URISyntaxException, ReflectiveOperationException { FileSystem fs = FileSystem.get(topologyCatalogUri, new Configuration()); PathFilter extensionFilter = file -> { for (String extension : Lists.newArrayList(".properties")) { if (file.getName().endsWith(extension)) { return true; } } return false; }; Map<URI, TopologySpec> topologySpecMap = new HashMap<>(); for (FileStatus fileStatus : fs.listStatus(new Path(topologyCatalogUri.getPath()), extensionFilter)) { URI topologySpecUri = new URI(Files.getNameWithoutExtension(fileStatus.getPath().getName())); Config topologyConfig = ConfigFactory.parseFile(new File(PathUtils.getPathWithoutSchemeAndAuthority(fileStatus.getPath()).toString())); Class specExecutorClass = Class.forName(topologyConfig.getString(ServiceConfigKeys.SPEC_EXECUTOR_KEY)); SpecExecutor specExecutor = (SpecExecutor) GobblinConstructorUtils.invokeLongestConstructor(specExecutorClass, topologyConfig); TopologySpec.Builder topologySpecBuilder = TopologySpec .builder(topologySpecUri) .withConfig(topologyConfig) .withDescription("") .withVersion("1") .withSpecExecutor(specExecutor); TopologySpec topologySpec = topologySpecBuilder.build(); topologySpecMap.put(topologySpecUri, topologySpec); } return topologySpecMap; } private FlowSpec createFlowSpec(String flowConfigResource, String source, String destination, boolean applyRetention, boolean applyRetentionOnInput) throws IOException, URISyntaxException { //Create a flow spec Properties flowProperties = new Properties(); flowProperties.put(ConfigurationKeys.JOB_SCHEDULE_KEY, "* * * * *"); flowProperties.put(ConfigurationKeys.FLOW_GROUP_KEY, "testFlowGroup"); flowProperties.put(ConfigurationKeys.FLOW_NAME_KEY, "testFlowName"); flowProperties.put(ServiceConfigKeys.FLOW_SOURCE_IDENTIFIER_KEY, source); flowProperties.put(ServiceConfigKeys.FLOW_DESTINATION_IDENTIFIER_KEY, destination); 
flowProperties.put(ConfigurationKeys.FLOW_APPLY_RETENTION, Boolean.toString(applyRetention)); flowProperties.put(ConfigurationKeys.FLOW_APPLY_INPUT_RETENTION, Boolean.toString(applyRetentionOnInput)); Config flowConfig = ConfigUtils.propertiesToConfig(flowProperties); //Get the input/output dataset config from a file URI flowConfigUri = MultiHopFlowCompilerTest.class.getClassLoader().getResource(flowConfigResource).toURI(); Path flowConfigPath = new Path(flowConfigUri); FileSystem fs1 = FileSystem.get(flowConfigUri, new Configuration()); try (InputStream is = fs1.open(flowConfigPath)) { Config datasetConfig = ConfigFactory.parseReader(new InputStreamReader(is, Charset.defaultCharset())); flowConfig = flowConfig.withFallback(datasetConfig).resolve(); } FlowSpec.Builder flowSpecBuilder = null; flowSpecBuilder = FlowSpec.builder(new Path("/tmp/flowSpecCatalog").toUri()) .withConfig(flowConfig) .withDescription("dummy description") .withVersion(FlowSpec.Builder.DEFAULT_VERSION); FlowSpec spec = flowSpecBuilder.build(); return spec; } @Test public void testCompileFlow() throws URISyntaxException, IOException { FlowSpec spec = createFlowSpec("flow/flow1.conf", "LocalFS-1", "ADLS-1", false, false); Dag<JobExecutionPlan> jobDag = this.specCompiler.compileFlow(spec); Assert.assertEquals(jobDag.getNodes().size(), 4); Assert.assertEquals(jobDag.getStartNodes().size(), 1); Assert.assertEquals(jobDag.getEndNodes().size(), 1); //Get the 1st hop - Distcp from "LocalFS-1" to "HDFS-1" DagNode<JobExecutionPlan> startNode = jobDag.getStartNodes().get(0); JobExecutionPlan jobSpecWithExecutor = startNode.getValue(); JobSpec jobSpec = jobSpecWithExecutor.getJobSpec(); //Ensure the resolved job config for the first hop has the correct substitutions. Config jobConfig = jobSpec.getConfig(); String flowGroup = "testFlowGroup"; String flowName = "testFlowName"; String expectedJobName1 = Joiner.on(JobExecutionPlan.Factory.JOB_NAME_COMPONENT_SEPARATION_CHAR). 
join(flowGroup, flowName, "Distcp", "LocalFS-1", "HDFS-1", "localToHdfs"); String jobName1 = jobConfig.getString(ConfigurationKeys.JOB_NAME_KEY); Assert.assertTrue(jobName1.startsWith(expectedJobName1)); String from = jobConfig.getString("from"); String to = jobConfig.getString("to"); Assert.assertEquals(from, "/data/out/testTeam/testDataset"); Assert.assertEquals(to, "/data/out/testTeam/testDataset"); String sourceFsUri = jobConfig.getString("fs.uri"); Assert.assertEquals(sourceFsUri, "file:///"); Assert.assertEquals(jobConfig.getString("source.filebased.fs.uri"), sourceFsUri); Assert.assertEquals(jobConfig.getString("state.store.fs.uri"), sourceFsUri); String targetFsUri = jobConfig.getString("target.filebased.fs.uri"); Assert.assertEquals(targetFsUri, "hdfs://hadoopnn01.grid.linkedin.com:8888/"); Assert.assertEquals(jobConfig.getString("writer.fs.uri"), targetFsUri); Assert.assertEquals(new Path(jobConfig.getString("gobblin.dataset.pattern")), new Path(from)); Assert.assertEquals(jobConfig.getString("data.publisher.final.dir"), to); Assert.assertEquals(jobConfig.getString("type"), "java"); Assert.assertEquals(jobConfig.getString("job.class"), "org.apache.gobblin.runtime.local.LocalJobLauncher"); Assert.assertEquals(jobConfig.getString("launcher.type"), "LOCAL"); //Ensure the spec executor has the correct configurations SpecExecutor specExecutor = jobSpecWithExecutor.getSpecExecutor(); Assert.assertEquals(specExecutor.getUri().toString(), "fs:///"); Assert.assertEquals(specExecutor.getClass().getCanonicalName(), "org.apache.gobblin.runtime.spec_executorInstance.InMemorySpecExecutor"); //Get the 2nd hop - "HDFS-1 to HDFS-1 : convert avro to json and encrypt". Ensure config has correct substitutions. 
Assert.assertEquals(jobDag.getChildren(startNode).size(), 1); DagNode<JobExecutionPlan> secondHopNode = jobDag.getChildren(startNode).get(0); jobSpecWithExecutor = secondHopNode.getValue(); jobConfig = jobSpecWithExecutor.getJobSpec().getConfig(); String expectedJobName2 = Joiner.on(JobExecutionPlan.Factory.JOB_NAME_COMPONENT_SEPARATION_CHAR). join(flowGroup, flowName, "ConvertToJsonAndEncrypt", "HDFS-1", "HDFS-1", "hdfsConvertToJsonAndEncrypt"); String jobName2 = jobConfig.getString(ConfigurationKeys.JOB_NAME_KEY); Assert.assertTrue(jobName2.startsWith(expectedJobName2)); Assert.assertEquals(jobConfig.getString(ConfigurationKeys.JOB_DEPENDENCIES), jobName1); from = jobConfig.getString("from"); to = jobConfig.getString("to"); Assert.assertEquals(from, "/data/out/testTeam/testDataset"); Assert.assertEquals(to, "/data/encrypted/testTeam/testDataset"); Assert.assertEquals(jobConfig.getString("source.filebased.data.directory"), from); Assert.assertEquals(jobConfig.getString("data.publisher.final.dir"), to); specExecutor = jobSpecWithExecutor.getSpecExecutor(); Assert.assertEquals(specExecutor.getUri().toString(), "https://azkaban01.gobblin.net:8443"); Assert.assertEquals(specExecutor.getClass().getCanonicalName(), "org.apache.gobblin.service.modules.flow.MultiHopFlowCompilerTest.TestAzkabanSpecExecutor"); //Get the 3rd hop - "Distcp HDFS-1 to HDFS-3" Assert.assertEquals(jobDag.getChildren(secondHopNode).size(), 1); DagNode<JobExecutionPlan> thirdHopNode = jobDag.getChildren(secondHopNode).get(0); jobSpecWithExecutor = thirdHopNode.getValue(); jobConfig = jobSpecWithExecutor.getJobSpec().getConfig(); String expectedJobName3 = Joiner.on(JobExecutionPlan.Factory.JOB_NAME_COMPONENT_SEPARATION_CHAR). 
join(flowGroup, flowName, "Distcp", "HDFS-1", "HDFS-3", "hdfsToHdfs"); String jobName3 = jobConfig.getString(ConfigurationKeys.JOB_NAME_KEY); Assert.assertTrue(jobName3.startsWith(expectedJobName3)); Assert.assertEquals(jobConfig.getString(ConfigurationKeys.JOB_DEPENDENCIES), jobName2); from = jobConfig.getString("from"); to = jobConfig.getString("to"); Assert.assertEquals(from, "/data/encrypted/testTeam/testDataset"); Assert.assertEquals(to, "/data/encrypted/testTeam/testDataset"); Assert.assertEquals(jobConfig.getString("source.filebased.fs.uri"), "hdfs://hadoopnn01.grid.linkedin.com:8888/"); Assert.assertEquals(jobConfig.getString("target.filebased.fs.uri"), "hdfs://hadoopnn03.grid.linkedin.com:8888/"); Assert.assertEquals(jobConfig.getString("type"), "hadoopJava"); Assert.assertEquals(jobConfig.getString("job.class"), "org.apache.gobblin.azkaban.AzkabanJobLauncher"); Assert.assertEquals(jobConfig.getString("launcher.type"), "MAPREDUCE"); //Ensure the spec executor has the correct configurations specExecutor = jobSpecWithExecutor.getSpecExecutor(); Assert.assertEquals(specExecutor.getUri().toString(), "https://azkaban01.gobblin.net:8443"); Assert.assertEquals(specExecutor.getClass().getCanonicalName(), "org.apache.gobblin.service.modules.flow.MultiHopFlowCompilerTest.TestAzkabanSpecExecutor"); //Get the 4th hop - "Distcp from HDFS-3 to ADLS-1" Assert.assertEquals(jobDag.getChildren(thirdHopNode).size(), 1); DagNode<JobExecutionPlan> fourthHopNode = jobDag.getChildren(thirdHopNode).get(0); jobSpecWithExecutor = fourthHopNode.getValue(); jobConfig = jobSpecWithExecutor.getJobSpec().getConfig(); String expectedJobName4 = Joiner.on(JobExecutionPlan.Factory.JOB_NAME_COMPONENT_SEPARATION_CHAR). 
join(flowGroup, flowName, "DistcpToADL", "HDFS-3", "ADLS-1", "hdfsToAdl"); String jobName4 = jobConfig.getString(ConfigurationKeys.JOB_NAME_KEY); Assert.assertTrue(jobName4.startsWith(expectedJobName4)); Assert.assertEquals(jobConfig.getString(ConfigurationKeys.JOB_DEPENDENCIES), jobName3); from = jobConfig.getString("from"); to = jobConfig.getString("to"); Assert.assertEquals(from, "/data/encrypted/testTeam/testDataset"); Assert.assertEquals(to, "/data/encrypted/testTeam/testDataset"); Assert.assertEquals(jobConfig.getString("source.filebased.fs.uri"), "hdfs://hadoopnn03.grid.linkedin.com:8888/"); Assert.assertEquals(jobConfig.getString("target.filebased.fs.uri"), "adl://azuredatalakestore.net/"); Assert.assertEquals(jobConfig.getString("type"), "hadoopJava"); Assert.assertEquals(jobConfig.getString("job.class"), "org.apache.gobblin.azkaban.AzkabanJobLauncher"); Assert.assertEquals(jobConfig.getString("launcher.type"), "MAPREDUCE"); Assert.assertEquals(jobConfig.getString("dfs.adls.oauth2.client.id"), "1234"); Assert.assertEquals(jobConfig.getString("writer.encrypted.dfs.adls.oauth2.credential"), "credential"); Assert.assertEquals(jobConfig.getString("encrypt.key.loc"), "/user/testUser/master.password"); //Ensure the spec executor has the correct configurations specExecutor = jobSpecWithExecutor.getSpecExecutor(); Assert.assertEquals(specExecutor.getUri().toString(), "https://azkaban03.gobblin.net:8443"); Assert.assertEquals(specExecutor.getClass().getCanonicalName(), "org.apache.gobblin.service.modules.flow.MultiHopFlowCompilerTest.TestAzkabanSpecExecutor"); //Ensure the fourth hop is the last Assert.assertEquals(jobDag.getEndNodes().get(0), fourthHopNode); } @Test (dependsOnMethods = "testCompileFlow") public void testCompileFlowWithRetention() throws URISyntaxException, IOException { FlowSpec spec = createFlowSpec("flow/flow1.conf", "LocalFS-1", "ADLS-1", true, true); Dag<JobExecutionPlan> jobDag = this.specCompiler.compileFlow(spec); 
Assert.assertEquals(jobDag.getNodes().size(), 9); Assert.assertEquals(jobDag.getStartNodes().size(), 2); Assert.assertEquals(jobDag.getEndNodes().size(), 5); String flowGroup = "testFlowGroup"; String flowName = "testFlowName"; List<DagNode<JobExecutionPlan>> currentHopNodes = jobDag.getStartNodes(); List<String> expectedJobNames = Lists.newArrayList("SnapshotRetention", "Distcp", "SnapshotRetention", "ConvertToJsonAndEncrypt", "SnapshotRetention" , "Distcp", "SnapshotRetention", "DistcpToADL", "SnapshotRetention"); List<String> sourceNodes = Lists.newArrayList("LocalFS-1", "LocalFS-1", "HDFS-1", "HDFS-1", "HDFS-1", "HDFS-1", "HDFS-3", "HDFS-3", "ADLS-1"); List<String> destinationNodes = Lists.newArrayList("LocalFS-1", "HDFS-1", "HDFS-1", "HDFS-1", "HDFS-1", "HDFS-3", "HDFS-3", "ADLS-1", "ADLS-1"); List<String> edgeNames = Lists.newArrayList("localRetention", "localToHdfs", "hdfsRetention", "hdfsConvertToJsonAndEncrypt", "hdfsRetention", "hdfsToHdfs", "hdfsRetention", "hdfsToAdl", "hdfsRemoteRetention"); List<DagNode<JobExecutionPlan>> nextHopNodes = new ArrayList<>(); for (int i = 0; i < 9; i += 2) { if (i < 8) { Assert.assertEquals(currentHopNodes.size(), 2); } else { Assert.assertEquals(currentHopNodes.size(), 1); } Set<String> jobNames = new HashSet<>(); jobNames.add(Joiner.on(JobExecutionPlan.Factory.JOB_NAME_COMPONENT_SEPARATION_CHAR). join(flowGroup, flowName, expectedJobNames.get(i), sourceNodes.get(i), destinationNodes.get(i), edgeNames.get(i))); if (i < 8) { jobNames.add(Joiner.on(JobExecutionPlan.Factory.JOB_NAME_COMPONENT_SEPARATION_CHAR). 
join(flowGroup, flowName, expectedJobNames.get(i + 1), sourceNodes.get(i + 1), destinationNodes.get(i + 1), edgeNames.get(i + 1))); } for (DagNode<JobExecutionPlan> dagNode : currentHopNodes) { Config jobConfig = dagNode.getValue().getJobSpec().getConfig(); String jobName = jobConfig.getString(ConfigurationKeys.JOB_NAME_KEY); Assert.assertTrue(jobNames.stream().anyMatch(jobName::startsWith)); log.warn(jobName); nextHopNodes.addAll(jobDag.getChildren(dagNode)); } currentHopNodes = nextHopNodes; nextHopNodes = new ArrayList<>(); } Assert.assertEquals(nextHopNodes.size(), 0); } @Test (dependsOnMethods = "testCompileFlowWithRetention") public void testCompileFlowAfterFirstEdgeDeletion() throws URISyntaxException, IOException { //Delete the self edge on HDFS-1 that performs convert-to-json-and-encrypt. this.flowGraph.get().deleteFlowEdge("HDFS-1_HDFS-1_hdfsConvertToJsonAndEncrypt"); FlowSpec spec = createFlowSpec("flow/flow1.conf", "LocalFS-1", "ADLS-1", false, false); Dag<JobExecutionPlan> jobDag = this.specCompiler.compileFlow(spec); Assert.assertEquals(jobDag.getNodes().size(), 4); Assert.assertEquals(jobDag.getStartNodes().size(), 1); Assert.assertEquals(jobDag.getEndNodes().size(), 1); //Get the 1st hop - Distcp from "LocalFS-1" to "HDFS-2" DagNode<JobExecutionPlan> startNode = jobDag.getStartNodes().get(0); JobExecutionPlan jobExecutionPlan = startNode.getValue(); JobSpec jobSpec = jobExecutionPlan.getJobSpec(); //Ensure the resolved job config for the first hop has the correct substitutions. Config jobConfig = jobSpec.getConfig(); String flowGroup = "testFlowGroup"; String flowName = "testFlowName"; String expectedJobName1 = Joiner.on(JobExecutionPlan.Factory.JOB_NAME_COMPONENT_SEPARATION_CHAR). 
join(flowGroup, flowName, "Distcp", "LocalFS-1", "HDFS-2", "localToHdfs"); String jobName1 = jobConfig.getString(ConfigurationKeys.JOB_NAME_KEY); Assert.assertTrue(jobName1.startsWith(expectedJobName1)); String from = jobConfig.getString("from"); String to = jobConfig.getString("to"); Assert.assertEquals(from, "/data/out/testTeam/testDataset"); Assert.assertEquals(to, "/data/out/testTeam/testDataset"); String sourceFsUri = jobConfig.getString("fs.uri"); Assert.assertEquals(sourceFsUri, "file:///"); Assert.assertEquals(jobConfig.getString("source.filebased.fs.uri"), sourceFsUri); Assert.assertEquals(jobConfig.getString("state.store.fs.uri"), sourceFsUri); String targetFsUri = jobConfig.getString("target.filebased.fs.uri"); Assert.assertEquals(targetFsUri, "hdfs://hadoopnn02.grid.linkedin.com:8888/"); Assert.assertEquals(jobConfig.getString("writer.fs.uri"), targetFsUri); Assert.assertEquals(new Path(jobConfig.getString("gobblin.dataset.pattern")), new Path(from)); Assert.assertEquals(jobConfig.getString("data.publisher.final.dir"), to); Assert.assertEquals(jobConfig.getString("type"), "java"); Assert.assertEquals(jobConfig.getString("job.class"), "org.apache.gobblin.runtime.local.LocalJobLauncher"); Assert.assertEquals(jobConfig.getString("launcher.type"), "LOCAL"); //Ensure the spec executor has the correct configurations SpecExecutor specExecutor = jobExecutionPlan.getSpecExecutor(); Assert.assertEquals(specExecutor.getUri().toString(), "fs:///"); Assert.assertEquals(specExecutor.getClass().getCanonicalName(), "org.apache.gobblin.runtime.spec_executorInstance.InMemorySpecExecutor"); //Get the 2nd hop - "HDFS-2 to HDFS-2 : convert avro to json and encrypt" Assert.assertEquals(jobDag.getChildren(startNode).size(), 1); DagNode<JobExecutionPlan> secondHopNode = jobDag.getChildren(startNode).get(0); jobExecutionPlan = secondHopNode.getValue(); jobConfig = jobExecutionPlan.getJobSpec().getConfig(); String expectedJobName2 = 
Joiner.on(JobExecutionPlan.Factory.JOB_NAME_COMPONENT_SEPARATION_CHAR). join(flowGroup, flowName, "ConvertToJsonAndEncrypt", "HDFS-2", "HDFS-2", "hdfsConvertToJsonAndEncrypt"); String jobName2 = jobConfig.getString(ConfigurationKeys.JOB_NAME_KEY); Assert.assertTrue(jobName2.startsWith(expectedJobName2)); Assert.assertEquals(jobConfig.getString(ConfigurationKeys.JOB_DEPENDENCIES), jobName1); from = jobConfig.getString("from"); to = jobConfig.getString("to"); Assert.assertEquals(from, "/data/out/testTeam/testDataset"); Assert.assertEquals(to, "/data/encrypted/testTeam/testDataset"); Assert.assertEquals(jobConfig.getString("source.filebased.data.directory"), from); Assert.assertEquals(jobConfig.getString("data.publisher.final.dir"), to); specExecutor = jobExecutionPlan.getSpecExecutor(); Assert.assertEquals(specExecutor.getUri().toString(), "https://azkaban02.gobblin.net:8443"); Assert.assertEquals(specExecutor.getClass().getCanonicalName(), "org.apache.gobblin.service.modules.flow.MultiHopFlowCompilerTest.TestAzkabanSpecExecutor"); //Get the 3rd hop - "Distcp HDFS-2 to HDFS-4" Assert.assertEquals(jobDag.getChildren(secondHopNode).size(), 1); DagNode<JobExecutionPlan> thirdHopNode = jobDag.getChildren(secondHopNode).get(0); jobExecutionPlan = thirdHopNode.getValue(); jobConfig = jobExecutionPlan.getJobSpec().getConfig(); String expectedJobName3 = Joiner.on(JobExecutionPlan.Factory.JOB_NAME_COMPONENT_SEPARATION_CHAR). 
join(flowGroup, flowName, "Distcp", "HDFS-2", "HDFS-4", "hdfsToHdfs"); String jobName3 = jobConfig.getString(ConfigurationKeys.JOB_NAME_KEY); Assert.assertTrue(jobName3.startsWith(expectedJobName3)); Assert.assertEquals(jobConfig.getString(ConfigurationKeys.JOB_DEPENDENCIES), jobName2); from = jobConfig.getString("from"); to = jobConfig.getString("to"); Assert.assertEquals(from, "/data/encrypted/testTeam/testDataset"); Assert.assertEquals(to, "/data/encrypted/testTeam/testDataset"); Assert.assertEquals(jobConfig.getString("source.filebased.fs.uri"), "hdfs://hadoopnn02.grid.linkedin.com:8888/"); Assert.assertEquals(jobConfig.getString("target.filebased.fs.uri"), "hdfs://hadoopnn04.grid.linkedin.com:8888/"); Assert.assertEquals(jobConfig.getString("type"), "hadoopJava"); Assert.assertEquals(jobConfig.getString("job.class"), "org.apache.gobblin.azkaban.AzkabanJobLauncher"); Assert.assertEquals(jobConfig.getString("launcher.type"), "MAPREDUCE"); //Ensure the spec executor has the correct configurations specExecutor = jobExecutionPlan.getSpecExecutor(); Assert.assertEquals(specExecutor.getUri().toString(), "https://azkaban02.gobblin.net:8443"); Assert.assertEquals(specExecutor.getClass().getCanonicalName(), "org.apache.gobblin.service.modules.flow.MultiHopFlowCompilerTest.TestAzkabanSpecExecutor"); //Get the 4th hop - "Distcp from HDFS-4 to ADLS-1" Assert.assertEquals(jobDag.getChildren(thirdHopNode).size(), 1); DagNode<JobExecutionPlan> fourthHopNode = jobDag.getChildren(thirdHopNode).get(0); jobExecutionPlan = fourthHopNode.getValue(); jobConfig = jobExecutionPlan.getJobSpec().getConfig(); String expectedJobName4 = Joiner.on(JobExecutionPlan.Factory.JOB_NAME_COMPONENT_SEPARATION_CHAR). 
join(flowGroup, flowName, "DistcpToADL", "HDFS-4", "ADLS-1", "hdfsToAdl"); String jobName4 = jobConfig.getString(ConfigurationKeys.JOB_NAME_KEY); Assert.assertTrue(jobName4.startsWith(expectedJobName4)); Assert.assertEquals(jobConfig.getString(ConfigurationKeys.JOB_DEPENDENCIES), jobName3); from = jobConfig.getString("from"); to = jobConfig.getString("to"); Assert.assertEquals(from, "/data/encrypted/testTeam/testDataset"); Assert.assertEquals(to, "/data/encrypted/testTeam/testDataset"); Assert.assertEquals(jobConfig.getString("source.filebased.fs.uri"), "hdfs://hadoopnn04.grid.linkedin.com:8888/"); Assert.assertEquals(jobConfig.getString("target.filebased.fs.uri"), "adl://azuredatalakestore.net/"); Assert.assertEquals(jobConfig.getString("type"), "hadoopJava"); Assert.assertEquals(jobConfig.getString("job.class"), "org.apache.gobblin.azkaban.AzkabanJobLauncher"); Assert.assertEquals(jobConfig.getString("launcher.type"), "MAPREDUCE"); Assert.assertEquals(jobConfig.getString("dfs.adls.oauth2.client.id"), "1234"); Assert.assertEquals(jobConfig.getString("writer.encrypted.dfs.adls.oauth2.credential"), "credential"); Assert.assertEquals(jobConfig.getString("encrypt.key.loc"), "/user/testUser/master.password"); //Ensure the spec executor has the correct configurations specExecutor = jobExecutionPlan.getSpecExecutor(); Assert.assertEquals(specExecutor.getUri().toString(), "https://azkaban04.gobblin.net:8443"); Assert.assertEquals(specExecutor.getClass().getCanonicalName(), "org.apache.gobblin.service.modules.flow.MultiHopFlowCompilerTest.TestAzkabanSpecExecutor"); //Ensure the fourth hop is the last Assert.assertEquals(jobDag.getEndNodes().get(0), fourthHopNode); } @Test (dependsOnMethods = "testCompileFlowAfterFirstEdgeDeletion") public void testCompileFlowAfterSecondEdgeDeletion() throws URISyntaxException, IOException { //Delete the self edge on HDFS-2 that performs convert-to-json-and-encrypt. 
this.flowGraph.get().deleteFlowEdge("HDFS-2_HDFS-2_hdfsConvertToJsonAndEncrypt"); FlowSpec spec = createFlowSpec("flow/flow1.conf", "LocalFS-1", "ADLS-1", false, false); Dag<JobExecutionPlan> jobDag = this.specCompiler.compileFlow(spec); //Ensure no path to destination. Assert.assertEquals(jobDag, null); } @Test (dependsOnMethods = "testCompileFlowAfterSecondEdgeDeletion") public void testCompileFlowSingleHop() throws IOException, URISyntaxException { FlowSpec spec = createFlowSpec("flow/flow2.conf", "HDFS-1", "HDFS-3", false, false); Dag<JobExecutionPlan> jobDag = this.specCompiler.compileFlow(spec); Assert.assertEquals(jobDag.getNodes().size(), 1); Assert.assertEquals(jobDag.getStartNodes().size(), 1); Assert.assertEquals(jobDag.getEndNodes().size(), 1); Assert.assertEquals(jobDag.getStartNodes().get(0), jobDag.getEndNodes().get(0)); //Ensure hop is from HDFS-1 to HDFS-3 i.e. jobName == "testFlowGroup_testFlowName_Distcp_HDFS-1_HDFS-3". DagNode<JobExecutionPlan> dagNode = jobDag.getStartNodes().get(0); Config jobConfig = dagNode.getValue().getJobSpec().getConfig(); String expectedJobName = Joiner.on(JobExecutionPlan.Factory.JOB_NAME_COMPONENT_SEPARATION_CHAR). 
join("testFlowGroup", "testFlowName", "Distcp", "HDFS-1", "HDFS-3", "hdfsToHdfs"); String jobName = jobConfig.getString(ConfigurationKeys.JOB_NAME_KEY); Assert.assertTrue(jobName.startsWith(expectedJobName)); } @Test (dependsOnMethods = "testCompileFlowSingleHop") public void testMulticastPath() throws IOException, URISyntaxException { FlowSpec spec = createFlowSpec("flow/flow2.conf", "LocalFS-1", "HDFS-3,HDFS-4", false, false); Dag<JobExecutionPlan> jobDag = this.specCompiler.compileFlow(spec); Assert.assertEquals(jobDag.getNodes().size(), 4); Assert.assertEquals(jobDag.getEndNodes().size(), 2); Assert.assertEquals(jobDag.getStartNodes().size(), 2); //First hop must be from LocalFS to HDFS-1 and HDFS-2 Set<String> jobNames = new HashSet<>(); jobNames.add(Joiner.on(JobExecutionPlan.Factory.JOB_NAME_COMPONENT_SEPARATION_CHAR). join("testFlowGroup", "testFlowName", "Distcp", "LocalFS-1", "HDFS-1", "localToHdfs")); jobNames.add(Joiner.on(JobExecutionPlan.Factory.JOB_NAME_COMPONENT_SEPARATION_CHAR). join("testFlowGroup", "testFlowName", "Distcp", "LocalFS-1", "HDFS-2", "localToHdfs")); for (DagNode<JobExecutionPlan> dagNode : jobDag.getStartNodes()) { Config jobConfig = dagNode.getValue().getJobSpec().getConfig(); String jobName = jobConfig.getString(ConfigurationKeys.JOB_NAME_KEY); Assert.assertTrue(jobNames.stream().anyMatch(jobName::startsWith)); } //Second hop must be from HDFS-1/HDFS-2 to HDFS-3/HDFS-4 respectively. jobNames = new HashSet<>(); jobNames.add(Joiner.on(JobExecutionPlan.Factory.JOB_NAME_COMPONENT_SEPARATION_CHAR). join("testFlowGroup", "testFlowName", "Distcp", "HDFS-1", "HDFS-3", "hdfsToHdfs")); jobNames.add(Joiner.on(JobExecutionPlan.Factory.JOB_NAME_COMPONENT_SEPARATION_CHAR). 
join("testFlowGroup", "testFlowName", "Distcp", "HDFS-2", "HDFS-4", "hdfsToHdfs")); for (DagNode<JobExecutionPlan> dagNode : jobDag.getStartNodes()) { List<DagNode<JobExecutionPlan>> nextNodes = jobDag.getChildren(dagNode); Assert.assertEquals(nextNodes.size(), 1); Config jobConfig = nextNodes.get(0).getValue().getJobSpec().getConfig(); String jobName = jobConfig.getString(ConfigurationKeys.JOB_NAME_KEY); Assert.assertTrue(jobNames.stream().anyMatch(jobName::startsWith)); } } @Test (dependsOnMethods = "testMulticastPath") public void testCompileMultiDatasetFlow() throws Exception { FlowSpec spec = createFlowSpec("flow/flow3.conf", "HDFS-1", "HDFS-3", true, false); Dag<JobExecutionPlan> dag = specCompiler.compileFlow(spec); // Should be 3 parallel jobs, one for each dataset, with copy -> retention Assert.assertEquals(dag.getNodes().size(), 6); Assert.assertEquals(dag.getEndNodes().size(), 3); Assert.assertEquals(dag.getStartNodes().size(), 3); String copyJobName = Joiner.on(JobExecutionPlan.Factory.JOB_NAME_COMPONENT_SEPARATION_CHAR). join("testFlowGroup", "testFlowName", "Distcp", "HDFS-1", "HDFS-3", "hdfsToHdfs"); for (DagNode<JobExecutionPlan> dagNode : dag.getStartNodes()) { Config jobConfig = dagNode.getValue().getJobSpec().getConfig(); String jobName = jobConfig.getString(ConfigurationKeys.JOB_NAME_KEY); Assert.assertTrue(jobName.startsWith(copyJobName)); } String retentionJobName = Joiner.on(JobExecutionPlan.Factory.JOB_NAME_COMPONENT_SEPARATION_CHAR). 
join("testFlowGroup", "testFlowName", "SnapshotRetention", "HDFS-3", "HDFS-3", "hdfsRetention"); for (DagNode<JobExecutionPlan> dagNode : dag.getEndNodes()) { Config jobConfig = dagNode.getValue().getJobSpec().getConfig(); String jobName = jobConfig.getString(ConfigurationKeys.JOB_NAME_KEY); Assert.assertTrue(jobName.startsWith(retentionJobName)); } } @Test (dependsOnMethods = "testCompileMultiDatasetFlow") public void testCompileCombinedDatasetFlow() throws Exception { FlowSpec spec = createFlowSpec("flow/flow4.conf", "HDFS-1", "HDFS-3", true, false); Dag<JobExecutionPlan> dag = specCompiler.compileFlow(spec); // Should be 2 jobs, each containing 3 datasets Assert.assertEquals(dag.getNodes().size(), 2); Assert.assertEquals(dag.getEndNodes().size(), 1); Assert.assertEquals(dag.getStartNodes().size(), 1); String copyJobName = Joiner.on(JobExecutionPlan.Factory.JOB_NAME_COMPONENT_SEPARATION_CHAR). join("testFlowGroup", "testFlowName", "Distcp", "HDFS-1", "HDFS-3", "hdfsToHdfs"); Config jobConfig = dag.getStartNodes().get(0).getValue().getJobSpec().getConfig(); String jobName = jobConfig.getString(ConfigurationKeys.JOB_NAME_KEY); Assert.assertTrue(jobName.startsWith(copyJobName)); Assert.assertTrue(jobConfig.getString(ConfigurableGlobDatasetFinder.DATASET_FINDER_PATTERN_KEY).endsWith("{dataset0,dataset1,dataset2}")); String retentionJobName = Joiner.on(JobExecutionPlan.Factory.JOB_NAME_COMPONENT_SEPARATION_CHAR). 
join("testFlowGroup", "testFlowName", "SnapshotRetention", "HDFS-3", "HDFS-3", "hdfsRetention"); Config jobConfig2 = dag.getEndNodes().get(0).getValue().getJobSpec().getConfig(); String jobName2 = jobConfig2.getString(ConfigurationKeys.JOB_NAME_KEY); Assert.assertTrue(jobName2.startsWith(retentionJobName)); Assert.assertTrue(jobConfig2.getString(ConfigurableGlobDatasetFinder.DATASET_FINDER_PATTERN_KEY).endsWith("{dataset0,dataset1,dataset2}")); // Should be empty since compilation was successful Assert.assertEquals(spec.getCompilationErrors().size(), 0); } @Test (dependsOnMethods = "testCompileCombinedDatasetFlow") public void testUnresolvedFlow() throws Exception { FlowSpec spec = createFlowSpec("flow/flow5.conf", "HDFS-1", "HDFS-3", false, false); Dag<JobExecutionPlan> dag = specCompiler.compileFlow(spec); Assert.assertNull(dag); Assert.assertTrue(spec.getCompilationErrors().stream().anyMatch(s -> s.errorMessage.contains(AzkabanProjectConfig.USER_TO_PROXY))); } @Test (dependsOnMethods = "testUnresolvedFlow") public void testMissingSourceNodeError() throws Exception { FlowSpec spec = createFlowSpec("flow/flow5.conf", "HDFS-NULL", "HDFS-3", false, false); Dag<JobExecutionPlan> dag = specCompiler.compileFlow(spec); Assert.assertEquals(dag, null); Assert.assertEquals(spec.getCompilationErrors().size(), 1); spec.getCompilationErrors().stream().anyMatch(s -> s.errorMessage.contains("Flowgraph does not have a node with id")); } @Test (dependsOnMethods = "testMissingSourceNodeError") public void testMissingDestinationNodeError() throws Exception { FlowSpec spec = createFlowSpec("flow/flow5.conf", "HDFS-1", "HDFS-NULL", false, false); Dag<JobExecutionPlan> dag = specCompiler.compileFlow(spec); Assert.assertNull(dag); Assert.assertEquals(spec.getCompilationErrors().size(), 1); spec.getCompilationErrors().stream().anyMatch(s -> s.errorMessage.contains("Flowgraph does not have a node with id")); } private String formNodeFilePath(File flowGraphDir, String groupDir, String 
fileName) { return flowGraphDir.getName() + SystemUtils.FILE_SEPARATOR + groupDir + SystemUtils.FILE_SEPARATOR + fileName; } private void cleanUpDir(String dir) throws IOException { File dirToDelete = new File(dir); if (dirToDelete.exists()) { FileUtils.deleteDirectory(new File(dir)); } } @AfterClass public void tearDown() throws IOException { cleanUpDir(TESTDIR); try { this.specCompiler.getServiceManager().stopAsync().awaitStopped(5, TimeUnit.SECONDS); } catch (Exception e) { log.warn("Could not stop Service Manager"); } } public static class TestAzkabanSpecExecutor extends AbstractSpecExecutor { // Executor Instance protected final Config config; private SpecProducer<Spec> azkabanSpecProducer; public TestAzkabanSpecExecutor(Config config) { super(config); this.config = config; } @Override protected void startUp() throws Exception { //Do nothing } @Override protected void shutDown() throws Exception { //Do nothing } @Override public Future<String> getDescription() { return new CompletedFuture<>("SimpleSpecExecutorInstance with URI: " + specExecutorInstanceUri, null); } @Override public Future<? extends SpecProducer<Spec>> getProducer() { return new CompletedFuture<>(this.azkabanSpecProducer, null); } @Override public Future<Config> getConfig() { return new CompletedFuture<>(config, null); } @Override public Future<String> getHealth() { return new CompletedFuture<>("Healthy", null); } } }
3,808
0
Create_ds/gobblin/gobblin-service/src/test/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/test/java/org/apache/gobblin/service/modules/flow/FlowGraphPathTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.modules.flow;

import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.List;

import org.testng.Assert;
import org.testng.annotations.Test;

import com.typesafe.config.Config;
import com.typesafe.config.ConfigValueFactory;

import org.apache.gobblin.config.ConfigBuilder;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.runtime.api.JobSpec;
import org.apache.gobblin.runtime.api.SpecExecutor;
import org.apache.gobblin.runtime.spec_executorInstance.InMemorySpecExecutor;
import org.apache.gobblin.service.modules.flowgraph.Dag;
import org.apache.gobblin.service.modules.spec.JobExecutionPlan;
import org.apache.gobblin.service.modules.spec.JobExecutionPlanDagFactory;


/**
 * Unit tests for {@link FlowGraphPath}, in particular the {@link FlowGraphPath#concatenate(Dag, Dag)}
 * operation used to stitch per-hop job dags into a single flow dag.
 */
public class FlowGraphPathTest {

  /**
   * Builds a linear {@link Dag} of {@code numNodes} {@link JobExecutionPlan}s named
   * {@code job<startNodeId>} .. {@code job<startNodeId + numNodes - 1>}, where each job (except the
   * first) declares a dependency on its predecessor.
   *
   * @param numNodes number of jobs in the dag
   * @param startNodeId numeric suffix of the first job name
   * @param isForkable if true, the last job in the dag is marked fork-on-concat, meaning
   *                   concatenation must not attach the next dag's start nodes as its children
   * @return a linear Dag of JobExecutionPlans
   * @throws URISyntaxException if a job template URI cannot be built (should not happen for these names)
   */
  public Dag<JobExecutionPlan> buildDag(int numNodes, int startNodeId, boolean isForkable)
      throws URISyntaxException {
    List<JobExecutionPlan> jobExecutionPlans = new ArrayList<>();
    Config baseConfig = ConfigBuilder.create().
        addPrimitive(ConfigurationKeys.FLOW_GROUP_KEY, "group0").
        addPrimitive(ConfigurationKeys.FLOW_NAME_KEY, "flow0").
        addPrimitive(ConfigurationKeys.FLOW_EXECUTION_ID_KEY, System.currentTimeMillis()).
        addPrimitive(ConfigurationKeys.JOB_GROUP_KEY, "group0").build();
    for (int i = startNodeId; i < startNodeId + numNodes; i++) {
      String suffix = Integer.toString(i);
      Config jobConfig = baseConfig.withValue(ConfigurationKeys.JOB_NAME_KEY,
          ConfigValueFactory.fromAnyRef("job" + suffix));
      // Only the final node of the dag can be marked forkable.
      if (isForkable && (i == startNodeId + numNodes - 1)) {
        jobConfig = jobConfig.withValue(ConfigurationKeys.JOB_FORK_ON_CONCAT,
            ConfigValueFactory.fromAnyRef(true));
      }
      // Every job after the first depends on its immediate predecessor, making the dag linear.
      if (i > startNodeId) {
        jobConfig = jobConfig.withValue(ConfigurationKeys.JOB_DEPENDENCIES,
            ConfigValueFactory.fromAnyRef("job" + (i - 1)));
      }
      JobSpec js = JobSpec.builder("test_job" + suffix).withVersion(suffix).withConfig(jobConfig).
          withTemplate(new URI("job" + suffix)).build();
      SpecExecutor specExecutor = InMemorySpecExecutor.createDummySpecExecutor(new URI("job" + i));
      JobExecutionPlan jobExecutionPlan = new JobExecutionPlan(js, specExecutor);
      jobExecutionPlans.add(jobExecutionPlan);
    }
    return new JobExecutionPlanDagFactory().createDag(jobExecutionPlans);
  }

  /**
   * Exercises {@link FlowGraphPath#concatenate(Dag, Dag)} over four cases: plain linear
   * concatenation, a forkable single-node first dag, a forkable tail in a multi-node first dag,
   * and two single-node dags with the first forkable.
   */
  @Test
  public void testConcatenate() throws URISyntaxException {
    //Dag1: "job0->job1", Dag2: "job2->job3"
    Dag<JobExecutionPlan> dag1 = buildDag(2, 0, false);
    Dag<JobExecutionPlan> dag2 = buildDag(2, 2, false);
    Dag<JobExecutionPlan> dagNew = FlowGraphPath.concatenate(dag1, dag2);
    //Expected result: "job0"->"job1"->"job2"->"job3"
    Assert.assertEquals(dagNew.getStartNodes().size(), 1);
    Assert.assertEquals(dagNew.getEndNodes().size(), 1);
    Assert.assertEquals(dagNew.getNodes().size(), 4);
    Assert.assertEquals(dagNew.getStartNodes().get(0).getValue().getJobSpec().getConfig().getString(ConfigurationKeys.JOB_NAME_KEY), "job0");
    Assert.assertEquals(dagNew.getEndNodes().get(0).getValue().getJobSpec().getConfig().getString(ConfigurationKeys.JOB_NAME_KEY), "job3");
    Assert.assertEquals(dagNew.getEndNodes().get(0).getValue().getJobSpec().getConfig().getString(ConfigurationKeys.JOB_DEPENDENCIES), "job2");

    //Dag1: "job0", Dag2: "job1->job2", "job0" forkable
    dag1 = buildDag(1, 0, true);
    dag2 = buildDag(2, 1, false);
    dagNew = FlowGraphPath.concatenate(dag1, dag2);
    //Expected result: "job0", "job1" -> "job2"
    Assert.assertEquals(dagNew.getStartNodes().size(), 2);
    Assert.assertEquals(dagNew.getEndNodes().size(), 2);
    Assert.assertEquals(dagNew.getNodes().size(), 3);
    Assert.assertEquals(dagNew.getStartNodes().get(0).getValue().getJobSpec().getConfig().getString(ConfigurationKeys.JOB_NAME_KEY), "job0");
    Assert.assertEquals(dagNew.getStartNodes().get(1).getValue().getJobSpec().getConfig().getString(ConfigurationKeys.JOB_NAME_KEY), "job1");
    Assert.assertEquals(dagNew.getEndNodes().get(0).getValue().getJobSpec().getConfig().getString(ConfigurationKeys.JOB_NAME_KEY), "job2");
    Assert.assertEquals(dagNew.getEndNodes().get(0).getValue().getJobSpec().getConfig().getString(ConfigurationKeys.JOB_DEPENDENCIES), "job1");
    Assert.assertEquals(dagNew.getEndNodes().get(1).getValue().getJobSpec().getConfig().getString(ConfigurationKeys.JOB_NAME_KEY), "job0");
    Assert.assertFalse(dagNew.getEndNodes().get(1).getValue().getJobSpec().getConfig().hasPath(ConfigurationKeys.JOB_DEPENDENCIES));

    //Dag1: "job0->job1", Dag2: "job2->job3", "job1" forkable
    dag1 = buildDag(2, 0, true);
    dag2 = buildDag(2, 2, false);
    dagNew = FlowGraphPath.concatenate(dag1, dag2);
    //Expected result: "job0" -> "job1"
    //                      \-> "job2" -> "job3"
    Assert.assertEquals(dagNew.getStartNodes().size(), 1);
    Assert.assertEquals(dagNew.getEndNodes().size(), 2);
    Assert.assertEquals(dagNew.getNodes().size(), 4);
    Assert.assertEquals(dagNew.getStartNodes().get(0).getValue().getJobSpec().getConfig().getString(ConfigurationKeys.JOB_NAME_KEY), "job0");
    Assert.assertEquals(dagNew.getEndNodes().get(0).getValue().getJobSpec().getConfig().getString(ConfigurationKeys.JOB_NAME_KEY), "job3");
    Assert.assertEquals(dagNew.getEndNodes().get(0).getValue().getJobSpec().getConfig().getString(ConfigurationKeys.JOB_DEPENDENCIES), "job2");
    Assert.assertEquals(dagNew.getEndNodes().get(1).getValue().getJobSpec().getConfig().getString(ConfigurationKeys.JOB_NAME_KEY), "job1");
    Assert.assertEquals(dagNew.getEndNodes().get(1).getValue().getJobSpec().getConfig().getString(ConfigurationKeys.JOB_DEPENDENCIES), "job0");

    //Dag1: "job0", Dag2: "job1"
    dag1 = buildDag(1, 0, true);
    dag2 = buildDag(1, 1, false);
    dagNew = FlowGraphPath.concatenate(dag1, dag2);
    //Expected result: "job0","job1"
    Assert.assertEquals(dagNew.getStartNodes().size(), 2);
    Assert.assertEquals(dagNew.getEndNodes().size(), 2);
    Assert.assertEquals(dagNew.getNodes().size(), 2);
    Assert.assertEquals(dagNew.getStartNodes().get(0).getValue().getJobSpec().getConfig().getString(ConfigurationKeys.JOB_NAME_KEY), "job0");
    Assert.assertEquals(dagNew.getStartNodes().get(1).getValue().getJobSpec().getConfig().getString(ConfigurationKeys.JOB_NAME_KEY), "job1");
    Assert.assertFalse(dagNew.getStartNodes().get(1).getValue().getJobSpec().getConfig().hasPath(ConfigurationKeys.JOB_DEPENDENCIES));
    // BUGFIX: the original repeated the get(1) assertion twice; check the other start node ("job0")
    // also carries no dependency after concatenation.
    Assert.assertFalse(dagNew.getStartNodes().get(0).getValue().getJobSpec().getConfig().hasPath(ConfigurationKeys.JOB_DEPENDENCIES));
  }
}
3,809
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/monitoring/GitConfigListener.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.monitoring;

import java.io.IOException;
import java.net.URI;
import java.util.Set;

import lombok.extern.slf4j.Slf4j;

import org.eclipse.jgit.diff.DiffEntry;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import com.google.common.collect.Sets;
import com.google.common.io.Files;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;

import org.apache.gobblin.config.ConfigBuilder;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.runtime.api.FlowSpec;
import org.apache.gobblin.runtime.spec_catalog.FlowCatalog;
import org.apache.gobblin.runtime.spec_store.FSSpecStore;
import org.apache.gobblin.util.PullFileLoader;


/**
 * Listener for {@link GitConfigMonitor} to apply changes from Git to a {@link FlowCatalog} for adding and removing jobs
 */
@Slf4j
public class GitConfigListener implements GitDiffListener {
  // Expected path depth of a config file: configBaseFolder/flowGroup/flowName.<ext>
  private static final int CONFIG_FILE_DEPTH = 3;
  private static final String SPEC_DESCRIPTION = "Git-based flow config";
  private static final String SPEC_VERSION = FlowSpec.Builder.DEFAULT_VERSION;

  private final FlowCatalog flowCatalog;

  final String repositoryDir;
  final String configBaseFolderName;
  final PullFileLoader pullFileLoader;
  final Set<String> javaPropsExtensions;
  final Set<String> hoconFileExtensions;

  // Fallback config used when loading pull files; no global overrides are applied.
  private final Config emptyConfig = ConfigFactory.empty();

  /**
   * @param flowCatalog catalog to which discovered {@link FlowSpec}s are added/removed
   * @param repositoryDir local checkout directory of the Git repository
   * @param configBaseFolderName name of the folder (under the repo root) holding flow configs
   * @param javaPropsExtentions comma-separated file extensions treated as Java properties pull files
   * @param hoconFileExtentions comma-separated file extensions treated as HOCON pull files
   */
  public GitConfigListener(FlowCatalog flowCatalog, String repositoryDir, String configBaseFolderName,
      String javaPropsExtentions, String hoconFileExtentions) {
    this.flowCatalog = flowCatalog;
    this.configBaseFolderName = configBaseFolderName;
    this.repositoryDir = repositoryDir;

    Path folderPath = new Path(repositoryDir, configBaseFolderName);
    this.javaPropsExtensions = Sets.newHashSet(javaPropsExtentions.split(","));
    this.hoconFileExtensions = Sets.newHashSet(hoconFileExtentions.split(","));
    try {
      this.pullFileLoader = new PullFileLoader(folderPath,
          FileSystem.get(URI.create(ConfigurationKeys.LOCAL_FS_URI), new Configuration()),
          this.javaPropsExtensions, this.hoconFileExtensions);
    } catch (IOException e) {
      throw new RuntimeException("Could not create pull file loader", e);
    }
  }

  /**
   * Add a {@link FlowSpec} for an added, updated, or modified flow config
   * @param change
   */
  @Override
  public void addChange(DiffEntry change) {
    if (checkConfigFilePath(change.getNewPath())) {
      Path configFilePath = new Path(this.repositoryDir, change.getNewPath());

      try {
        Config flowConfig = loadConfigFileWithFlowNameOverrides(configFilePath);

        this.flowCatalog.put(FlowSpec.builder()
            .withConfig(flowConfig)
            .withVersion(SPEC_VERSION)
            .withDescription(SPEC_DESCRIPTION)
            .build());
      } catch (Throwable e) {
        log.warn("Could not load config file: " + configFilePath);
      }
    }
  }

  /**
   * remove a {@link FlowSpec} for a deleted or renamed flow config
   * @param change
   */
  @Override
  public void removeChange(DiffEntry change) {
    if (checkConfigFilePath(change.getOldPath())) {
      Path configFilePath = new Path(this.repositoryDir, change.getOldPath());
      String flowName = FSSpecStore.getSpecName(configFilePath);
      String flowGroup = FSSpecStore.getSpecGroup(configFilePath);

      // build a dummy config to get the proper URI for delete
      Config dummyConfig = ConfigBuilder.create()
          .addPrimitive(ConfigurationKeys.FLOW_GROUP_KEY, flowGroup)
          .addPrimitive(ConfigurationKeys.FLOW_NAME_KEY, flowName)
          .build();

      FlowSpec spec = FlowSpec.builder()
          .withConfig(dummyConfig)
          .withVersion(SPEC_VERSION)
          .withDescription(SPEC_DESCRIPTION)
          .build();

      this.flowCatalog.remove(spec.getUri());
    }
  }

  /**
   * check whether the file has the proper naming and hierarchy
   * @param configFilePath the relative path from the repo root
   * @return false if the file does not conform
   */
  private boolean checkConfigFilePath(String configFilePath) {
    // The config needs to stored at configDir/flowGroup/flowName.(pull|job|json|conf)
    Path configFile = new Path(configFilePath);
    String fileExtension = Files.getFileExtension(configFile.getName());

    // BUGFIX: the original tested DEFAULT_JAVA_PROPS_PULL_FILE_EXTENSIONS on both sides of the "||"
    // (copy-paste error), so HOCON extensions were never accepted; it also ignored the extension
    // sets configured in the constructor. Use the instance sets so configured extensions are honored.
    if (configFile.depth() != CONFIG_FILE_DEPTH
        || !configFile.getParent().getParent().getName().equals(configBaseFolderName)
        || !(this.javaPropsExtensions.contains(fileExtension)
             || this.hoconFileExtensions.contains(fileExtension))) {
      log.warn("Changed file does not conform to directory structure and file name format, skipping: "
          + configFilePath);
      return false;
    }
    return true;
  }

  /**
   * Load the config file and override the flow name and flow path properties with the names from the file path
   * @param configFilePath path of the config file relative to the repository root
   * @return the configuration object
   * @throws IOException
   */
  private Config loadConfigFileWithFlowNameOverrides(Path configFilePath) throws IOException {
    Config flowConfig = this.pullFileLoader.loadPullFile(configFilePath, emptyConfig, false);
    String flowName = FSSpecStore.getSpecName(configFilePath);
    String flowGroup = FSSpecStore.getSpecGroup(configFilePath);

    return flowConfig.withValue(ConfigurationKeys.FLOW_NAME_KEY, ConfigValueFactory.fromAnyRef(flowName))
        .withValue(ConfigurationKeys.FLOW_GROUP_KEY, ConfigValueFactory.fromAnyRef(flowGroup));
  }
}
3,810
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/monitoring/ChangeMonitorUtils.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.service.monitoring; import com.google.common.cache.LoadingCache; import lombok.extern.slf4j.Slf4j; import org.apache.gobblin.metrics.ContextAwareMeter; @Slf4j public final class ChangeMonitorUtils { private ChangeMonitorUtils() { return; } /** * Performs checks for duplicate messages, heartbeat message types, or null dag action types all of which cannot or * should not be processed. 
Returns true if the pre-conditions above don't apply, and we should proceed processing * the change event */ public static boolean isValidAndUniqueMessage(String changeIdentifier, String operation, String timestamp, LoadingCache<String, String> cache, ContextAwareMeter duplicateMessagesMeter, ContextAwareMeter heartbeatMessagesMeter) { // If we've already processed a message with this timestamp and key before then skip duplicate message if (cache.getIfPresent(changeIdentifier) != null) { log.debug("Duplicate change event with identifier {}", changeIdentifier); duplicateMessagesMeter.mark(); return false; } // If event is a heartbeat type then log it and skip processing if (operation.equals("HEARTBEAT")) { log.debug("Received heartbeat message from time {}", timestamp); heartbeatMessagesMeter.mark(); return false; } return true; } }
3,811
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/monitoring/NoopGaaSObservabilityEventProducer.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.service.monitoring; import org.apache.gobblin.configuration.State; import org.apache.gobblin.metrics.GaaSObservabilityEventExperimental; import org.apache.gobblin.runtime.troubleshooter.MultiContextIssueRepository; /** * The default producer for emitting GaaS Observability Events in the KafkaJobStatusMonitor * This class does no work and will not create or emit any events */ public class NoopGaaSObservabilityEventProducer extends GaaSObservabilityEventProducer { public NoopGaaSObservabilityEventProducer(State state, MultiContextIssueRepository issueRepository, boolean instrumentationEnabled) { super(state, issueRepository, instrumentationEnabled); } public NoopGaaSObservabilityEventProducer() { super(null, null, false); } @Override public void emitObservabilityEvent(State jobState) {} @Override protected void sendUnderlyingEvent(GaaSObservabilityEventExperimental event) {} }
3,812
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/monitoring/GitDiffListener.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.monitoring;

import org.eclipse.jgit.diff.DiffEntry;


/**
 * Listener for {@link GitMonitoringService} to apply changes detected from Git.
 */
public interface GitDiffListener {

  /**
   * Invoked for a {@link DiffEntry} representing an added or modified file in the Git diff.
   *
   * @param change the diff entry; implementations typically read {@link DiffEntry#getNewPath()}
   */
  void addChange(DiffEntry change);

  /**
   * Invoked for a {@link DiffEntry} representing a deleted or renamed file in the Git diff.
   *
   * @param change the diff entry; implementations typically read {@link DiffEntry#getOldPath()}
   */
  void removeChange(DiffEntry change);
}
3,813
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/monitoring/SpecStoreChangeMonitorFactory.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.monitoring;

import java.util.Objects;

import com.typesafe.config.Config;

import javax.inject.Inject;
import javax.inject.Provider;
import lombok.extern.slf4j.Slf4j;

import org.apache.gobblin.runtime.spec_catalog.FlowCatalog;
import org.apache.gobblin.service.modules.scheduler.GobblinServiceJobScheduler;
import org.apache.gobblin.util.ConfigUtils;


/**
 * A factory implementation that returns a {@link SpecStoreChangeMonitor} instance.
 */
@Slf4j
public class SpecStoreChangeMonitorFactory implements Provider<SpecStoreChangeMonitor> {
  /** Config key (under the monitor prefix) for the consumer thread pool size. */
  static final String SPEC_STORE_CHANGE_MONITOR_NUM_THREADS_KEY = "numThreads";

  // All collaborators are injected once and never reassigned; declared final for immutability.
  private final Config config;
  private final FlowCatalog flowCatalog;
  private final GobblinServiceJobScheduler scheduler;

  @Inject
  public SpecStoreChangeMonitorFactory(Config config, FlowCatalog flowCatalog,
      GobblinServiceJobScheduler scheduler) {
    this.config = Objects.requireNonNull(config);
    this.flowCatalog = flowCatalog;
    this.scheduler = scheduler;
  }

  /**
   * Builds the monitor from the {@code SpecStoreChangeMonitor.SPEC_STORE_CHANGE_MONITOR_PREFIX}
   * sub-config; defaults to 5 consumer threads when {@code numThreads} is unset.
   */
  private SpecStoreChangeMonitor createSpecStoreChangeMonitor()
      throws ReflectiveOperationException {
    Config specStoreChangeConfig = this.config.getConfig(SpecStoreChangeMonitor.SPEC_STORE_CHANGE_MONITOR_PREFIX);
    log.info("SpecStoreChangeMonitor will be initialized with config {}", specStoreChangeConfig);

    String topic = ""; // Pass empty string because we expect underlying client to dynamically determine the Kafka topic
    int numThreads = ConfigUtils.getInt(specStoreChangeConfig, SPEC_STORE_CHANGE_MONITOR_NUM_THREADS_KEY, 5);

    return new SpecStoreChangeMonitor(topic, specStoreChangeConfig, this.flowCatalog, this.scheduler, numThreads);
  }

  @Override
  public SpecStoreChangeMonitor get() {
    try {
      return createSpecStoreChangeMonitor();
    } catch (ReflectiveOperationException e) {
      throw new RuntimeException("Failed to initialize SpecStoreChangeMonitor due to ", e);
    }
  }
}
3,814
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/monitoring/DagActionStoreChangeMonitorFactory.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.monitoring;

import java.util.Objects;

import com.typesafe.config.Config;

import javax.inject.Inject;
import javax.inject.Named;
import javax.inject.Provider;
import lombok.extern.slf4j.Slf4j;

import org.apache.gobblin.runtime.api.DagActionStore;
import org.apache.gobblin.runtime.spec_catalog.FlowCatalog;
import org.apache.gobblin.runtime.util.InjectionNames;
import org.apache.gobblin.service.modules.orchestration.DagManager;
import org.apache.gobblin.service.modules.orchestration.Orchestrator;
import org.apache.gobblin.util.ConfigUtils;


/**
 * A factory implementation that returns a {@link DagActionStoreChangeMonitor} instance.
 */
@Slf4j
public class DagActionStoreChangeMonitorFactory implements Provider<DagActionStoreChangeMonitor> {
  /** Config key (under the monitor prefix) for the consumer thread pool size. */
  static final String DAG_ACTION_STORE_CHANGE_MONITOR_NUM_THREADS_KEY = "numThreads";

  // All collaborators are injected once and never reassigned; declared final for immutability,
  // consistent with the sibling SpecStoreChangeMonitorFactory.
  private final Config config;
  private final DagActionStore dagActionStore;
  private final DagManager dagManager;
  private final FlowCatalog flowCatalog;
  private final Orchestrator orchestrator;
  private final boolean isMultiActiveSchedulerEnabled;

  @Inject
  public DagActionStoreChangeMonitorFactory(Config config, DagActionStore dagActionStore, DagManager dagManager,
      FlowCatalog flowCatalog, Orchestrator orchestrator,
      @Named(InjectionNames.MULTI_ACTIVE_SCHEDULER_ENABLED) boolean isMultiActiveSchedulerEnabled) {
    this.config = Objects.requireNonNull(config);
    this.dagActionStore = dagActionStore;
    this.dagManager = dagManager;
    this.flowCatalog = flowCatalog;
    this.orchestrator = orchestrator;
    this.isMultiActiveSchedulerEnabled = isMultiActiveSchedulerEnabled;
  }

  /**
   * Builds the monitor from the {@code DagActionStoreChangeMonitor.DAG_ACTION_CHANGE_MONITOR_PREFIX}
   * sub-config; defaults to 5 consumer threads when {@code numThreads} is unset.
   */
  private DagActionStoreChangeMonitor createDagActionStoreMonitor()
    throws ReflectiveOperationException {
    Config dagActionStoreChangeConfig = this.config.getConfig(DagActionStoreChangeMonitor.DAG_ACTION_CHANGE_MONITOR_PREFIX);
    log.info("DagActionStore will be initialized with config {}", dagActionStoreChangeConfig);

    String topic = ""; // Pass empty string because we expect underlying client to dynamically determine the Kafka topic
    int numThreads = ConfigUtils.getInt(dagActionStoreChangeConfig, DAG_ACTION_STORE_CHANGE_MONITOR_NUM_THREADS_KEY, 5);

    return new DagActionStoreChangeMonitor(topic, dagActionStoreChangeConfig, this.dagActionStore, this.dagManager,
        numThreads, flowCatalog, orchestrator, isMultiActiveSchedulerEnabled);
  }

  @Override
  public DagActionStoreChangeMonitor get() {
    try {
      return createDagActionStoreMonitor();
    } catch (ReflectiveOperationException e) {
      throw new RuntimeException("Failed to initialize DagActionStoreMonitor due to ", e);
    }
  }
}
3,815
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/monitoring/GitMonitoringService.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.service.monitoring; import java.io.ByteArrayInputStream; import java.io.File; import java.io.FileNotFoundException; import java.io.IOException; import java.nio.charset.Charset; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; import org.apache.commons.codec.binary.Base64; import org.eclipse.jgit.api.Git; import org.eclipse.jgit.api.ResetCommand; import org.eclipse.jgit.api.TransportConfigCallback; import org.eclipse.jgit.api.errors.GitAPIException; import org.eclipse.jgit.diff.DiffEntry; import org.eclipse.jgit.errors.RepositoryNotFoundException; import org.eclipse.jgit.lib.ObjectId; import org.eclipse.jgit.lib.ObjectReader; import org.eclipse.jgit.revwalk.RevCommit; import org.eclipse.jgit.transport.CredentialsProvider; import org.eclipse.jgit.transport.JschConfigSessionFactory; import org.eclipse.jgit.transport.OpenSshConfig; import org.eclipse.jgit.transport.SshSessionFactory; import org.eclipse.jgit.transport.SshTransport; import 
org.eclipse.jgit.transport.UsernamePasswordCredentialsProvider;
import org.eclipse.jgit.treewalk.CanonicalTreeParser;
import org.eclipse.jgit.util.FS;

import com.google.common.util.concurrent.AbstractIdleService;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Charsets;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
import com.google.common.io.Files;
import com.jcraft.jsch.JSch;
import com.jcraft.jsch.JSchException;
import com.jcraft.jsch.Session;
import com.typesafe.config.Config;

import lombok.extern.slf4j.Slf4j;

import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.password.PasswordManager;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.Either;
import org.apache.gobblin.util.ExecutorsUtils;


/**
 * Base monitoring service that polls git and applies changes to each of its listeners
 * Implementation classes should also define {@link GitDiffListener} to apply changes from git
 * It is possible to have multiple listeners to the same repository, and each can apply their own changes, but
 * determining if a change is made and their order is centrally controlled by processGitConfigChanges()
 */
@Slf4j
public abstract class GitMonitoringService extends AbstractIdleService {
  private static final String REMOTE_NAME = "origin";
  private static final int TERMINATION_TIMEOUT = 30;
  static final String SHOULD_CHECKPOINT_HASHES = "shouldCheckpointHashes";

  private final Integer pollingInterval;
  private final ScheduledExecutorService scheduledExecutor;

  // SSH public-key auth settings; only populated when GIT_MONITOR_SSH_WITH_PUBLIC_KEY_ENABLED is true
  private String privateKeyPath;
  private byte[] privateKey;
  private String passphrase;
  private boolean isJschLoggerEnabled;
  private boolean strictHostKeyCheckingEnabled;
  private String knownHosts;
  private String knownHostsFile;

  final GitMonitoringService.GitRepository gitRepo;
  protected final String repositoryDir;
  protected final String folderName;
  // Listeners notified of each diff entry; subclasses register their own in their constructors
  protected List<GitDiffListener> listeners = new ArrayList<>();
  // volatile: toggled by setActive() from another thread and read by the polling thread
  protected volatile boolean isActive = false;

  /**
   * Builds the monitor from config: resolves repo URI/dir/branch and polling interval, then selects the
   * authentication mechanism — either SSH public-key (JSch session factory) or username/password
   * (credentials provider) — before opening/cloning the local repository.
   */
  GitMonitoringService(Config config) {
    Preconditions.checkArgument(config.hasPath(ConfigurationKeys.GIT_MONITOR_REPO_URI),
        ConfigurationKeys.GIT_MONITOR_REPO_URI + " needs to be specified.");

    String repositoryUri = config.getString(ConfigurationKeys.GIT_MONITOR_REPO_URI);
    this.repositoryDir = config.getString(ConfigurationKeys.GIT_MONITOR_REPO_DIR);
    String branchName = config.getString(ConfigurationKeys.GIT_MONITOR_BRANCH_NAME);
    this.pollingInterval = config.getInt(ConfigurationKeys.GIT_MONITOR_POLLING_INTERVAL);
    this.folderName = config.getString(ConfigurationKeys.GIT_MONITOR_CONFIG_BASE_DIR);
    boolean shouldCheckpointHashes = ConfigUtils.getBoolean(config, SHOULD_CHECKPOINT_HASHES, true);

    PasswordManager passwordManager = PasswordManager.getInstance(ConfigUtils.configToState(config));
    Either<CredentialsProvider, SshSessionFactory> providerSessionFactoryEither;
    boolean isSshWithPublicKeyEnabled =
        ConfigUtils.getBoolean(config, ConfigurationKeys.GIT_MONITOR_SSH_WITH_PUBLIC_KEY_ENABLED, false);
    if (isSshWithPublicKeyEnabled) {
      // SSH auth path: require either a key file path or a base64-encoded key string
      this.privateKeyPath = ConfigUtils.getString(config, ConfigurationKeys.GIT_MONITOR_SSH_PRIVATE_KEY_PATH, null);
      String privateKeyBase64Encoded =
          ConfigUtils.getString(config, ConfigurationKeys.GIT_MONITOR_SSH_PRIVATE_KEY_BASE64_ENCODED, null);
      if ((Strings.isNullOrEmpty(this.privateKeyPath)) && ((Strings.isNullOrEmpty(privateKeyBase64Encoded)))) {
        throw new RuntimeException("Path to private key or private key string must be provided");
      }
      if (!Strings.isNullOrEmpty(privateKeyBase64Encoded)) {
        this.privateKey = Base64.decodeBase64(privateKeyBase64Encoded);
      }
      // Passphrase is stored encrypted in config; decrypt via PasswordManager
      String passPhraseEnc = ConfigUtils.getString(config, ConfigurationKeys.GIT_MONITOR_SSH_PASSPHRASE, null);
      if (!Strings.isNullOrEmpty(passPhraseEnc)) {
        this.passphrase = passwordManager.readPassword(passPhraseEnc);
      }
      providerSessionFactoryEither = Either.right(getSshSessionFactory());
      this.isJschLoggerEnabled = ConfigUtils.getBoolean(config, ConfigurationKeys.GIT_MONITOR_JSCH_LOGGER_ENABLED, false);
      this.strictHostKeyCheckingEnabled =
          ConfigUtils.getBoolean(config, ConfigurationKeys.GIT_MONITOR_SSH_STRICT_HOST_KEY_CHECKING_ENABLED, true);
      this.knownHosts = ConfigUtils.getString(config, ConfigurationKeys.GIT_MONITOR_SSH_KNOWN_HOSTS, null);
      this.knownHostsFile = ConfigUtils.getString(config, ConfigurationKeys.GIT_MONITOR_SSH_KNOWN_HOSTS_FILE, null);
      if (strictHostKeyCheckingEnabled && Strings.isNullOrEmpty(knownHostsFile) && Strings.isNullOrEmpty(knownHosts)) {
        throw new RuntimeException("Either StrictHostKeyChecking should be disabled or a knownHostFile or knownHosts string must be provided");
      }
    } else {
      //Use CredentialsProvider
      String username = ConfigUtils.getString(config, ConfigurationKeys.GIT_MONITOR_USERNAME, null);
      String passwordEnc = ConfigUtils.getString(config, ConfigurationKeys.GIT_MONITOR_PASSWORD, null);
      String password = null;
      if (passwordEnc != null) {
        password = passwordManager.readPassword(passwordEnc);
      }
      CredentialsProvider credentialsProvider;
      //Instantiate CredentialsProvider if username/password is provided.
      if (!Strings.isNullOrEmpty(username) && !Strings.isNullOrEmpty(password)) {
        credentialsProvider = new UsernamePasswordCredentialsProvider(username, password);
      } else {
        credentialsProvider = CredentialsProvider.getDefault();
      }
      providerSessionFactoryEither = Either.left(credentialsProvider);
    }

    try {
      this.gitRepo = new GitMonitoringService.GitRepository(repositoryUri, repositoryDir, branchName,
          providerSessionFactoryEither, shouldCheckpointHashes);
    } catch (GitAPIException | IOException e) {
      throw new RuntimeException("Could not open git repository", e);
    }

    this.scheduledExecutor = Executors.newSingleThreadScheduledExecutor(
        ExecutorsUtils.newThreadFactory(Optional.of(log), Optional.of("FetchGitConfExecutor")));
  }

  /**
   * Enable or disable polling. The scheduled task keeps running either way; {@link #shouldPollGit()}
   * implementations are expected to consult {@code isActive}.
   */
  public synchronized void setActive(boolean isActive) {
    if (this.isActive == isActive) {
      // No-op if already in correct state
      return;
    }
    this.isActive = isActive;
  }

  /** Start the service. */
  @Override
  protected void startUp() {
    log.info("Starting the " + getClass().getSimpleName());
    log.info("Polling git with interval {} ", this.pollingInterval);

    // Schedule the job config fetch task
    this.scheduledExecutor.scheduleAtFixedRate(new Runnable() {
      @Override
      public void run() {
        try {
          if (shouldPollGit()) {
            processGitConfigChanges();
          }
        } catch (GitAPIException | IOException e) {
          log.error("Failed to process git config changes", e);
          // next run will try again since errors could be intermittent
        }
      }
    }, 0, this.pollingInterval, TimeUnit.SECONDS);
  }

  /**
   * Fetch the list of changes since the last refresh of the repository
   * @throws GitAPIException
   * @throws IOException
   */
  @VisibleForTesting
  void processGitConfigChanges() throws GitAPIException, IOException {
    List<DiffEntry> changes = this.gitRepo.getChanges();
    if (!changes.isEmpty()) {
      processGitConfigChangesHelper(changes);
    }
  }

  /**
   * A helper method where actual processing of the list of changes since the last refresh of the repository takes place
   * and the changes applied.
   * Every listener sees every change; a RENAME is modeled as remove-then-add. The checkpoint is only
   * moved forward after ALL changes have been dispatched, so a thrown exception causes a re-delivery
   * on the next poll rather than a lost change.
   * @throws IOException
   */
  void processGitConfigChangesHelper(List<DiffEntry> changes) throws IOException {
    for (DiffEntry change : changes) {
      for (GitDiffListener listener: this.listeners) {
        switch (change.getChangeType()) {
          case ADD:
          case MODIFY:
            listener.addChange(change);
            break;
          case DELETE:
            listener.removeChange(change);
            break;
          case RENAME:
            listener.removeChange(change);
            listener.addChange(change);
            break;
          default:
            throw new RuntimeException("Unsupported change type " + change.getChangeType());
        }
      }
    }
    // Done processing changes, so checkpoint
    this.gitRepo.moveCheckpointAndHashesForward();
  }

  /** Stop the service. */
  @Override
  protected void shutDown() throws Exception {
    this.scheduledExecutor.shutdown();
    this.scheduledExecutor.awaitTermination(TERMINATION_TIMEOUT, TimeUnit.SECONDS);
  }

  /**
   * Class for managing a git repository
   */
  static class GitRepository {
    private final static String CHECKPOINT_FILE = "checkpoint.txt";
    private final static String CHECKPOINT_FILE_TMP = "checkpoint.tmp";

    private final String repoUri;
    private final String repoDir;
    private final String branchName;
    private final boolean shouldCheckpointHashes;
    private final Either<CredentialsProvider, SshSessionFactory> providerSessionFactoryEither;
    private Git git;
    // Hash of the last commit whose diff has been fully dispatched to listeners
    private String lastProcessedGitHash;
    // Hash of HEAD as of the most recent getChanges(); checkpointed by moveCheckpointAndHashesForward()
    private String latestGitHash;

    /**
     * Create an object to manage the git repository stored locally at repoDir with a repository URI of repoDir
     * @param repoUri URI of repository
     * @param repoDir Directory to hold the local copy of the repository
     * @param branchName Branch name
     * @param providerSessionFactoryEither Either {@link UsernamePasswordCredentialsProvider} or {@link SshSessionFactory}
     * @param shouldCheckpointHashes a boolean to determine whether to checkpoint commit hashes
     * @throws GitAPIException
     * @throws IOException
     */
    GitRepository(String repoUri, String repoDir, String branchName,
        Either<CredentialsProvider, SshSessionFactory> providerSessionFactoryEither, boolean shouldCheckpointHashes)
        throws GitAPIException, IOException {
      this.repoUri = repoUri;
      this.repoDir = repoDir;
      this.branchName = branchName;
      this.providerSessionFactoryEither = providerSessionFactoryEither;
      this.shouldCheckpointHashes = shouldCheckpointHashes;

      initRepository();
    }

    /**
     * Open the repository if it exists locally, otherwise clone it
     * @throws GitAPIException
     * @throws IOException
     */
    void initRepository() throws GitAPIException, IOException {
      File repoDirFile = new File(this.repoDir);

      try {
        this.git = Git.open(repoDirFile);

        String uri = this.git.getRepository().getConfig().getString("remote", REMOTE_NAME, "url");

        // Guard against a stale checkout of a different repository in the same directory
        if (!uri.equals(this.repoUri)) {
          throw new RuntimeException("Repo at " + this.repoDir + " has uri " + uri + " instead of " + this.repoUri);
        }
      } catch (RepositoryNotFoundException e) {
        // if the repository was not found then clone a new one
        this.git = Git.cloneRepository()
            .setDirectory(repoDirFile)
            .setURI(this.repoUri)
            .setBranch(this.branchName)
            .setTransportConfigCallback(buildTransportConfigCallback())
            .setCredentialsProvider(getCredentialsProvider())
            .call();
      }

      try {
        this.lastProcessedGitHash = readCheckpoint();
      } catch (FileNotFoundException e) {
        // if no checkpoint is available then start with the first commit
        Iterable<RevCommit> logs = git.log().call();
        RevCommit lastLog = null;

        for (RevCommit log : logs) {
          lastLog = log;
        }

        if (lastLog != null) {
          this.lastProcessedGitHash = lastLog.getName();
        }
      }

      this.latestGitHash = this.lastProcessedGitHash;
    }

    /**
     * Read the last processed commit githash from the checkpoint file
     * @return the checkpointed commit hash
     * @throws IOException
     */
    private String readCheckpoint() throws IOException {
      File checkpointFile = new File(this.repoDir, CHECKPOINT_FILE);
      return Files.toString(checkpointFile, Charsets.UTF_8);
    }

    /**
     * Write the last processed commit githash to the checkpoint file
     * @param gitHash
     * @throws IOException
     */
    private void writeCheckpoint(String gitHash) throws IOException {
      // write to a temporary name then rename to make the operation atomic when the file system allows a file to be
      // replaced
      File tmpCheckpointFile = new File(this.repoDir, CHECKPOINT_FILE_TMP);
      File checkpointFile = new File(this.repoDir, CHECKPOINT_FILE);

      Files.write(gitHash, tmpCheckpointFile, Charsets.UTF_8);
      Files.move(tmpCheckpointFile, checkpointFile);
    }

    /** Advance the processed-commit marker to the last fetched HEAD and persist it if checkpointing is enabled. */
    void moveCheckpointAndHashesForward() throws IOException {
      this.lastProcessedGitHash = this.latestGitHash;

      if (this.shouldCheckpointHashes) {
        writeCheckpoint(this.latestGitHash);
      }
    }

    /**
     * Fetch the remote branch, hard-reset the local copy to it, and return the diff between the last
     * processed commit and the new HEAD.
     * @throws GitAPIException
     * @throws IOException
     */
    List<DiffEntry> getChanges() throws GitAPIException, IOException {
      // get tree for last processed commit
      ObjectId oldHeadTree = git.getRepository().resolve(this.lastProcessedGitHash + "^{tree}");

      // refresh to latest and reset hard to handle forced pushes
      this.git.fetch()
          .setRemote(REMOTE_NAME)
          .setCredentialsProvider(getCredentialsProvider())
          .setTransportConfigCallback(buildTransportConfigCallback())
          .call();
      // reset hard to get a clean working set since pull --rebase may leave files around
      this.git.reset().setMode(ResetCommand.ResetType.HARD).setRef(REMOTE_NAME + "/" + this.branchName).call();

      ObjectId head = this.git.getRepository().resolve("HEAD");
      ObjectId headTree = this.git.getRepository().resolve("HEAD^{tree}");

      // remember the hash for the current HEAD. This will be checkpointed after the diff is processed.
      latestGitHash = head.getName();

      // diff old and new heads to find changes
      ObjectReader reader = this.git.getRepository().newObjectReader();
      CanonicalTreeParser oldTreeIter = new CanonicalTreeParser();
      oldTreeIter.reset(reader, oldHeadTree);
      CanonicalTreeParser newTreeIter = new CanonicalTreeParser();
      newTreeIter.reset(reader, headTree);

      return this.git.diff()
          .setNewTree(newTreeIter)
          .setOldTree(oldTreeIter)
          .setShowNameAndStatusOnly(true)
          .call();
    }

    /** Returns the credentials provider when username/password auth is configured; null for the SSH path. */
    private CredentialsProvider getCredentialsProvider() {
      return (this.providerSessionFactoryEither instanceof Either.Right) ?
          null : ((Either.Left<CredentialsProvider, SshSessionFactory>) this.providerSessionFactoryEither).getLeft();
    }

    /** Returns the SSH transport callback when SSH auth is configured; null for the username/password path. */
    private TransportConfigCallback buildTransportConfigCallback() {
      if (this.providerSessionFactoryEither instanceof Either.Left) return null;

      SshSessionFactory sshSessionFactory =
          ((Either.Right<CredentialsProvider, SshSessionFactory>) this.providerSessionFactoryEither).getRight();
      return transport -> {
        SshTransport sshTransport = (SshTransport) transport;
        sshTransport.setSshSessionFactory(sshSessionFactory);
      };
    }
  }

  /**
   * Builds a JSch-backed SSH session factory honoring the configured private key (file path or in-memory
   * bytes), passphrase, known-hosts source, and strict host key checking flag.
   */
  private SshSessionFactory getSshSessionFactory() {
    JschConfigSessionFactory sessionFactory = new JschConfigSessionFactory() {
      @Override
      protected void configure(OpenSshConfig.Host hc, Session session) {
        if (!GitMonitoringService.this.strictHostKeyCheckingEnabled) {
          session.setConfig("StrictHostKeyChecking", "no");
        }
      }

      @Override
      protected JSch createDefaultJSch(FS fs) throws JSchException {
        if (GitMonitoringService.this.isJschLoggerEnabled) {
          JSch.setLogger(new JschLogger());
        }
        JSch defaultJSch = super.createDefaultJSch(fs);
        // drop any identities JSch picked up from the environment; use only the configured key
        defaultJSch.getIdentityRepository().removeAll();
        if (GitMonitoringService.this.privateKeyPath != null) {
          defaultJSch.addIdentity(GitMonitoringService.this.privateKeyPath, GitMonitoringService.this.passphrase);
        } else {
          // NOTE(review): this branch assumes a passphrase was configured; a null passphrase would NPE here
          defaultJSch.addIdentity("gaas-git", GitMonitoringService.this.privateKey, null,
              GitMonitoringService.this.passphrase.getBytes(Charset.forName("UTF-8")));
        }

        if (!Strings.isNullOrEmpty(GitMonitoringService.this.knownHosts)) {
          defaultJSch.setKnownHosts(new ByteArrayInputStream(GitMonitoringService.this.knownHosts.getBytes(Charset.forName("UTF-8"))));
        } else if (!Strings.isNullOrEmpty(GitMonitoringService.this.knownHostsFile)) {
          defaultJSch.setKnownHosts(GitMonitoringService.this.knownHostsFile);
        }
        return defaultJSch;
      }
    };
    return sessionFactory;
  }

  /** Adapter routing JSch's internal logging to stderr with a level prefix. */
  private static class JschLogger implements com.jcraft.jsch.Logger {
    static Map<Integer, String> logMap = new HashMap<>();
    static {
      logMap.put(DEBUG, "DEBUG: ");
      logMap.put(INFO, "INFO: ");
      logMap.put(WARN, "WARN: ");
      logMap.put(ERROR, "ERROR: ");
      logMap.put(FATAL, "FATAL: ");
    }

    public boolean isEnabled(int level) {
      return true;
    }

    public void log(int level, String message) {
      System.err.print(logMap.get(level));
      System.err.println(message);
    }
  }

  /** Subclass hook: return true only when the service is ready to fetch and apply git changes. */
  public abstract boolean shouldPollGit();
}
3,816
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/monitoring/GitConfigMonitor.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.service.monitoring; import com.google.common.collect.ImmutableMap; import com.typesafe.config.Config; import com.typesafe.config.ConfigFactory; import javax.inject.Inject; import javax.inject.Singleton; import lombok.extern.slf4j.Slf4j; import org.apache.gobblin.configuration.ConfigurationKeys; import org.apache.gobblin.runtime.spec_catalog.FlowCatalog; /** * Service that monitors for jobs from a git repository. * The git repository must have an initial commit that has no config files since that is used as a base for getting * the change list. 
* The config needs to be organized with the following structure: * <root_config_dir>/<flowGroup>/<flowName>.(pull|job|json|conf) * The <flowGroup> and <flowName> is used to generate the URI used to store the config in the {@link FlowCatalog} */ @Slf4j @Singleton public class GitConfigMonitor extends GitMonitoringService { public static final String GIT_CONFIG_MONITOR_PREFIX = "gobblin.service.gitConfigMonitor"; private static final String PROPERTIES_EXTENSIONS = "pull,job"; private static final String CONF_EXTENSIONS = "json,conf"; private static final String DEFAULT_GIT_CONFIG_MONITOR_REPO_DIR = "git-flow-config"; private static final String DEFAULT_GIT_CONFIG_MONITOR_CONFIG_DIR = "gobblin-config"; private static final String DEFAULT_GIT_CONFIG_MONITOR_BRANCH_NAME = "master"; private static final int DEFAULT_GIT_CONFIG_MONITOR_POLLING_INTERVAL = 60; private static final Config DEFAULT_FALLBACK = ConfigFactory.parseMap(ImmutableMap.<String, Object>builder() .put(ConfigurationKeys.GIT_MONITOR_REPO_DIR, DEFAULT_GIT_CONFIG_MONITOR_REPO_DIR) .put(ConfigurationKeys.GIT_MONITOR_CONFIG_BASE_DIR, DEFAULT_GIT_CONFIG_MONITOR_CONFIG_DIR) .put(ConfigurationKeys.GIT_MONITOR_BRANCH_NAME, DEFAULT_GIT_CONFIG_MONITOR_BRANCH_NAME) .put(ConfigurationKeys.GIT_MONITOR_POLLING_INTERVAL, DEFAULT_GIT_CONFIG_MONITOR_POLLING_INTERVAL) .put(ConfigurationKeys.FLOWGRAPH_JAVA_PROPS_EXTENSIONS, PROPERTIES_EXTENSIONS) .put(ConfigurationKeys.FLOWGRAPH_HOCON_FILE_EXTENSIONS, CONF_EXTENSIONS) .build()); private final FlowCatalog flowCatalog; @Inject GitConfigMonitor(Config config, FlowCatalog flowCatalog) { super(config.getConfig(GIT_CONFIG_MONITOR_PREFIX).withFallback(DEFAULT_FALLBACK)); this.flowCatalog = flowCatalog; Config configWithFallbacks = config.getConfig(GIT_CONFIG_MONITOR_PREFIX).withFallback(DEFAULT_FALLBACK); this.listeners.add(new GitConfigListener(flowCatalog, configWithFallbacks.getString(ConfigurationKeys.GIT_MONITOR_REPO_DIR), 
configWithFallbacks.getString(ConfigurationKeys.GIT_MONITOR_CONFIG_BASE_DIR), configWithFallbacks.getString(ConfigurationKeys.FLOWGRAPH_JAVA_PROPS_EXTENSIONS), configWithFallbacks.getString(ConfigurationKeys.FLOWGRAPH_HOCON_FILE_EXTENSIONS))); } @Override public boolean shouldPollGit() { // if not active or if the flow catalog is not up yet then can't process config changes if (!isActive || !this.flowCatalog.isRunning()) { log.warn("GitConfigMonitor: skip poll since the JobCatalog is not yet running. isActive = {}", this.isActive); return false; } return true; } }
3,817
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/monitoring/KafkaAvroJobStatusMonitor.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.service.monitoring; import java.io.ByteArrayInputStream; import java.io.DataInputStream; import java.io.IOException; import java.io.InputStream; import java.util.Properties; import org.apache.avro.io.BinaryDecoder; import org.apache.avro.io.Decoder; import org.apache.avro.io.DecoderFactory; import org.apache.avro.specific.SpecificDatumReader; import com.codahale.metrics.Meter; import com.google.common.annotations.VisibleForTesting; import com.typesafe.config.Config; import lombok.Getter; import lombok.extern.slf4j.Slf4j; import org.apache.gobblin.configuration.ConfigurationKeys; import org.apache.gobblin.kafka.client.DecodeableKafkaRecord; import org.apache.gobblin.metrics.GobblinTrackingEvent; import org.apache.gobblin.metrics.event.TimingEvent; import org.apache.gobblin.metrics.kafka.KafkaAvroSchemaRegistry; import org.apache.gobblin.metrics.kafka.KafkaAvroSchemaRegistryFactory; import org.apache.gobblin.metrics.reporter.util.FixedSchemaVersionWriter; import org.apache.gobblin.metrics.reporter.util.SchemaRegistryVersionWriter; import org.apache.gobblin.metrics.reporter.util.SchemaVersionWriter; import org.apache.gobblin.runtime.troubleshooter.JobIssueEventHandler; import 
org.apache.gobblin.service.ExecutionStatus;
import org.apache.gobblin.util.ConfigUtils;


/**
 * A job status monitor for Avro messages. Uses {@link GobblinTrackingEvent} schema to parse the messages and calls
 * {@link #parseJobStatus(GobblinTrackingEvent)} for each received message.
 */
@Slf4j
public class KafkaAvroJobStatusMonitor extends KafkaJobStatusMonitor {
  private static final String JOB_STATUS_MONITOR_MESSAGE_PARSE_FAILURES = "jobStatusMonitor.messageParseFailures";

  // Avro readers/decoders are not thread-safe, so each consumer thread gets its own instance
  private final ThreadLocal<SpecificDatumReader<GobblinTrackingEvent>> reader;
  private final ThreadLocal<BinaryDecoder> decoder;

  // Strips/validates the schema-version header prepended to each serialized record
  private final SchemaVersionWriter schemaVersionWriter;

  @Getter
  private Meter messageParseFailures;

  /**
   * @param topic Kafka topic to consume job status events from
   * @param config monitor config; METRICS_REPORTING_KAFKA_USE_SCHEMA_REGISTRY selects between a
   *        schema-registry-backed version header and a fixed one
   * @param numThreads number of consumer threads
   * @param jobIssueEventHandler handler forwarded to the parent monitor
   * @param observabilityEventProducer producer forwarded to the parent monitor
   * @throws IOException on failure constructing the underlying consumer
   * @throws ReflectiveOperationException on failure instantiating configured classes
   */
  public KafkaAvroJobStatusMonitor(String topic, Config config, int numThreads,
      JobIssueEventHandler jobIssueEventHandler, GaaSObservabilityEventProducer observabilityEventProducer)
      throws IOException, ReflectiveOperationException {
    super(topic, config, numThreads, jobIssueEventHandler, observabilityEventProducer);

    if (ConfigUtils.getBoolean(config, ConfigurationKeys.METRICS_REPORTING_KAFKA_USE_SCHEMA_REGISTRY, false)) {
      KafkaAvroSchemaRegistry schemaRegistry = (KafkaAvroSchemaRegistry) new KafkaAvroSchemaRegistryFactory().
          create(ConfigUtils.configToProperties(config));
      this.schemaVersionWriter = new SchemaRegistryVersionWriter(schemaRegistry, topic, GobblinTrackingEvent.SCHEMA$);
    } else {
      this.schemaVersionWriter = new FixedSchemaVersionWriter();
    }
    this.decoder = ThreadLocal.withInitial(() -> {
      // Seed each thread's decoder with an empty stream; real input is supplied per message in deserializeEvent
      InputStream dummyInputStream = new ByteArrayInputStream(new byte[0]);
      return DecoderFactory.get().binaryDecoder(dummyInputStream, null);
    });
    this.reader = ThreadLocal.withInitial(() -> new SpecificDatumReader<>(GobblinTrackingEvent.SCHEMA$));
  }

  @Override
  protected void createMetrics() {
    super.createMetrics();
    this.messageParseFailures = this.getMetricContext().meter(JOB_STATUS_MONITOR_MESSAGE_PARSE_FAILURES);
  }

  /**
   * Deserializes a raw Kafka record into a {@link GobblinTrackingEvent}, returning null (and marking
   * the parse-failure meter) on any decoding error. Stack traces are logged only while the failure
   * rate is low, to avoid log flooding.
   */
  @Override
  @VisibleForTesting
  public GobblinTrackingEvent deserializeEvent(DecodeableKafkaRecord<byte[],byte[]> message) {
    try {
      InputStream is = new ByteArrayInputStream(message.getValue());
      // consume the schema-version header before handing the stream to the Avro decoder
      schemaVersionWriter.advanceInputStreamToRecord(new DataInputStream(is));
      Decoder decoder = DecoderFactory.get().binaryDecoder(is, this.decoder.get());
      return this.reader.get().read(null, decoder);
    } catch (Exception exc) {
      this.messageParseFailures.mark();
      if (this.messageParseFailures.getFiveMinuteRate() < 1) {
        log.warn("Unable to decode input message at kafka offset" + message.getOffset(), exc);
      } else {
        log.warn("Unable to decode input message at kafka offset" + message.getOffset());
      }
      return null;
    }
  }

  /**
   * Parse the {@link GobblinTrackingEvent}s to determine the {@link ExecutionStatus} of the job.
   * Maps each known timing-event name to an execution status and copies relevant timestamps from the
   * event metadata; unknown event names (and events missing flow identifiers) yield null.
   * @param event an instance of {@link GobblinTrackingEvent}
   * @return job status as an instance of {@link org.apache.gobblin.configuration.State}
   */
  @Override
  @VisibleForTesting
  public org.apache.gobblin.configuration.State parseJobStatus(GobblinTrackingEvent event) {
    if (!acceptEvent(event)) {
      return null;
    }
    Properties properties = new Properties();
    properties.putAll(event.getMetadata());

    // NOTE: grouped case labels are intentional — several distinct events map to the same status
    switch (event.getName()) {
      case TimingEvent.FlowTimings.FLOW_COMPILED:
        properties.put(JobStatusRetriever.EVENT_NAME_FIELD, ExecutionStatus.COMPILED.name());
        break;
      case TimingEvent.LauncherTimings.WORK_UNITS_CREATION:
        properties.put(TimingEvent.WORKUNIT_PLAN_START_TIME, properties.getProperty(TimingEvent.METADATA_START_TIME));
        properties.put(TimingEvent.WORKUNIT_PLAN_END_TIME, properties.getProperty(TimingEvent.METADATA_END_TIME));
        break;
      case TimingEvent.LauncherTimings.JOB_START:
      case TimingEvent.FlowTimings.FLOW_RUNNING:
      case TimingEvent.LauncherTimings.JOB_SUMMARY:
      case TimingEvent.LauncherTimings.WORK_UNITS_PREPARATION:
        properties.put(JobStatusRetriever.EVENT_NAME_FIELD, ExecutionStatus.RUNNING.name());
        break;
      case TimingEvent.LauncherTimings.JOB_PENDING:
        properties.put(JobStatusRetriever.EVENT_NAME_FIELD, ExecutionStatus.PENDING.name());
        break;
      case TimingEvent.FlowTimings.FLOW_PENDING_RESUME:
      case TimingEvent.LauncherTimings.JOB_PENDING_RESUME:
        properties.put(JobStatusRetriever.EVENT_NAME_FIELD, ExecutionStatus.PENDING_RESUME.name());
        break;
      case TimingEvent.LauncherTimings.JOB_ORCHESTRATED:
        properties.put(JobStatusRetriever.EVENT_NAME_FIELD, ExecutionStatus.ORCHESTRATED.name());
        properties.put(TimingEvent.JOB_ORCHESTRATED_TIME, properties.getProperty(TimingEvent.METADATA_END_TIME));
        break;
      case TimingEvent.LauncherTimings.JOB_PREPARE:
        properties.put(JobStatusRetriever.EVENT_NAME_FIELD, ExecutionStatus.RUNNING.name());
        properties.put(TimingEvent.JOB_START_TIME, properties.getProperty(TimingEvent.METADATA_START_TIME));
        break;
      case TimingEvent.FlowTimings.FLOW_SUCCEEDED:
      case TimingEvent.LauncherTimings.JOB_SUCCEEDED:
        properties.put(JobStatusRetriever.EVENT_NAME_FIELD, ExecutionStatus.COMPLETE.name());
        properties.put(TimingEvent.JOB_END_TIME, properties.getProperty(TimingEvent.METADATA_END_TIME));
        break;
      case TimingEvent.FlowTimings.FLOW_FAILED:
      case TimingEvent.FlowTimings.FLOW_COMPILE_FAILED:
      case TimingEvent.LauncherTimings.JOB_FAILED:
        properties.put(JobStatusRetriever.EVENT_NAME_FIELD, ExecutionStatus.FAILED.name());
        properties.put(TimingEvent.JOB_END_TIME, properties.getProperty(TimingEvent.METADATA_END_TIME));
        break;
      case TimingEvent.FlowTimings.FLOW_CANCELLED:
      case TimingEvent.LauncherTimings.JOB_CANCEL:
      case TimingEvent.JOB_SKIPPED_TIME:
        properties.put(JobStatusRetriever.EVENT_NAME_FIELD, ExecutionStatus.CANCELLED.name());
        properties.put(TimingEvent.JOB_END_TIME, properties.getProperty(TimingEvent.METADATA_END_TIME));
        break;
      case TimingEvent.FlowTimings.FLOW_RUN_DEADLINE_EXCEEDED:
      case TimingEvent.FlowTimings.FLOW_START_DEADLINE_EXCEEDED:
        // deadline-exceeded cancellations are flagged as retryable, unlike plain cancellations above
        properties.put(TimingEvent.FlowEventConstants.DOES_CANCELED_FLOW_MERIT_RETRY, true);
        properties.put(JobStatusRetriever.EVENT_NAME_FIELD, ExecutionStatus.CANCELLED.name());
        properties.put(TimingEvent.JOB_END_TIME, properties.getProperty(TimingEvent.METADATA_END_TIME));
        break;
      case TimingEvent.JOB_COMPLETION_PERCENTAGE:
        properties.put(TimingEvent.JOB_LAST_PROGRESS_EVENT_TIME, properties.getProperty(TimingEvent.METADATA_END_TIME));
        break;
      default:
        return null;
    }
    return new org.apache.gobblin.configuration.State(properties);
  }

  /**
   * Filter for {@link GobblinTrackingEvent}. Used to quickly determine whether an event should be used to produce
   * a {@link JobStatus}.
   * Requires the flow group, flow name, and flow execution id metadata fields to all be present.
   */
  private boolean acceptEvent(GobblinTrackingEvent event) {
    if ((!event.getMetadata().containsKey(TimingEvent.FlowEventConstants.FLOW_GROUP_FIELD)) ||
        (!event.getMetadata().containsKey(TimingEvent.FlowEventConstants.FLOW_NAME_FIELD)) ||
        (!event.getMetadata().containsKey(TimingEvent.FlowEventConstants.FLOW_EXECUTION_ID_FIELD))) {
      return false;
    }
    return true;
  }
}
3,818
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/monitoring/SpecStoreChangeMonitor.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.service.monitoring;

import java.net.URI;
import java.net.URISyntaxException;
import java.util.UUID;
import java.util.concurrent.TimeUnit;

import org.apache.commons.text.StringEscapeUtils;

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigValueFactory;

import lombok.extern.slf4j.Slf4j;

import org.apache.gobblin.kafka.client.DecodeableKafkaRecord;
import org.apache.gobblin.metrics.ContextAwareGauge;
import org.apache.gobblin.metrics.ContextAwareMeter;
import org.apache.gobblin.runtime.api.FlowSpec;
import org.apache.gobblin.runtime.api.Spec;
import org.apache.gobblin.runtime.kafka.HighLevelConsumer;
import org.apache.gobblin.runtime.metrics.RuntimeMetrics;
import org.apache.gobblin.runtime.spec_catalog.AddSpecResponse;
import org.apache.gobblin.runtime.spec_catalog.FlowCatalog;
import org.apache.gobblin.service.modules.scheduler.GobblinServiceJobScheduler;


/**
 * A Flow Spec Store change monitor that uses {@link GenericStoreChangeEvent} schema to process Kafka messages received
 * from the consumer service. This monitor responds to changes to flow specs (creations, updates, deletes) and acts as
 * a connector between the API and execution layers of GaaS.
 */
@Slf4j
public class SpecStoreChangeMonitor extends HighLevelConsumer {
  public static final String SPEC_STORE_CHANGE_MONITOR_PREFIX = "specStoreChangeMonitor";

  // Metrics
  private ContextAwareMeter successfullyAddedSpecs;
  private ContextAwareMeter messageProcessedMeter;
  private ContextAwareMeter failedAddedSpecs;
  private ContextAwareMeter deletedSpecs;
  private ContextAwareMeter unexpectedErrors;
  private ContextAwareMeter duplicateMessagesMeter;
  private ContextAwareMeter heartbeatMessagesMeter;
  private ContextAwareGauge produceToConsumeDelayMillis; // Reports delay from all partitions in one gauge

  private volatile Long produceToConsumeDelayValue = -1L;

  // Identity loader: the cache is used purely as a TTL-bounded set of change identifiers already processed.
  protected CacheLoader<String, String> cacheLoader = new CacheLoader<String, String>() {
    @Override
    public String load(String key) throws Exception {
      return key;
    }
  };

  // De-duplication window: identifiers seen within the last 10 minutes are treated as duplicates.
  protected LoadingCache<String, String> specChangesSeenCache =
      CacheBuilder.newBuilder().expireAfterWrite(10, TimeUnit.MINUTES).build(cacheLoader);

  protected FlowCatalog flowCatalog;
  protected GobblinServiceJobScheduler scheduler;

  // Note that the topic is an empty string (rather than null to avoid NPE) because this monitor relies on the consumer
  // client itself to determine all Kafka related information dynamically rather than through the config.
  public SpecStoreChangeMonitor(String topic, Config config, FlowCatalog flowCatalog,
      GobblinServiceJobScheduler scheduler, int numThreads) {
    // Differentiate group id for each host so every instance consumes all partitions independently
    super(topic, config.withValue(GROUP_ID_KEY,
        ConfigValueFactory.fromAnyRef(SPEC_STORE_CHANGE_MONITOR_PREFIX + UUID.randomUUID().toString())),
        numThreads);
    this.flowCatalog = flowCatalog;
    this.scheduler = scheduler;
  }

  @Override
  protected void assignTopicPartitions() {
    // Expects underlying consumer to handle initializing partitions and offset for the topic -
    // subscribe to all partitions from latest offset
    return;
  }

  /**
   * Processes one {@link GenericStoreChangeEvent} message: de-duplicates it, resolves the spec URI, and forwards
   * the change to the scheduler (add for INSERT/UPDATE, remove for DELETE). Failures are recorded in metrics so
   * that the monitor keeps consuming subsequent messages.
   *
   * Note that although this class is multi-threaded and will call this method for multiple threads (each having
   * a queue associated with it), a given message itself will be partitioned and assigned to only one queue.
   */
  @Override
  protected void processMessage(DecodeableKafkaRecord message) {
    // This will also include the heathCheck message so that we can rely on this to monitor the health of this Monitor
    messageProcessedMeter.mark();

    String key = (String) message.getKey();
    GenericStoreChangeEvent value = (GenericStoreChangeEvent) message.getValue();
    String tid = value.getTxId();
    Long produceTimestamp = value.getProduceTimestampMillis();
    String operation = value.getOperationType().name();
    produceToConsumeDelayValue = calcMillisSince(produceTimestamp);

    log.debug("Processing message where specUri is {} tid: {} operation: {} delay: {}", key, tid, operation,
        produceToConsumeDelayValue);

    // The tx id + spec URI uniquely identifies a change event for de-duplication purposes
    String changeIdentifier = tid + key;
    if (!ChangeMonitorUtils.isValidAndUniqueMessage(changeIdentifier, operation, produceTimestamp.toString(),
        specChangesSeenCache, duplicateMessagesMeter, heartbeatMessagesMeter)) {
      return;
    }

    Spec spec;
    URI specAsUri = null;
    try {
      specAsUri = new URI(key);
    } catch (URISyntaxException e) {
      // Pass the exception as the final argument (no placeholder) so SLF4J logs the full stack trace
      log.warn("Could not create URI object for specUri {}", key, e);
      this.unexpectedErrors.mark();
      return;
    }

    // For DELETE there is no spec to fetch; otherwise look it up so the scheduler can (re)compile it
    spec = (!operation.equals("DELETE")) ? this.flowCatalog.getSpecWrapper(specAsUri) : null;

    // The monitor should continue to process messages regardless of failures with individual messages, instead we use
    // metrics to keep track of failure to process certain SpecStoreChange events
    try {
      // Call respective action for the type of change received
      AddSpecResponse response;
      if (operation.equals("INSERT") || operation.equals("UPDATE")) {
        response = scheduler.onAddSpec(spec);
        // Null response means the dag failed to compile
        if (response != null && FlowCatalog.isCompileSuccessful((String) response.getValue())) {
          log.info("Successfully added spec {} to scheduler response {}", spec,
              StringEscapeUtils.escapeJson(response.getValue().toString()));
          this.successfullyAddedSpecs.mark();
        } else {
          log.warn("Failed to add spec {} to scheduler due to compile error. The flow graph changed recently to "
              + "invalidate the earlier compilation. Examine changes to locate error. Response is {}", spec, response);
          this.failedAddedSpecs.mark();
        }
      } else if (operation.equals("DELETE")) {
        log.info("Deleting spec {} after receiving spec store change event", specAsUri);
        scheduler.onDeleteSpec(specAsUri, FlowSpec.Builder.DEFAULT_VERSION);
        this.deletedSpecs.mark();
      } else {
        log.warn("Received unsupported change type of operation {}. Expected values to be in "
            + "[INSERT, UPDATE, DELETE, HEARTBEAT]. Look for issue with kafka event consumer or emitter", operation);
        this.unexpectedErrors.mark();
        return;
      }
    } catch (Exception e) {
      // Pass the throwable directly (not via a {} placeholder) so the stack trace is preserved in the log
      log.warn("Ran into unexpected error processing SpecStore changes. Reexamine scheduler.", e);
      this.unexpectedErrors.mark();
      return;
    }

    // Only mark the change as seen after successful processing so a failed message may be retried
    specChangesSeenCache.put(changeIdentifier, changeIdentifier);
  }

  @Override
  protected void createMetrics() {
    super.createMetrics();
    this.successfullyAddedSpecs = this.getMetricContext().contextAwareMeter(RuntimeMetrics.GOBBLIN_SPEC_STORE_MONITOR_SUCCESSFULLY_ADDED_SPECS);
    this.failedAddedSpecs = this.getMetricContext().contextAwareMeter(RuntimeMetrics.GOBBLIN_SPEC_STORE_MONITOR_FAILED_ADDED_SPECS);
    this.deletedSpecs = this.getMetricContext().contextAwareMeter(RuntimeMetrics.GOBBLIN_SPEC_STORE_MONITOR_DELETED_SPECS);
    this.unexpectedErrors = this.getMetricContext().contextAwareMeter(RuntimeMetrics.GOBBLIN_SPEC_STORE_MONITOR_UNEXPECTED_ERRORS);
    this.messageProcessedMeter = this.getMetricContext().contextAwareMeter(RuntimeMetrics.GOBBLIN_SPEC_STORE_MESSAGE_PROCESSED);
    this.duplicateMessagesMeter = this.getMetricContext().contextAwareMeter(RuntimeMetrics.GOBBLIN_SPEC_STORE_DUPLICATE_MESSAGES);
    this.heartbeatMessagesMeter = this.getMetricContext().contextAwareMeter(RuntimeMetrics.GOBBLIN_SPEC_STORE_HEARTBEAT_MESSAGES);
    this.produceToConsumeDelayMillis = this.getMetricContext().newContextAwareGauge(RuntimeMetrics.GOBBLIN_SPEC_STORE_PRODUCE_TO_CONSUME_DELAY_MILLIS, () -> produceToConsumeDelayValue);
    this.getMetricContext().register(this.produceToConsumeDelayMillis);
  }
}
3,819
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/monitoring/FsFlowGraphMonitor.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.service.monitoring;

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.net.URI;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

import org.apache.commons.lang3.reflect.ConstructorUtils;
import org.apache.hadoop.fs.Path;

import com.google.common.base.Optional;
import com.google.common.collect.ImmutableMap;
import com.google.common.util.concurrent.AbstractIdleService;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;

import lombok.extern.slf4j.Slf4j;

import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.runtime.api.TopologySpec;
import org.apache.gobblin.service.ServiceConfigKeys;
import org.apache.gobblin.service.modules.flow.MultiHopFlowCompiler;
import org.apache.gobblin.service.modules.flowgraph.BaseFlowGraphHelper;
import org.apache.gobblin.service.modules.flowgraph.FSPathAlterationFlowGraphListener;
import org.apache.gobblin.service.modules.flowgraph.FlowGraphMonitor;
import org.apache.gobblin.service.modules.template_catalog.UpdatableFSFlowTemplateCatalog;
import org.apache.gobblin.util.ClassAliasResolver;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.filesystem.PathAlterationObserver;
import org.apache.gobblin.util.filesystem.PathAlterationObserverScheduler;


/**
 * A {@link FlowGraphMonitor} that watches a filesystem directory for flowgraph (and optionally flow template)
 * changes via a {@link PathAlterationObserverScheduler}, and rebuilds the in-memory {@link org.apache.gobblin.service.modules.flowgraph.FlowGraph}
 * on the {@link MultiHopFlowCompiler} when changes are detected.
 */
@Slf4j
public class FsFlowGraphMonitor extends AbstractIdleService implements FlowGraphMonitor {
  public static final String FS_FLOWGRAPH_MONITOR_PREFIX = "gobblin.service.fsFlowGraphMonitor";
  public static final String MONITOR_TEMPLATE_CATALOG_CHANGES = "monitorTemplateChanges";
  // Polling interval default, in seconds (converted to millis in the constructor)
  private static final long DEFAULT_FLOWGRAPH_POLLING_INTERVAL = 60;
  private static final String DEFAULT_FS_FLOWGRAPH_MONITOR_ABSOLUTE_DIR = "/tmp/fsFlowgraph";
  private static final String DEFAULT_FS_FLOWGRAPH_MONITOR_FLOWGRAPH_DIR = "gobblin-flowgraph";
  private volatile boolean isActive = false;
  private final long pollingInterval;
  private BaseFlowGraphHelper flowGraphHelper;
  // Null when polling is disabled via DISABLED_JOB_CONFIG_FILE_MONITOR_POLLING_INTERVAL
  private final PathAlterationObserverScheduler pathAlterationDetector;
  private final FSPathAlterationFlowGraphListener listener;
  private final PathAlterationObserver observer;
  private Path flowGraphPath;
  private Path observedPath;
  private final MultiHopFlowCompiler compiler;
  // Counted down once the initial flowgraph has been populated
  private final CountDownLatch initComplete;

  private static final Config DEFAULT_FALLBACK = ConfigFactory.parseMap(ImmutableMap.<String, Object>builder()
      .put(ConfigurationKeys.FLOWGRAPH_ABSOLUTE_DIR, DEFAULT_FS_FLOWGRAPH_MONITOR_ABSOLUTE_DIR)
      .put(ConfigurationKeys.FLOWGRAPH_BASE_DIR, DEFAULT_FS_FLOWGRAPH_MONITOR_FLOWGRAPH_DIR)
      .put(ConfigurationKeys.FLOWGRAPH_POLLING_INTERVAL, DEFAULT_FLOWGRAPH_POLLING_INTERVAL)
      .put(ConfigurationKeys.FLOWGRAPH_JAVA_PROPS_EXTENSIONS, ConfigurationKeys.DEFAULT_PROPERTIES_EXTENSIONS)
      .put(MONITOR_TEMPLATE_CATALOG_CHANGES, false)
      .put(ConfigurationKeys.FLOWGRAPH_HOCON_FILE_EXTENSIONS, ConfigurationKeys.DEFAULT_CONF_EXTENSIONS).build());

  public FsFlowGraphMonitor(Config config, Optional<UpdatableFSFlowTemplateCatalog> flowTemplateCatalog,
      MultiHopFlowCompiler compiler, Map<URI, TopologySpec> topologySpecMap, CountDownLatch initComplete,
      boolean instrumentationEnabled) throws IOException {
    Config configWithFallbacks = config.getConfig(FS_FLOWGRAPH_MONITOR_PREFIX).withFallback(DEFAULT_FALLBACK);
    this.pollingInterval =
        TimeUnit.SECONDS.toMillis(configWithFallbacks.getLong(ConfigurationKeys.FLOWGRAPH_POLLING_INTERVAL));
    this.flowGraphPath = new Path(configWithFallbacks.getString(ConfigurationKeys.FLOWGRAPH_ABSOLUTE_DIR));
    // If the FSFlowGraphMonitor is also monitoring the templates, assume that they are colocated under the same parent folder
    boolean shouldMonitorTemplateCatalog = configWithFallbacks.getBoolean(MONITOR_TEMPLATE_CATALOG_CHANGES);
    this.observedPath = shouldMonitorTemplateCatalog ? this.flowGraphPath.getParent() : this.flowGraphPath;
    this.observer = new PathAlterationObserver(observedPath);
    try {
      // The helper class is pluggable via config; resolved by alias and constructed reflectively
      String helperClassName = ConfigUtils.getString(config, ServiceConfigKeys.GOBBLIN_SERVICE_FLOWGRAPH_HELPER_KEY,
          BaseFlowGraphHelper.class.getCanonicalName());
      this.flowGraphHelper = (BaseFlowGraphHelper) ConstructorUtils.invokeConstructor(
          Class.forName(new ClassAliasResolver<>(BaseFlowGraphHelper.class).resolve(helperClassName)),
          flowTemplateCatalog, topologySpecMap, flowGraphPath.toString(),
          configWithFallbacks.getString(ConfigurationKeys.FLOWGRAPH_BASE_DIR),
          configWithFallbacks.getString(ConfigurationKeys.FLOWGRAPH_JAVA_PROPS_EXTENSIONS),
          configWithFallbacks.getString(ConfigurationKeys.FLOWGRAPH_HOCON_FILE_EXTENSIONS),
          instrumentationEnabled, config);
    } catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException | InstantiationException
        | ClassNotFoundException e) {
      throw new RuntimeException(e);
    }
    this.listener = new FSPathAlterationFlowGraphListener(flowTemplateCatalog, compiler, flowGraphPath.toString(),
        this.flowGraphHelper, shouldMonitorTemplateCatalog);
    this.compiler = compiler;
    this.initComplete = initComplete;

    if (pollingInterval == ConfigurationKeys.DISABLED_JOB_CONFIG_FILE_MONITOR_POLLING_INTERVAL) {
      this.pathAlterationDetector = null;
    } else {
      this.pathAlterationDetector = new PathAlterationObserverScheduler(pollingInterval);
      Optional<PathAlterationObserver> observerOptional = Optional.fromNullable(observer);
      this.pathAlterationDetector.addPathAlterationObserver(this.listener, observerOptional, this.observedPath);
    }
  }

  @Override
  protected void startUp() throws IOException {
    // No-op: the detector is only started once the monitor becomes active via setActive()
  }

  @Override
  public synchronized void setActive(boolean isActive) {
    log.info("Setting the flow graph monitor to be " + isActive + " from " + this.isActive);
    if (this.isActive == isActive) {
      // No-op if already in correct state
      return;
    } else if (isActive) {
      if (this.pathAlterationDetector != null) {
        log.info("Starting the " + getClass().getSimpleName());
        log.info("Polling folder {} with interval {} ", this.observedPath, this.pollingInterval);
        try {
          this.pathAlterationDetector.start();
          // Manually instantiate flowgraph when the monitor becomes active
          this.compiler.setFlowGraph(this.flowGraphHelper.generateFlowGraph());
          // Reduce the countdown latch
          this.initComplete.countDown();
          log.info("Finished populating FSFlowgraph");
        } catch (IOException e) {
          log.error("Could not initialize pathAlterationDetector due to error: ", e);
        }
      } else {
        log.warn("No path alteration detector found");
      }
    }
    this.isActive = isActive;
  }

  /** Stop the service. */
  @Override
  protected void shutDown() throws Exception {
    // The detector is null when polling is disabled via config; guard to avoid an NPE on shutdown
    if (this.pathAlterationDetector != null) {
      this.pathAlterationDetector.stop();
    }
  }
}
3,820
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/monitoring/KafkaJobStatusMonitorFactory.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.service.monitoring;

import java.util.Objects;

import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;

import javax.inject.Inject;
import javax.inject.Provider;
import lombok.extern.slf4j.Slf4j;

import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.kafka.schemareg.KafkaSchemaRegistryConfigurationKeys;
import org.apache.gobblin.metrics.kafka.KafkaAvroSchemaRegistry;
import org.apache.gobblin.runtime.api.GobblinInstanceEnvironment;
import org.apache.gobblin.runtime.troubleshooter.JobIssueEventHandler;
import org.apache.gobblin.runtime.troubleshooter.MultiContextIssueRepository;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;


/**
 * A factory implementation that returns a {@link KafkaJobStatusMonitor} instance.
 */
@Slf4j
public class KafkaJobStatusMonitorFactory implements Provider<KafkaJobStatusMonitor> {
  private static final String KAFKA_SSL_CONFIG_PREFIX_KEY = "jobStatusMonitor.kafka.config";
  private static final String DEFAULT_KAFKA_SSL_CONFIG_PREFIX = "metrics.reporting.kafka.config";

  private final Config config;
  private final JobIssueEventHandler jobIssueEventHandler;
  private final MultiContextIssueRepository issueRepository;
  private final boolean instrumentationEnabled;

  @Inject
  public KafkaJobStatusMonitorFactory(Config config, JobIssueEventHandler jobIssueEventHandler,
      MultiContextIssueRepository issueRepository, GobblinInstanceEnvironment env) {
    this(config, jobIssueEventHandler, issueRepository, env.isInstrumentationEnabled());
  }

  public KafkaJobStatusMonitorFactory(Config config, JobIssueEventHandler jobIssueEventHandler,
      MultiContextIssueRepository issueRepository, boolean instrumentationEnabled) {
    this.config = Objects.requireNonNull(config);
    this.jobIssueEventHandler = Objects.requireNonNull(jobIssueEventHandler);
    this.issueRepository = issueRepository;
    this.instrumentationEnabled = instrumentationEnabled;
  }

  /**
   * Builds the monitor from config: resolves the monitor class (pluggable via config), layers Kafka SSL and
   * (optionally) schema-registry settings onto the job-status config, constructs the observability event producer
   * reflectively, and invokes the monitor's longest matching constructor.
   *
   * @throws ReflectiveOperationException if either configured class cannot be loaded
   */
  private KafkaJobStatusMonitor createJobStatusMonitor() throws ReflectiveOperationException {
    Config jobStatusConfig = config.getConfig(KafkaJobStatusMonitor.JOB_STATUS_MONITOR_PREFIX);
    String topic = jobStatusConfig.getString(KafkaJobStatusMonitor.JOB_STATUS_MONITOR_TOPIC_KEY);
    int numThreads = ConfigUtils.getInt(jobStatusConfig, KafkaJobStatusMonitor.JOB_STATUS_MONITOR_NUM_THREADS_KEY, 5);
    // Use the parameterized wildcard form rather than a raw Class
    Class<?> jobStatusMonitorClass = Class.forName(ConfigUtils.getString(jobStatusConfig,
        KafkaJobStatusMonitor.JOB_STATUS_MONITOR_CLASS_KEY, KafkaJobStatusMonitor.DEFAULT_JOB_STATUS_MONITOR_CLASS));

    // Prefer the job-status-specific Kafka SSL config, falling back to the metrics-reporting one
    Config kafkaSslConfig = ConfigUtils.getConfigOrEmpty(config, KAFKA_SSL_CONFIG_PREFIX_KEY)
        .withFallback(ConfigUtils.getConfigOrEmpty(config, DEFAULT_KAFKA_SSL_CONFIG_PREFIX));

    boolean useSchemaRegistry = ConfigUtils.getBoolean(config,
        ConfigurationKeys.METRICS_REPORTING_KAFKA_USE_SCHEMA_REGISTRY, false);
    Config schemaRegistryConfig = ConfigFactory.empty().withValue(
        ConfigurationKeys.METRICS_REPORTING_KAFKA_USE_SCHEMA_REGISTRY, ConfigValueFactory.fromAnyRef(useSchemaRegistry));
    if (useSchemaRegistry) {
      //Use KafkaAvroSchemaRegistry
      schemaRegistryConfig = schemaRegistryConfig.withValue(KafkaAvroSchemaRegistry.KAFKA_SCHEMA_REGISTRY_URL,
          config.getValue(KafkaAvroSchemaRegistry.KAFKA_SCHEMA_REGISTRY_URL));
      schemaRegistryConfig =
          schemaRegistryConfig.withValue(KafkaSchemaRegistryConfigurationKeys.KAFKA_SCHEMA_REGISTRY_OVERRIDE_NAMESPACE,
              config.getValue(KafkaSchemaRegistryConfigurationKeys.KAFKA_SCHEMA_REGISTRY_OVERRIDE_NAMESPACE));
    }
    jobStatusConfig = jobStatusConfig.withFallback(kafkaSslConfig).withFallback(schemaRegistryConfig);

    Class<?> observabilityEventProducerClassName = Class.forName(ConfigUtils.getString(config,
        GaaSObservabilityEventProducer.GAAS_OBSERVABILITY_EVENT_PRODUCER_CLASS_KEY,
        GaaSObservabilityEventProducer.DEFAULT_GAAS_OBSERVABILITY_EVENT_PRODUCER_CLASS));
    GaaSObservabilityEventProducer observabilityEventProducer =
        (GaaSObservabilityEventProducer) GobblinConstructorUtils.invokeLongestConstructor(
            observabilityEventProducerClassName, ConfigUtils.configToState(config), this.issueRepository,
            this.instrumentationEnabled);
    return (KafkaJobStatusMonitor) GobblinConstructorUtils.invokeLongestConstructor(jobStatusMonitorClass, topic,
        jobStatusConfig, numThreads, jobIssueEventHandler, observabilityEventProducer);
  }

  @Override
  public KafkaJobStatusMonitor get() {
    try {
      return createJobStatusMonitor();
    } catch (ReflectiveOperationException e) {
      throw new RuntimeException(e);
    }
  }
}
3,821
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/monitoring/GitFlowGraphMonitor.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.service.monitoring;

import java.io.IOException;
import java.net.URI;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CountDownLatch;

import org.eclipse.jgit.api.errors.GitAPIException;
import org.eclipse.jgit.diff.DiffEntry;

import com.google.common.base.Optional;
import com.google.common.collect.ImmutableMap;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;

import lombok.extern.slf4j.Slf4j;

import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.runtime.api.TopologySpec;
import org.apache.gobblin.service.modules.flow.MultiHopFlowCompiler;
import org.apache.gobblin.service.modules.flowgraph.BaseFlowGraphHelper;
import org.apache.gobblin.service.modules.flowgraph.DataNode;
import org.apache.gobblin.service.modules.flowgraph.FlowEdge;
import org.apache.gobblin.service.modules.flowgraph.FlowGraph;
import org.apache.gobblin.service.modules.flowgraph.FlowGraphMonitor;
import org.apache.gobblin.service.modules.template_catalog.FSFlowTemplateCatalog;


/**
 * Service that monitors for changes to {@link org.apache.gobblin.service.modules.flowgraph.FlowGraph} from a git repository.
 * The git repository must have an initial commit that has no files since that is used as a base for getting
 * the change list.
 * The {@link DataNode}s and {@link FlowEdge}s in FlowGraph need to be organized with the following directory structure on git:
 * <root_flowGraph_dir>/<nodeName>/<nodeName>.properties
 * <root_flowGraph_dir>/<nodeName1>/<nodeName2>/<edgeName>.properties
 */
@Slf4j
public class GitFlowGraphMonitor extends GitMonitoringService implements FlowGraphMonitor {
  public static final String GIT_FLOWGRAPH_MONITOR_PREFIX = "gobblin.service.gitFlowGraphMonitor";

  private static final String DEFAULT_GIT_FLOWGRAPH_MONITOR_REPO_DIR = "git-flowgraph";
  private static final String DEFAULT_GIT_FLOWGRAPH_MONITOR_FLOWGRAPH_DIR = "gobblin-flowgraph";
  private static final String DEFAULT_GIT_FLOWGRAPH_MONITOR_BRANCH_NAME = "master";
  static final String SHOULD_CHECKPOINT_HASHES = "shouldCheckpointHashes";
  // Polling interval default, in seconds
  private static final int DEFAULT_GIT_FLOWGRAPH_MONITOR_POLLING_INTERVAL = 60;

  // Fallback values layered under the user-supplied config scoped by GIT_FLOWGRAPH_MONITOR_PREFIX
  private static final Config DEFAULT_FALLBACK =
      ConfigFactory.parseMap(ImmutableMap.<String, Object>builder()
          .put(ConfigurationKeys.GIT_MONITOR_REPO_DIR, DEFAULT_GIT_FLOWGRAPH_MONITOR_REPO_DIR)
          .put(ConfigurationKeys.GIT_MONITOR_CONFIG_BASE_DIR, DEFAULT_GIT_FLOWGRAPH_MONITOR_FLOWGRAPH_DIR)
          .put(ConfigurationKeys.GIT_MONITOR_BRANCH_NAME, DEFAULT_GIT_FLOWGRAPH_MONITOR_BRANCH_NAME)
          .put(ConfigurationKeys.FLOWGRAPH_JAVA_PROPS_EXTENSIONS, ConfigurationKeys.DEFAULT_PROPERTIES_EXTENSIONS)
          .put(ConfigurationKeys.FLOWGRAPH_HOCON_FILE_EXTENSIONS, ConfigurationKeys.DEFAULT_CONF_EXTENSIONS)
          .put(ConfigurationKeys.GIT_MONITOR_POLLING_INTERVAL, DEFAULT_GIT_FLOWGRAPH_MONITOR_POLLING_INTERVAL)
          .put(SHOULD_CHECKPOINT_HASHES, false).build());

  private final Optional<? extends FSFlowTemplateCatalog> flowTemplateCatalog;
  // Counted down once the first flowgraph has been built and installed on the compiler
  private final CountDownLatch initComplete;
  private final BaseFlowGraphHelper flowGraphHelper;
  private final MultiHopFlowCompiler multihopFlowCompiler;

  public GitFlowGraphMonitor(Config config, Optional<? extends FSFlowTemplateCatalog> flowTemplateCatalog,
      MultiHopFlowCompiler compiler, Map<URI, TopologySpec> topologySpecMap, CountDownLatch initComplete,
      boolean instrumentationEnabled) {
    super(config.getConfig(GIT_FLOWGRAPH_MONITOR_PREFIX).withFallback(DEFAULT_FALLBACK));
    Config configWithFallbacks = config.getConfig(GIT_FLOWGRAPH_MONITOR_PREFIX).withFallback(DEFAULT_FALLBACK);
    this.flowTemplateCatalog = flowTemplateCatalog;
    this.initComplete = initComplete;
    this.flowGraphHelper = new BaseFlowGraphHelper(flowTemplateCatalog, topologySpecMap,
        configWithFallbacks.getString(ConfigurationKeys.GIT_MONITOR_REPO_DIR),
        configWithFallbacks.getString(ConfigurationKeys.GIT_MONITOR_CONFIG_BASE_DIR),
        configWithFallbacks.getString(ConfigurationKeys.FLOWGRAPH_JAVA_PROPS_EXTENSIONS),
        configWithFallbacks.getString(ConfigurationKeys.FLOWGRAPH_HOCON_FILE_EXTENSIONS),
        instrumentationEnabled, config);
    this.multihopFlowCompiler = compiler;
  }

  /**
   * Determine if the service should poll Git. Current behavior is both leaders and followers(s) will poll Git for
   * changes to {@link FlowGraph}.
   */
  @Override
  public boolean shouldPollGit() {
    return this.isActive;
  }

  /**
   * Reprocesses the entire flowgraph from the root folder every time a change in git is detected.
   * Also refreshes when the template catalog flags that it needs a refresh, even if git reports no diffs.
   */
  @Override
  void processGitConfigChanges() throws GitAPIException, IOException {
    // Pulls repository to latest and grabs changes
    List<DiffEntry> changes = this.gitRepo.getChanges();
    // getAndSetShouldRefreshFlowGraph(false) atomically reads and clears the refresh flag
    if (flowTemplateCatalog.isPresent() && flowTemplateCatalog.get().getAndSetShouldRefreshFlowGraph(false)) {
      log.info("Change to template catalog detected, refreshing FlowGraph");
      this.gitRepo.initRepository();
    } else if (changes.isEmpty()) {
      // Nothing changed in git and no refresh requested: skip the (expensive) rebuild
      return;
    }

    log.info("Detected changes in flowGraph, refreshing Flowgraph");
    FlowGraph newGraph = this.flowGraphHelper.generateFlowGraph();
    // A null graph indicates the rebuild did not produce a usable result; keep the previous graph in that case
    if (newGraph != null) {
      this.multihopFlowCompiler.setFlowGraph(newGraph);
    }
    // Noop if flowgraph is already initialized
    this.initComplete.countDown();
    // Only advance the git checkpoint after the rebuild attempt, so changes are not silently skipped
    this.gitRepo.moveCheckpointAndHashesForward();
  }
}
3,822
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/monitoring/LocalFsJobStatusRetriever.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.service.monitoring;

import java.io.File;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;

import com.google.common.base.Preconditions;
import com.typesafe.config.Config;

import javax.inject.Inject;
import javax.inject.Singleton;
import lombok.extern.slf4j.Slf4j;

import org.apache.gobblin.configuration.State;
import org.apache.gobblin.metastore.StateStore;
import org.apache.gobblin.runtime.spec_executorInstance.LocalFsSpecProducer;
import org.apache.gobblin.runtime.troubleshooter.MultiContextIssueRepository;
import org.apache.gobblin.service.ExecutionStatus;
import org.apache.gobblin.service.ServiceConfigKeys;


/**
 * A job status monitor for jobs completed by a Gobblin Standalone instance running on the same machine. Mainly used for sandboxing/testing
 * Considers a job done when Gobblin standalone appends ".done" to the job. Otherwise it will assume the job is in progress
 */
@Slf4j
@Singleton
public class LocalFsJobStatusRetriever extends JobStatusRetriever {
  public static final String CONF_PREFIX = "localFsJobStatusRetriever.";
  // Suffix appended by Gobblin standalone to a job file once the job has completed
  private static final String JOB_DONE_SUFFIX = ".done";

  private final String specProducerPath;

  // Do not use a state store for this implementation, just look at the job folder that @LocalFsSpecProducer writes to
  @Inject
  public LocalFsJobStatusRetriever(Config config, MultiContextIssueRepository issueRepository) {
    super(ServiceConfigKeys.DEFAULT_GOBBLIN_SERVICE_DAG_MANAGER_ENABLED, issueRepository);
    this.specProducerPath = config.getString(CONF_PREFIX + LocalFsSpecProducer.LOCAL_FS_PRODUCER_PATH_KEY);
  }

  /**
   * Checks whether the job file (plus the given suffix) for the flow execution exists in the spec producer folder.
   *
   * @param suffix "" for a pending job file, {@link #JOB_DONE_SUFFIX} for a completed one
   * @return true if the corresponding file exists; false if it does not or the URI could not be built
   */
  private boolean doesJobExist(String flowName, String flowGroup, long flowExecutionId, String suffix) {
    // Local FS has no monitor to update job state yet, for now check if standalone is completed with job, and mark as done
    // Otherwise the job is pending
    try {
      String fileName = LocalFsSpecProducer.getJobFileName(
          new URI(File.separatorChar + flowGroup + File.separatorChar + flowName), String.valueOf(flowExecutionId))
          + suffix;
      return new File(this.specProducerPath + File.separatorChar + fileName).exists();
    } catch (URISyntaxException e) {
      log.error("URISyntaxException occurred when retrieving job status for flow: {},{}", flowGroup, flowName, e);
    }
    return false;
  }

  @Override
  public Iterator<JobStatus> getJobStatusesForFlowExecution(String flowName, String flowGroup, long flowExecutionId) {
    Preconditions.checkArgument(flowName != null, "FlowName cannot be null");
    Preconditions.checkArgument(flowGroup != null, "FlowGroup cannot be null");

    // For the FS use case, JobExecutionID == FlowExecutionID
    return getJobStatusesForFlowExecution(flowName, flowGroup, flowExecutionId, flowName, flowGroup);
  }

  /**
   * Derives a single {@link JobStatus} from the presence of the job file: COMPLETE if the ".done" file exists,
   * PENDING if only the plain job file exists, otherwise an empty iterator.
   */
  @Override
  public Iterator<JobStatus> getJobStatusesForFlowExecution(String flowName, String flowGroup, long flowExecutionId,
      String jobName, String jobGroup) {
    Preconditions.checkArgument(flowName != null, "flowName cannot be null");
    Preconditions.checkArgument(flowGroup != null, "flowGroup cannot be null");
    Preconditions.checkArgument(jobName != null, "jobName cannot be null");
    Preconditions.checkArgument(jobGroup != null, "jobGroup cannot be null");

    List<JobStatus> jobStatuses = new ArrayList<>();
    JobStatus jobStatus;

    if (this.doesJobExist(flowName, flowGroup, flowExecutionId, JOB_DONE_SUFFIX)) {
      jobStatus = JobStatus.builder().flowName(flowName).flowGroup(flowGroup).flowExecutionId(flowExecutionId)
          .jobName(jobName).jobGroup(jobGroup).jobExecutionId(flowExecutionId)
          .eventName(ExecutionStatus.COMPLETE.name()).build();
    } else if (this.doesJobExist(flowName, flowGroup, flowExecutionId, "")) {
      jobStatus = JobStatus.builder().flowName(flowName).flowGroup(flowGroup).flowExecutionId(flowExecutionId)
          .jobName(jobName).jobGroup(jobGroup).jobExecutionId(flowExecutionId)
          .eventName(ExecutionStatus.PENDING.name()).build();
    } else {
      return Collections.emptyIterator();
    }

    jobStatuses.add(jobStatus);
    return jobStatuses.iterator();
  }

  /**
   * @param flowName
   * @param flowGroup
   * @return the last <code>count</code> flow execution ids with the given flowName and flowGroup;
   *         currently unimplemented and always returns null.
   */
  @Override
  public List<Long> getLatestExecutionIdsForFlow(String flowName, String flowGroup, int count) {
    Preconditions.checkArgument(flowName != null, "flowName cannot be null");
    Preconditions.checkArgument(flowGroup != null, "flowGroup cannot be null");
    Preconditions.checkArgument(count > 0, "Number of execution ids must be at least 1.");
    //TODO: implement this
    return null;
  }

  /**
   * @param flowGroup
   * @return the last <code>countJobStatusesPerFlowName</code> flow statuses within the given flowGroup.
   */
  @Override
  public List<FlowStatus> getFlowStatusesForFlowGroupExecutions(String flowGroup, int countJobStatusesPerFlowName) {
    Preconditions.checkArgument(flowGroup != null, "flowGroup cannot be null");
    Preconditions.checkArgument(countJobStatusesPerFlowName > 0,
        "Number of job statuses per flow name must be at least 1 (was: %s).", countJobStatusesPerFlowName);
    throw new UnsupportedOperationException("Not yet implemented");
  }

  public StateStore<State> getStateStore() {
    // this jobstatus retriever does not have a state store
    // only used in tests so this is okay
    return null;
  }
}
3,823
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/monitoring/DagActionStoreChangeMonitor.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.service.monitoring; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Optional; import com.google.common.cache.CacheBuilder; import com.google.common.cache.CacheLoader; import com.google.common.cache.LoadingCache; import com.typesafe.config.Config; import com.typesafe.config.ConfigValueFactory; import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; import java.util.UUID; import java.util.concurrent.TimeUnit; import lombok.Getter; import lombok.extern.slf4j.Slf4j; import org.apache.gobblin.kafka.client.DecodeableKafkaRecord; import org.apache.gobblin.metrics.ContextAwareGauge; import org.apache.gobblin.metrics.ContextAwareMeter; import org.apache.gobblin.runtime.api.DagActionStore; import org.apache.gobblin.runtime.api.FlowSpec; import org.apache.gobblin.runtime.api.SpecNotFoundException; import org.apache.gobblin.runtime.kafka.HighLevelConsumer; import org.apache.gobblin.runtime.metrics.RuntimeMetrics; import org.apache.gobblin.runtime.spec_catalog.FlowCatalog; import org.apache.gobblin.service.FlowId; import org.apache.gobblin.service.modules.orchestration.DagManager; import 
org.apache.gobblin.service.modules.orchestration.Orchestrator; /** * A DagActionStore change monitor that uses {@link DagActionStoreChangeEvent} schema to process Kafka messages received * from its corresponding consumer client. This monitor responds to requests to resume or delete a flow and acts as a * connector between the API and execution layers of GaaS. */ @Slf4j public class DagActionStoreChangeMonitor extends HighLevelConsumer { public static final String DAG_ACTION_CHANGE_MONITOR_PREFIX = "dagActionChangeStore"; // Metrics private ContextAwareMeter killsInvoked; private ContextAwareMeter resumesInvoked; private ContextAwareMeter flowsLaunched; private ContextAwareMeter failedFlowLaunchSubmissions; private ContextAwareMeter unexpectedErrors; private ContextAwareMeter messageProcessedMeter; private ContextAwareMeter duplicateMessagesMeter; private ContextAwareMeter heartbeatMessagesMeter; private ContextAwareMeter nullDagActionTypeMessagesMeter; private ContextAwareGauge produceToConsumeDelayMillis; // Reports delay from all partitions in one gauge private volatile Long produceToConsumeDelayValue = -1L; protected CacheLoader<String, String> cacheLoader = new CacheLoader<String, String>() { @Override public String load(String key) throws Exception { return key; } }; protected LoadingCache<String, String> dagActionsSeenCache = CacheBuilder.newBuilder().expireAfterWrite(10, TimeUnit.MINUTES).build(cacheLoader); protected DagActionStore dagActionStore; @Getter @VisibleForTesting protected DagManager dagManager; protected Orchestrator orchestrator; protected boolean isMultiActiveSchedulerEnabled; @Getter @VisibleForTesting protected FlowCatalog flowCatalog; // Note that the topic is an empty string (rather than null to avoid NPE) because this monitor relies on the consumer // client itself to determine all Kafka related information dynamically rather than through the config. 
public DagActionStoreChangeMonitor(String topic, Config config, DagActionStore dagActionStore, DagManager dagManager, int numThreads, FlowCatalog flowCatalog, Orchestrator orchestrator, boolean isMultiActiveSchedulerEnabled) { // Differentiate group id for each host super(topic, config.withValue(GROUP_ID_KEY, ConfigValueFactory.fromAnyRef(DAG_ACTION_CHANGE_MONITOR_PREFIX + UUID.randomUUID().toString())), numThreads); this.dagActionStore = dagActionStore; this.dagManager = dagManager; this.flowCatalog = flowCatalog; this.orchestrator = orchestrator; this.isMultiActiveSchedulerEnabled = isMultiActiveSchedulerEnabled; } @Override protected void assignTopicPartitions() { // Expects underlying consumer to handle initializing partitions and offset for the topic - // subscribe to all partitions from latest offset return; } @Override /* This class is multithreaded and this method will be called by multiple threads, however any given message will be partitioned and processed by only one thread (and corresponding queue). 
*/ protected void processMessage(DecodeableKafkaRecord message) { // This will also include the heathCheck message so that we can rely on this to monitor the health of this Monitor messageProcessedMeter.mark(); String key = (String) message.getKey(); DagActionStoreChangeEvent value = (DagActionStoreChangeEvent) message.getValue(); String tid = value.getChangeEventIdentifier().getTxId(); Long produceTimestamp = value.getChangeEventIdentifier().getProduceTimestampMillis(); String operation = value.getChangeEventIdentifier().getOperationType().name(); String flowGroup = value.getFlowGroup(); String flowName = value.getFlowName(); String flowExecutionId = value.getFlowExecutionId(); produceToConsumeDelayValue = calcMillisSince(produceTimestamp); log.debug("Processing Dag Action message for flow group: {} name: {} executionId: {} tid: {} operation: {} lag: {}", flowGroup, flowName, flowExecutionId, tid, operation, produceToConsumeDelayValue); String changeIdentifier = tid + key; if (!ChangeMonitorUtils.isValidAndUniqueMessage(changeIdentifier, operation, produceTimestamp.toString(), dagActionsSeenCache, duplicateMessagesMeter, heartbeatMessagesMeter)) { return; } // check after filtering out heartbeat messages expected to have `dagActionValue == null` if (value.getDagAction() == null) { log.warn("Skipping null dag action type received for identifier {} ", changeIdentifier); nullDagActionTypeMessagesMeter.mark(); return; } DagActionStore.FlowActionType dagActionType = DagActionStore.FlowActionType.valueOf(value.getDagAction().toString()); // Used to easily log information to identify the dag action DagActionStore.DagAction dagAction = new DagActionStore.DagAction(flowGroup, flowName, flowExecutionId, dagActionType); // We only expect INSERT and DELETE operations done to this table. INSERTs correspond to any type of // {@link DagActionStore.FlowActionType} flow requests that have to be processed. DELETEs require no action. 
try { if (operation.equals("INSERT")) { log.info("DagAction change ({}) received for flow: {}", dagActionType, dagAction); if (dagActionType.equals(DagActionStore.FlowActionType.RESUME)) { dagManager.handleResumeFlowRequest(flowGroup, flowName,Long.parseLong(flowExecutionId)); this.resumesInvoked.mark(); } else if (dagActionType.equals(DagActionStore.FlowActionType.KILL)) { dagManager.handleKillFlowRequest(flowGroup, flowName, Long.parseLong(flowExecutionId)); this.killsInvoked.mark(); } else if (dagActionType.equals(DagActionStore.FlowActionType.LAUNCH)) { // If multi-active scheduler is NOT turned on we should not receive these type of events if (!this.isMultiActiveSchedulerEnabled) { this.unexpectedErrors.mark(); throw new RuntimeException(String.format("Received LAUNCH dagAction while not in multi-active scheduler " + "mode for flowAction: %s", dagAction)); } submitFlowToDagManagerHelper(flowGroup, flowName, flowExecutionId); } else { log.warn("Received unsupported dagAction {}. Expected to be a KILL, RESUME, or LAUNCH", dagActionType); this.unexpectedErrors.mark(); return; } } else if (operation.equals("UPDATE")) { log.warn("Received an UPDATE action to the DagActionStore when values in this store are never supposed to be " + "updated. Flow group: {} name {} executionId {} were updated to action {}", flowGroup, flowName, flowExecutionId, dagActionType); this.unexpectedErrors.mark(); } else if (operation.equals("DELETE")) { log.debug("Deleted flow group: {} name: {} executionId {} from DagActionStore", flowGroup, flowName, flowExecutionId); } else { log.warn("Received unsupported change type of operation {}. 
Expected values to be in [INSERT, UPDATE, DELETE]", operation); this.unexpectedErrors.mark(); return; } } catch (Exception e) { log.warn("Ran into unexpected error processing DagActionStore changes: {}", e); this.unexpectedErrors.mark(); } dagActionsSeenCache.put(changeIdentifier, changeIdentifier); } protected void submitFlowToDagManagerHelper(String flowGroup, String flowName, String flowExecutionId) { // Retrieve job execution plan by recompiling the flow spec to send to the DagManager FlowId flowId = new FlowId().setFlowGroup(flowGroup).setFlowName(flowName); FlowSpec spec = null; try { URI flowUri = FlowSpec.Utils.createFlowSpecUri(flowId); spec = (FlowSpec) flowCatalog.getSpecs(flowUri); // Pass flowExecutionId to DagManager to be used for scheduled flows that do not already contain a flowExecutionId this.orchestrator.submitFlowToDagManager(spec, Optional.of(flowExecutionId)); } catch (URISyntaxException e) { log.warn("Could not create URI object for flowId {}. Exception {}", flowId, e.getMessage()); this.failedFlowLaunchSubmissions.mark(); return; } catch (SpecNotFoundException e) { log.warn("Spec not found for flowId {} due to exception {}", flowId, e.getMessage()); this.failedFlowLaunchSubmissions.mark(); return; } catch (IOException e) { log.warn("Failed to add Job Execution Plan for flowId {} due to exception {}", flowId, e.getMessage()); this.failedFlowLaunchSubmissions.mark(); return; } catch (InterruptedException e) { log.warn("SpecCompiler failed to reach healthy state before compilation of flowId {}. 
Exception: ", flowId, e); this.failedFlowLaunchSubmissions.mark(); return; } // Only mark this if the dag was successfully added this.flowsLaunched.mark(); } @Override protected void createMetrics() { super.createMetrics(); // Dag Action specific metrics this.killsInvoked = this.getMetricContext().contextAwareMeter(RuntimeMetrics.GOBBLIN_DAG_ACTION_STORE_MONITOR_KILLS_INVOKED); this.resumesInvoked = this.getMetricContext().contextAwareMeter(RuntimeMetrics.GOBBLIN_DAG_ACTION_STORE_MONITOR_RESUMES_INVOKED); this.flowsLaunched = this.getMetricContext().contextAwareMeter(RuntimeMetrics.GOBBLIN_DAG_ACTION_STORE_MONITOR_FLOWS_LAUNCHED); this.failedFlowLaunchSubmissions = this.getMetricContext().contextAwareMeter(RuntimeMetrics.GOBBLIN_DAG_ACTION_STORE_FAILED_FLOW_LAUNCHED_SUBMISSIONS); this.unexpectedErrors = this.getMetricContext().contextAwareMeter(RuntimeMetrics.GOBBLIN_DAG_ACTION_STORE_MONITOR_UNEXPECTED_ERRORS); this.messageProcessedMeter = this.getMetricContext().contextAwareMeter(RuntimeMetrics.GOBBLIN_DAG_ACTION_STORE_MONITOR_MESSAGE_PROCESSED); this.duplicateMessagesMeter = this.getMetricContext().contextAwareMeter(RuntimeMetrics.GOBBLIN_DAG_ACTION_STORE_MONITOR_DUPLICATE_MESSAGES); this.heartbeatMessagesMeter = this.getMetricContext().contextAwareMeter(RuntimeMetrics.GOBBLIN_DAG_ACTION_STORE_MONITOR_HEARTBEAT_MESSAGES); this.nullDagActionTypeMessagesMeter = this.getMetricContext().contextAwareMeter(RuntimeMetrics.GOBBLIN_DAG_ACTION_STORE_MONITOR_NULL_DAG_ACTION_TYPE_MESSAGES); this.produceToConsumeDelayMillis = this.getMetricContext().newContextAwareGauge(RuntimeMetrics.GOBBLIN_DAG_ACTION_STORE_PRODUCE_TO_CONSUME_DELAY_MILLIS, () -> produceToConsumeDelayValue); this.getMetricContext().register(this.produceToConsumeDelayMillis); } @Override protected String getMetricsPrefix() { return RuntimeMetrics.DAG_ACTION_STORE_MONITOR_PREFIX + "."; } }
3,824
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/monitoring/FsJobStatusRetriever.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.service.monitoring; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.Set; import java.util.TreeSet; import java.util.stream.Collectors; import com.google.common.base.Preconditions; import com.google.common.base.Predicate; import com.google.common.collect.ImmutableList; import com.google.common.collect.Iterables; import com.google.common.collect.Iterators; import com.typesafe.config.Config; import javax.inject.Inject; import javax.inject.Singleton; import lombok.Getter; import lombok.extern.slf4j.Slf4j; import org.apache.gobblin.configuration.State; import org.apache.gobblin.metastore.FileContextBasedFsStateStore; import org.apache.gobblin.metastore.FileContextBasedFsStateStoreFactory; import org.apache.gobblin.metastore.FsStateStore; import org.apache.gobblin.runtime.troubleshooter.MultiContextIssueRepository; import org.apache.gobblin.service.ServiceConfigKeys; import org.apache.gobblin.util.ConfigUtils; import org.apache.gobblin.util.function.CheckedExceptionFunction; /** * A FileSystem based implementation of {@link JobStatusRetriever}. 
This implementation stores the job statuses
 * as {@link org.apache.gobblin.configuration.State} objects in a {@link FsStateStore}.
 * The store name is set to flowGroup.flowName, while the table name is set to flowExecutionId.jobGroup.jobName.
 */
@Slf4j
@Singleton
public class FsJobStatusRetriever extends JobStatusRetriever {
  public static final String CONF_PREFIX = "fsJobStatusRetriever";

  @Getter
  private final FileContextBasedFsStateStore<State> stateStore;

  @Inject
  public FsJobStatusRetriever(Config config, MultiContextIssueRepository issueRepository) {
    super(ConfigUtils.getBoolean(config, ServiceConfigKeys.GOBBLIN_SERVICE_DAG_MANAGER_ENABLED_KEY,
        ServiceConfigKeys.DEFAULT_GOBBLIN_SERVICE_DAG_MANAGER_ENABLED), issueRepository);
    this.stateStore = (FileContextBasedFsStateStore<State>) new FileContextBasedFsStateStoreFactory().
        createStateStore(config.getConfig(CONF_PREFIX), State.class);
  }

  /**
   * Returns one {@link JobStatus} per job of the given flow execution, built from the first state persisted in each
   * matching table. On I/O failure (or if any table unexpectedly holds no states) an empty iterator is returned.
   */
  @Override
  public Iterator<JobStatus> getJobStatusesForFlowExecution(String flowName, String flowGroup, long flowExecutionId) {
    Preconditions.checkArgument(flowName != null, "FlowName cannot be null");
    Preconditions.checkArgument(flowGroup != null, "FlowGroup cannot be null");

    // Tables for one execution all share the "<executionId>." name prefix
    Predicate<String> flowExecutionIdPredicate = input -> input.startsWith(flowExecutionId + ".");
    String storeName = KafkaJobStatusMonitor.jobStatusStoreName(flowGroup, flowName);
    try {
      List<String> tableNames = this.stateStore.getTableNames(storeName, flowExecutionIdPredicate);
      List<JobStatus> jobStatuses = new ArrayList<>(tableNames.size());
      for (String tableName : tableNames) {
        List<State> jobStates = this.stateStore.getAll(storeName, tableName);
        // NOTE(review): an empty table aborts the whole listing rather than skipping that table — presumably an
        // empty table indicates an inconsistent store; confirm before changing.
        if (jobStates.isEmpty()) {
          return Collections.emptyIterator();
        }
        jobStatuses.add(getJobStatus(jobStates.get(0)));
      }
      return jobStatuses.iterator();
    } catch (IOException e) {
      log.error(String.format("IOException encountered when retrieving job statuses for flow: %s,%s,%s",
          flowGroup, flowName, flowExecutionId), e);
      return Collections.emptyIterator();
    }
  }

  /**
   * Returns the {@link JobStatus} of a single job (identified by jobGroup/jobName) within the given flow execution,
   * or an empty iterator if absent or on I/O failure.
   */
  @Override
  public Iterator<JobStatus> getJobStatusesForFlowExecution(String flowName, String flowGroup, long flowExecutionId,
      String jobName, String jobGroup) {
    Preconditions.checkArgument(flowName != null, "flowName cannot be null");
    Preconditions.checkArgument(flowGroup != null, "flowGroup cannot be null");
    Preconditions.checkArgument(jobName != null, "jobName cannot be null");
    Preconditions.checkArgument(jobGroup != null, "jobGroup cannot be null");

    try {
      String storeName = KafkaJobStatusMonitor.jobStatusStoreName(flowGroup, flowName);
      String tableName = KafkaJobStatusMonitor.jobStatusTableName(flowExecutionId, jobGroup, jobName);
      List<State> jobStates = this.stateStore.getAll(storeName, tableName);
      if (jobStates.isEmpty()) {
        return Collections.emptyIterator();
      } else {
        return Iterators.singletonIterator(getJobStatus(jobStates.get(0)));
      }
    } catch (IOException e) {
      log.error(String.format("Exception encountered when listing files for flow: %s,%s,%s;%s,%s",
          flowGroup, flowName, flowExecutionId, jobGroup, jobName), e);
      return Collections.emptyIterator();
    }
  }

  /**
   * Lists the latest flow executions across every flow name in {@code flowGroup}, retaining at most
   * {@code countJobStatusesPerFlowName} job statuses per flow name. Returns an empty list on failure.
   */
  @Override
  public List<FlowStatus> getFlowStatusesForFlowGroupExecutions(String flowGroup, int countJobStatusesPerFlowName) {
    Preconditions.checkArgument(flowGroup != null, "flowGroup cannot be null");
    Preconditions.checkArgument(countJobStatusesPerFlowName > 0,
        "Number of job statuses per flow name must be at least 1 (was: %s).", countJobStatusesPerFlowName);
    try {
      // Store names are "<flowGroup>.<flowName>"; the empty flow name yields the group-wide prefix
      String storeNamePrefix = KafkaJobStatusMonitor.jobStatusStoreName(flowGroup, "");
      List<String> storeNamesForFlowGroup = stateStore.getStoreNames(storeName -> storeName.startsWith(storeNamePrefix));
      List<State> flowGroupExecutionsStates = storeNamesForFlowGroup.stream().flatMap(
          CheckedExceptionFunction.wrapToUnchecked(storeName -> stateStore.getAll(storeName).stream()
      )).collect(Collectors.toList());
      return asFlowStatuses(
          groupByFlowExecutionAndRetainLatest(flowGroup, flowGroupExecutionsStates, countJobStatusesPerFlowName));
    } catch (IOException | RuntimeException e) { // (latter likely wrapping `IOException` originating within `wrapUnchecked`)
      log.error(String.format("Exception encountered when listing files for flow group: %s", flowGroup), e);
      return ImmutableList.of();
    }
  }

  /**
   * @param flowName
   * @param flowGroup
   * @return the last <code>count</code> flow execution ids with the given flowName and flowGroup. -1 will be returned if no such execution found.
   */
  @Override
  public List<Long> getLatestExecutionIdsForFlow(String flowName, String flowGroup, int count) {
    Preconditions.checkArgument(flowName != null, "flowName cannot be null");
    Preconditions.checkArgument(flowGroup != null, "flowGroup cannot be null");
    Preconditions.checkArgument(count > 0, "Number of execution ids must be at least 1.");
    try {
      String storeName = KafkaJobStatusMonitor.jobStatusStoreName(flowGroup, flowName);
      List<String> tableNames = this.stateStore.getTableNames(storeName, input -> true);
      // TreeSet + descendingSet yields execution ids sorted newest-first with duplicates removed
      Set<Long> flowExecutionIds = new TreeSet<>(tableNames.stream()
          .map(KafkaJobStatusMonitor::getExecutionIdFromTableName)
          .collect(Collectors.toList())).descendingSet();
      return ImmutableList.copyOf(Iterables.limit(flowExecutionIds, count));
    } catch (Exception e) {
      // Previously this swallowed the exception silently; log it so failures are diagnosable.
      // The null return is preserved for backward compatibility with existing callers.
      log.error(String.format("Exception encountered when retrieving latest execution ids for flow: %s,%s",
          flowGroup, flowName), e);
      return null;
    }
  }
}
3,825
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/monitoring/GaaSObservabilityEventProducer.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.service.monitoring; import java.io.Closeable; import java.io.IOException; import java.lang.reflect.Type; import java.util.ArrayList; import java.util.List; import java.util.stream.Collectors; import com.codahale.metrics.MetricRegistry; import com.google.gson.reflect.TypeToken; import lombok.extern.slf4j.Slf4j; import org.apache.gobblin.configuration.State; import org.apache.gobblin.instrumented.Instrumented; import org.apache.gobblin.metrics.ContextAwareMeter; import org.apache.gobblin.metrics.DatasetMetric; import org.apache.gobblin.metrics.GaaSObservabilityEventExperimental; import org.apache.gobblin.metrics.Issue; import org.apache.gobblin.metrics.IssueSeverity; import org.apache.gobblin.metrics.JobStatus; import org.apache.gobblin.metrics.MetricContext; import org.apache.gobblin.metrics.ServiceMetricNames; import org.apache.gobblin.metrics.event.TimingEvent; import org.apache.gobblin.runtime.DatasetTaskSummary; import org.apache.gobblin.runtime.troubleshooter.MultiContextIssueRepository; import org.apache.gobblin.runtime.troubleshooter.TroubleshooterException; import org.apache.gobblin.runtime.troubleshooter.TroubleshooterUtils; import 
org.apache.gobblin.runtime.util.GsonUtils;
import org.apache.gobblin.service.ExecutionStatus;
import org.apache.gobblin.service.ServiceConfigKeys;
import org.apache.gobblin.service.modules.orchestration.AzkabanProjectConfig;
import org.apache.gobblin.service.modules.spec.JobExecutionPlan;

/**
 * A class embedded within GaaS running in the JobStatusMonitor which emits GaaSObservabilityEvents after each job in a flow
 * This is an abstract class, we need a sub system like Kakfa, which support at least once delivery, to emit the event
 */
@Slf4j
public abstract class GaaSObservabilityEventProducer implements Closeable {
  public static final String GAAS_OBSERVABILITY_EVENT_PRODUCER_PREFIX = "GaaSObservabilityEventProducer.";
  // Config key under which the concrete producer implementation class is supplied
  public static final String GAAS_OBSERVABILITY_EVENT_PRODUCER_CLASS_KEY = GAAS_OBSERVABILITY_EVENT_PRODUCER_PREFIX + "class.name";
  public static final String DEFAULT_GAAS_OBSERVABILITY_EVENT_PRODUCER_CLASS = NoopGaaSObservabilityEventProducer.class.getName();
  // Metric counting failures to read job issues from the issue repository
  public static final String ISSUES_READ_FAILED_METRIC_NAME = GAAS_OBSERVABILITY_EVENT_PRODUCER_PREFIX + "getIssuesFailedCount";

  protected MetricContext metricContext;
  protected State state;
  protected MultiContextIssueRepository issueRepository;
  protected boolean instrumentationEnabled;
  // Only initialized when instrumentation is enabled; guarded by instrumentationEnabled at every use
  ContextAwareMeter getIssuesFailedMeter;

  /**
   * @param state                  service-level configuration state (read for the GaaS instance name)
   * @param issueRepository        source of per-job issues attached to the emitted event
   * @param instrumentationEnabled when true, a MetricContext and failure meter are created
   */
  public GaaSObservabilityEventProducer(State state, MultiContextIssueRepository issueRepository,
      boolean instrumentationEnabled) {
    this.state = state;
    this.issueRepository = issueRepository;
    this.instrumentationEnabled = instrumentationEnabled;
    if (this.instrumentationEnabled) {
      this.metricContext = Instrumented.getMetricContext(state, getClass());
      this.getIssuesFailedMeter = this.metricContext.contextAwareMeter(
          MetricRegistry.name(ServiceMetricNames.GOBBLIN_SERVICE_PREFIX, ISSUES_READ_FAILED_METRIC_NAME));
    }
  }

  /**
   * Builds a {@link GaaSObservabilityEventExperimental} from the given final job state and emits it via the
   * subclass-specific transport.
   */
  public void emitObservabilityEvent(final State jobState) {
    GaaSObservabilityEventExperimental event = createGaaSObservabilityEvent(jobState);
    sendUnderlyingEvent(event);
  }

  /**
   * Emits the GaaSObservabilityEvent with the mechanism that the child class is built upon e.g. Kafka
   * @param event
   */
  abstract protected void sendUnderlyingEvent(GaaSObservabilityEventExperimental event);

  /**
   * Creates a GaaSObservabilityEvent which is derived from a final GaaS job pipeline state, which is combination of GTE job states in an ordered fashion
   * @param jobState
   * @return GaaSObservabilityEvent
   */
  private GaaSObservabilityEventExperimental createGaaSObservabilityEvent(final State jobState) {
    // Timing fields are optional in the job state; absent fields become null in the event
    Long jobStartTime = jobState.contains(TimingEvent.JOB_START_TIME) ?
        jobState.getPropAsLong(TimingEvent.JOB_START_TIME) : null;
    Long jobEndTime = jobState.contains(TimingEvent.JOB_END_TIME) ?
        jobState.getPropAsLong(TimingEvent.JOB_END_TIME) : null;
    Long jobOrchestratedTime = jobState.contains(TimingEvent.JOB_ORCHESTRATED_TIME) ?
        jobState.getPropAsLong(TimingEvent.JOB_ORCHESTRATED_TIME) : null;
    Long jobPlanningPhaseStartTime = jobState.contains(TimingEvent.WORKUNIT_PLAN_START_TIME) ?
        jobState.getPropAsLong(TimingEvent.WORKUNIT_PLAN_START_TIME) : null;
    Long jobPlanningPhaseEndTime = jobState.contains(TimingEvent.WORKUNIT_PLAN_END_TIME) ?
        jobState.getPropAsLong(TimingEvent.WORKUNIT_PLAN_END_TIME) : null;
    // Dataset summaries are stored as a JSON-serialized list; deserialize with the date-aware Gson instance
    Type datasetTaskSummaryType = new TypeToken<ArrayList<DatasetTaskSummary>>(){}.getType();
    List<DatasetTaskSummary> datasetTaskSummaries = jobState.contains(TimingEvent.DATASET_TASK_SUMMARIES) ?
        GsonUtils.GSON_WITH_DATE_HANDLING.fromJson(jobState.getProp(TimingEvent.DATASET_TASK_SUMMARIES), datasetTaskSummaryType) : null;
    List<DatasetMetric> datasetMetrics = datasetTaskSummaries != null ?
        datasetTaskSummaries.stream().map(DatasetTaskSummary::toDatasetMetric).collect(Collectors.toList()) : null;
    GaaSObservabilityEventExperimental.Builder builder = GaaSObservabilityEventExperimental.newBuilder();
    List<Issue> issueList = null;
    try {
      issueList = getIssuesForJob(issueRepository, jobState);
    } catch (Exception e) {
      // If issues cannot be fetched, increment metric but continue to try to emit the event
      log.error("Could not fetch issues while creating GaaSObservabilityEvent due to ", e);
      if (this.instrumentationEnabled) {
        this.getIssuesFailedMeter.mark();
      }
    }
    JobStatus status = convertExecutionStatusTojobState(jobState,
        ExecutionStatus.valueOf(jobState.getProp(JobStatusRetriever.EVENT_NAME_FIELD)));
    builder.setTimestamp(System.currentTimeMillis())
        .setFlowName(jobState.getProp(TimingEvent.FlowEventConstants.FLOW_NAME_FIELD))
        .setFlowGroup(jobState.getProp(TimingEvent.FlowEventConstants.FLOW_GROUP_FIELD))
        .setFlowGraphEdgeId(jobState.getProp(TimingEvent.FlowEventConstants.FLOW_EDGE_FIELD, ""))
        .setFlowExecutionId(jobState.getPropAsLong(TimingEvent.FlowEventConstants.FLOW_EXECUTION_ID_FIELD))
        .setLastFlowModificationTime(jobState.getPropAsLong(TimingEvent.FlowEventConstants.FLOW_MODIFICATION_TIME_FIELD, 0))
        .setJobName(jobState.getProp(TimingEvent.FlowEventConstants.JOB_NAME_FIELD))
        .setExecutorUrl(jobState.getProp(TimingEvent.METADATA_MESSAGE))
        .setExecutorId(jobState.getProp(TimingEvent.FlowEventConstants.SPEC_EXECUTOR_FIELD, ""))
        .setJobStartTime(jobStartTime)
        .setJobEndTime(jobEndTime)
        .setJobOrchestratedTime(jobOrchestratedTime)
        .setJobPlanningPhaseStartTime(jobPlanningPhaseStartTime)
        .setJobPlanningPhaseEndTime(jobPlanningPhaseEndTime)
        .setIssues(issueList)
        .setJobStatus(status)
        .setExecutionUserUrn(jobState.getProp(AzkabanProjectConfig.USER_TO_PROXY, null))
        .setDatasetsWritten(datasetMetrics)
        .setGaasId(this.state.getProp(ServiceConfigKeys.GOBBLIN_SERVICE_INSTANCE_NAME, null))
        .setJobProperties(jobState.getProp(JobExecutionPlan.JOB_PROPS_KEY, null));
    return builder.build();
  }

  /**
   * Maps a terminal {@link ExecutionStatus} onto the event's {@link JobStatus}.
   * FAILED is split on the presence of a job end time: a recorded end time implies the job actually ran
   * (EXECUTION_FAILURE); otherwise it never started (SUBMISSION_FAILURE).
   * Non-terminal statuses map to null.
   */
  private static JobStatus convertExecutionStatusTojobState(State state, ExecutionStatus executionStatus) {
    switch (executionStatus) {
      case FAILED:
        // TODO: Separate failure cases to SUBMISSION FAILURE and COMPILATION FAILURE, investigate events to populate these fields
        if (state.contains(TimingEvent.JOB_END_TIME)) {
          return JobStatus.EXECUTION_FAILURE;
        }
        return JobStatus.SUBMISSION_FAILURE;
      case COMPLETE:
        return JobStatus.SUCCEEDED;
      case CANCELLED:
        // TODO: If cancelled due to start SLA exceeded, consider grouping this as a submission failure?
        return JobStatus.CANCELLED;
      default:
        return null;
    }
  }

  /**
   * Fetches all issues recorded for this job's troubleshooter context and converts them to the event's
   * {@link Issue} representation (note: issue time is truncated to epoch seconds).
   *
   * @throws TroubleshooterException if the repository lookup fails
   */
  private static List<Issue> getIssuesForJob(MultiContextIssueRepository issueRepository, State jobState)
      throws TroubleshooterException {
    return issueRepository.getAll(TroubleshooterUtils.getContextIdForJob(jobState.getProperties())).stream().map(
        issue -> new Issue(
            issue.getTime().toEpochSecond(),
            IssueSeverity.valueOf(issue.getSeverity().toString()),
            issue.getCode(),
            issue.getSummary(),
            issue.getDetails(),
            issue.getProperties()
        )).collect(Collectors.toList());
  }

  @Override
  public void close() throws IOException {
    // producer close will handle by the cache
    if (this.instrumentationEnabled) {
      this.metricContext.close();
    }
  }
}
3,826
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/monitoring/KafkaJobStatusMonitor.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.service.monitoring; import java.io.IOException; import java.time.Duration; import java.util.List; import java.util.Optional; import java.util.Properties; import java.util.concurrent.ExecutionException; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; import com.codahale.metrics.MetricRegistry; import com.codahale.metrics.Timer; import com.github.rholder.retry.Attempt; import com.github.rholder.retry.Retryer; import com.github.rholder.retry.RetryException; import com.github.rholder.retry.RetryListener; import com.google.common.base.Joiner; import com.google.common.base.Splitter; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.typesafe.config.Config; import com.typesafe.config.ConfigFactory; import com.google.common.annotations.VisibleForTesting; import lombok.Getter; import lombok.extern.slf4j.Slf4j; import org.apache.gobblin.configuration.ConfigurationKeys; import org.apache.gobblin.configuration.WorkUnitState; import org.apache.gobblin.kafka.client.DecodeableKafkaRecord; import 
org.apache.gobblin.metastore.FileContextBasedFsStateStore;
import org.apache.gobblin.metastore.FileContextBasedFsStateStoreFactory;
import org.apache.gobblin.metastore.StateStore;
import org.apache.gobblin.metrics.GobblinTrackingEvent;
import org.apache.gobblin.metrics.ServiceMetricNames;
import org.apache.gobblin.metrics.event.TimingEvent;
import org.apache.gobblin.runtime.TaskContext;
import org.apache.gobblin.runtime.kafka.HighLevelConsumer;
import org.apache.gobblin.runtime.retention.DatasetCleanerTask;
import org.apache.gobblin.runtime.troubleshooter.IssueEventBuilder;
import org.apache.gobblin.runtime.troubleshooter.JobIssueEventHandler;
import org.apache.gobblin.service.ExecutionStatus;
import org.apache.gobblin.service.ServiceConfigKeys;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.retry.RetryerFactory;

import static org.apache.gobblin.util.retry.RetryerFactory.*;


/**
 * A Kafka monitor that tracks {@link org.apache.gobblin.metrics.GobblinTrackingEvent}s reporting statuses of
 * running jobs. The job statuses are stored as {@link org.apache.gobblin.configuration.State} objects in
 * a {@link FileContextBasedFsStateStore}.
 */
@Slf4j
public abstract class KafkaJobStatusMonitor extends HighLevelConsumer<byte[], byte[]> {
  public static final String JOB_STATUS_MONITOR_PREFIX = "jobStatusMonitor";
  //We use table suffix that is different from the Gobblin job state store suffix of jst to avoid confusion.
  //gst refers to the state store suffix for GaaS-orchestrated Gobblin jobs.
  // Timer metric name covering the read-merge-write cycle in addJobStatusToStateStore().
  public static final String GET_AND_SET_JOB_STATUS = MetricRegistry.name(ServiceMetricNames.GOBBLIN_SERVICE_PREFIX, JOB_STATUS_MONITOR_PREFIX, "getAndSetJobStatus");
  // Timer metric name covering issue-event handling in processMessage().
  private static final String PROCESS_JOB_ISSUE = MetricRegistry
      .name(ServiceMetricNames.GOBBLIN_SERVICE_PREFIX, JOB_STATUS_MONITOR_PREFIX, "jobIssueProcessingTime");

  static final String JOB_STATUS_MONITOR_TOPIC_KEY = "topic";
  static final String JOB_STATUS_MONITOR_NUM_THREADS_KEY = "numThreads";
  static final String JOB_STATUS_MONITOR_CLASS_KEY = "class";
  static final String DEFAULT_JOB_STATUS_MONITOR_CLASS = KafkaAvroJobStatusMonitor.class.getName();

  private static final String KAFKA_AUTO_OFFSET_RESET_KEY = "auto.offset.reset";
  private static final String KAFKA_AUTO_OFFSET_RESET_SMALLEST = "smallest";

  // Backing store for per-job status State objects; factory class is configurable.
  @Getter
  private final StateStore<org.apache.gobblin.configuration.State> stateStore;
  // Single-threaded scheduler used solely for the daily state-store cleaner task.
  private final ScheduledExecutorService scheduledExecutorService;

  // Retry defaults used when no "jobStatusMonitor.*" overrides are configured.
  private static final Config RETRYER_FALLBACK_CONFIG = ConfigFactory.parseMap(ImmutableMap.of(
      RETRY_TIME_OUT_MS, TimeUnit.HOURS.toMillis(24L), // after a day, presume non-transient and give up
      RETRY_INTERVAL_MS, TimeUnit.MINUTES.toMillis(1L), // back-off to once/minute
      RETRY_TYPE, RetryType.EXPONENTIAL.name()));
  private static final Config DEFAULTS = ConfigFactory.parseMap(ImmutableMap.of(
      KAFKA_AUTO_OFFSET_RESET_KEY, KAFKA_AUTO_OFFSET_RESET_SMALLEST));

  // Precedence ordering of execution statuses; a status earlier in this list is considered
  // "older" than a later one when detecting out-of-order events (see addJobStatusToStateStore).
  private static final List<ExecutionStatus> ORDERED_EXECUTION_STATUSES = ImmutableList
      .of(ExecutionStatus.COMPILED, ExecutionStatus.PENDING, ExecutionStatus.PENDING_RESUME, ExecutionStatus.PENDING_RETRY,
          ExecutionStatus.ORCHESTRATED, ExecutionStatus.RUNNING, ExecutionStatus.COMPLETE,
          ExecutionStatus.FAILED, ExecutionStatus.CANCELLED);

  private final JobIssueEventHandler jobIssueEventHandler;
  // Retries state-store writes; failures are logged by the listener below so operators can intervene.
  private final Retryer<Void> persistJobStatusRetryer;
  private final GaaSObservabilityEventProducer eventProducer;

  /**
   * @param topic Kafka topic carrying job-status GobblinTrackingEvents
   * @param config monitor config; "jobStatusMonitor.*" keys override retry behavior
   * @param numThreads consumer thread count passed to {@link HighLevelConsumer}
   * @param jobIssueEventHandler receives issue events observed on the stream
   * @param observabilityEventProducer emits an observability event on a job's first transition to a final status
   * @throws ReflectiveOperationException if the configured state-store factory cannot be instantiated
   */
  public KafkaJobStatusMonitor(String topic, Config config, int numThreads, JobIssueEventHandler jobIssueEventHandler, GaaSObservabilityEventProducer observabilityEventProducer)
      throws ReflectiveOperationException {
    super(topic, config.withFallback(DEFAULTS), numThreads);
    String stateStoreFactoryClass = ConfigUtils.getString(config, ConfigurationKeys.STATE_STORE_FACTORY_CLASS_KEY, FileContextBasedFsStateStoreFactory.class.getName());
    this.stateStore =
        ((StateStore.Factory) Class.forName(stateStoreFactoryClass).newInstance()).createStateStore(config, org.apache.gobblin.configuration.State.class);
    this.scheduledExecutorService = Executors.newScheduledThreadPool(1);
    this.jobIssueEventHandler = jobIssueEventHandler;

    Config retryerOverridesConfig = config.hasPath(KafkaJobStatusMonitor.JOB_STATUS_MONITOR_PREFIX)
        ? config.getConfig(KafkaJobStatusMonitor.JOB_STATUS_MONITOR_PREFIX)
        : ConfigFactory.empty();
    // log exceptions to expose errors we suffer under and/or guide intervention when resolution not readily forthcoming
    this.persistJobStatusRetryer =
        RetryerFactory.newInstance(retryerOverridesConfig.withFallback(RETRYER_FALLBACK_CONFIG), Optional.of(new RetryListener() {
          @Override
          public <V> void onRetry(Attempt<V> attempt) {
            if (attempt.hasException()) {
              String msg = String.format("(Likely retryable) failure adding job status to state store [attempt: %d; %s after start]",
                  attempt.getAttemptNumber(), Duration.ofMillis(attempt.getDelaySinceFirstAttempt()).toString());
              log.warn(msg, attempt.getExceptionCause());
            }
          }
        }));
    this.eventProducer = observabilityEventProducer;
  }

  /**
   * Starts the consumer and schedules a {@link DatasetCleanerTask} over the job-status store:
   * first run after 300s, then every 86400s (daily).
   */
  @Override
  protected void startUp() {
    super.startUp();
    log.info("Scheduling state store cleaner..");
    org.apache.gobblin.configuration.State state = new org.apache.gobblin.configuration.State(ConfigUtils.configToProperties(this.config));
    state.setProp(ConfigurationKeys.JOB_ID_KEY, "GobblinServiceJobStatusCleanerJob");
    state.setProp(ConfigurationKeys.TASK_ID_KEY, "GobblinServiceJobStatusCleanerTask");

    TaskContext taskContext = new TaskContext(new WorkUnitState(WorkUnit.createEmpty(), state));
    DatasetCleanerTask cleanerTask = new DatasetCleanerTask(taskContext);
    scheduledExecutorService.scheduleAtFixedRate(cleanerTask, 300L, 86400L, TimeUnit.SECONDS);
  }

  /** Stops the consumer, then gives the cleaner scheduler up to 30s to drain before abandoning it. */
  @Override
  public void shutDown() {
     super.shutDown();
     this.scheduledExecutorService.shutdown();
     try {
       this.scheduledExecutorService.awaitTermination(30, TimeUnit.SECONDS);
     } catch (InterruptedException e) {
       log.error("Exception encountered when shutting down state store cleaner", e);
     }
  }

  @Override
  protected void createMetrics() {
    super.createMetrics();
  }

  /**
   * Handles one Kafka record: forwards issue events to the {@link JobIssueEventHandler}, then
   * persists the parsed job status with retries. Any terminal persistence failure is rethrown as a
   * RuntimeException so the consumer does NOT advance the Kafka offset past an unpersisted status.
   */
  @Override
  protected void processMessage(DecodeableKafkaRecord<byte[],byte[]> message) {
    GobblinTrackingEvent gobblinTrackingEvent = deserializeEvent(message);

    if (gobblinTrackingEvent == null) {
      return;
    }

    if (IssueEventBuilder.isIssueEvent(gobblinTrackingEvent)) {
      try (Timer.Context context = getMetricContext().timer(PROCESS_JOB_ISSUE).time()) {
        jobIssueEventHandler.processEvent(gobblinTrackingEvent);
      }
    }

    try {
      persistJobStatusRetryer.call(() -> {
        // re-create `jobStatus` on each attempt, since mutated within `addJobStatusToStateStore`
        org.apache.gobblin.configuration.State jobStatus = parseJobStatus(gobblinTrackingEvent);
        if (jobStatus != null) {
          try (Timer.Context context = getMetricContext().timer(GET_AND_SET_JOB_STATUS).time()) {
            addJobStatusToStateStore(jobStatus, this.stateStore, this.eventProducer);
          }
        }
        return null;
      });
    } catch (ExecutionException ee) {
      String msg = String.format("Failed to add job status to state store for kafka offset %d", message.getOffset());
      log.warn(msg, ee);
      // Throw RuntimeException to avoid advancing kafka offsets without updating state store
      throw new RuntimeException(msg, ee.getCause());
    } catch (RetryException re) {
      String interruptedNote = Thread.currentThread().isInterrupted() ? "... then interrupted" : "";
      String msg = String.format("Failed to add job status to state store for kafka offset %d (retried %d times%s)",
          message.getOffset(), re.getNumberOfFailedAttempts(), interruptedNote);
      Throwable informativeException = re.getLastFailedAttempt().hasException()
          ? re.getLastFailedAttempt().getExceptionCause()
          : re;
      log.warn(msg, informativeException);
      // Throw RuntimeException to avoid advancing kafka offsets without updating state store
      throw new RuntimeException(msg, informativeException);
    }
  }

  /**
   * Persist job status to the underlying {@link StateStore}.
   * It fills missing fields in job status and also merge the fields with the
   * existing job status in the state store. Merging is required because we
   * do not want to lose the information sent by other GobblinTrackingEvents.
   * @param jobStatus
   * @throws IOException
   */
  @VisibleForTesting
  static void addJobStatusToStateStore(org.apache.gobblin.configuration.State jobStatus, StateStore stateStore, GaaSObservabilityEventProducer eventProducer)
      throws IOException {
    try {
      // Flow-level (as opposed to job-level) events carry no job name/group; use the NA sentinel.
      if (!jobStatus.contains(TimingEvent.FlowEventConstants.JOB_NAME_FIELD)) {
        jobStatus.setProp(TimingEvent.FlowEventConstants.JOB_NAME_FIELD, JobStatusRetriever.NA_KEY);
      }
      if (!jobStatus.contains(TimingEvent.FlowEventConstants.JOB_GROUP_FIELD)) {
        jobStatus.setProp(TimingEvent.FlowEventConstants.JOB_GROUP_FIELD, JobStatusRetriever.NA_KEY);
      }
      String flowName = jobStatus.getProp(TimingEvent.FlowEventConstants.FLOW_NAME_FIELD);
      String flowGroup = jobStatus.getProp(TimingEvent.FlowEventConstants.FLOW_GROUP_FIELD);
      String flowExecutionId = jobStatus.getProp(TimingEvent.FlowEventConstants.FLOW_EXECUTION_ID_FIELD);
      String jobName = jobStatus.getProp(TimingEvent.FlowEventConstants.JOB_NAME_FIELD);
      String jobGroup = jobStatus.getProp(TimingEvent.FlowEventConstants.JOB_GROUP_FIELD);
      String storeName = jobStatusStoreName(flowGroup, flowName);
      String tableName = jobStatusTableName(flowExecutionId, jobGroup, jobName);

      List<org.apache.gobblin.configuration.State> states = stateStore.getAll(storeName, tableName);
      if (states.size() > 0) {
        // The last state in the store is the most recently persisted status for this job.
        org.apache.gobblin.configuration.State previousJobStatus = states.get(states.size() - 1);
        String previousStatus = previousJobStatus.getProp(JobStatusRetriever.EVENT_NAME_FIELD);
        String currentStatus = jobStatus.getProp(JobStatusRetriever.EVENT_NAME_FIELD);
        int previousGeneration = previousJobStatus.getPropAsInt(TimingEvent.FlowEventConstants.CURRENT_GENERATION_FIELD, 1);
        // This is to make the change backward compatible as we may not have this info in cluster events
        // If we does not have those info, we treat the event as coming from the same attempts as previous one
        int currentGeneration = jobStatus.getPropAsInt(TimingEvent.FlowEventConstants.CURRENT_GENERATION_FIELD, previousGeneration);
        int previousAttempts = previousJobStatus.getPropAsInt(TimingEvent.FlowEventConstants.CURRENT_ATTEMPTS_FIELD, 1);
        int currentAttempts = jobStatus.getPropAsInt(TimingEvent.FlowEventConstants.CURRENT_ATTEMPTS_FIELD, previousAttempts);
        // Verify if the current job status is flow status. If yes, we check for its current execution status to be PENDING_RESUME (limiting to just resume flow statuses)
        // When the above two conditions satisfy, we NEED NOT check for the out-of-order events since GaaS would manage the lifecycle of these events
        // Hence, we update the merge state accordingly so that the flow can proceed with its execution to the next state in the DAG
        boolean isFlowStatusAndPendingResume = isFlowStatusAndPendingResume(jobName, jobGroup, currentStatus);
        // We use three things to accurately count and thereby bound retries, even amidst out-of-order events (by skipping late arrivals).
        // The generation is monotonically increasing, while the attempts may re-initialize back to 0. this two-part form prevents the composite value from ever repeating.
        // And job status reflect the execution status in one attempt
        if (!isFlowStatusAndPendingResume && (previousStatus != null && currentStatus != null && (previousGeneration > currentGeneration || (
            previousGeneration == currentGeneration && previousAttempts > currentAttempts) || (previousGeneration == currentGeneration
            && previousAttempts == currentAttempts
            && ORDERED_EXECUTION_STATUSES.indexOf(ExecutionStatus.valueOf(currentStatus)) < ORDERED_EXECUTION_STATUSES.indexOf(
            ExecutionStatus.valueOf(previousStatus)))))) {
          // Late arrival: keep the previously-stored state's values on conflict...
          log.warn(String.format(
              "Received status [generation.attempts] = %s [%s.%s] when already %s [%s.%s] for flow (%s, %s, %s), job (%s, %s)",
              currentStatus, currentGeneration, currentAttempts, previousStatus, previousGeneration, previousAttempts,
              flowGroup, flowName, flowExecutionId, jobGroup, jobName));
          jobStatus = mergeState(states.get(states.size() - 1), jobStatus);
        } else {
          // ...otherwise the incoming event wins on conflicting fields.
          jobStatus = mergeState(jobStatus, states.get(states.size() - 1));
        }
      }

      modifyStateIfRetryRequired(jobStatus);
      stateStore.put(storeName, tableName, jobStatus);
      // Emit an observability event only on the first transition into a final status.
      if (isNewStateTransitionToFinal(jobStatus, states)) {
        eventProducer.emitObservabilityEvent(jobStatus);
      }
    } catch (Exception e) {
      log.warn("Meet exception when adding jobStatus to state store at " + e.getStackTrace()[0].getClassName() + "line number: " + e.getStackTrace()[0].getLineNumber(), e);
      throw new IOException(e);
    }
  }

  /**
   * True iff this is a flow-level status (job name/group are the NA sentinel) whose current
   * status is PENDING_RESUME — such events are lifecycle-managed by GaaS and skip the
   * out-of-order check above.
   */
  private static boolean isFlowStatusAndPendingResume(String jobName, String jobGroup, String currentStatus) {
    return jobName != null && jobGroup != null && jobName.equals(JobStatusRetriever.NA_KEY)
        && jobGroup.equals(JobStatusRetriever.NA_KEY) && currentStatus.equals(ExecutionStatus.PENDING_RESUME.name());
  }

  /**
   * If the status is FAILED, PENDING_RETRY, or a retry-meriting CANCELLED, and attempts remain,
   * flips the state to PENDING_RETRY, sets SHOULD_RETRY_FIELD, and clears the job end time.
   * The transient DOES_CANCELED_FLOW_MERIT_RETRY marker is always stripped before persisting.
   */
  private static void modifyStateIfRetryRequired(org.apache.gobblin.configuration.State state) {
    int maxAttempts = state.getPropAsInt(TimingEvent.FlowEventConstants.MAX_ATTEMPTS_FIELD, 1);
    int currentAttempts = state.getPropAsInt(TimingEvent.FlowEventConstants.CURRENT_ATTEMPTS_FIELD, 1);
    // SHOULD_RETRY_FIELD maybe reset by JOB_COMPLETION_PERCENTAGE event
    if (state.contains(JobStatusRetriever.EVENT_NAME_FIELD)
        &&(state.getProp(JobStatusRetriever.EVENT_NAME_FIELD).equals(ExecutionStatus.FAILED.name())
        || state.getProp(JobStatusRetriever.EVENT_NAME_FIELD).equals(ExecutionStatus.PENDING_RETRY.name())
        || (state.getProp(JobStatusRetriever.EVENT_NAME_FIELD).equals(ExecutionStatus.CANCELLED.name())
        && state.contains(TimingEvent.FlowEventConstants.DOES_CANCELED_FLOW_MERIT_RETRY))
    ) && currentAttempts < maxAttempts) {
      state.setProp(TimingEvent.FlowEventConstants.SHOULD_RETRY_FIELD, true);
      state.setProp(JobStatusRetriever.EVENT_NAME_FIELD, ExecutionStatus.PENDING_RETRY.name());
      state.removeProp(TimingEvent.JOB_END_TIME);
    }
    state.removeProp(TimingEvent.FlowEventConstants.DOES_CANCELED_FLOW_MERIT_RETRY);
  }

  /**
   * True iff {@code currentState} is in a finished status while the latest previously-stored
   * state (if any) was not — i.e. this persist is the first transition into a final status.
   */
  static boolean isNewStateTransitionToFinal(org.apache.gobblin.configuration.State currentState, List<org.apache.gobblin.configuration.State> prevStates) {
    if (prevStates.size() == 0) {
      return FlowStatusGenerator.FINISHED_STATUSES.contains(currentState.getProp(JobStatusRetriever.EVENT_NAME_FIELD));
    }
    return currentState.contains(JobStatusRetriever.EVENT_NAME_FIELD)
        && FlowStatusGenerator.FINISHED_STATUSES.contains(currentState.getProp(JobStatusRetriever.EVENT_NAME_FIELD))
        && !FlowStatusGenerator.FINISHED_STATUSES.contains(prevStates.get(prevStates.size()-1).getProp(JobStatusRetriever.EVENT_NAME_FIELD));
  }

  /**
   * Merge states based on precedence defined by {@link #ORDERED_EXECUTION_STATUSES}.
   * The state instance in the 1st argument reflects the more recent state of a job
   * (and is thus, given higher priority) compared to the 2nd argument.
   * @param state higher priority state
   * @param fallbackState lower priority state
   * @return merged state
   */
  private static org.apache.gobblin.configuration.State mergeState(org.apache.gobblin.configuration.State state,
      org.apache.gobblin.configuration.State fallbackState) {
    Properties mergedState = new Properties();
    mergedState.putAll(fallbackState.getProperties());
    mergedState.putAll(state.getProperties());

    return new org.apache.gobblin.configuration.State(mergedState);
  }

  /** Table name: {@code <flowExecutionId>.<jobGroup>.<jobName>.<suffix>} joined by the state-store separator. */
  public static String jobStatusTableName(String flowExecutionId, String jobGroup, String jobName) {
    return Joiner.on(ServiceConfigKeys.STATE_STORE_KEY_SEPARATION_CHARACTER).join(flowExecutionId, jobGroup, jobName, ServiceConfigKeys.STATE_STORE_TABLE_SUFFIX);
  }

  /** Convenience overload of {@link #jobStatusTableName(String, String, String)} for numeric execution IDs. */
  public static String jobStatusTableName(long flowExecutionId, String jobGroup, String jobName) {
    return jobStatusTableName(String.valueOf(flowExecutionId), jobGroup, jobName);
  }

  /** Store name: {@code <flowGroup>.<flowName>} joined by the state-store separator. */
  public static String jobStatusStoreName(String flowGroup, String flowName) {
    return Joiner.on(ServiceConfigKeys.STATE_STORE_KEY_SEPARATION_CHARACTER).join(flowGroup, flowName);
  }

  /** Extracts the flow execution ID (leading component) back out of a table name built above. */
  public static long getExecutionIdFromTableName(String tableName) {
    return Long.parseLong(Splitter.on(ServiceConfigKeys.STATE_STORE_KEY_SEPARATION_CHARACTER).splitToList(tableName).get(0));
  }

  /** Decodes the raw Kafka record into a {@link GobblinTrackingEvent}; implementations return null on undecodable records. */
  protected abstract GobblinTrackingEvent deserializeEvent(DecodeableKafkaRecord<byte[],byte[]> message);

  /** Translates a tracking event into a job-status {@link org.apache.gobblin.configuration.State}, or null if not status-bearing. */
  protected abstract org.apache.gobblin.configuration.State parseJobStatus(GobblinTrackingEvent event);
}
3,827
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/monitoring/MysqlJobStatusRetriever.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.service.monitoring; import java.io.IOException; import java.util.Iterator; import java.util.List; import com.codahale.metrics.MetricRegistry; import com.codahale.metrics.Timer; import com.google.common.collect.Ordering; import com.typesafe.config.Config; import javax.inject.Inject; import javax.inject.Singleton; import lombok.Getter; import org.apache.gobblin.configuration.State; import org.apache.gobblin.metastore.MysqlJobStatusStateStore; import org.apache.gobblin.metastore.MysqlJobStatusStateStoreFactory; import org.apache.gobblin.metrics.ServiceMetricNames; import org.apache.gobblin.runtime.troubleshooter.MultiContextIssueRepository; import org.apache.gobblin.service.ServiceConfigKeys; import org.apache.gobblin.util.ConfigUtils; /** * Mysql based Retriever for {@link JobStatus}. 
*/ @Singleton public class MysqlJobStatusRetriever extends JobStatusRetriever { @FunctionalInterface private interface SupplierThrowingIO<T> { T get() throws IOException; } public static final String MYSQL_JOB_STATUS_RETRIEVER_PREFIX = "mysqlJobStatusRetriever"; public static final String GET_LATEST_JOB_STATUS_METRIC = MetricRegistry.name(ServiceMetricNames.GOBBLIN_SERVICE_PREFIX, MYSQL_JOB_STATUS_RETRIEVER_PREFIX, "getLatestJobStatus"); public static final String GET_LATEST_FLOW_STATUS_METRIC = MetricRegistry.name(ServiceMetricNames.GOBBLIN_SERVICE_PREFIX, MYSQL_JOB_STATUS_RETRIEVER_PREFIX, "getLatestFlowStatus"); public static final String GET_LATEST_FLOW_GROUP_STATUS_METRIC = MetricRegistry.name(ServiceMetricNames.GOBBLIN_SERVICE_PREFIX, MYSQL_JOB_STATUS_RETRIEVER_PREFIX, "getLatestFlowGroupStatus"); public static final String GET_ALL_FLOW_STATUSES_METRIC = MetricRegistry.name(ServiceMetricNames.GOBBLIN_SERVICE_PREFIX, MYSQL_JOB_STATUS_RETRIEVER_PREFIX, "getAllFlowStatuses"); @Getter private final MysqlJobStatusStateStore<State> stateStore; @Inject public MysqlJobStatusRetriever(Config config, MultiContextIssueRepository issueRepository) throws ReflectiveOperationException { super(ConfigUtils.getBoolean(config, ServiceConfigKeys.GOBBLIN_SERVICE_DAG_MANAGER_ENABLED_KEY, ServiceConfigKeys.DEFAULT_GOBBLIN_SERVICE_DAG_MANAGER_ENABLED), issueRepository); config = config.getConfig(MYSQL_JOB_STATUS_RETRIEVER_PREFIX).withFallback(config); this.stateStore = (MysqlJobStatusStateStoreFactory.class.newInstance()).createStateStore(config, State.class); } @Override public Iterator<JobStatus> getJobStatusesForFlowExecution(String flowName, String flowGroup, long flowExecutionId) { String storeName = KafkaJobStatusMonitor.jobStatusStoreName(flowGroup, flowName); List<State> jobStatusStates = timeOpAndWrapIOException(() -> this.stateStore.getAll(storeName, flowExecutionId), GET_LATEST_FLOW_STATUS_METRIC); return asJobStatuses(jobStatusStates); } @Override public 
Iterator<JobStatus> getJobStatusesForFlowExecution(String flowName, String flowGroup, long flowExecutionId, String jobName, String jobGroup) { String storeName = KafkaJobStatusMonitor.jobStatusStoreName(flowGroup, flowName); String tableName = KafkaJobStatusMonitor.jobStatusTableName(flowExecutionId, jobGroup, jobName); List<State> jobStatusStates = timeOpAndWrapIOException(() -> this.stateStore.getAll(storeName, tableName), GET_LATEST_JOB_STATUS_METRIC); return asJobStatuses(jobStatusStates); } @Override public List<FlowStatus> getFlowStatusesForFlowGroupExecutions(String flowGroup, int countJobStatusesPerFlowName) { String storeNamePrefix = KafkaJobStatusMonitor.jobStatusStoreName(flowGroup, ""); // TODO: optimize as needed: returned `List<State>` may be large, since encompassing every execution of every flow (in group)! List<State> jobStatusStates = timeOpAndWrapIOException(() -> this.stateStore.getAllWithPrefix(storeNamePrefix), GET_LATEST_FLOW_GROUP_STATUS_METRIC); return asFlowStatuses(groupByFlowExecutionAndRetainLatest(flowGroup, jobStatusStates, countJobStatusesPerFlowName)); } @Override public List<Long> getLatestExecutionIdsForFlow(String flowName, String flowGroup, int count) { String storeName = KafkaJobStatusMonitor.jobStatusStoreName(flowGroup, flowName); List<State> jobStatusStates = timeOpAndWrapIOException(() -> this.stateStore.getAll(storeName), GET_ALL_FLOW_STATUSES_METRIC); return getLatestExecutionIds(jobStatusStates, count); } private List<State> timeOpAndWrapIOException(SupplierThrowingIO<List<State>> states, String timerMetricName) { try (Timer.Context context = this.metricContext.contextAwareTimer(timerMetricName).time()) { return states.get(); } catch (IOException e) { throw new RuntimeException(e); } } private List<Long> getLatestExecutionIds(List<State> jobStatusStates, int count) { // `distinct()`, to avoid each flow execution ID replicating as many times as it has child jobs Iterator<Long> flowExecutionIds = 
jobStatusStates.stream().map(this::getFlowExecutionId).distinct().iterator(); return Ordering.<Long>natural().greatestOf(flowExecutionIds, count); } }
3,828
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/topology/ConfigBasedTopologySpecFactory.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.modules.topology;

import java.lang.reflect.InvocationTargetException;
import java.util.Collection;
import java.util.Collections;

import org.apache.commons.lang3.reflect.ConstructorUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Splitter;
import com.google.common.collect.Lists;
import com.typesafe.config.Config;

import javax.inject.Inject;
import javax.inject.Singleton;

import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.runtime.api.SpecExecutor;
import org.apache.gobblin.runtime.api.TopologySpec;
import org.apache.gobblin.service.ServiceConfigKeys;
import org.apache.gobblin.util.ClassAliasResolver;
import org.apache.gobblin.util.ConfigUtils;


/**
 * A {@link TopologySpecFactory} that builds one {@link TopologySpec} per name listed under
 * {@link ServiceConfigKeys#TOPOLOGY_FACTORY_TOPOLOGY_NAMES_KEY}, reading each topology's
 * descriptor from config and instantiating its {@link SpecExecutor} by class name or alias.
 */
@Alpha
@Singleton
public class ConfigBasedTopologySpecFactory implements TopologySpecFactory {
  private static final Splitter SPLIT_BY_COMMA = Splitter.on(",").omitEmptyStrings().trimResults();
  private final Config _config;
  private final Logger _log;
  private final ClassAliasResolver<SpecExecutor> _aliasResolver;

  public ConfigBasedTopologySpecFactory(Config config) {
    this(config, Optional.<Logger>absent());
  }

  /**
   * @param config service config containing the topology descriptors
   * @param log optional logger; a class-scoped logger is created when absent
   */
  @Inject
  public ConfigBasedTopologySpecFactory(Config config, Optional<Logger> log) {
    Preconditions.checkNotNull(config, "Config should not be null");
    _log = log.isPresent() ? log.get() : LoggerFactory.getLogger(getClass());
    _config = config;
    _aliasResolver = new ClassAliasResolver<>(SpecExecutor.class);
  }

  /**
   * Builds the configured topologies.
   * @return one {@link TopologySpec} per configured topology name; empty when none are configured
   * @throws IllegalArgumentException if a listed topology has no factory descriptor in config
   * @throws RuntimeException if a {@link SpecExecutor} cannot be instantiated
   */
  @Override
  public Collection<TopologySpec> getTopologies() {
    if (!_config.hasPath(ServiceConfigKeys.TOPOLOGY_FACTORY_TOPOLOGY_NAMES_KEY)) {
      // Typed empty list instead of the raw-typed Collections.EMPTY_LIST constant.
      return Collections.emptyList();
    }
    Collection<TopologySpec> topologySpecs = Lists.newArrayList();
    Collection<String> topologyNames = SPLIT_BY_COMMA.splitToList(
        _config.getString(ServiceConfigKeys.TOPOLOGY_FACTORY_TOPOLOGY_NAMES_KEY));

    for (String topologyName : topologyNames) {
      Preconditions.checkArgument(_config.hasPath(ServiceConfigKeys.TOPOLOGY_FACTORY_PREFIX + topologyName),
          "Config does not contain Topology Factory descriptor for Topology " + topologyName);
      Config topologyConfig = _config.getConfig(ServiceConfigKeys.TOPOLOGY_FACTORY_PREFIX + topologyName);
      String description = ConfigUtils.getString(topologyConfig, ServiceConfigKeys.TOPOLOGYSPEC_DESCRIPTION_KEY, "NA");
      String version = ConfigUtils.getString(topologyConfig, ServiceConfigKeys.TOPOLOGYSPEC_VERSION_KEY, "-1");

      String specExecutorClass = ServiceConfigKeys.DEFAULT_SPEC_EXECUTOR;
      if (topologyConfig.hasPath(ServiceConfigKeys.SPEC_EXECUTOR_KEY)) {
        specExecutorClass = topologyConfig.getString(ServiceConfigKeys.SPEC_EXECUTOR_KEY);
      }
      SpecExecutor specExecutor;
      try {
        // Parameterized logging instead of string concatenation.
        _log.info("Using SpecProducer class name/alias {}", specExecutorClass);
        specExecutor = (SpecExecutor) ConstructorUtils
            .invokeConstructor(Class.forName(_aliasResolver
                .resolve(specExecutorClass)), topologyConfig);
      } catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException | InstantiationException
          | ClassNotFoundException e) {
        // Unwrap the reflective wrapper so the real constructor failure surfaces, when available.
        if (e.getCause() != null) {
          throw new RuntimeException(e.getCause());
        } else {
          throw new RuntimeException(e);
        }
      }

      TopologySpec.Builder topologySpecBuilder = TopologySpec
          .builder(topologyConfig.getString(ServiceConfigKeys.TOPOLOGYSPEC_URI_KEY))
          .withConfig(topologyConfig)
          .withDescription(description)
          .withVersion(version)
          .withSpecExecutor(specExecutor);
      topologySpecs.add(topologySpecBuilder.build());
    }
    return topologySpecs;
  }
}
3,829
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/topology/TopologySpecFactory.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.modules.topology;

import java.util.Collection;

import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.runtime.api.TopologySpec;


/***
 * A {@link TopologySpec} Factory that creates or generates the {@link TopologySpec} to be used.
 */
@Alpha
public interface TopologySpecFactory {

  /***
   * Create or generate {@link TopologySpec}s.
   * @return Collection of {@link TopologySpec}s; implementations may return an empty collection
   *         when no topologies are configured.
   */
  Collection<TopologySpec> getTopologies();
}
3,830
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/core/ControllerUserDefinedMessageHandlerFactory.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.modules.core;

import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Collections;
import java.util.List;
import java.util.Properties;

import org.apache.helix.NotificationContext;
import org.apache.helix.messaging.handling.HelixTaskResult;
import org.apache.helix.messaging.handling.MessageHandler;
import org.apache.helix.messaging.handling.MultiTypeMessageHandlerFactory;
import org.apache.helix.model.Message;

import lombok.AllArgsConstructor;
import lombok.extern.slf4j.Slf4j;

import org.apache.gobblin.runtime.api.FlowSpec;
import org.apache.gobblin.runtime.api.Spec;
import org.apache.gobblin.service.FlowConfig;
import org.apache.gobblin.service.FlowConfigResourceLocalHandler;
import org.apache.gobblin.service.FlowConfigsResourceHandler;
import org.apache.gobblin.service.FlowId;
import org.apache.gobblin.service.ServiceConfigKeys;
import org.apache.gobblin.service.modules.restli.FlowConfigUtils;
import org.apache.gobblin.service.modules.scheduler.GobblinServiceJobScheduler;


/**
 * A custom {@link MultiTypeMessageHandlerFactory} for {@link org.apache.gobblin.service.modules.core.ControllerUserDefinedMessageHandlerFactory}s that
 * handle messages of type {@link org.apache.helix.model.Message.MessageType#USER_DEFINE_MSG}.
 */
@AllArgsConstructor
class ControllerUserDefinedMessageHandlerFactory implements MultiTypeMessageHandlerFactory {
  // When true, flow-catalog I/O was already committed locally on the standby node and the
  // controller only needs to update scheduling; otherwise the resource handler does both.
  private final boolean flowCatalogLocalCommit;
  private final GobblinServiceJobScheduler jobScheduler;
  private final FlowConfigsResourceHandler resourceHandler;
  private final String serviceName;

  @Override
  public MessageHandler createHandler(Message message, NotificationContext context) {
    return new ControllerUserDefinedMessageHandler(message, context, serviceName, flowCatalogLocalCommit, jobScheduler, resourceHandler);
  }

  @Override
  public String getMessageType() {
    return Message.MessageType.USER_DEFINE_MSG.toString();
  }

  @Override  // was missing; this implements MultiTypeMessageHandlerFactory#getMessageTypes
  public List<String> getMessageTypes() {
    return Collections.singletonList(getMessageType());
  }

  @Override
  public void reset() {
    // No per-factory state to reset.
  }

  /**
   * A custom {@link MessageHandler} for handling user-defined messages to the controller.
   */
  @Slf4j
  private static class ControllerUserDefinedMessageHandler extends MessageHandler {
    private final boolean flowCatalogLocalCommit;
    private final GobblinServiceJobScheduler jobScheduler;
    private final FlowConfigsResourceHandler resourceHandler;
    private final String serviceName;

    public ControllerUserDefinedMessageHandler(Message message, NotificationContext context, String serviceName,
        boolean flowCatalogLocalCommit, GobblinServiceJobScheduler scheduler, FlowConfigsResourceHandler resourceHandler) {
      super(message, context);
      this.serviceName = serviceName;
      this.flowCatalogLocalCommit = flowCatalogLocalCommit;
      this.jobScheduler = scheduler;
      this.resourceHandler = resourceHandler;
    }

    /**
     * Method to handle add flow config message forwarded by Helix (Standby) node.
     * In load balance mode, the FlowCatalog I/O was handled on standby when receiving Restli, so only need to handle
     * {@link org.apache.gobblin.runtime.api.SpecCatalogListener#onAddSpec(Spec)} part.
     * Otherwise, we have to handle both FlowCatalog I/O and {@link org.apache.gobblin.runtime.api.SpecCatalogListener#onAddSpec(Spec)}.
     *
     * Please refer to {@link FlowConfigResourceLocalHandler#createFlowConfig(FlowConfig)}. It will handle both FlowCatalog I/O and
     * {@link org.apache.gobblin.runtime.api.SpecCatalogListener#onAddSpec(Spec)} in non-balance mode.
     */
    private void handleAdd(String msg) throws IOException {
      FlowConfig config = FlowConfigUtils.deserializeFlowConfig(msg);
      if (this.flowCatalogLocalCommit) {
        // in balance mode, flow spec is already added in flow catalog on standby node.
        FlowSpec flowSpec = FlowConfigResourceLocalHandler.createFlowSpecForConfig(config);
        log.info("Only handle add {} scheduling because flow catalog is committed locally on standby.", flowSpec);
        jobScheduler.onAddSpec(flowSpec);
      } else {
        resourceHandler.createFlowConfig(config);
      }
    }

    /**
     * Method to handle update flow config message forwarded by Helix (Standby) node.
     * In load balance mode, the FlowCatalog I/O was handled on standby when receiving Restli, so only need to handle
     * {@link org.apache.gobblin.runtime.api.SpecCatalogListener#onUpdateSpec(Spec)} part.
     * Otherwise, we have to handle both FlowCatalog I/O and {@link org.apache.gobblin.runtime.api.SpecCatalogListener#onUpdateSpec(Spec)}.
     *
     * Please refer to {@link FlowConfigResourceLocalHandler#updateFlowConfig(FlowId, FlowConfig)}. It will handle both FlowCatalog I/O and
     * {@link org.apache.gobblin.runtime.api.SpecCatalogListener#onUpdateSpec(Spec)} in non-balance mode.
     */
    private void handleUpdate(String msg) throws IOException {
      FlowConfig config = FlowConfigUtils.deserializeFlowConfig(msg);
      if (flowCatalogLocalCommit) {
        // in balance mode, flow spec is already updated in flow catalog on standby node.
        FlowSpec flowSpec = FlowConfigResourceLocalHandler.createFlowSpecForConfig(config);
        log.info("Only handle update {} scheduling because flow catalog is committed locally on standby.", flowSpec);
        jobScheduler.onUpdateSpec(flowSpec);
      } else {
        resourceHandler.updateFlowConfig(config.getId(), config);
      }
    }

    /**
     * Method to handle delete flow config message forwarded by Helix (Standby) node.
     * In load balance mode, the FlowCatalog I/O was handled on standby when receiving Restli, so only need to handle
     * {@link org.apache.gobblin.runtime.api.SpecCatalogListener#onDeleteSpec(URI, String, Properties)} part.
     * Otherwise, we have to handle both FlowCatalog I/O and {@link org.apache.gobblin.runtime.api.SpecCatalogListener#onDeleteSpec(URI, String, Properties)}.
     *
     * Please refer to {@link FlowConfigResourceLocalHandler#deleteFlowConfig(FlowId, Properties)}. It will handle both FlowCatalog I/O and
     * {@link org.apache.gobblin.runtime.api.SpecCatalogListener#onDeleteSpec(URI, String, Properties)} in non-balance mode.
     */
    private void handleDelete(String msg) throws IOException {
      try {
        FlowId id = FlowConfigUtils.deserializeFlowId(msg);
        if (flowCatalogLocalCommit) {
          // in balance mode, flow spec is already deleted in flow catalog on standby node.
          URI flowUri = FlowSpec.Utils.createFlowSpecUri(id);
          // fixed: this previously logged "Only handle update ..." in the delete path
          log.info("Only handle delete {} scheduling because flow catalog is committed locally on standby.", flowUri);
          jobScheduler.onDeleteSpec(flowUri, FlowSpec.Builder.DEFAULT_VERSION);
        } else {
          resourceHandler.deleteFlowConfig(id, new Properties());
        }
      } catch (URISyntaxException e) {
        throw new IOException(e);
      }
    }

    @Override
    public HelixTaskResult handleMessage() throws InterruptedException {
      if (jobScheduler.isActive()) {
        // we want to make sure current node is in active state
        String msg = _message.getAttribute(Message.Attributes.INNER_MESSAGE);
        log.info("{} ControllerUserDefinedMessage received : {}, type {}", this.serviceName, msg, _message.getMsgSubType());
        try {
          if (_message.getMsgSubType().equals(ServiceConfigKeys.HELIX_FLOWSPEC_ADD)) {
            handleAdd(msg);
          } else if (_message.getMsgSubType().equals(ServiceConfigKeys.HELIX_FLOWSPEC_REMOVE)) {
            handleDelete(msg);
          } else if (_message.getMsgSubType().equals(ServiceConfigKeys.HELIX_FLOWSPEC_UPDATE)) {
            handleUpdate(msg);
          }
        } catch (IOException e) {
          log.error("Cannot process Helix message.", e);
          HelixTaskResult helixTaskResult = new HelixTaskResult();
          helixTaskResult.setSuccess(false);
          return helixTaskResult;
        }
      } else {
        String msg = _message.getAttribute(Message.Attributes.INNER_MESSAGE);
        log.error("ControllerUserDefinedMessage received but ignored due to not in active mode: {}, type {}", msg,
            _message.getMsgSubType());
      }
      HelixTaskResult helixTaskResult = new HelixTaskResult();
      helixTaskResult.setSuccess(true);
      return helixTaskResult;
    }

    @Override
    public void onError(Exception e, ErrorCode code, ErrorType type) {
      // Pass the exception as the final argument so SLF4J records the stack trace
      // (previously it was formatted into the message and the trace was lost).
      log.error("Failed to handle message with error code {}, error type {}", code, type, e);
    }
  }
}
3,831
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/core/D2Announcer.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.service.modules.core; /** * Interface for marking up/down D2 servers on gobblin service startup. This is only required if using delayed announcement. */ public interface D2Announcer { /** * Mark up this host's D2 server */ void markUpServer(); /** * Mark down this host's D2 server */ void markDownServer(); }
3,832
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/core/GobblinServiceGuiceModule.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.modules.core;

import java.util.Objects;

import org.apache.gobblin.runtime.api.DagActionStore;
import org.apache.gobblin.runtime.api.MultiActiveLeaseArbiter;
import org.apache.gobblin.runtime.api.MysqlMultiActiveLeaseArbiter;
import org.apache.gobblin.runtime.dag_action_store.MysqlDagActionStore;
import org.apache.gobblin.service.modules.orchestration.FlowTriggerHandler;
import org.apache.gobblin.service.modules.orchestration.UserQuotaManager;
import org.apache.gobblin.service.modules.restli.GobblinServiceFlowConfigV2ResourceHandlerWithWarmStandby;
import org.apache.gobblin.service.modules.restli.GobblinServiceFlowExecutionResourceHandlerWithWarmStandby;
import org.apache.gobblin.service.modules.utils.SharedFlowMetricsSingleton;
import org.apache.gobblin.service.monitoring.DagActionStoreChangeMonitor;
import org.apache.gobblin.service.monitoring.DagActionStoreChangeMonitorFactory;
import org.apache.gobblin.service.monitoring.GitConfigMonitor;
import org.apache.helix.HelixManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.common.collect.Lists;
import com.google.common.eventbus.EventBus;
import com.google.inject.Binder;
import com.google.inject.Inject;
import com.google.inject.Injector;
import com.google.inject.Module;
import com.google.inject.Provider;
import com.google.inject.multibindings.OptionalBinder;
import com.google.inject.name.Names;
import com.typesafe.config.Config;

import javax.inject.Singleton;

import org.apache.gobblin.restli.EmbeddedRestliServer;
import org.apache.gobblin.runtime.api.GobblinInstanceEnvironment;
import org.apache.gobblin.runtime.instance.StandardGobblinInstanceLauncher;
import org.apache.gobblin.runtime.spec_catalog.FlowCatalog;
import org.apache.gobblin.runtime.spec_catalog.TopologyCatalog;
import org.apache.gobblin.runtime.troubleshooter.InMemoryMultiContextIssueRepository;
import org.apache.gobblin.runtime.troubleshooter.JobIssueEventHandler;
import org.apache.gobblin.runtime.troubleshooter.MultiContextIssueRepository;
import org.apache.gobblin.scheduler.SchedulerService;
import org.apache.gobblin.service.FlowConfigResourceLocalHandler;
import org.apache.gobblin.service.FlowConfigV2ResourceLocalHandler;
import org.apache.gobblin.service.FlowConfigsResource;
import org.apache.gobblin.service.FlowConfigsResourceHandler;
import org.apache.gobblin.service.FlowConfigsV2Resource;
import org.apache.gobblin.service.FlowConfigsV2ResourceHandler;
import org.apache.gobblin.service.FlowExecutionResource;
import org.apache.gobblin.service.FlowExecutionResourceHandler;
import org.apache.gobblin.service.FlowExecutionResourceLocalHandler;
import org.apache.gobblin.service.FlowStatusResource;
import org.apache.gobblin.service.GroupOwnershipService;
import org.apache.gobblin.service.NoopRequesterService;
import org.apache.gobblin.service.RequesterService;
import org.apache.gobblin.service.ServiceConfigKeys;
import org.apache.gobblin.service.modules.db.ServiceDatabaseManager;
import org.apache.gobblin.service.modules.db.ServiceDatabaseProvider;
import org.apache.gobblin.service.modules.db.ServiceDatabaseProviderImpl;
import org.apache.gobblin.service.modules.orchestration.DagManager;
import org.apache.gobblin.service.modules.orchestration.Orchestrator;
import org.apache.gobblin.service.modules.restli.GobblinServiceFlowConfigResourceHandler;
import org.apache.gobblin.service.modules.restli.GobblinServiceFlowConfigV2ResourceHandler;
import org.apache.gobblin.service.modules.restli.GobblinServiceFlowExecutionResourceHandler;
import org.apache.gobblin.service.modules.scheduler.GobblinServiceJobScheduler;
import org.apache.gobblin.service.modules.topology.TopologySpecFactory;
import org.apache.gobblin.service.modules.troubleshooter.MySqlMultiContextIssueRepository;
import org.apache.gobblin.service.modules.utils.HelixUtils;
import org.apache.gobblin.runtime.util.InjectionNames;
import org.apache.gobblin.service.monitoring.FlowStatusGenerator;
import org.apache.gobblin.service.monitoring.FsJobStatusRetriever;
import org.apache.gobblin.service.monitoring.JobStatusRetriever;
import org.apache.gobblin.service.monitoring.KafkaJobStatusMonitor;
import org.apache.gobblin.service.monitoring.KafkaJobStatusMonitorFactory;
import org.apache.gobblin.service.monitoring.SpecStoreChangeMonitor;
import org.apache.gobblin.service.monitoring.SpecStoreChangeMonitorFactory;
import org.apache.gobblin.util.ClassAliasResolver;
import org.apache.gobblin.util.ConfigUtils;

/**
 * Guice module that wires together all services and components of the Gobblin service, driven by
 * the feature flags exposed on {@link GobblinServiceConfiguration} (scheduler, DagManager, Helix,
 * warm standby, multi-active scheduler, etc.).
 */
public class GobblinServiceGuiceModule implements Module {
  private static final Logger LOGGER = LoggerFactory.getLogger(GobblinServiceGuiceModule.class);

  // Config key selecting the JobStatusRetriever implementation; defaults to FsJobStatusRetriever.
  private static final String JOB_STATUS_RETRIEVER_CLASS_KEY = "jobStatusRetriever.class";

  GobblinServiceConfiguration serviceConfig;

  public GobblinServiceGuiceModule(GobblinServiceConfiguration serviceConfig) {
    this.serviceConfig = Objects.requireNonNull(serviceConfig);
  }

  @Override
  public void configure(Binder binder) {
    LOGGER.info("Configuring bindings for the following service settings: {}", serviceConfig);

    // In the current code base, we frequently inject classes instead of interfaces
    // As a result, even when the binding is missing, Guice will create an instance of the
    // the class and inject it. This interferes with disabling of different services and
    // components, because without explicit bindings they will get instantiated anyway.
    binder.requireExplicitBindings();

    // Optional binder will find the existing binding for T and create additional binding for Optional<T>.
    // If none of the specific class binding exist, optional will be "absent".
    OptionalBinder.newOptionalBinder(binder, Logger.class);
    binder.bind(Logger.class).toInstance(LoggerFactory.getLogger(GobblinServiceManager.class));

    binder.bind(Config.class).toInstance(serviceConfig.getInnerConfig());
    binder.bind(GobblinServiceConfiguration.class).toInstance(serviceConfig);

    // Used by TopologyCatalog and FlowCatalog
    GobblinInstanceEnvironment gobblinInstanceEnvironment = StandardGobblinInstanceLauncher.builder()
        .withLog(LoggerFactory.getLogger(GobblinServiceManager.class))
        .setInstrumentationEnabled(true)
        .withSysConfig(serviceConfig.getInnerConfig())
        .build();
    binder.bind(GobblinInstanceEnvironment.class).toInstance(gobblinInstanceEnvironment);

    binder.bind(EventBus.class)
        .annotatedWith(Names.named(GobblinServiceManager.SERVICE_EVENT_BUS_NAME))
        .toInstance(new EventBus(GobblinServiceManager.class.getSimpleName()));

    binder.bindConstant().annotatedWith(Names.named(InjectionNames.SERVICE_NAME)).to(serviceConfig.getServiceName());
    binder.bindConstant()
        .annotatedWith(Names.named(InjectionNames.FORCE_LEADER))
        .to(ConfigUtils.getBoolean(serviceConfig.getInnerConfig(), ServiceConfigKeys.FORCE_LEADER,
            ServiceConfigKeys.DEFAULT_FORCE_LEADER));
    binder.bindConstant()
        .annotatedWith(Names.named(InjectionNames.FLOW_CATALOG_LOCAL_COMMIT))
        .to(serviceConfig.isFlowCatalogLocalCommit());
    binder.bindConstant()
        .annotatedWith(Names.named(InjectionNames.WARM_STANDBY_ENABLED))
        .to(serviceConfig.isWarmStandbyEnabled());
    binder.bindConstant()
        .annotatedWith(Names.named(InjectionNames.MULTI_ACTIVE_SCHEDULER_ENABLED))
        .to(serviceConfig.isMultiActiveSchedulerEnabled());

    // Warm standby routes Restli resource handling through the *WithWarmStandby handlers and
    // requires a MySQL-backed DagActionStore.
    OptionalBinder.newOptionalBinder(binder, DagActionStore.class);
    if (serviceConfig.isWarmStandbyEnabled()) {
      binder.bind(DagActionStore.class).to(MysqlDagActionStore.class);
      binder.bind(FlowConfigsResourceHandler.class).to(GobblinServiceFlowConfigResourceHandler.class);
      binder.bind(FlowConfigsV2ResourceHandler.class).to(GobblinServiceFlowConfigV2ResourceHandlerWithWarmStandby.class);
      binder.bind(FlowExecutionResourceHandler.class).to(GobblinServiceFlowExecutionResourceHandlerWithWarmStandby.class);
    } else {
      binder.bind(FlowConfigsResourceHandler.class).to(GobblinServiceFlowConfigResourceHandler.class);
      binder.bind(FlowConfigsV2ResourceHandler.class).to(GobblinServiceFlowConfigV2ResourceHandler.class);
      binder.bind(FlowExecutionResourceHandler.class).to(GobblinServiceFlowExecutionResourceHandler.class);
    }

    // Multi-active scheduling relies on a MySQL-backed lease arbiter to coordinate triggers
    // across concurrently-active schedulers.
    OptionalBinder.newOptionalBinder(binder, MultiActiveLeaseArbiter.class);
    OptionalBinder.newOptionalBinder(binder, FlowTriggerHandler.class);
    if (serviceConfig.isMultiActiveSchedulerEnabled()) {
      binder.bind(MultiActiveLeaseArbiter.class).to(MysqlMultiActiveLeaseArbiter.class);
      binder.bind(FlowTriggerHandler.class);
    }

    binder.bind(FlowConfigsResource.class);
    binder.bind(FlowConfigsV2Resource.class);
    binder.bind(FlowStatusResource.class);
    binder.bind(FlowExecutionResource.class);
    binder.bind(FlowConfigResourceLocalHandler.class);
    binder.bind(FlowConfigV2ResourceLocalHandler.class);
    binder.bind(FlowExecutionResourceLocalHandler.class);

    binder.bindConstant().annotatedWith(Names.named(FlowConfigsResource.INJECT_READY_TO_USE)).to(Boolean.TRUE);
    binder.bindConstant().annotatedWith(Names.named(FlowConfigsV2Resource.INJECT_READY_TO_USE)).to(Boolean.TRUE);

    binder.bind(RequesterService.class)
        .to(NoopRequesterService.class);

    binder.bind(SharedFlowMetricsSingleton.class);

    OptionalBinder.newOptionalBinder(binder, TopologyCatalog.class);
    if (serviceConfig.isTopologyCatalogEnabled()) {
      binder.bind(TopologyCatalog.class);
    }

    if (serviceConfig.isTopologySpecFactoryEnabled()) {
      binder.bind(TopologySpecFactory.class)
          .to(getClassByNameOrAlias(TopologySpecFactory.class, serviceConfig.getInnerConfig(),
              ServiceConfigKeys.TOPOLOGYSPEC_FACTORY_KEY, ServiceConfigKeys.DEFAULT_TOPOLOGY_SPEC_FACTORY));
    }

    OptionalBinder.newOptionalBinder(binder, DagManager.class);
    if (serviceConfig.isDagManagerEnabled()) {
      binder.bind(DagManager.class);
    }

    OptionalBinder.newOptionalBinder(binder, HelixManager.class);
    if (serviceConfig.isHelixManagerEnabled()) {
      binder.bind(HelixManager.class)
          .toInstance(buildHelixManager(serviceConfig.getInnerConfig(),
              serviceConfig.getInnerConfig().getString(ServiceConfigKeys.ZK_CONNECTION_STRING_KEY)));
    } else {
      LOGGER.info("No ZooKeeper connection string. Running in single instance mode.");
    }

    OptionalBinder.newOptionalBinder(binder, FlowCatalog.class);
    if (serviceConfig.isFlowCatalogEnabled()) {
      binder.bind(FlowCatalog.class);
    }

    if (serviceConfig.isJobStatusMonitorEnabled()) {
      binder.bind(KafkaJobStatusMonitor.class).toProvider(KafkaJobStatusMonitorFactory.class).in(Singleton.class);
    }

    binder.bind(FlowStatusGenerator.class);

    if (serviceConfig.isSchedulerEnabled()) {
      binder.bind(Orchestrator.class);
      binder.bind(SchedulerService.class);
      binder.bind(GobblinServiceJobScheduler.class);
      OptionalBinder.newOptionalBinder(binder, UserQuotaManager.class);
      binder.bind(UserQuotaManager.class)
          .to(getClassByNameOrAlias(UserQuotaManager.class, serviceConfig.getInnerConfig(),
              ServiceConfigKeys.QUOTA_MANAGER_CLASS, ServiceConfigKeys.DEFAULT_QUOTA_MANAGER));
    }

    if (serviceConfig.isGitConfigMonitorEnabled()) {
      binder.bind(GitConfigMonitor.class);
    }

    binder.bind(GroupOwnershipService.class)
        .to(getClassByNameOrAlias(GroupOwnershipService.class, serviceConfig.getInnerConfig(),
            ServiceConfigKeys.GROUP_OWNERSHIP_SERVICE_CLASS, ServiceConfigKeys.DEFAULT_GROUP_OWNERSHIP_SERVICE));

    binder.bind(JobStatusRetriever.class)
        .to(getClassByNameOrAlias(JobStatusRetriever.class, serviceConfig.getInnerConfig(),
            JOB_STATUS_RETRIEVER_CLASS_KEY, FsJobStatusRetriever.class.getName()));

    if (serviceConfig.isRestLIServerEnabled()) {
      binder.bind(EmbeddedRestliServer.class).toProvider(EmbeddedRestliServerProvider.class);
    }

    if (serviceConfig.isWarmStandbyEnabled()) {
      binder.bind(SpecStoreChangeMonitor.class).toProvider(SpecStoreChangeMonitorFactory.class).in(Singleton.class);
      binder.bind(DagActionStoreChangeMonitor.class).toProvider(DagActionStoreChangeMonitorFactory.class).in(Singleton.class);
    }

    binder.bind(GobblinServiceManager.class);

    binder.bind(ServiceDatabaseProvider.class).to(ServiceDatabaseProviderImpl.class);
    binder.bind(ServiceDatabaseProviderImpl.Configuration.class);
    binder.bind(ServiceDatabaseManager.class);

    binder.bind(MultiContextIssueRepository.class)
        .to(getClassByNameOrAlias(MultiContextIssueRepository.class, serviceConfig.getInnerConfig(),
            ServiceConfigKeys.ISSUE_REPO_CLASS, InMemoryMultiContextIssueRepository.class.getName()));
    binder.bind(MySqlMultiContextIssueRepository.Configuration.class);
    binder.bind(InMemoryMultiContextIssueRepository.Configuration.class);
    binder.bind(JobIssueEventHandler.class);
    binder.bind(D2Announcer.class).to(NoopD2Announcer.class);

    LOGGER.info("Bindings configured");
  }

  /**
   * Builds a {@link HelixManager} for the cluster named in the config, creating the Helix cluster
   * first if it does not already exist (never overwriting an existing one).
   */
  protected HelixManager buildHelixManager(Config config, String zkConnectionString) {
    String helixClusterName = config.getString(ServiceConfigKeys.HELIX_CLUSTER_NAME_KEY);
    String helixInstanceName = HelixUtils.buildHelixInstanceName(config, GobblinServiceManager.class.getSimpleName());

    LOGGER.info(
        "Creating Helix cluster if not already present [overwrite = false]: " + zkConnectionString);
    HelixUtils.createGobblinHelixCluster(zkConnectionString, helixClusterName, false);

    return HelixUtils.buildHelixManager(helixInstanceName, helixClusterName, zkConnectionString);
  }

  /**
   * Resolves a class from config by fully-qualified name or registered alias, falling back to
   * {@code defaultClass} when the property is unset.
   *
   * @throws RuntimeException if the resolved name cannot be loaded
   */
  protected static <T> Class<? extends T> getClassByNameOrAlias(Class<T> baseClass, Config config,
      String classPropertyName, String defaultClass) {
    String className = ConfigUtils.getString(config, classPropertyName, defaultClass);
    ClassAliasResolver<T> aliasResolver = new ClassAliasResolver<>(baseClass);
    try {
      // Class.forName returns Class<?>; the cast is unavoidable here and scoped to this statement.
      @SuppressWarnings("unchecked")
      Class<? extends T> resolved = (Class<? extends T>) Class.forName(aliasResolver.resolve(className));
      return resolved;
    } catch (ClassNotFoundException e) {
      throw new RuntimeException(
          "Cannot resolve the class '" + className + "'. Check that property '" + classPropertyName
              + "' points to a valid class name or alias.", e);
    }
  }

  /**
   * Provider for the embedded Restli server, deferred so that the injector can supply resources
   * lazily after all bindings are configured.
   */
  public static class EmbeddedRestliServerProvider implements Provider<EmbeddedRestliServer> {
    Injector injector;

    @Inject
    public EmbeddedRestliServerProvider(Injector injector) {
      this.injector = injector;
    }

    @Override
    public EmbeddedRestliServer get() {
      return EmbeddedRestliServer.builder()
          .resources(Lists.newArrayList(FlowConfigsResource.class, FlowConfigsV2Resource.class))
          .injector(injector)
          .build();
    }
  }
}
3,833
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/core/NoopD2Announcer.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.service.modules.core; public class NoopD2Announcer implements D2Announcer { public void markUpServer() {} public void markDownServer() {} }
3,834
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/core/GobblinServiceManager.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.service.modules.core; import java.io.IOException; import java.net.URI; import java.util.Collection; import java.util.Map; import java.util.Objects; import java.util.Properties; import java.util.concurrent.TimeUnit; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.DefaultParser; import org.apache.commons.cli.HelpFormatter; import org.apache.commons.cli.Options; import org.apache.commons.cli.ParseException; import org.apache.commons.lang3.ObjectUtils; import org.apache.gobblin.service.modules.orchestration.UserQuotaManager; import org.apache.gobblin.service.monitoring.DagActionStoreChangeMonitor; import org.apache.gobblin.service.monitoring.GitConfigMonitor; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.helix.HelixManager; import org.apache.helix.NotificationContext; import org.apache.helix.api.listeners.ControllerChangeListener; import org.apache.helix.model.Message; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.codahale.metrics.MetricRegistry; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Optional; 
import com.google.common.base.Throwables; import com.google.common.collect.ImmutableList; import com.google.common.collect.Lists; import com.google.common.collect.Maps; import com.google.common.eventbus.EventBus; import com.google.inject.Guice; import com.google.inject.Inject; import com.google.inject.Injector; import com.google.inject.Stage; import com.linkedin.data.template.StringMap; import com.linkedin.r2.RemoteInvocationException; import com.typesafe.config.Config; import com.typesafe.config.ConfigFactory; import javax.annotation.Nullable; import javax.inject.Named; import lombok.Getter; import lombok.Setter; import org.apache.gobblin.annotation.Alpha; import org.apache.gobblin.configuration.ConfigurationKeys; import org.apache.gobblin.instrumented.Instrumented; import org.apache.gobblin.instrumented.StandardMetricsBridge; import org.apache.gobblin.metrics.ContextAwareGauge; import org.apache.gobblin.metrics.ContextAwareHistogram; import org.apache.gobblin.metrics.MetricContext; import org.apache.gobblin.metrics.ServiceMetricNames; import org.apache.gobblin.restli.EmbeddedRestliServer; import org.apache.gobblin.runtime.api.TopologySpec; import org.apache.gobblin.runtime.app.ApplicationException; import org.apache.gobblin.runtime.app.ApplicationLauncher; import org.apache.gobblin.runtime.app.ServiceBasedAppLauncher; import org.apache.gobblin.runtime.spec_catalog.FlowCatalog; import org.apache.gobblin.runtime.spec_catalog.TopologyCatalog; import org.apache.gobblin.runtime.troubleshooter.MultiContextIssueRepository; import org.apache.gobblin.scheduler.SchedulerService; import org.apache.gobblin.service.FlowConfig; import org.apache.gobblin.service.FlowConfigClient; import org.apache.gobblin.service.FlowConfigsResource; import org.apache.gobblin.service.FlowConfigsResourceHandler; import org.apache.gobblin.service.FlowConfigsV2Resource; import org.apache.gobblin.service.FlowConfigsV2ResourceHandler; import org.apache.gobblin.service.FlowExecutionResourceHandler; 
import org.apache.gobblin.service.FlowId; import org.apache.gobblin.service.GroupOwnershipService; import org.apache.gobblin.service.Schedule; import org.apache.gobblin.service.ServiceConfigKeys; import org.apache.gobblin.service.modules.db.ServiceDatabaseManager; import org.apache.gobblin.service.modules.orchestration.DagManager; import org.apache.gobblin.service.modules.orchestration.Orchestrator; import org.apache.gobblin.service.modules.scheduler.GobblinServiceJobScheduler; import org.apache.gobblin.service.modules.topology.TopologySpecFactory; import org.apache.gobblin.service.monitoring.FlowStatusGenerator; import org.apache.gobblin.service.monitoring.KafkaJobStatusMonitor; import org.apache.gobblin.service.monitoring.SpecStoreChangeMonitor; import org.apache.gobblin.util.ConfigUtils; @Alpha public class GobblinServiceManager implements ApplicationLauncher, StandardMetricsBridge { // Command line options // These two options are required to launch GobblinServiceManager. public static final String SERVICE_NAME_OPTION_NAME = "service_name"; public static final String SERVICE_ID_OPTION_NAME = "service_id"; public static final String SERVICE_EVENT_BUS_NAME = "GobblinServiceManagerEventBus"; private static final Logger LOGGER = LoggerFactory.getLogger(GobblinServiceManager.class); protected final ServiceBasedAppLauncher serviceLauncher; private volatile boolean stopInProgress = false; // An EventBus used for communications between services running in the ApplicationMaster @Inject @Named(SERVICE_EVENT_BUS_NAME) @Getter protected EventBus eventBus; protected final FileSystem fs; protected final Path serviceWorkDir; @Getter protected final GobblinServiceConfiguration configuration; @Inject(optional = true) protected TopologyCatalog topologyCatalog; @Inject(optional = true) @Getter protected FlowCatalog flowCatalog; @Inject(optional = true) @Getter protected GobblinServiceJobScheduler scheduler; @Inject @Getter protected FlowConfigsResourceHandler resourceHandler; 
@Inject @Getter protected FlowConfigsV2ResourceHandler v2ResourceHandler; @Inject @Getter protected FlowExecutionResourceHandler flowExecutionResourceHandler; @Inject @Getter protected FlowStatusGenerator flowStatusGenerator; @Inject @Getter protected GroupOwnershipService groupOwnershipService; @Inject @Getter private Injector injector; protected boolean flowCatalogLocalCommit; @Inject(optional = true) @Getter protected Orchestrator orchestrator; @Inject(optional = true) protected EmbeddedRestliServer restliServer; @Inject(optional = true) protected TopologySpecFactory topologySpecFactory; @Inject protected SchedulerService schedulerService; @Inject(optional = true) protected Optional<HelixManager> helixManager; @Inject(optional = true) protected GitConfigMonitor gitConfigMonitor; @Inject(optional = true) @Getter protected DagManager dagManager; @Inject(optional = true) protected KafkaJobStatusMonitor jobStatusMonitor; @Inject protected MultiContextIssueRepository issueRepository; @Inject protected ServiceDatabaseManager databaseManager; @Inject(optional=true) @Getter protected Optional<UserQuotaManager> quotaManager; protected Optional<HelixLeaderState> helixLeaderGauges; @Inject(optional = true) protected D2Announcer d2Announcer; private final MetricContext metricContext; private final Metrics metrics; @Inject(optional = true) protected SpecStoreChangeMonitor specStoreChangeMonitor; @Inject(optional = true) protected DagActionStoreChangeMonitor dagActionStoreChangeMonitor; @Inject protected GobblinServiceManager(GobblinServiceConfiguration configuration) throws Exception { this.configuration = Objects.requireNonNull(configuration); Properties appLauncherProperties = ConfigUtils.configToProperties( ConfigUtils.getConfigOrEmpty(configuration.getInnerConfig(), ServiceConfigKeys.GOBBLIN_SERVICE_APP_LAUNCHER_PREFIX) .withFallback(configuration.getInnerConfig())); // Done to preserve backwards compatibility with the previously hard-coded timeout of 5 minutes if 
(!appLauncherProperties.contains(ServiceBasedAppLauncher.APP_STOP_TIME_SECONDS)) { appLauncherProperties.setProperty(ServiceBasedAppLauncher.APP_STOP_TIME_SECONDS, Long.toString(300)); } this.metricContext = Instrumented.getMetricContext(ConfigUtils.configToState( configuration.getInnerConfig()), this.getClass()); this.metrics = new Metrics(this.metricContext, configuration.getInnerConfig()); this.serviceLauncher = new ServiceBasedAppLauncher(appLauncherProperties, configuration.getServiceName()); this.fs = buildFileSystem(configuration.getInnerConfig()); this.serviceWorkDir = ObjectUtils.firstNonNull(configuration.getServiceWorkDir(), getServiceWorkDirPath(this.fs, configuration.getServiceName(), configuration.getServiceId())); initializeHelixLeaderGauge(); } public static GobblinServiceManager create(String serviceName, String serviceId, Config config, @Nullable Path serviceWorkDir) { return create(new GobblinServiceConfiguration(serviceName, serviceId, config, serviceWorkDir)); } public static GobblinServiceManager create(GobblinServiceConfiguration serviceConfiguration) { GobblinServiceGuiceModule guiceModule = new GobblinServiceGuiceModule(serviceConfiguration); Injector injector = Guice.createInjector(Stage.PRODUCTION, guiceModule); return injector.getInstance(GobblinServiceManager.class); } public URI getRestLiServerListeningURI() { if (restliServer == null) { throw new IllegalStateException("Restli server does not exist because it was not configured or disabled"); } return restliServer.getListeningURI(); } private void initializeHelixLeaderGauge() { helixLeaderGauges = Optional.of(new HelixLeaderState()); String helixLeaderStateGaugeName = MetricRegistry.name(ServiceMetricNames.GOBBLIN_SERVICE_PREFIX, ServiceMetricNames.HELIX_LEADER_STATE); ContextAwareGauge<Integer> gauge = metricContext.newContextAwareGauge(helixLeaderStateGaugeName, () -> helixLeaderGauges.get().state.getValue()); metricContext.register(helixLeaderStateGaugeName, gauge); } 
/**
 * @return true when this instance is the leader: either no Helix manager is configured
 * (standalone mode, hence trivially leader) or Helix reports this instance as cluster leader.
 */
@VisibleForTesting
public boolean isLeader() {
  // If helix manager is absent, then this standalone instance hence leader
  // .. else check if this master of cluster
  return !helixManager.isPresent() || helixManager.get().isLeader();
}

/** Builds the FileSystem from the configured FS URI, or the Hadoop default when none is set. */
private FileSystem buildFileSystem(Config config) throws IOException {
  return config.hasPath(ConfigurationKeys.FS_URI_KEY) ? FileSystem
      .get(URI.create(config.getString(ConfigurationKeys.FS_URI_KEY)), new Configuration())
      : FileSystem.get(new Configuration());
}

/** Default work dir: {@code <fs home>/<serviceName>/<serviceId>}. */
private Path getServiceWorkDirPath(FileSystem fs, String serviceName, String serviceId) {
  return new Path(fs.getHomeDirectory(), serviceName + Path.SEPARATOR + serviceId);
}

/**
 * Handle leadership change. Activates scheduler, git monitor, DagManager and d2 announcement on
 * gaining leadership; deactivates them (scheduler only when the multi-active scheduler is off)
 * on losing it.
 * @param changeContext notification context
 */
private void handleLeadershipChange(NotificationContext changeContext) {
  if (this.helixManager.isPresent()) {
    if (this.helixManager.get().isLeader()) {
      LOGGER.info("Leader notification for {} HM.isLeader {}", this.helixManager.get().getInstanceName(),
          this.helixManager.get().isLeader());
      if (configuration.isSchedulerEnabled()) {
        LOGGER.info("Gobblin Service is now running in master instance mode, enabling Scheduler.");
        this.scheduler.setActive(true);
      }
      if (helixLeaderGauges.isPresent()) {
        helixLeaderGauges.get().setState(LeaderState.MASTER);
      }
      if (configuration.isGitConfigMonitorEnabled()) {
        this.gitConfigMonitor.setActive(true);
      }
      // TODO: surround by try/catch to disconnect from Helix and fail the leader transition if DagManager is not
      // transitioned properly
      if (configuration.isDagManagerEnabled()) {
        //Activate DagManager only if TopologyCatalog is initialized. If not; skip activation.
        if (this.topologyCatalog.getInitComplete().getCount() == 0) {
          this.dagManager.setActive(true);
          this.eventBus.register(this.dagManager);
        }
      }
      if (configuration.isOnlyAnnounceLeader()) {
        this.d2Announcer.markUpServer();
      }
    } else {
      LOGGER.info("Leader lost notification for {} HM.isLeader {}", this.helixManager.get().getInstanceName(),
          this.helixManager.get().isLeader());
      if (configuration.isSchedulerEnabled() && !configuration.isMultiActiveSchedulerEnabled()) {
        LOGGER.info("Gobblin Service is now running in non-leader mode without multi-active scheduler enabled, "
            + "disabling Scheduler.");
        this.scheduler.setActive(false);
      }
      if (helixLeaderGauges.isPresent()) {
        helixLeaderGauges.get().setState(LeaderState.SLAVE);
      }
      if (configuration.isGitConfigMonitorEnabled()) {
        this.gitConfigMonitor.setActive(false);
      }
      if (configuration.isDagManagerEnabled()) {
        this.dagManager.setActive(false);
        this.eventBus.unregister(this.dagManager);
      }
      if (configuration.isOnlyAnnounceLeader()) {
        this.d2Announcer.markDownServer();
      }
    }
  }
}

/** Registers every enabled sub-service with the app launcher so it is started/stopped with it. */
private void registerServicesInLauncher(){
  if (configuration.isTopologyCatalogEnabled()) {
    this.serviceLauncher.addService(topologyCatalog);
  }
  if (configuration.isFlowCatalogEnabled()) {
    this.serviceLauncher.addService(flowCatalog);
    if (configuration.isGitConfigMonitorEnabled()) {
      this.serviceLauncher.addService(gitConfigMonitor);
    }
  }
  if (configuration.isDagManagerEnabled()) {
    this.serviceLauncher.addService(dagManager);
  }
  this.serviceLauncher.addService(databaseManager);
  this.serviceLauncher.addService(issueRepository);
  if (configuration.isJobStatusMonitorEnabled()) {
    this.serviceLauncher.addService(jobStatusMonitor);
  }
  if (configuration.isSchedulerEnabled()) {
    this.serviceLauncher.addService(schedulerService);
    this.serviceLauncher.addService(scheduler);
  }
  if (configuration.isRestLIServerEnabled()) {
    this.serviceLauncher.addService(restliServer);
  }
  if (this.configuration.isWarmStandbyEnabled()) {
    this.serviceLauncher.addService(specStoreChangeMonitor);
    this.serviceLauncher.addService(dagActionStoreChangeMonitor);
  }
}

/** Builds the Rest.li server (if enabled), registers launcher services, and wires flow listeners. */
private void configureServices(){
  if (configuration.isRestLIServerEnabled()) {
    this.restliServer = EmbeddedRestliServer.builder()
        .resources(Lists.newArrayList(FlowConfigsResource.class, FlowConfigsV2Resource.class))
        .injector(injector)
        .build();
    if (configuration.getInnerConfig().hasPath(ServiceConfigKeys.SERVICE_PORT)) {
      this.restliServer.setPort(configuration.getInnerConfig().getInt(ServiceConfigKeys.SERVICE_PORT));
    }
  }
  registerServicesInLauncher();
  // Register Scheduler to listen to changes in Flows
  // In warm standby mode, instead of scheduler we will add orchestrator as listener
  if(configuration.isWarmStandbyEnabled()) {
    this.flowCatalog.addListener(this.orchestrator);
  } else if (configuration.isSchedulerEnabled()) {
    this.flowCatalog.addListener(this.scheduler);
  }
}

/** Guards against construction that bypassed Guice (fields would be left un-injected). */
private void ensureInjected() {
  if (v2ResourceHandler == null) {
    throw new IllegalStateException("GobblinServiceManager should be constructed through Guice dependency injection "
        + "or through a static factory method");
  }
}

/**
 * Starts the service: wires services, connects Helix (if configured), launches all registered
 * sub-services, resolves initial leadership, announces to d2, populates the topology catalog and
 * finally activates the spec compiler and (when leader) the DagManager.
 */
@Override
public void start() throws ApplicationException {
  LOGGER.info("[Init] Starting the Gobblin Service Manager");
  ensureInjected();
  configureServices();
  if (this.helixManager.isPresent()) {
    connectHelixManager();
  }
  this.eventBus.register(this);
  this.serviceLauncher.start();
  // Wait until spec consumer service is running to set scheduler to active
  if (this.configuration.isWarmStandbyEnabled()) {
    while (!this.specStoreChangeMonitor.isRunning()) {
      try {
        LOGGER.info("Waiting for SpecStoreChangeMonitor to be started...");
        Thread.sleep(10);
      } catch (InterruptedException e) {
        LOGGER.warn("Interrupted while waiting for SpecStoreChangeMonitor to be started");
      }
    }
  }
  if (this.helixManager.isPresent()) {
    // Subscribe to leadership changes
    this.helixManager.get().addControllerListener((ControllerChangeListener) this::handleLeadershipChange);
    // Update for first time since there might be no notification
    if (helixManager.get().isLeader()) {
      if (configuration.isSchedulerEnabled()) {
        LOGGER.info("[Init] Gobblin Service is running in master instance mode, enabling Scheduler.");
        this.scheduler.setActive(true);
      }
      if (configuration.isGitConfigMonitorEnabled()) {
        this.gitConfigMonitor.setActive(true);
      }
      if (helixLeaderGauges.isPresent()) {
        helixLeaderGauges.get().setState(LeaderState.MASTER);
      }
    } else {
      if (configuration.isSchedulerEnabled()) {
        if (configuration.isMultiActiveSchedulerEnabled()) {
          LOGGER.info("[Init] Gobblin Service enabling scheduler for non-leader since multi-active scheduler enabled");
          this.scheduler.setActive(true);
        } else {
          LOGGER.info("[Init] Gobblin Service is running in non-leader instance mode, not enabling Scheduler.");
        }
      }
      if (helixLeaderGauges.isPresent()) {
        helixLeaderGauges.get().setState(LeaderState.SLAVE);
      }
    }
  } else {
    // No Helix manager, hence standalone service instance
    // .. designate scheduler to itself
    LOGGER.info("[Init] Gobblin Service is running in single instance mode, enabling Scheduler.");
    this.scheduler.setActive(true);
    if (configuration.isGitConfigMonitorEnabled()) {
      this.gitConfigMonitor.setActive(true);
    }
  }
  // Announce to d2 after services are initialized regardless of leadership if configuration is not enabled
  if (!this.configuration.isOnlyAnnounceLeader()) {
    this.d2Announcer.markUpServer();
  }
  // Populate TopologyCatalog with all Topologies generated by TopologySpecFactory
  // This has to be done after the topologyCatalog service is launched
  if (configuration.isTopologySpecFactoryEnabled()) {
    Collection<TopologySpec> topologySpecs = this.topologySpecFactory.getTopologies();
    for (TopologySpec topologySpec : topologySpecs) {
      this.topologyCatalog.put(topologySpec);
    }
  }
  // Register Orchestrator to listen to changes in topology
  // This has to be done after topologySpecFactory has updated spec store, so that listeners will have the latest updates.
  if (configuration.isSchedulerEnabled()) {
    this.topologyCatalog.addListener(this.orchestrator);
  }
  // Notify now topologyCatalog has the right information
  this.topologyCatalog.getInitComplete().countDown();
  //Activate the SpecCompiler, after the topologyCatalog has been initialized.
  this.orchestrator.getSpecCompiler().setActive(true);
  //Activate the DagManager service, after the topologyCatalog has been initialized.
  if (!this.helixManager.isPresent() || this.helixManager.get().isLeader()){
    if (configuration.isDagManagerEnabled()) {
      this.dagManager.setActive(true);
      this.eventBus.register(this.dagManager);
    }
  }
}

/**
 * Stops the service (idempotent via {@code stopInProgress}): withdraws the d2 announcement,
 * stops the launcher, and always disconnects from Helix in the finally block.
 */
@Override
public void stop() throws ApplicationException {
  if (this.stopInProgress) {
    return;
  }
  LOGGER.info("Stopping the Gobblin Service Manager");
  this.stopInProgress = true;
  try {
    // Stop announcing GaaS instances to d2 when services are stopped
    if (!configuration.isOnlyAnnounceLeader()) {
      this.d2Announcer.markDownServer();
    }
    this.serviceLauncher.stop();
  } catch (ApplicationException ae) {
    LOGGER.error("Error while stopping Gobblin Service Manager", ae);
  } finally {
    disconnectHelixManager();
  }
}

/** Connects to Helix and registers the user-defined-message handler factory. */
@VisibleForTesting
void connectHelixManager() {
  try {
    if (this.helixManager.isPresent()) {
      this.helixManager.get().connect();
      this.helixManager.get()
          .getMessagingService()
          .registerMessageHandlerFactory(Message.MessageType.USER_DEFINE_MSG.toString(),
              new ControllerUserDefinedMessageHandlerFactory(flowCatalogLocalCommit, scheduler, v2ResourceHandler,
                  configuration.getServiceName()));
    }
  } catch (Exception e) {
    LOGGER.error("HelixManager failed to connect", e);
    throw Throwables.propagate(e);
  }
}

/** Disconnects from Helix if currently connected; no-op otherwise. */
@VisibleForTesting
void disconnectHelixManager() {
  if (isHelixManagerConnected()) {
    if (this.helixManager.isPresent()) {
      this.helixManager.get().disconnect();
    }
  }
}

@VisibleForTesting
boolean isHelixManagerConnected() {
  return this.helixManager.isPresent() && this.helixManager.get().isConnected();
}

@Override
public void close() throws IOException {
  this.serviceLauncher.close();
}

@Override
public Collection<StandardMetrics> getStandardMetricsCollection() {
  return ImmutableList.of(this.metrics);
}

/** Service-level metrics: currently a histogram tracking leadership-change events. */
private class Metrics extends StandardMetrics {
  public static final String SERVICE_LEADERSHIP_CHANGE = "serviceLeadershipChange";
  private ContextAwareHistogram serviceLeadershipChange;

  public Metrics(final MetricContext metricContext, Config config) {
    // Sliding time window for the histogram, configurable via the metric timer window key.
    int timeWindowSizeInMinutes = ConfigUtils.getInt(config, ConfigurationKeys.METRIC_TIMER_WINDOW_SIZE_IN_MINUTES,
        ConfigurationKeys.DEFAULT_METRIC_TIMER_WINDOW_SIZE_IN_MINUTES);
    this.serviceLeadershipChange = metricContext.contextAwareHistogram(SERVICE_LEADERSHIP_CHANGE,
        timeWindowSizeInMinutes, TimeUnit.MINUTES);
    this.contextAwareMetrics.add(this.serviceLeadershipChange);
  }
}

/** Returns the CLI-provided service id, defaulting to "1" when absent. */
private static String getServiceId(CommandLine cmd) {
  return cmd.getOptionValue(SERVICE_ID_OPTION_NAME) == null ? "1" : cmd.getOptionValue(SERVICE_ID_OPTION_NAME);
}

/** CLI options: service name (required) and globally-unique service id (recommended). */
private static Options buildOptions() {
  Options options = new Options();
  options.addOption("a", SERVICE_NAME_OPTION_NAME, true, "Gobblin Service application's name");
  options.addOption("i", SERVICE_ID_OPTION_NAME, true, "Gobblin Service application's ID, "
      + "this needs to be globally unique");
  return options;
}

private static void printUsage(Options options) {
  HelpFormatter formatter = new HelpFormatter();
  formatter.printHelp(GobblinServiceManager.class.getSimpleName(), options);
}

/** CLI entry point: parses options, builds the service via Guice, starts it (and optionally runs a smoke test). */
public static void main(String[] args) throws Exception {
  Options options = buildOptions();
  try {
    CommandLine cmd = new DefaultParser().parse(options, args);
    if (!cmd.hasOption(SERVICE_NAME_OPTION_NAME)) {
      printUsage(options);
      System.exit(1);
    }
    if (!cmd.hasOption(SERVICE_ID_OPTION_NAME)) {
      printUsage(options);
      LOGGER.warn("Please assign globally unique ID for a GobblinServiceManager instance, or it will use default ID");
    }
    boolean isTestMode = false;
    if (cmd.hasOption("test_mode")) {
      isTestMode = Boolean.parseBoolean(cmd.getOptionValue("test_mode", "false"));
    }
    Config config = ConfigFactory.load();
    GobblinServiceConfiguration serviceConfiguration =
        new GobblinServiceConfiguration(cmd.getOptionValue(SERVICE_NAME_OPTION_NAME), getServiceId(cmd), config, null);
    GobblinServiceGuiceModule guiceModule = new GobblinServiceGuiceModule(serviceConfiguration);
    Injector injector = Guice.createInjector(guiceModule);
    // try-with-resources ensures close() is called on shutdown or failure
    try (GobblinServiceManager gobblinServiceManager = injector.getInstance(GobblinServiceManager.class)) {
      gobblinServiceManager.start();
      if (isTestMode) {
        testGobblinService(gobblinServiceManager);
      }
    }
  } catch (ParseException pe) {
    printUsage(options);
    System.exit(1);
  }
}

// TODO: Remove after adding test cases
// Smoke test: creates a sample flow config against the local Rest.li endpoint.
@SuppressWarnings("DLS_DEAD_LOCAL_STORE")
private static void testGobblinService(GobblinServiceManager gobblinServiceManager) {
  FlowConfigClient client =
      new FlowConfigClient(String.format("http://localhost:%s/", gobblinServiceManager.restliServer.getPort()));
  Map<String, String> flowProperties = Maps.newHashMap();
  flowProperties.put("param1", "value1");
  final String TEST_GROUP_NAME = "testGroup1";
  final String TEST_FLOW_NAME = "testFlow1";
  final String TEST_SCHEDULE = "0 1/0 * ? * *";
  final String TEST_TEMPLATE_URI = "FS:///templates/test.template";
  FlowConfig flowConfig = new FlowConfig().setId(new FlowId().setFlowGroup(TEST_GROUP_NAME).setFlowName(TEST_FLOW_NAME))
      .setTemplateUris(TEST_TEMPLATE_URI).setSchedule(new Schedule().setCronSchedule(TEST_SCHEDULE).
      setRunImmediately(true))
      .setProperties(new StringMap(flowProperties));
  try {
    client.createFlowConfig(flowConfig);
  } catch (RemoteInvocationException e) {
    throw new RuntimeException(e);
  }
}

/** Mutable holder for the current Helix leader state, read by the leader-state gauge. */
@Setter
private static class HelixLeaderState {
  private LeaderState state = LeaderState.UNKNOWN;
}

/** Leader state reported via metrics: -1 unknown, 0 slave, 1 master. */
private enum LeaderState {
  UNKNOWN(-1),
  SLAVE(0),
  MASTER(1);

  @Getter
  private int value;

  LeaderState(int value) {
    this.value = value;
  }
}
}
3,835
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/core/GobblinServiceConfiguration.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.modules.core;

import java.util.Objects;

import org.apache.hadoop.fs.Path;

import com.typesafe.config.Config;

import javax.annotation.Nullable;
import lombok.Getter;
import lombok.ToString;

import org.apache.gobblin.service.ServiceConfigKeys;
import org.apache.gobblin.util.ConfigUtils;


/**
 * Immutable view of the Gobblin Service configuration: resolves all feature flags
 * (catalogs, scheduler, Rest.li server, DagManager, Helix, warm standby, ...) from the
 * raw {@link Config} once, at construction time.
 *
 * Note: field declaration order is preserved deliberately since {@code @ToString}
 * renders fields in declaration order.
 */
@ToString
public class GobblinServiceConfiguration {

  @Getter
  private final String serviceName;

  @Getter
  private final String serviceId;

  @Getter
  private final boolean isWarmStandbyEnabled;

  @Getter
  private final boolean isMultiActiveSchedulerEnabled;

  @Getter
  private final boolean isTopologyCatalogEnabled;

  @Getter
  private final boolean isFlowCatalogEnabled;

  @Getter
  private final boolean isSchedulerEnabled;

  @Getter
  private final boolean isRestLIServerEnabled;

  @Getter
  private final boolean isTopologySpecFactoryEnabled;

  @Getter
  private final boolean isGitConfigMonitorEnabled;

  @Getter
  private final boolean isDagManagerEnabled;

  @Getter
  private final boolean isJobStatusMonitorEnabled;

  @Getter
  private final boolean isHelixManagerEnabled;

  @Getter
  private final boolean flowCatalogLocalCommit;

  @Getter
  private final boolean onlyAnnounceLeader;

  @Getter
  private final Config innerConfig;

  @Getter
  @Nullable
  private final Path serviceWorkDir;

  /**
   * @param serviceName    non-null application name
   * @param serviceId      non-null, ideally globally unique instance id
   * @param config         non-null raw service configuration
   * @param serviceWorkDir optional work directory; when null a default is derived by the caller
   */
  public GobblinServiceConfiguration(String serviceName, String serviceId, Config config,
      @Nullable Path serviceWorkDir) {
    this.serviceName = Objects.requireNonNull(serviceName, "Service name cannot be null");
    this.serviceId = Objects.requireNonNull(serviceId, "Service id cannot be null");
    this.innerConfig = Objects.requireNonNull(config, "Config cannot be null");
    this.serviceWorkDir = serviceWorkDir;

    this.isTopologyCatalogEnabled =
        ConfigUtils.getBoolean(config, ServiceConfigKeys.GOBBLIN_SERVICE_TOPOLOGY_CATALOG_ENABLED_KEY, true);
    this.isFlowCatalogEnabled =
        ConfigUtils.getBoolean(config, ServiceConfigKeys.GOBBLIN_SERVICE_FLOW_CATALOG_ENABLED_KEY, true);

    // Flow-catalog sub-features are only meaningful when the flow catalog itself is enabled;
    // short-circuiting keeps them false (and unread) otherwise, exactly as before.
    this.flowCatalogLocalCommit = this.isFlowCatalogEnabled
        && ConfigUtils.getBoolean(config, ServiceConfigKeys.GOBBLIN_SERVICE_FLOW_CATALOG_LOCAL_COMMIT,
            ServiceConfigKeys.DEFAULT_GOBBLIN_SERVICE_FLOW_CATALOG_LOCAL_COMMIT);
    this.isGitConfigMonitorEnabled = this.isFlowCatalogEnabled
        && ConfigUtils.getBoolean(config, ServiceConfigKeys.GOBBLIN_SERVICE_GIT_CONFIG_MONITOR_ENABLED_KEY, false);

    this.isWarmStandbyEnabled =
        ConfigUtils.getBoolean(config, ServiceConfigKeys.GOBBLIN_SERVICE_WARM_STANDBY_ENABLED_KEY, false);
    this.isMultiActiveSchedulerEnabled =
        ConfigUtils.getBoolean(config, ServiceConfigKeys.GOBBLIN_SERVICE_MULTI_ACTIVE_SCHEDULER_ENABLED_KEY, false);
    // Helix mode is implied purely by the presence of a ZK connection string.
    this.isHelixManagerEnabled = config.hasPath(ServiceConfigKeys.ZK_CONNECTION_STRING_KEY);
    this.isDagManagerEnabled =
        ConfigUtils.getBoolean(config, ServiceConfigKeys.GOBBLIN_SERVICE_DAG_MANAGER_ENABLED_KEY,
            ServiceConfigKeys.DEFAULT_GOBBLIN_SERVICE_DAG_MANAGER_ENABLED);
    this.isJobStatusMonitorEnabled =
        ConfigUtils.getBoolean(config, ServiceConfigKeys.GOBBLIN_SERVICE_JOB_STATUS_MONITOR_ENABLED_KEY, true);
    this.isSchedulerEnabled =
        ConfigUtils.getBoolean(config, ServiceConfigKeys.GOBBLIN_SERVICE_SCHEDULER_ENABLED_KEY, true);
    this.isRestLIServerEnabled =
        ConfigUtils.getBoolean(config, ServiceConfigKeys.GOBBLIN_SERVICE_RESTLI_SERVER_ENABLED_KEY, true);
    this.isTopologySpecFactoryEnabled =
        ConfigUtils.getBoolean(config, ServiceConfigKeys.GOBBLIN_SERVICE_TOPOLOGY_SPEC_FACTORY_ENABLED_KEY, true);
    this.onlyAnnounceLeader =
        ConfigUtils.getBoolean(config, ServiceConfigKeys.GOBBLIN_SERVICE_D2_ONLY_ANNOUNCE_LEADER, false);
  }
}
3,836
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/flowgraph/FlowGraphConfigurationKeys.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.modules.flowgraph;

/**
 * Configuration key constants for flow-graph construction: {@link DataNode}s,
 * {@link FlowEdge}s and the path finder.
 */
public class FlowGraphConfigurationKeys {
  public static final String DATA_NODE_PREFIX = "data.node.";
  public static final String FLOW_EDGE_PREFIX = "flow.edge.";
  public static final String FLOW_GRAPH_PREFIX = "flow.graph.";

  /**
   * {@link DataNode} related configuration keys.
   */
  public static final String DATA_NODE_CLASS = DATA_NODE_PREFIX + "class";
  public static final String DEFAULT_DATA_NODE_CLASS = "org.apache.gobblin.service.modules.flowgraph.BaseDataNode";
  public static final String DATA_NODE_ID_KEY = DATA_NODE_PREFIX + "id";
  public static final String DATA_NODE_IS_ACTIVE_KEY = DATA_NODE_PREFIX + "isActive";

  /**
   * {@link org.apache.gobblin.service.modules.flowgraph.datanodes.HttpDataNode} related configuration keys.
   */
  public static final String DATA_NODE_HTTP_DOMAIN_KEY = DATA_NODE_PREFIX + "http.domain";
  public static final String DATA_NODE_HTTP_AUTHENTICATION_TYPE_KEY = DATA_NODE_PREFIX + "http.authentication.type";

  /**
   * {@link FlowEdge} related configuration keys.
   */
  public static final String FLOW_EDGE_FACTORY_CLASS = FLOW_EDGE_PREFIX + "factory.class";
  public static final String DEFAULT_FLOW_EDGE_FACTORY_CLASS = "org.apache.gobblin.service.modules.flowgraph.BaseFlowEdge$Factory";
  public static final String FLOW_EDGE_SOURCE_KEY = FLOW_EDGE_PREFIX + "source";
  public static final String FLOW_EDGE_DESTINATION_KEY = FLOW_EDGE_PREFIX + "destination";
  public static final String FLOW_EDGE_ID_KEY = FLOW_EDGE_PREFIX + "id";
  public static final String FLOW_EDGE_NAME_KEY = FLOW_EDGE_PREFIX + "name";
  public static final String FLOW_EDGE_IS_ACTIVE_KEY = FLOW_EDGE_PREFIX + "isActive";
  public static final String FLOW_EDGE_TEMPLATE_DIR_URI_KEY = FLOW_EDGE_PREFIX + "flowTemplateDirUri";
  public static final String FLOW_EDGE_SPEC_EXECUTORS_KEY = FLOW_EDGE_PREFIX + "specExecutors";
  public static final String FLOW_EDGE_SPEC_EXECUTOR_CLASS_KEY = "specExecInstance.class";

  /**
   * {@link org.apache.gobblin.service.modules.flowgraph.pathfinder.PathFinder} related configuration keys.
   */
  public static final String FLOW_GRAPH_PATH_FINDER_CLASS = FLOW_GRAPH_PREFIX + "pathfinder.class";
  public static final String DEFAULT_FLOW_GRAPH_PATH_FINDER_CLASS =
      "org.apache.gobblin.service.modules.flowgraph.pathfinder.BFSPathFinder";

  /** Constants holder; enforce noninstantiability with a private constructor. */
  private FlowGraphConfigurationKeys() {
  }
}
3,837
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/flowgraph/FlowEdge.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.modules.flowgraph;

import java.util.List;

import com.typesafe.config.Config;

import org.apache.hadoop.security.UserGroupInformation;

import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.runtime.api.SpecExecutor;
import org.apache.gobblin.service.modules.template.FlowTemplate;


/**
 * Representation of an edge in a FlowGraph. Each {@link FlowEdge} encapsulates:
 * <p><ul>
 *   <li> two {@link DataNode}s as its end points,</li>
 *   <li> a {@link FlowTemplate} that is responsible for data movement between the {@link DataNode}s, and</li>
 *   <li> a list of {@link SpecExecutor}s where the {@link FlowTemplate} can be executed.</li>
 * </ul></p>
 */
@Alpha
public interface FlowEdge {
  /**
   *
   * @return the source {@link DataNode} id of the edge.
   */
  String getSrc();

  /**
   *
   * @return the destination {@link DataNode} id of the edge.
   */
  String getDest();

  /**
   *
   * @return the {@link FlowTemplate} that performs the data movement along the edge.
   */
  FlowTemplate getFlowTemplate();

  /**
   *
   * @return a list of {@link SpecExecutor}s that can execute the {@link FlowTemplate} corresponding to this edge.
   */
  List<SpecExecutor> getExecutors();

  /**
   * Gets the properties that define the {@link FlowEdge}. Encapsulates all the properties from which the
   * {@link FlowEdge} is instantiated. It also includes properties needed for resolving a
   * {@link org.apache.gobblin.runtime.api.JobTemplate}.
   * @return the properties of this edge as a {@link Config} object.
   */
  Config getConfig();

  /**
   * A string uniquely identifying the edge.
   * @return the label of the {@link FlowEdge}.
   */
  String getId();

  /**
   *
   * @return true if the {@link FlowEdge} is active.
   */
  boolean isActive();

  /**
   *
   * @param user the user attempting to use the edge
   * @return true if the user has ACL permissions to access the {@link FlowEdge}.
   */
  boolean isAccessible(UserGroupInformation user);
}
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/flowgraph/Dag.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.modules.flowgraph;

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

import com.google.common.collect.Lists;
import com.google.common.collect.Sets;

import lombok.Getter;
import lombok.Setter;

import org.apache.gobblin.annotation.Alpha;


/**
 * An implementation of Dag. Assumes that nodes have unique values. Nodes with duplicate values will produce
 * unpredictable behavior.
 */
@Alpha
@Getter
public class Dag<T> {
  // Nodes with no parents.
  private List<DagNode<T>> startNodes;
  // Nodes with no children.
  private List<DagNode<T>> endNodes;
  // Map to maintain parent to children mapping.
  private Map<DagNode, List<DagNode<T>>> parentChildMap;
  private List<DagNode<T>> nodes;
  @Setter
  private String message;
  @Setter
  private String flowEvent;
  // Keep track of when the final flow status is emitted, in milliseconds to avoid many duplicate events.
  // (Field-level @Getter removed: the class-level @Getter already generates the getter.)
  @Setter
  private long eventEmittedTimeMillis = -1;

  public Dag(List<DagNode<T>> dagNodes) {
    this.nodes = dagNodes;
    //Build dag
    this.build();
  }

  /**
   * Constructs the dag from the Node list: derives start nodes (no parents), the
   * parent-to-children map, and end nodes (nodes that are nobody's parent).
   */
  private void build() {
    this.startNodes = new ArrayList<>();
    this.endNodes = new ArrayList<>();
    this.parentChildMap = new HashMap<>();
    for (DagNode node : this.nodes) {
      //If a Node has no parent Node, add it to the list of start Nodes
      if (node.getParentNodes() == null) {
        this.startNodes.add(node);
      } else {
        List<DagNode> parentNodeList = node.getParentNodes();
        for (DagNode parentNode : parentNodeList) {
          if (parentChildMap.containsKey(parentNode)) {
            parentChildMap.get(parentNode).add(node);
          } else {
            parentChildMap.put(parentNode, Lists.newArrayList(node));
          }
        }
      }
    }
    //Iterate over all the Nodes and add a Node to the list of endNodes if it is not present in the parentChildMap
    for (DagNode node : this.nodes) {
      if (!parentChildMap.containsKey(node)) {
        this.endNodes.add(node);
      }
    }
  }

  /** @return the children of {@code node}, or an empty list if it has none. */
  public List<DagNode<T>> getChildren(DagNode node) {
    // Collections.emptyList() instead of the raw EMPTY_LIST field: type-safe, no unchecked warning.
    return parentChildMap.getOrDefault(node, Collections.emptyList());
  }

  /** @return the parents of {@code node}, or an empty list if it has none. */
  public List<DagNode<T>> getParents(DagNode node) {
    return (node.parentNodes != null) ? node.parentNodes : Collections.emptyList();
  }

  /**
   * Get the ancestors of a given set of {@link DagNode}s in the {@link Dag}.
   * @param dagNodes set of nodes in the {@link Dag}.
   * @return the union of all ancestors of dagNodes in the dag.
   */
  private Set<DagNode<T>> getAncestorNodes(Set<DagNode<T>> dagNodes) {
    Set<DagNode<T>> ancestorNodes = new HashSet<>();
    for (DagNode<T> dagNode : dagNodes) {
      // BFS upwards through the parent links.
      LinkedList<DagNode<T>> nodesToExpand = Lists.newLinkedList(this.getParents(dagNode));
      while (!nodesToExpand.isEmpty()) {
        DagNode<T> nextNode = nodesToExpand.poll();
        ancestorNodes.add(nextNode);
        nodesToExpand.addAll(this.getParents(nextNode));
      }
    }
    return ancestorNodes;
  }

  /**
   * This method computes a set of {@link DagNode}s which are the dependency nodes for concatenating this {@link Dag}
   * with any other {@link Dag}. The set of dependency nodes is the union of:
   * <p><ul>
   * <li> The endNodes of this dag which are not forkable, and </li>
   * <li> The parents of forkable nodes, such that no parent is an ancestor of another parent.</li>
   * </ul></p>
   *
   * @param forkNodes set of nodes of this {@link Dag} which are forkable
   * @return set of dependency nodes of this dag for concatenation with any other dag.
   */
  public Set<DagNode<T>> getDependencyNodes(Set<DagNode<T>> forkNodes) {
    Set<DagNode<T>> dependencyNodes = new HashSet<>();
    for (DagNode<T> endNode : endNodes) {
      if (!forkNodes.contains(endNode)) {
        dependencyNodes.add(endNode);
      }
    }
    //Get all ancestors of non-forkable nodes
    Set<DagNode<T>> ancestorNodes = this.getAncestorNodes(dependencyNodes);

    //Add ancestors of the parents of forkable nodes
    for (DagNode<T> dagNode: forkNodes) {
      List<DagNode<T>> parentNodes = this.getParents(dagNode);
      ancestorNodes.addAll(this.getAncestorNodes(Sets.newHashSet(parentNodes)));
    }

    for (DagNode<T> dagNode: forkNodes) {
      List<DagNode<T>> parentNodes = this.getParents(dagNode);
      for (DagNode<T> parentNode : parentNodes) {
        //Add parent node of a forkable node as a dependency, only if it is not already an ancestor of another
        // dependency.
        if (!ancestorNodes.contains(parentNode)) {
          dependencyNodes.add(parentNode);
        }
      }
    }
    return dependencyNodes;
  }

  public boolean isEmpty() {
    return this.nodes.isEmpty();
  }

  /**
   * Concatenate two dags together. Join the "other" dag to "this" dag and return "this" dag.
   * The concatenate method ensures that all the jobs of "this" dag (which may have multiple end nodes)
   * are completed before starting any job of the "other" dag. This is done by adding each endNode of this dag as
   * a parent of every startNode of the other dag.
   *
   * @param other dag to concatenate to this dag
   * @return the concatenated dag
   */
  public Dag<T> concatenate(Dag<T> other) {
    return concatenate(other, new HashSet<>());
  }

  /**
   * Concatenate two dags together. Join the "other" dag to "this" dag and return "this" dag.
   * The concatenate method ensures that all the jobs of "this" dag (which may have multiple end nodes)
   * are completed before starting any job of the "other" dag. This is done by adding each endNode of this dag, which is
   * not a fork node, as a parent of every startNode of the other dag.
   *
   * @param other dag to concatenate to this dag
   * @param forkNodes a set of nodes from this dag which are marked as forkable nodes. Each of these nodes will be added
   *                  to the list of end nodes of the concatenated dag. Essentially, a forkable node has no dependents
   *                  in the concatenated dag.
   * @return the concatenated dag
   */
  public Dag<T> concatenate(Dag<T> other, Set<DagNode<T>> forkNodes) {
    if (other == null || other.isEmpty()) {
      return this;
    }
    if (this.isEmpty()) {
      return other;
    }
    for (DagNode node : getDependencyNodes(forkNodes)) {
      if (!this.parentChildMap.containsKey(node)) {
        this.parentChildMap.put(node, Lists.newArrayList());
      }
      for (DagNode otherNode : other.startNodes) {
        this.parentChildMap.get(node).add(otherNode);
        otherNode.addParentNode(node);
      }
    }
    //Each node which is a forkable node is added to list of end nodes of the concatenated dag
    other.endNodes.addAll(forkNodes);
    this.endNodes = other.endNodes;
    //Append all the entries from the other dag's parentChildMap to this dag's parentChildMap
    this.parentChildMap.putAll(other.parentChildMap);
    //If there exists a node in the other dag with no parent nodes, add it to the list of start nodes of the
    // concatenated dag.
    other.startNodes.stream().filter(node -> other.getParents(node).isEmpty())
        .forEach(node -> this.startNodes.add(node));
    this.nodes.addAll(other.nodes);
    return this;
  }

  /**
   * Merge the "other" dag to "this" dag and return "this" dag as a forest of the two dags.
   * More specifically, the merge() operation takes two dags and returns a disjoint union of the two dags.
   *
   * @param other dag to merge to this dag
   * @return the disjoint union of the two dags
   */
  public Dag<T> merge(Dag<T> other) {
    if (other == null || other.isEmpty()) {
      return this;
    }
    if (this.isEmpty()) {
      return other;
    }
    //Append all the entries from the other dag's parentChildMap to this dag's parentChildMap
    //(putAll replaces the equivalent element-wise put loop; consistent with concatenate()).
    this.parentChildMap.putAll(other.parentChildMap);
    //Append the startNodes, endNodes and nodes from the other dag to this dag.
    this.startNodes.addAll(other.startNodes);
    this.endNodes.addAll(other.endNodes);
    this.nodes.addAll(other.nodes);
    return this;
  }

  /**
   * DagNode is essentially a job within a Dag, usually they are used interchangeably.
   * Equality and hash code are delegated to the wrapped value, so values must be unique per Dag.
   */
  @Getter
  public static class DagNode<T> {
    private T value;
    //List of parent Nodes that are dependencies of this Node.
    private List<DagNode<T>> parentNodes;

    //Constructor
    public DagNode(T value) {
      this.value = value;
    }

    public void addParentNode(DagNode<T> node) {
      if (parentNodes == null) {
        parentNodes = Lists.newArrayList(node);
        return;
      }
      parentNodes.add(node);
    }

    @Override
    public boolean equals(Object o) {
      if (this == o) {
        return true;
      }
      if (o == null || getClass() != o.getClass()) {
        return false;
      }
      DagNode that = (DagNode) o;
      return this.getValue().equals(that.getValue());
    }

    @Override
    public int hashCode() {
      return this.getValue().hashCode();
    }
  }

  /**
   * @return A string representation of the Dag as a JSON Array.
   */
  @Override
  public String toString() {
    return this.getNodes().stream().map(node -> node.getValue().toString()).collect(Collectors.toList()).toString();
  }
}
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/flowgraph/BaseFlowGraph.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.modules.flowgraph;

import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

import lombok.extern.slf4j.Slf4j;

import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.runtime.api.FlowSpec;
import org.apache.gobblin.service.modules.flow.FlowGraphPath;
import org.apache.gobblin.service.modules.flowgraph.pathfinder.PathFinder;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;

/**
 * A thread-safe implementation of {@link FlowGraph}. The implementation maintains the following data structures:
 * <p>dataNodeMap - the mapping from a node identifier to the {@link DataNode} instance</p>
 * <p>nodesToEdges - the mapping from each {@link DataNode} to its outgoing {@link FlowEdge}s</p>
 * <p>flowEdgeMap - the mapping from an edge label to the {@link FlowEdge} instance</p>
 *
 * Read/Write Access to the {@link FlowGraph} is synchronized via a {@link ReentrantReadWriteLock}.
 */
@Alpha
@Slf4j
public class BaseFlowGraph implements FlowGraph {
  // Synchronize read/write access while the flowgraph is in the middle of an update. The lock is
  // fair so writers (graph reloads) are not starved by a stream of readers (path-finding requests).
  private final ReadWriteLock rwLock = new ReentrantReadWriteLock(true);

  // Invariant: every DataNode present in dataNodeMap also has an entry (possibly an empty set) in
  // nodesToEdges; addDataNode/deleteDataNode maintain this under the write lock.
  private final Map<DataNode, Set<FlowEdge>> nodesToEdges = new HashMap<>();
  private final Map<String, DataNode> dataNodeMap = new HashMap<>();
  private final Map<String, FlowEdge> flowEdgeMap = new HashMap<>();
  // Maps alternate node names to canonical node ids; passed through to the PathFinder.
  private final Map<String, String> dataNodeAliasMap;

  public BaseFlowGraph() {
    this(new HashMap<>());
  }

  public BaseFlowGraph(Map<String, String> dataNodeAliasMap) {
    this.dataNodeAliasMap = dataNodeAliasMap;
  }

  /**
   * Lookup a node by its identifier.
   *
   * @param nodeId node identifier
   * @return {@link DataNode} with nodeId as the identifier, or null if no such node exists.
   */
  public DataNode getNode(String nodeId) {
    return this.dataNodeMap.get(nodeId);
  }

  /**
   * Add a {@link DataNode} to the {@link FlowGraph}. If the node already "exists" in the {@link FlowGraph} (i.e. the
   * FlowGraph already has another node with the same id), we remove the old node and add the new one. The
   * edges incident on the old node are preserved.
   *
   * @param node to be added to the {@link FlowGraph}
   * @return true if node is successfully added to the {@link FlowGraph}.
   */
  @Override
  public boolean addDataNode(DataNode node) {
    try {
      rwLock.writeLock().lock();
      // Preserve any edges adjacent to a pre-existing node with the same identity.
      Set<FlowEdge> edges = this.nodesToEdges.getOrDefault(node, new HashSet<>());
      this.nodesToEdges.put(node, edges);
      this.dataNodeMap.put(node.getId(), node);
    } finally {
      rwLock.writeLock().unlock();
    }
    return true;
  }

  /**
   * Add a {@link FlowEdge} to the {@link FlowGraph}. Addition of edge succeeds only if both the end points of the
   * edge are already nodes in the FlowGraph. If a {@link FlowEdge} already exists, the old FlowEdge is removed and
   * the new one added in its place.
   *
   * @param edge the edge to add
   * @return true if addition of the {@link FlowEdge} is successful.
   */
  @Override
  public boolean addFlowEdge(FlowEdge edge) {
    try {
      rwLock.writeLock().lock();
      // Both endpoints must already be registered nodes. (The previous implementation re-checked
      // the source node for null after containsKey(); the single lookup below is sufficient.)
      DataNode srcDataNode = this.dataNodeMap.get(edge.getSrc());
      if (srcDataNode == null || !this.dataNodeMap.containsKey(edge.getDest())) {
        return false;
      }
      Set<FlowEdge> adjacentEdges = this.nodesToEdges.get(srcDataNode);
      // Replace any equal edge already present so the *new* instance (possibly with updated
      // attributes) is the one stored; Set.add alone would keep the old instance.
      adjacentEdges.remove(edge);
      adjacentEdges.add(edge);
      this.flowEdgeMap.put(edge.getId(), edge);
      return true;
    } finally {
      rwLock.writeLock().unlock();
    }
  }

  /**
   * Delete a {@link DataNode} by its identifier.
   *
   * @param nodeId identifier of the {@link DataNode} to be deleted.
   * @return true if {@link DataNode} is successfully deleted.
   */
  @Override
  public boolean deleteDataNode(String nodeId) {
    try {
      rwLock.writeLock().lock();
      return this.dataNodeMap.containsKey(nodeId) && deleteDataNode(this.dataNodeMap.get(nodeId));
    } finally {
      rwLock.writeLock().unlock();
    }
  }

  /**
   * Delete a {@link DataNode} from the {@link FlowGraph}.
   *
   * @param node to be deleted.
   * @return true if {@link DataNode} is successfully deleted.
   */
  public boolean deleteDataNode(DataNode node) {
    try {
      rwLock.writeLock().lock();
      if (!dataNodeMap.containsKey(node.getId())) {
        return false;
      }
      // Delete node from dataNodeMap.
      dataNodeMap.remove(node.getId());
      // Delete all the edges adjacent to the node. First, delete edges from flowEdgeMap and next,
      // remove the edges from nodesToEdges. getOrDefault guards against an NPE should the
      // nodesToEdges invariant ever be violated.
      for (FlowEdge edge : nodesToEdges.getOrDefault(node, Collections.emptySet())) {
        flowEdgeMap.remove(edge.getId());
      }
      nodesToEdges.remove(node);
      return true;
    } finally {
      rwLock.writeLock().unlock();
    }
  }

  /**
   * Delete a {@link FlowEdge} by its identifier.
   *
   * @param edgeId identifier of the {@link FlowEdge} to be deleted.
   * @return true if {@link FlowEdge} is successfully deleted.
   */
  @Override
  public boolean deleteFlowEdge(String edgeId) {
    try {
      rwLock.writeLock().lock();
      return flowEdgeMap.containsKey(edgeId) && deleteFlowEdge(flowEdgeMap.get(edgeId));
    } finally {
      rwLock.writeLock().unlock();
    }
  }

  /**
   * Delete a {@link FlowEdge} from the {@link FlowGraph}.
   *
   * @param edge to be deleted.
   * @return true if {@link FlowEdge} is successfully deleted. If the source of a {@link FlowEdge} does not exist or
   * if the {@link FlowEdge} is not in the graph, return false.
   */
  public boolean deleteFlowEdge(FlowEdge edge) {
    try {
      rwLock.writeLock().lock();
      if (!dataNodeMap.containsKey(edge.getSrc())) {
        return false;
      }
      DataNode node = dataNodeMap.get(edge.getSrc());
      if (!nodesToEdges.get(node).contains(edge)) {
        return false;
      }
      this.nodesToEdges.get(node).remove(edge);
      this.flowEdgeMap.remove(edge.getId());
      return true;
    } finally {
      rwLock.writeLock().unlock();
    }
  }

  /**
   * Get the set of edges adjacent to a {@link DataNode}.
   *
   * @param nodeId identifier of the node
   * @return Set of {@link FlowEdge}s adjacent to the node, or null if the node is unknown.
   */
  @Override
  public Set<FlowEdge> getEdges(String nodeId) {
    try {
      rwLock.readLock().lock();
      return getEdges(this.dataNodeMap.get(nodeId));
    } finally {
      rwLock.readLock().unlock();
    }
  }

  /**
   * Get the set of edges adjacent to a {@link DataNode}.
   *
   * @param node {@link DataNode}
   * @return Set of {@link FlowEdge}s adjacent to the node, or null if the node is null/unknown.
   */
  @Override
  public Set<FlowEdge> getEdges(DataNode node) {
    try {
      rwLock.readLock().lock();
      return (node != null) ? this.nodesToEdges.getOrDefault(node, null) : null;
    } finally {
      rwLock.readLock().unlock();
    }
  }

  /** {@inheritDoc} **/
  @Override
  public FlowGraphPath findPath(FlowSpec flowSpec)
      throws PathFinder.PathFinderException, ReflectiveOperationException {
    try {
      // Hold the read lock for the entire path computation so a concurrent graph reload cannot
      // mutate the graph mid-search.
      rwLock.readLock().lock();
      // Instantiate a PathFinder; the implementation class is configurable per FlowSpec.
      Class<?> pathFinderClass = Class.forName(ConfigUtils.getString(flowSpec.getConfig(),
          FlowGraphConfigurationKeys.FLOW_GRAPH_PATH_FINDER_CLASS,
          FlowGraphConfigurationKeys.DEFAULT_FLOW_GRAPH_PATH_FINDER_CLASS));
      PathFinder pathFinder = (PathFinder) GobblinConstructorUtils.invokeLongestConstructor(pathFinderClass, this,
          flowSpec, dataNodeAliasMap);
      return pathFinder.findPath();
    } finally {
      rwLock.readLock().unlock();
    }
  }
}
3,840
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/flowgraph/DatasetDescriptorErrorUtils.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.modules.flowgraph;

import java.util.ArrayList;
import java.util.List;
import java.util.regex.Pattern;

import org.apache.gobblin.data.management.copy.hive.WhitelistBlacklist;

/**
 * Utility methods for validating user-supplied dataset descriptor values against the values
 * configured on a {@link org.apache.gobblin.service.modules.dataset.DatasetDescriptor}, appending
 * human-readable error strings to a caller-supplied list.
 */
public class DatasetDescriptorErrorUtils {
  public static final String DATASET_DESCRIPTOR_KEY_MISMATCH_ERROR_TEMPLATE =
      "%s.%s is mismatched. User input: %s. Expected value '%s'.";
  public static final String DATASET_DESCRIPTOR_KEY_MISSING_ERROR_TEMPLATE =
      "%s.%s is missing. Expected value '%s'.";
  public static final String DATASET_DESCRIPTOR_KEY_MISMATCH_ERROR_TEMPLATE_PARTITION =
      "%s.%s.%s is mismatched. User input: %s. Expected value '%s'.";
  public static final String DATASET_DESCRIPTOR_KEY_MISSING_ERROR_TEMPLATE_PARTITION =
      "%s.%s.%s is missing. Expected value '%s'.";
  public static final String DATASET_DESCRIPTOR_KEY_MISMATCH_ERROR_TEMPLATE_STRING_SPLIT =
      "%s.%s is mismatched. User input: %s is not splittable. Expected separation character: '%s' and total of %d parts.";
  public static final String DATASET_DESCRIPTOR_KEY_MISMATCH_ERROR_TEMPLATE_IS_GLOB_PATTERN =
      "%s.%s is mismatched. User input: %s is of a glob pattern. Expected input is not of a glob pattern.";
  public static final String DATASET_DESCRIPTOR_KEY_MISMATCH_ERROR_TEMPLATE_GLOB_PATTERN =
      "%s.%s is mismatched. User input: %s is not contained within the glob of %s.";
  public static final String DATASET_DESCRIPTOR_KEY_MISMATCH_ERROR_TEMPLATE_BLACKLIST =
      "%s.%s is mismatched. User input for %s: '%s' is in the blacklist. Please check the provided blacklist configuration.";

  /**
   * The populateErrorForDatasetDescriptorKey function will compare the submitted variables and add associated errors
   * to the error array. At most one error is added per call: a "missing" error when the provided value is null,
   * otherwise a "mismatch" error when the values disagree (unless the expected value is "any").
   *
   * @param errors list of errors
   * @param inputDataset whether it's the input or output
   * @param configKey DatasetDescriptorConfigKeys key of the field fed into the function
   * @param inputDatasetDescriptorValue the property from the flow.conf
   * @param providedDatasetDescriptorValue the property from the submitted flow configuration
   * @param testNullOnly flag that is true if we only want to test if a property is null or not
   */
  public static void populateErrorForDatasetDescriptorKey(ArrayList<String> errors, Boolean inputDataset,
      String configKey, String inputDatasetDescriptorValue, String providedDatasetDescriptorValue,
      Boolean testNullOnly) {
    String datasetDescriptorPrefix = inputDataset ? DatasetDescriptorConfigKeys.FLOW_INPUT_DATASET_DESCRIPTOR_PREFIX
        : DatasetDescriptorConfigKeys.FLOW_OUTPUT_DATASET_DESCRIPTOR_PREFIX;
    if (providedDatasetDescriptorValue == null) {
      errors.add(String.format(DATASET_DESCRIPTOR_KEY_MISSING_ERROR_TEMPLATE, datasetDescriptorPrefix, configKey,
          inputDatasetDescriptorValue));
      // Fix: return here so a null value no longer *also* produces a spurious
      // "mismatched. User input: null" error from the comparison below.
      return;
    }
    if (!testNullOnly && !(DatasetDescriptorConfigKeys.DATASET_DESCRIPTOR_CONFIG_ANY.equalsIgnoreCase(inputDatasetDescriptorValue)
        || inputDatasetDescriptorValue.equalsIgnoreCase(providedDatasetDescriptorValue))) {
      errors.add(String.format(DATASET_DESCRIPTOR_KEY_MISMATCH_ERROR_TEMPLATE, datasetDescriptorPrefix, configKey,
          providedDatasetDescriptorValue, inputDatasetDescriptorValue));
    }
  }

  /**
   * The populateErrorForDatasetDescriptorKeyPartition function will compare the submitted variables and add associated
   * errors to the error array. At most one error is added per call (see
   * {@link #populateErrorForDatasetDescriptorKey}).
   *
   * @param errors list of errors
   * @param inputDataset whether it's the input or output
   * @param configKey DatasetDescriptorConfigKeys key of the field fed into the function
   * @param partitionConfigKey the subkey for the partition (e.g. partition.pattern)
   * @param inputDatasetDescriptorValue the property from the flow.conf
   * @param providedDatasetDescriptorValue the property from the submitted flow configuration
   * @param testNullOnly flag that is true if we only want to test if a property is null or not
   */
  public static void populateErrorForDatasetDescriptorKeyPartition(ArrayList<String> errors, Boolean inputDataset,
      String configKey, String partitionConfigKey, String inputDatasetDescriptorValue,
      String providedDatasetDescriptorValue, Boolean testNullOnly) {
    String datasetDescriptorPrefix = inputDataset ? DatasetDescriptorConfigKeys.FLOW_INPUT_DATASET_DESCRIPTOR_PREFIX
        : DatasetDescriptorConfigKeys.FLOW_OUTPUT_DATASET_DESCRIPTOR_PREFIX;
    if (providedDatasetDescriptorValue == null) {
      errors.add(String.format(DATASET_DESCRIPTOR_KEY_MISSING_ERROR_TEMPLATE_PARTITION, datasetDescriptorPrefix,
          configKey, partitionConfigKey, inputDatasetDescriptorValue));
      // Fix: same early return as populateErrorForDatasetDescriptorKey — do not double-report.
      return;
    }
    if (!testNullOnly && !(DatasetDescriptorConfigKeys.DATASET_DESCRIPTOR_CONFIG_ANY.equalsIgnoreCase(inputDatasetDescriptorValue)
        || inputDatasetDescriptorValue.equalsIgnoreCase(providedDatasetDescriptorValue))) {
      errors.add(String.format(DATASET_DESCRIPTOR_KEY_MISMATCH_ERROR_TEMPLATE_PARTITION, datasetDescriptorPrefix,
          configKey, partitionConfigKey, providedDatasetDescriptorValue, inputDatasetDescriptorValue));
    }
  }

  /**
   * The populateErrorForDatasetDescriptorKeyRegex function will compare the submitted variables using the regex
   * matching method and add associated errors to the error array.
   * NOTE(review): throws NullPointerException if providedDatasetDescriptorValue is null — callers are
   * presumably expected to null-check first; confirm before relying on this with nullable input.
   *
   * @param errors list of errors
   * @param inputDataset whether it's the input or output
   * @param configKey DatasetDescriptorConfigKeys key of the field fed into the function
   * @param inputDatasetDescriptorValue the property from the flow.conf (treated as a regex)
   * @param providedDatasetDescriptorValue the property from the submitted flow configuration
   */
  public static void populateErrorForDatasetDescriptorKeyRegex(ArrayList<String> errors, Boolean inputDataset,
      String configKey, String inputDatasetDescriptorValue, String providedDatasetDescriptorValue) {
    String datasetDescriptorPrefix = inputDataset ? DatasetDescriptorConfigKeys.FLOW_INPUT_DATASET_DESCRIPTOR_PREFIX
        : DatasetDescriptorConfigKeys.FLOW_OUTPUT_DATASET_DESCRIPTOR_PREFIX;
    if (!Pattern.compile(inputDatasetDescriptorValue).matcher(providedDatasetDescriptorValue).matches()) {
      errors.add(String.format(DatasetDescriptorErrorUtils.DATASET_DESCRIPTOR_KEY_MISMATCH_ERROR_TEMPLATE,
          datasetDescriptorPrefix, configKey, providedDatasetDescriptorValue, inputDatasetDescriptorValue));
    }
  }

  /**
   * The populateErrorForDatasetDescriptorKeyBlacklist function will check whether the database and/or table is in the
   * blacklist config.
   *
   * @param errors list of errors
   * @param inputDataset whether it's the input or output
   * @param type whether it's the database or the table within a database that the function is checking
   * @param configKey DatasetDescriptorConfigKeys key of the field fed into the function
   * @param whitelistBlacklist whitelistblacklist object for filtering hive based tables
   * @param inputDbName the database name from the submitted flow configuration
   * @param inputTableName the table name from the submitted flow configuration
   */
  public static void populateErrorForDatasetDescriptorKeyBlacklist(ArrayList<String> errors, Boolean inputDataset,
      String type, String configKey, WhitelistBlacklist whitelistBlacklist, String inputDbName,
      String inputTableName) {
    String datasetDescriptorPrefix = inputDataset ? DatasetDescriptorConfigKeys.FLOW_INPUT_DATASET_DESCRIPTOR_PREFIX
        : DatasetDescriptorConfigKeys.FLOW_OUTPUT_DATASET_DESCRIPTOR_PREFIX;
    if (type.equals("database") && !whitelistBlacklist.acceptDb(inputDbName)) {
      errors.add(String.format(DatasetDescriptorErrorUtils.DATASET_DESCRIPTOR_KEY_MISMATCH_ERROR_TEMPLATE_BLACKLIST,
          datasetDescriptorPrefix, "database", configKey, inputDbName));
    } else if (type.equals("table") && !whitelistBlacklist.acceptTable(inputDbName, inputTableName)) {
      errors.add(String.format(DatasetDescriptorErrorUtils.DATASET_DESCRIPTOR_KEY_MISMATCH_ERROR_TEMPLATE_BLACKLIST,
          datasetDescriptorPrefix, "table", configKey, String.join(".", inputDbName, inputTableName)));
    }
  }

  /**
   * Validates that a path split into parts by a separator has the expected number of parts, adding an
   * error to the list otherwise.
   *
   * @param errors list of errors
   * @param inputDataset whether it's the input or output
   * @param configKey DatasetDescriptorConfigKeys key of the field fed into the function
   * @param parts the list of parts after splitting using the separation character
   * @param inputPath the path from the submitted flow configuration
   * @param sepChar the delimiter/separation character
   * @param size the expected size of the list of parts
   */
  public static void populateErrorForDatasetDescriptorKeySize(ArrayList<String> errors, Boolean inputDataset,
      String configKey, List<String> parts, String inputPath, String sepChar, int size) {
    String datasetDescriptorPrefix = inputDataset ? DatasetDescriptorConfigKeys.FLOW_INPUT_DATASET_DESCRIPTOR_PREFIX
        : DatasetDescriptorConfigKeys.FLOW_OUTPUT_DATASET_DESCRIPTOR_PREFIX;
    if (parts.size() != size) {
      errors.add(String.format(DatasetDescriptorErrorUtils.DATASET_DESCRIPTOR_KEY_MISMATCH_ERROR_TEMPLATE_STRING_SPLIT,
          datasetDescriptorPrefix, configKey, inputPath, sepChar, size));
    }
  }
}
3,841
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/flowgraph/FlowEdgeFactory.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.modules.flowgraph;

import java.util.List;

import com.typesafe.config.Config;

import org.apache.gobblin.runtime.api.SpecExecutor;
import org.apache.gobblin.service.modules.template_catalog.FSFlowTemplateCatalog;

/**
 * A factory abstraction for constructing {@link FlowEdge}s from edge configuration, used when
 * building a {@link FlowGraph}. Implementations are typically resolved reflectively from the
 * edge's configured factory class.
 */
public interface FlowEdgeFactory {
  /**
   * Construct a {@link FlowEdge} from the edge properties.
   *
   * @param edgeProps properties of the {@link FlowEdge}
   * @param flowCatalog an instance of {@link FSFlowTemplateCatalog} that returns {@link org.apache.gobblin.service.modules.template.FlowTemplate}s
   *                    useful for creating a {@link FlowEdge}.
   * @param specExecutors the {@link SpecExecutor}s capable of executing jobs along this edge
   * @return an instance of {@link FlowEdge}
   * @throws FlowEdgeCreationException if the edge cannot be constructed from the given properties
   */
  public FlowEdge createFlowEdge(Config edgeProps, FSFlowTemplateCatalog flowCatalog, List<SpecExecutor> specExecutors)
      throws FlowEdgeCreationException;

  /**
   * Thrown when a {@link FlowEdge} cannot be created; wraps the underlying cause and preserves it
   * for stack traces.
   */
  public class FlowEdgeCreationException extends Exception {
    private static final String MESSAGE_FORMAT = "Failed to create FlowEdge because of: %s";

    public FlowEdgeCreationException(Exception e) {
      super(String.format(MESSAGE_FORMAT, e.getMessage()), e);
    }
  }
}
3,842
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/flowgraph/DataNode.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.modules.flowgraph;

import com.typesafe.config.Config;

import org.apache.gobblin.annotation.Alpha;

/**
 * Representation of a node in the FlowGraph. Each node is identified by a unique identifier.
 */
@Alpha
public interface DataNode {
  /**
   * @return the identifier of a {@link DataNode}.
   */
  String getId();

  /**
   * @return the attributes of a {@link DataNode}. It also includes properties for resolving a
   * {@link org.apache.gobblin.runtime.api.JobTemplate} e.g. "source.fs.uri" for an HDFS node,
   * "jdbc.publisher.url" for JDBC node.
   */
  Config getRawConfig();

  /**
   * @return a default dataset descriptor class for this DataNode, or null if a default should not be used.
   */
  String getDefaultDatasetDescriptorClass();

  /**
   * @return a default dataset descriptor platform for this DataNode, or null if a default should not be used.
   */
  String getDefaultDatasetDescriptorPlatform();

  /**
   * @return true if the {@link DataNode} is active (i.e. eligible to participate in path finding).
   */
  boolean isActive();

  /**
   * Thrown when a {@link DataNode} cannot be created; wraps the underlying cause and preserves it
   * for stack traces.
   */
  class DataNodeCreationException extends Exception {
    private static final String MESSAGE_FORMAT = "Failed to create DataNode because of: %s";

    public DataNodeCreationException(Exception e) {
      super(String.format(MESSAGE_FORMAT, e.getMessage()), e);
    }
  }
}
3,843
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/flowgraph/FSPathAlterationFlowGraphListener.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.modules.flowgraph;

import java.io.File;

import org.apache.hadoop.fs.Path;

import com.google.common.base.Optional;

import lombok.extern.slf4j.Slf4j;

import org.apache.gobblin.service.modules.flow.MultiHopFlowCompiler;
import org.apache.gobblin.service.modules.template_catalog.UpdatableFSFlowTemplateCatalog;
import org.apache.gobblin.util.filesystem.PathAlterationListener;
import org.apache.gobblin.util.filesystem.PathAlterationObserver;

/**
 * An implementation of {@link PathAlterationListener} to listen for changes in a directory and apply it to a GaaS
 * FlowGraph. Is invoked by {@link PathAlterationObserver} which would check a folder and perform recursive comparisons
 * on files compared to their last polled state. On any detected differences in files when a check is done, the
 * {@link FlowGraph} will be updated.
 *
 * <p>Per-file callbacks ({@code onFileCreate} etc.) are intentionally no-ops: the graph is rebuilt
 * wholesale in {@link #onCheckDetectedChange()} rather than patched incrementally.
 */
@Slf4j
public class FSPathAlterationFlowGraphListener implements PathAlterationListener {
  private final MultiHopFlowCompiler compiler;
  private final BaseFlowGraphHelper flowGraphHelper;
  // Made final: the reference is only assigned in the constructor.
  private final Optional<UpdatableFSFlowTemplateCatalog> flowTemplateCatalog;
  private final boolean shouldMonitorTemplateCatalog;

  /**
   * @param flowTemplateCatalog catalog whose template cache is cleared on change, when present
   * @param compiler compiler to receive the freshly generated {@link FlowGraph}
   * @param baseDirectory directory containing the flowgraph files; must exist
   * @param flowGraphHelper helper used to regenerate the {@link FlowGraph} from disk
   * @param shouldMonitorTemplateCatalog whether template-catalog cache invalidation is enabled
   * @throws RuntimeException if {@code baseDirectory} does not exist
   */
  public FSPathAlterationFlowGraphListener(Optional<UpdatableFSFlowTemplateCatalog> flowTemplateCatalog,
      MultiHopFlowCompiler compiler, String baseDirectory, BaseFlowGraphHelper flowGraphHelper,
      boolean shouldMonitorTemplateCatalog) {
    this.flowGraphHelper = flowGraphHelper;
    this.flowTemplateCatalog = flowTemplateCatalog;
    this.shouldMonitorTemplateCatalog = shouldMonitorTemplateCatalog;
    File graphDir = new File(baseDirectory);
    // Populate the flowgraph with any existing files
    if (!graphDir.exists()) {
      throw new RuntimeException(String.format("Flowgraph directory at path %s does not exist!", graphDir));
    }
    this.compiler = compiler;
  }

  @Override
  public void onStart(final PathAlterationObserver observer) {
  }

  @Override
  public void onFileCreate(final Path path) {
  }

  @Override
  public void onFileChange(final Path path) {
  }

  @Override
  public void onStop(final PathAlterationObserver observer) {
  }

  @Override
  public void onDirectoryCreate(final Path directory) {
  }

  @Override
  public void onDirectoryChange(final Path directory) {
  }

  @Override
  public void onDirectoryDelete(final Path directory) {
  }

  @Override
  public void onFileDelete(final Path path) {
  }

  @Override
  public void onCheckDetectedChange() {
    log.info("Detecting change in flowgraph files, reloading flowgraph");
    // Fix: also check Optional presence. Previously only the boolean flag guarded the get(), so an
    // enabled monitor with an absent catalog threw IllegalStateException and aborted the reload.
    if (this.shouldMonitorTemplateCatalog && this.flowTemplateCatalog.isPresent()) {
      // Clear template cache as templates are colocated with the flowgraph, and thus could have been updated too
      this.flowTemplateCatalog.get().clearTemplates();
    }
    FlowGraph newGraph = this.flowGraphHelper.generateFlowGraph();
    if (newGraph != null) {
      this.compiler.setFlowGraph(newGraph);
    }
  }
}
3,844
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/flowgraph/SharedFlowGraphHelper.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.modules.flowgraph;

import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

import org.apache.commons.io.FilenameUtils;
import org.apache.hadoop.fs.Path;

import com.google.common.base.Optional;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;

import lombok.extern.slf4j.Slf4j;

import org.apache.gobblin.runtime.api.TopologySpec;
import org.apache.gobblin.service.modules.template_catalog.FSFlowTemplateCatalog;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;

/**
 * Supports a configuration of a flowgraph where it can support multiple sub-flowgraphs within its directory.
 * Node definitions are shared between each subgraph, but can be overwritten within the subgraph.
 * Edge definitions are only defined in the subgraphs.
 * e.g.
 * /gobblin-flowgraph-absolute-dir
 *   /subgraphA
 *     /nodeA              (NODE_FOLDER_DEPTH)
 *     /nodeB
 *     edgeAB.properties
 *   /subgraphB
 *     /nodeA
 *     /nodeB
 *     edgeAB.properties   (EDGE_FILE_DEPTH)
 *     A.properties        (NODE_FILE_DEPTH)
 *   /nodes
 *     A.properties
 *     B.properties
 */
@Slf4j
public class SharedFlowGraphHelper extends BaseFlowGraphHelper {
  // Absolute path of the folder holding shared node definitions (baseDirectory + "/nodes").
  protected String sharedNodeFolder;
  private static String SHARED_NODE_FOLDER_NAME = "nodes";
  // Node folders live one level deeper than in the base layout (subgraph/node).
  private static int NODE_FOLDER_DEPTH = 2;

  public SharedFlowGraphHelper(Optional<? extends FSFlowTemplateCatalog> flowTemplateCatalog,
      Map<URI, TopologySpec> topologySpecMap, String baseDirectory, String flowGraphFolderName,
      String javaPropsExtentions, String hoconFileExtensions, boolean instrumentationEnabled, Config config) {
    super(flowTemplateCatalog, topologySpecMap, baseDirectory, flowGraphFolderName, javaPropsExtentions,
        hoconFileExtensions, instrumentationEnabled, config);
    this.sharedNodeFolder = baseDirectory + File.separator + SHARED_NODE_FOLDER_NAME;
  }

  /**
   * Looks into the sharedNodeFolder to use those configurations as fallbacks for the node to add.
   * Otherwise if the shared node does not exist, attempt to add the node in the same manner as
   * {@link BaseFlowGraphHelper}. Failures are recorded on the update-failed meter and logged, not
   * propagated.
   *
   * @param graph the {@link FlowGraph} to add the node to
   * @param path of node folder in the subgraph, so path is expected to be a directory
   */
  @Override
  protected void addDataNode(FlowGraph graph, java.nio.file.Path path) {
    try {
      // Load node from shared folder first if it exists
      // NOTE(review): this local is never used below; the shared config is loaded directly into
      // nodeConfig inside the loop. Candidate for removal.
      Config sharedNodeConfig = ConfigFactory.empty();
      // Candidate file suffixes come from the superclass-configured props/hocon extensions.
      List<String> nodeFileSuffixes = new ArrayList<>(this.javaPropsExtensions);
      nodeFileSuffixes.addAll(this.hoconFileExtensions);
      // Since there can be multiple file types supported, check if there is a shared node definition
      // that matches any of the file types. If multiple definitions in the same folder, only load one
      // of them. Assume that configuration overrides in subfolders use the same file type for the
      // same node.
      Config nodeConfig = ConfigFactory.empty();
      for (String fileSuffix: nodeFileSuffixes) {
        String nodePropertyFile = path.getFileName().toString() + "." + fileSuffix;
        File sharedNodeFile = new File(this.sharedNodeFolder, nodePropertyFile);
        if (sharedNodeFile.exists()) {
          nodeConfig = loadNodeFileWithOverrides(new Path(sharedNodeFile.getPath()));
        }
        // A subgraph-local definition takes precedence, falling back to the shared one.
        File nodeFilePath = new File(path.toString(), nodePropertyFile);
        if (nodeFilePath.exists()) {
          nodeConfig = loadNodeFileWithOverrides(new Path(nodeFilePath.getPath())).withFallback(nodeConfig);
        }
        // Stop at the first suffix that yielded any configuration.
        if (!nodeConfig.isEmpty()) {
          break;
        }
      }
      if (nodeConfig.isEmpty()) {
        throw new IOException(
            String.format("Cannot find expected node file starting with %s in %s or %s", path.getFileName().toString(),
                sharedNodeFolder, path));
      }
      // Instantiate the DataNode implementation named in the config (or the default).
      Class dataNodeClass = Class.forName(ConfigUtils.getString(nodeConfig, FlowGraphConfigurationKeys.DATA_NODE_CLASS,
          FlowGraphConfigurationKeys.DEFAULT_DATA_NODE_CLASS));
      DataNode dataNode = (DataNode) GobblinConstructorUtils.invokeLongestConstructor(dataNodeClass, nodeConfig);
      if (!graph.addDataNode(dataNode)) {
        log.warn("Could not add DataNode {} to FlowGraph; skipping", dataNode.getId());
      } else {
        log.info("Added Datanode {} to FlowGraph", dataNode.getId());
      }
    } catch (IOException | ReflectiveOperationException e) {
      if (this.flowGraphUpdateFailedMeter.isPresent()) {
        this.flowGraphUpdateFailedMeter.get().mark();
      }
      log.warn(String.format("Could not add DataNode defined in %s due to exception: ", path), e);
    }
  }

  /**
   * Derives the node id from the node file name (minus extension) rather than from the config
   * contents, since shared node files are named after the node they define.
   */
  @Override
  protected Config getNodeConfigWithOverrides(Config nodeConfig, Path nodeFilePath) {
    String nodeId = FilenameUtils.removeExtension(nodeFilePath.getName().toString());
    return nodeConfig.withValue(FlowGraphConfigurationKeys.DATA_NODE_ID_KEY, ConfigValueFactory.fromAnyRef(nodeId));
  }

  /** @return the node-folder depth for this layout (one level deeper than the base layout). */
  @Override
  protected int getNodeFileDepth() {
    return NODE_FOLDER_DEPTH;
  }
}
3,845
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/flowgraph/DatasetDescriptorConfigKeys.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.modules.flowgraph;

/**
 * Config keys related to {@link org.apache.gobblin.service.modules.dataset.DatasetDescriptor}.
 */
public class DatasetDescriptorConfigKeys {
  // Gobblin Service Dataset Descriptor related keys
  public static final String FLOW_INPUT_DATASET_DESCRIPTOR_PREFIX = "gobblin.flow.input.dataset.descriptor";
  public static final String FLOW_OUTPUT_DATASET_DESCRIPTOR_PREFIX = "gobblin.flow.output.dataset.descriptor";
  public static final String FLOW_EDGE_INPUT_DATASET_DESCRIPTOR_PREFIX = "gobblin.flow.edge.input.dataset.descriptor";
  public static final String FLOW_EDGE_OUTPUT_DATASET_DESCRIPTOR_PREFIX = "gobblin.flow.edge.output.dataset.descriptor";

  public static final String CLASS_KEY = "class";
  public static final String PLATFORM_KEY = "platform";
  public static final String PATH_KEY = "path";
  public static final String SUBPATHS_KEY = "subPaths";
  public static final String FS_URI_KEY = "fs.uri";
  public static final String DATABASE_KEY = "databaseName";
  public static final String TABLE_KEY = "tableName";
  public static final String FORMAT_KEY = "format";
  public static final String CODEC_KEY = "codec";
  public static final String DESCRIPTION_KEY = "description";
  public static final String IS_RETENTION_APPLIED_KEY = "isRetentionApplied";
  public static final String IS_COMPACTED_KEY = "isCompacted";
  public static final String IS_COMPACTED_AND_DEDUPED_KEY = "isCompactedAndDeduped";
  public static final String IS_INPUT_DATASET = "isInputDataset";

  // Dataset encryption related keys
  // NOTE(review): constant name is missing an 'R' ("ENCYPTION"); not renamed here since external
  // callers may reference it — fix alongside its usages in a coordinated change.
  public static final String ENCYPTION_PREFIX = "encrypt";
  public static final String ENCRYPTION_ALGORITHM_KEY = "algorithm";
  public static final String ENCRYPTION_KEYSTORE_TYPE_KEY = "keystore_type";
  public static final String ENCRYPTION_KEYSTORE_ENCODING_KEY = "keystore_encoding";
  public static final String ENCRYPTION_LEVEL_KEY = "level";
  public static final String ENCRYPTED_FIELDS = "encryptedFields";

  // Dataset partition related keys
  public static final String PARTITION_PREFIX = "partition";
  public static final String PARTITION_TYPE_KEY = "type";
  public static final String PARTITION_PATTERN_KEY = "pattern";

  // Wildcard values: "any" matches every value; "none" requires absence.
  public static final String DATASET_DESCRIPTOR_CONFIG_ANY = "any";
  public static final String DATASET_DESCRIPTOR_CONFIG_NONE = "none";

  // Constants holder — prevent instantiation.
  private DatasetDescriptorConfigKeys() {
  }
}
3,846
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/flowgraph/BaseFlowEdge.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.service.modules.flowgraph; import java.net.URI; import java.util.List; import org.apache.hadoop.security.UserGroupInformation; import com.google.common.base.Preconditions; import com.google.common.collect.Lists; import com.typesafe.config.Config; import joptsimple.internal.Strings; import lombok.Getter; import lombok.extern.slf4j.Slf4j; import org.apache.gobblin.annotation.Alpha; import org.apache.gobblin.runtime.api.SpecExecutor; import org.apache.gobblin.service.modules.template.FlowTemplate; import org.apache.gobblin.service.modules.template_catalog.FSFlowTemplateCatalog; import org.apache.gobblin.util.ConfigUtils; /** * An implementation of {@link FlowEdge}. If a {@link FSFlowTemplateCatalog} is specified in the constructor, * {@link #flowTemplate} is reloaded when {@link #getFlowTemplate()} is called. 
*/ @Alpha @Slf4j public class BaseFlowEdge implements FlowEdge { @Getter protected String src; @Getter protected String dest; protected FlowTemplate flowTemplate; @Getter private List<SpecExecutor> executors; @Getter private Config config; @Getter private String id; @Getter private boolean active; private final FSFlowTemplateCatalog flowTemplateCatalog; //Constructor public BaseFlowEdge(List<String> endPoints, String edgeId, FlowTemplate flowTemplate, List<SpecExecutor> executors, Config properties, boolean active) { this(endPoints, edgeId, flowTemplate, executors, properties, active, null); } public BaseFlowEdge(List<String> endPoints, String edgeId, FlowTemplate flowTemplate, List<SpecExecutor> executors, Config properties, boolean active, FSFlowTemplateCatalog flowTemplateCatalog) { this.src = endPoints.get(0); this.dest = endPoints.get(1); this.flowTemplate = flowTemplate; this.executors = executors; this.active = active; this.config = properties; this.id = edgeId; this.flowTemplateCatalog = flowTemplateCatalog; } @Override public FlowTemplate getFlowTemplate() { try { if (this.flowTemplateCatalog != null) { this.flowTemplate = this.flowTemplateCatalog.getFlowTemplate(this.flowTemplate.getUri()); } } catch (Exception e) { // If loading template fails, use the template that was successfully loaded on construction log.warn("Failed to get flow template " + this.flowTemplate.getUri() + ", using in-memory flow template", e); } return this.flowTemplate; } @Override public boolean isAccessible(UserGroupInformation user) { return true; } /** * The {@link FlowEdge}s are the same if they have the same endpoints and both refer to the same {@FlowTemplate} i.e. 
* the {@link FlowTemplate} uris are the same */ @Override public boolean equals(Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } FlowEdge that = (FlowEdge) o; if (!(this.getSrc().equals(that.getSrc())) && ((this.getDest()).equals(that.getDest()))) { return false; } if (!this.getFlowTemplate().getUri().equals(that.getFlowTemplate().getUri())) { return false; } return true; } @Override public int hashCode() { return this.id.hashCode(); } @Override public String toString() { return this.id; } /** * A {@link FlowEdgeFactory} for creating {@link BaseFlowEdge}. */ public static class Factory implements FlowEdgeFactory { /** * A method to return an instance of {@link BaseFlowEdge}. The method performs all the validation checks * and returns * @param edgeProps Properties of edge * @param flowTemplateCatalog Flow Catalog used to retrieve {@link FlowTemplate}s. * @return a {@link BaseFlowEdge} */ @Override public FlowEdge createFlowEdge(Config edgeProps, FSFlowTemplateCatalog flowTemplateCatalog, List<SpecExecutor> specExecutors) throws FlowEdgeCreationException { try { String source = ConfigUtils.getString(edgeProps, FlowGraphConfigurationKeys.FLOW_EDGE_SOURCE_KEY, ""); Preconditions.checkArgument(!Strings.isNullOrEmpty(source), "A FlowEdge must have a non-null or empty source"); String destination = ConfigUtils.getString(edgeProps, FlowGraphConfigurationKeys.FLOW_EDGE_DESTINATION_KEY, ""); Preconditions.checkArgument(!Strings.isNullOrEmpty(destination), "A FlowEdge must have a non-null or empty destination"); List<String> endPoints = Lists.newArrayList(source, destination); String edgeId = ConfigUtils.getString(edgeProps, FlowGraphConfigurationKeys.FLOW_EDGE_ID_KEY, ""); Preconditions.checkArgument(!Strings.isNullOrEmpty(edgeId), "A FlowEdge must have a non-null or empty Id"); String flowTemplateDirUri = ConfigUtils.getString(edgeProps, FlowGraphConfigurationKeys.FLOW_EDGE_TEMPLATE_DIR_URI_KEY, ""); //Perform basic 
validation Preconditions.checkArgument(endPoints.size() == 2, "A FlowEdge must have 2 end points"); Preconditions .checkArgument(specExecutors.size() > 0, "A FlowEdge must have at least one SpecExecutor"); Preconditions .checkArgument(!Strings.isNullOrEmpty(flowTemplateDirUri), "FlowTemplate URI must be not null or empty"); boolean isActive = ConfigUtils.getBoolean(edgeProps, FlowGraphConfigurationKeys.FLOW_EDGE_IS_ACTIVE_KEY, true); FlowTemplate flowTemplate = flowTemplateCatalog.getFlowTemplate(new URI(flowTemplateDirUri)); return new BaseFlowEdge(endPoints, edgeId, flowTemplate, specExecutors, edgeProps, isActive, flowTemplateCatalog); } catch (RuntimeException e) { throw e; } catch (Exception e) { throw new FlowEdgeCreationException(e); } } } }
3,847
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/flowgraph/FlowGraphMonitor.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.service.modules.flowgraph; import com.google.common.util.concurrent.Service; /** * A service that listens to an external service or filesystem (Git, FS) to apply changes to the flowgraph without having to restart GaaS */ public interface FlowGraphMonitor extends Service { /** * Indicates that the service is ready to load the flowgraph * @param value whether GaaS is ready */ void setActive(boolean value); }
3,848
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/flowgraph/BaseDataNode.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.service.modules.flowgraph; import com.google.common.base.Preconditions; import com.typesafe.config.Config; import joptsimple.internal.Strings; import lombok.EqualsAndHashCode; import lombok.Getter; import lombok.extern.slf4j.Slf4j; import org.apache.gobblin.annotation.Alpha; import org.apache.gobblin.util.ConfigUtils; /** * An implementation of {@link DataNode}. 
*/ @Alpha @Slf4j @EqualsAndHashCode (exclude = {"rawConfig", "resolvedConfig", "active"}) public class BaseDataNode implements DataNode { @Getter private String id; @Getter private Config rawConfig; @Getter private Config resolvedConfig; @Getter private boolean active = true; public BaseDataNode(Config nodeProps) throws DataNodeCreationException { try { this.rawConfig = nodeProps; this.resolvedConfig = nodeProps.resolve(); String nodeId = ConfigUtils.getString(this.resolvedConfig, FlowGraphConfigurationKeys.DATA_NODE_ID_KEY, ""); Preconditions.checkArgument(!Strings.isNullOrEmpty(nodeId), "Node Id cannot be null or empty"); this.id = nodeId; if (this.resolvedConfig.hasPath(FlowGraphConfigurationKeys.DATA_NODE_IS_ACTIVE_KEY)) { this.active = this.resolvedConfig.getBoolean(FlowGraphConfigurationKeys.DATA_NODE_IS_ACTIVE_KEY); } } catch (Exception e) { throw new DataNodeCreationException(e); } } @Override public String getDefaultDatasetDescriptorClass() { return null; } @Override public String getDefaultDatasetDescriptorPlatform() { return null; } }
3,849
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/flowgraph/FlowGraph.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.modules.flowgraph;

import java.util.Collection;

import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.runtime.api.FlowSpec;
import org.apache.gobblin.service.modules.flow.FlowGraphPath;
import org.apache.gobblin.service.modules.flowgraph.pathfinder.PathFinder;

/**
 * An interface for {@link FlowGraph}. A {@link FlowGraph} consists of {@link DataNode}s and {@link FlowEdge}s.
 * The interface provides methods for adding and removing {@link DataNode}s and {@link FlowEdge}s to the {@link FlowGraph}.
 * In addition the interface provides methods to return factory classes for creation of {@link DataNode}s and
 * {@link FlowEdge}s.
 */
@Alpha
public interface FlowGraph {
  /**
   * Get a {@link DataNode} from the node identifier.
   *
   * @param nodeId {@link DataNode} identifier.
   * @return the {@link DataNode} object if the node is present in the {@link FlowGraph}.
   */
  DataNode getNode(String nodeId);

  /**
   * Add a {@link DataNode} to the {@link FlowGraph}.
   *
   * @param node {@link DataNode} to be added
   * @return true if {@link DataNode} is added to the {@link FlowGraph} successfully.
   */
  boolean addDataNode(DataNode node);

  /**
   * Add a {@link FlowEdge} to the {@link FlowGraph}.
   *
   * @param edge {@link FlowEdge} to be added
   * @return true if {@link FlowEdge} is added to the {@link FlowGraph} successfully.
   */
  boolean addFlowEdge(FlowEdge edge);

  /**
   * Remove a {@link DataNode} and all its incident edges from the {@link FlowGraph}.
   *
   * @param nodeId identifier of the {@link DataNode} to be removed
   * @return true if {@link DataNode} is removed from the {@link FlowGraph} successfully.
   */
  boolean deleteDataNode(String nodeId);

  /**
   * Remove a {@link FlowEdge} from the {@link FlowGraph}.
   *
   * @param edgeId label of the edge to be removed
   * @return true if edge is removed from the {@link FlowGraph} successfully.
   */
  boolean deleteFlowEdge(String edgeId);

  /**
   * Get a collection of edges adjacent to a {@link DataNode}. Useful for path finding algorithms and graph
   * traversal algorithms such as Djikstra's shortest-path algorithm, BFS.
   *
   * @param nodeId identifier of the {@link DataNode}
   * @return a collection of edges adjacent to the {@link DataNode}
   */
  Collection<FlowEdge> getEdges(String nodeId);

  /**
   * Get a collection of edges adjacent to a {@link DataNode}.
   *
   * @param node {@link DataNode}
   * @return a collection of edges adjacent to the {@link DataNode}
   */
  Collection<FlowEdge> getEdges(DataNode node);

  /**
   * A method that takes a {@link FlowSpec} containing the source and destination {@link DataNode}s, as well as the
   * source and target {@link org.apache.gobblin.service.modules.dataset.DatasetDescriptor}s, and returns a sequence
   * of fully resolved {@link org.apache.gobblin.runtime.api.JobSpec}s that will move the source dataset
   * from the source datanode, perform any necessary transformations and land the dataset at the destination node
   * in the format described by the target {@link org.apache.gobblin.service.modules.dataset.DatasetDescriptor}.
   *
   * @param flowSpec a {@link org.apache.gobblin.runtime.api.Spec} containing a high-level description of input flow.
   * @return an instance of {@link FlowGraphPath} that encapsulates a sequence of
   *         {@link org.apache.gobblin.runtime.api.JobSpec}s satisfying flowSpec.
   */
  FlowGraphPath findPath(FlowSpec flowSpec) throws PathFinder.PathFinderException, ReflectiveOperationException;
}
3,850
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/flowgraph/BaseFlowGraphHelper.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.modules.flowgraph;

import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Set;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import com.google.common.base.Joiner;
import com.google.common.base.Optional;
import com.google.common.collect.Sets;
import com.google.common.io.Files;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;

import lombok.extern.slf4j.Slf4j;

import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.metrics.ContextAwareMeter;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.ServiceMetricNames;
import org.apache.gobblin.runtime.api.SpecExecutor;
import org.apache.gobblin.runtime.api.TopologySpec;
import org.apache.gobblin.service.modules.template_catalog.FSFlowTemplateCatalog;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.PullFileLoader;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;

/**
 * Provides the common set of functionalities needed by {@link FlowGraphMonitor} to read changes in files and
 * apply them to a {@link FlowGraph}.
 * Assumes that the directory structure between flowgraphs configuration files are the same.
 *
 * Assumes that the flowgraph follows this format
 * /gobblin-flowgraph
 *   /nodeA
 *     /nodeB
 *       edgeAB.properties
 *     A.properties
 *   /nodeB
 *     B.properties
 */
@Slf4j
public class BaseFlowGraphHelper {
  // Depth (in path components from the flowgraph root) at which node files live:
  // flowGraphDir/nodeName/nodeName.properties
  private static final int NODE_FILE_DEPTH = 3;
  // Depth at which edge files live: flowGraphDir/src/dest/edgeName.properties
  private static final int EDGE_FILE_DEPTH = 4;
  // Joiner character for building edge ids as src_dest_edgeName.
  private static final String FLOW_EDGE_LABEL_JOINER_CHAR = "_";

  // Root directory containing the flowgraph folder.
  final String baseDirectory;
  private final Config emptyConfig = ConfigFactory.empty();
  // Catalog used to resolve FlowTemplates for edges; edges are skipped when absent.
  private final Optional<? extends FSFlowTemplateCatalog> flowTemplateCatalog;
  // Maps SpecExecutor URIs (logical names) to their TopologySpecs.
  private final Map<URI, TopologySpec> topologySpecMap;
  protected MetricContext metricContext;
  final String flowGraphFolderName;
  // Loader for node/edge config files (both java-properties and HOCON formats).
  final PullFileLoader pullFileLoader;
  protected final Set<String> javaPropsExtensions;
  protected final Set<String> hoconFileExtensions;
  // Meter marked whenever a node/edge fails to load; absent when instrumentation is disabled.
  protected final Optional<ContextAwareMeter> flowGraphUpdateFailedMeter;

  /**
   * @param flowTemplateCatalog catalog used to resolve {@link org.apache.gobblin.service.modules.template.FlowTemplate}s
   * @param topologySpecMap map from SpecExecutor URI to {@link TopologySpec}
   * @param baseDirectory root directory under which the flowgraph folder lives
   * @param flowGraphFolderName name of the flowgraph folder (e.g. "gobblin-flowgraph")
   * @param javaPropsExtentions comma-separated list of java-properties file extensions
   * @param hoconFileExtensions comma-separated list of HOCON file extensions
   * @param instrumentationEnabled whether to create the failure meter
   * @param config configuration used to build the metric context
   */
  public BaseFlowGraphHelper(Optional<? extends FSFlowTemplateCatalog> flowTemplateCatalog,
      Map<URI, TopologySpec> topologySpecMap, String baseDirectory, String flowGraphFolderName,
      String javaPropsExtentions, String hoconFileExtensions, boolean instrumentationEnabled, Config config) {
    this.flowTemplateCatalog = flowTemplateCatalog;
    this.topologySpecMap = topologySpecMap;
    this.baseDirectory = baseDirectory;
    this.flowGraphFolderName = flowGraphFolderName;
    Path folderPath = new Path(baseDirectory, this.flowGraphFolderName);
    this.javaPropsExtensions = Sets.newHashSet(javaPropsExtentions.split(","));
    this.hoconFileExtensions = Sets.newHashSet(hoconFileExtensions.split(","));
    if (instrumentationEnabled) {
      this.metricContext = Instrumented.getMetricContext(ConfigUtils.configToState(config), BaseFlowGraphHelper.class);
      this.flowGraphUpdateFailedMeter =
          Optional.of(this.metricContext.contextAwareMeter(ServiceMetricNames.FLOWGRAPH_UPDATE_FAILED_METER));
    } else {
      this.flowGraphUpdateFailedMeter = Optional.absent();
    }
    try {
      // The loader reads from the local filesystem only.
      this.pullFileLoader = new PullFileLoader(folderPath,
          FileSystem.get(URI.create(ConfigurationKeys.LOCAL_FS_URI), new Configuration()),
          this.javaPropsExtensions, this.hoconFileExtensions);
    } catch (IOException e) {
      throw new RuntimeException("Could not create pull file loader", e);
    }
  }

  /**
   * Add a {@link DataNode} to the {@link FlowGraph}. The method uses the {@link FlowGraphConfigurationKeys#DATA_NODE_CLASS}
   * config to instantiate a {@link DataNode} from the node config file. Failures are logged and metered but do not
   * propagate, so one bad node file cannot break the whole graph load.
   *
   * @param path of node to add
   */
  protected void addDataNode(FlowGraph graph, java.nio.file.Path path) {
    // Skip directories and files that are not at node depth / lack a recognized extension.
    if (!java.nio.file.Files.isDirectory(path) && checkFilePath(path.toString(), getNodeFileDepth())) {
      Path nodeFilePath = new Path(this.baseDirectory, path.toString());
      try {
        Config config = loadNodeFileWithOverrides(nodeFilePath);
        // The node implementation class is configurable; fall back to the default.
        Class dataNodeClass = Class.forName(ConfigUtils.getString(config, FlowGraphConfigurationKeys.DATA_NODE_CLASS,
            FlowGraphConfigurationKeys.DEFAULT_DATA_NODE_CLASS));
        DataNode dataNode = (DataNode) GobblinConstructorUtils.invokeLongestConstructor(dataNodeClass, config);
        if (!graph.addDataNode(dataNode)) {
          log.warn("Could not add DataNode {} to FlowGraph; skipping", dataNode.getId());
        } else {
          log.info("Added Datanode {} to FlowGraph", dataNode.getId());
        }
      } catch (Exception e) {
        if (this.flowGraphUpdateFailedMeter.isPresent()) {
          this.flowGraphUpdateFailedMeter.get().mark();
        }
        log.warn(String.format("Could not add DataNode defined in %s due to exception: ", path), e);
      }
    }
  }

  /**
   * Add a {@link FlowEdge} to the {@link FlowGraph}. The method uses the {@link FlowEdgeFactory} instance
   * provided by the {@link FlowGraph} to build a {@link FlowEdge} from the edge config file. Failures are
   * logged and metered but do not propagate.
   *
   * @param path of edge to add
   */
  protected void addFlowEdge(FlowGraph graph, java.nio.file.Path path) {
    // Skip directories and files that are not at edge depth / lack a recognized extension.
    if (!java.nio.file.Files.isDirectory(path) && checkFilePath(path.toString(), getEdgeFileDepth())) {
      Path edgeFilePath = new Path(this.baseDirectory, path.toString());
      try {
        Config edgeConfig = loadEdgeFileWithOverrides(edgeFilePath);
        List<SpecExecutor> specExecutors = getSpecExecutors(edgeConfig);
        // The edge factory class is configurable; fall back to the default.
        Class flowEdgeFactoryClass = Class.forName(
            ConfigUtils.getString(edgeConfig, FlowGraphConfigurationKeys.FLOW_EDGE_FACTORY_CLASS,
                FlowGraphConfigurationKeys.DEFAULT_FLOW_EDGE_FACTORY_CLASS));
        FlowEdgeFactory flowEdgeFactory =
            (FlowEdgeFactory) GobblinConstructorUtils.invokeLongestConstructor(flowEdgeFactoryClass, edgeConfig);
        if (flowTemplateCatalog.isPresent()) {
          FlowEdge edge = flowEdgeFactory.createFlowEdge(edgeConfig, flowTemplateCatalog.get(), specExecutors);
          if (!graph.addFlowEdge(edge)) {
            log.warn("Could not add edge {} to FlowGraph; skipping", edge.getId());
          } else {
            log.info("Added edge {} to FlowGraph", edge.getId());
          }
        } else {
          // Edges cannot be created without templates; this is a configuration problem.
          log.warn("Could not add edge defined in {} to FlowGraph as FlowTemplateCatalog is absent", path);
        }
      } catch (Exception e) {
        log.warn("Could not add edge defined in {} due to exception", path, e);
        if (this.flowGraphUpdateFailedMeter.isPresent()) {
          this.flowGraphUpdateFailedMeter.get().mark();
        }
      }
    }
  }

  /**
   * check whether the file has the proper naming and hierarchy for nodes and edges
   * @param file the relative path from the root of the flowgraph
   * @param depth expected depth of the file (node or edge depth)
   * @return false if the file does not conform
   */
  protected boolean checkFilePath(String file, int depth) {
    // The file is either a node file or an edge file and needs to be stored at either:
    // flowGraphDir/nodeName/nodeName.properties (if it is a node file), or
    // flowGraphDir/nodeName/nodeName/edgeName.properties (if it is an edge file)
    Path filePath = new Path(file);
    String fileExtension = Files.getFileExtension(filePath.getName());
    if (!checkFileLevelRelativeToRoot(filePath, depth)
        || !(this.javaPropsExtensions.contains(fileExtension) || this.hoconFileExtensions.contains(fileExtension))) {
      log.warn("Changed file does not conform to directory structure and file name format, skipping: " + filePath);
      return false;
    }
    return true;
  }

  /**
   * Helper to check if a file has proper hierarchy.
   * @param filePath path of the node/edge file
   * @param depth expected depth of the file
   * @return true if the file conforms to the expected hierarchy
   */
  public boolean checkFileLevelRelativeToRoot(Path filePath, int depth) {
    if (filePath == null) {
      return false;
    }
    // Walk up (depth - 1) levels; the ancestor reached must be the flowgraph folder itself.
    Path path = filePath;
    for (int i = 0; i < depth - 1; i++) {
      path = path.getParent();
    }
    return path != null ? path.getName().equals(flowGraphFolderName) : false;
  }

  /**
   * Helper that overrides the data.node.id property with name derived from the node file path.
   * The node id is always the name of the directory containing the node file, regardless of
   * what (if anything) the file itself declares.
   *
   * @param nodeConfig node config
   * @param nodeFilePath path of the node file
   * @return config with overridden data.node.id
   */
  protected Config getNodeConfigWithOverrides(Config nodeConfig, Path nodeFilePath) {
    String nodeId = nodeFilePath.getParent().getName();
    return nodeConfig.withValue(FlowGraphConfigurationKeys.DATA_NODE_ID_KEY, ConfigValueFactory.fromAnyRef(nodeId));
  }

  /**
   * Helper that overrides the flow edge properties with name derived from the edge file path.
   * Source, destination, and edge id are all derived from the directory layout
   * (flowGraphDir/source/destination/edgeName.ext), overriding any values in the file.
   *
   * @param edgeConfig edge config
   * @param edgeFilePath path of the edge file
   * @return config with overridden edge properties
   */
  protected Config getEdgeConfigWithOverrides(Config edgeConfig, Path edgeFilePath) {
    String source = edgeFilePath.getParent().getParent().getName();
    String destination = edgeFilePath.getParent().getName();
    String edgeName = Files.getNameWithoutExtension(edgeFilePath.getName());
    return edgeConfig.withValue(FlowGraphConfigurationKeys.FLOW_EDGE_SOURCE_KEY, ConfigValueFactory.fromAnyRef(source))
        .withValue(FlowGraphConfigurationKeys.FLOW_EDGE_DESTINATION_KEY, ConfigValueFactory.fromAnyRef(destination))
        .withValue(FlowGraphConfigurationKeys.FLOW_EDGE_ID_KEY,
            ConfigValueFactory.fromAnyRef(getEdgeId(source, destination, edgeName)));
  }

  /**
   * This method first retrieves the logical names of all the {@link org.apache.gobblin.runtime.api.SpecExecutor}s
   * for this edge and returns the SpecExecutors from the {@link TopologySpec} map.
   *
   * @param edgeConfig containing the logical names of SpecExecutors for this edge.
   * @return a {@link List}&lt;{@link SpecExecutor}&gt;s for this edge.
   * @throws IOException if a named SpecExecutor is not present in the topology spec store
   * @throws URISyntaxException if a SpecExecutor name is not a valid URI
   */
  private List<SpecExecutor> getSpecExecutors(Config edgeConfig) throws URISyntaxException, IOException {
    //Get the logical names of SpecExecutors where the FlowEdge can be executed.
    List<String> specExecutorNames =
        ConfigUtils.getStringList(edgeConfig, FlowGraphConfigurationKeys.FLOW_EDGE_SPEC_EXECUTORS_KEY);
    //Load all the SpecExecutor configurations for this FlowEdge from the SpecExecutor Catalog.
    List<SpecExecutor> specExecutors = new ArrayList<>(specExecutorNames.size());
    for (String specExecutorName : specExecutorNames) {
      URI specExecutorUri = new URI(specExecutorName);
      if (!this.topologySpecMap.containsKey(specExecutorUri)) {
        throw new IOException(String.format("Spec executor %s does not exist in the topologySpecStore.",
            specExecutorUri));
      }
      specExecutors.add(this.topologySpecMap.get(specExecutorUri).getSpecExecutor());
    }
    return specExecutors;
  }

  /**
   * Load the node file.
   * @param filePath path of the node file relative to the repository root
   * @return the configuration object with the node id overridden from the path
   * @throws IOException if the file cannot be loaded
   */
  protected Config loadNodeFileWithOverrides(Path filePath) throws IOException {
    Config nodeConfig = this.pullFileLoader.loadPullFile(filePath, emptyConfig, false, false);
    return getNodeConfigWithOverrides(nodeConfig, filePath);
  }

  /**
   * Load the edge file.
   * @param filePath path of the edge file relative to the repository root
   * @return the configuration object with source/destination/id overridden from the path
   * @throws IOException if the file cannot be loaded
   */
  protected Config loadEdgeFileWithOverrides(Path filePath) throws IOException {
    Config edgeConfig = this.pullFileLoader.loadPullFile(filePath, emptyConfig, false, false);
    return getEdgeConfigWithOverrides(edgeConfig, filePath);
  }

  /**
   * Loads the entire flowgraph from the path configured in
   * {@code org.apache.gobblin.configuration.ConfigurationKeys.FLOWGRAPH_BASE_DIR}.
   * Expects nodes to be in the format of /flowGraphName/nodeA/nodeA.properties
   * Expects edges to be in the format of /flowGraphName/nodeA/nodeB/edgeAB.properties
   * The current flowgraph will be swapped atomically with the new flowgraph that is loaded.
   *
   * @return the newly loaded {@link FlowGraph}, or {@code null} if the directory walk failed
   *         (callers keep the previously loaded graph in that case)
   */
  public FlowGraph generateFlowGraph() {
    FlowGraph newFlowGraph = new BaseFlowGraph();
    java.nio.file.Path graphPath = new File(this.baseDirectory).toPath();
    try {
      List<java.nio.file.Path> edges = new ArrayList<>();
      // All nodes must be added first before edges, otherwise edges may have a missing source or destination.
      // Need to convert files to Hadoop Paths to be compatible with FileAlterationListener
      java.nio.file.Files.walk(graphPath).forEach(fileName -> {
        if (checkFileLevelRelativeToRoot(new Path(fileName.toString()), getNodeFileDepth())) {
          addDataNode(newFlowGraph, fileName);
        } else if (checkFileLevelRelativeToRoot(new Path(fileName.toString()), getEdgeFileDepth())) {
          // Defer edges until every node has been added.
          edges.add(fileName);
        }
      });
      for (java.nio.file.Path edge : edges) {
        addFlowEdge(newFlowGraph, edge);
      }
      return newFlowGraph;
    } catch (IOException e) {
      // Log and report error, but do not break or crash the flowgraph so that currently running flows can continue
      if (this.flowGraphUpdateFailedMeter.isPresent()) {
        this.flowGraphUpdateFailedMeter.get().mark();
      }
      log.error(String.format("Error while populating file based flowgraph at path %s", graphPath), e);
      return null;
    }
  }

  /**
   * Get an edge label from the edge properties.
   * @param source source data node id
   * @param destination destination data node id
   * @param edgeName simple name of the edge (e.g. file name without extension of the edge file)
   * @return a string label identifying the edge, of the form source_destination_edgeName
   */
  public String getEdgeId(String source, String destination, String edgeName) {
    return Joiner.on(FLOW_EDGE_LABEL_JOINER_CHAR).join(source, destination, edgeName);
  }

  // Overridable so subclasses with a different directory layout can adjust the expected depths.
  protected int getNodeFileDepth() {
    return NODE_FILE_DEPTH;
  }

  protected int getEdgeFileDepth() {
    return EDGE_FILE_DEPTH;
  }
}
3,851
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/flowgraph
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/flowgraph/pathfinder/AbstractPathFinder.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.service.modules.flowgraph.pathfinder; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.base.Preconditions; import com.google.common.base.Strings; import com.typesafe.config.Config; import com.typesafe.config.ConfigFactory; import com.typesafe.config.ConfigValue; import com.typesafe.config.ConfigValueFactory; import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.stream.Collectors; import lombok.extern.slf4j.Slf4j; import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.tuple.Pair; import org.apache.gobblin.annotation.Alpha; import org.apache.gobblin.configuration.ConfigurationKeys; import org.apache.gobblin.runtime.api.FlowSpec; import org.apache.gobblin.runtime.api.JobTemplate; import org.apache.gobblin.runtime.api.SpecExecutor; import org.apache.gobblin.runtime.api.SpecNotFoundException; import org.apache.gobblin.service.ServiceConfigKeys; import org.apache.gobblin.service.modules.dataset.DatasetDescriptor; import 
org.apache.gobblin.service.modules.dataset.DatasetDescriptorUtils; import org.apache.gobblin.service.modules.flow.FlowEdgeContext; import org.apache.gobblin.service.modules.flow.FlowGraphPath; import org.apache.gobblin.service.modules.flow.FlowUtils; import org.apache.gobblin.service.modules.flowgraph.Dag; import org.apache.gobblin.service.modules.flowgraph.DataNode; import org.apache.gobblin.service.modules.flowgraph.DatasetDescriptorConfigKeys; import org.apache.gobblin.service.modules.flowgraph.FlowEdge; import org.apache.gobblin.service.modules.flowgraph.FlowGraph; import org.apache.gobblin.service.modules.restli.FlowConfigUtils; import org.apache.gobblin.service.modules.spec.JobExecutionPlan; import org.apache.gobblin.util.ConfigUtils; import org.apache.gobblin.util.reflection.GobblinConstructorUtils; @Alpha @Slf4j public abstract class AbstractPathFinder implements PathFinder { private static final String SOURCE_PREFIX = "source"; private static final String DESTINATION_PREFIX = "destination"; private List<DataNode> destNodes; FlowGraph flowGraph; DataNode srcNode; DatasetDescriptor srcDatasetDescriptor; DatasetDescriptor destDatasetDescriptor; //Maintain path of FlowEdges as parent-child map Map<FlowEdgeContext, FlowEdgeContext> pathMap; //Flow Execution Id protected Long flowExecutionId; protected FlowSpec flowSpec; protected Config flowConfig; AbstractPathFinder(FlowGraph flowGraph, FlowSpec flowSpec) throws ReflectiveOperationException { this(flowGraph, flowSpec, new HashMap<>()); } AbstractPathFinder(FlowGraph flowGraph, FlowSpec flowSpec, Map<String, String> dataNodeAliasMap) throws ReflectiveOperationException { this.flowGraph = flowGraph; this.flowSpec = flowSpec; this.flowExecutionId = FlowUtils.getOrCreateFlowExecutionId(flowSpec); this.flowConfig = flowSpec.getConfig().withValue(ConfigurationKeys.FLOW_EXECUTION_ID_KEY, ConfigValueFactory.fromAnyRef(flowExecutionId)); //Get src/dest DataNodes from the flow config String srcNodeId = 
FlowConfigUtils.getDataNode(flowConfig, ServiceConfigKeys.FLOW_SOURCE_IDENTIFIER_KEY, dataNodeAliasMap); List<String> destNodeIds = FlowConfigUtils.getDataNodes(flowConfig, ServiceConfigKeys.FLOW_DESTINATION_IDENTIFIER_KEY, dataNodeAliasMap); this.srcNode = this.flowGraph.getNode(srcNodeId); Preconditions.checkArgument(srcNode != null, "Flowgraph does not have a node with id " + srcNodeId); for (String destNodeId : destNodeIds) { DataNode destNode = this.flowGraph.getNode(destNodeId); Preconditions.checkArgument(destNode != null, "Flowgraph does not have a node with id " + destNodeId); if (this.destNodes == null) { this.destNodes = new ArrayList<>(); } this.destNodes.add(destNode); } // All dest nodes should be the same class if (this.destNodes != null && this.destNodes.stream().map(Object::getClass).collect(Collectors.toSet()).size() > 1) { throw new RuntimeException("All destination nodes must use the same DataNode class"); } //Should apply retention? boolean shouldApplyRetention = ConfigUtils.getBoolean(flowConfig, ConfigurationKeys.FLOW_APPLY_RETENTION, true); //Should apply retention on input dataset? 
boolean shouldApplyRetentionOnInput = ConfigUtils.getBoolean(flowConfig, ConfigurationKeys.FLOW_APPLY_INPUT_RETENTION, false); if ((shouldApplyRetentionOnInput) && (!shouldApplyRetention)) { //Invalid retention config throw new RuntimeException("Invalid retention configuration - shouldApplyRetentionOnInput = " + shouldApplyRetentionOnInput + ", and shouldApplyRetention = " + shouldApplyRetention); } //Get src/dest dataset descriptors from the flow config Config srcDatasetDescriptorConfig = flowConfig.getConfig(DatasetDescriptorConfigKeys.FLOW_INPUT_DATASET_DESCRIPTOR_PREFIX); srcDatasetDescriptorConfig = srcDatasetDescriptorConfig.withValue(DatasetDescriptorConfigKeys.IS_INPUT_DATASET, ConfigValueFactory.fromAnyRef(true)); Config destDatasetDescriptorConfig = flowConfig.getConfig(DatasetDescriptorConfigKeys.FLOW_OUTPUT_DATASET_DESCRIPTOR_PREFIX); destDatasetDescriptorConfig = destDatasetDescriptorConfig.withValue(DatasetDescriptorConfigKeys.IS_INPUT_DATASET, ConfigValueFactory.fromAnyRef(false));; //Add retention config for source and destination dataset descriptors. if (shouldApplyRetentionOnInput) { // We should run retention on source dataset. To ensure a retention is run, set // isRetentionApplied=false for source dataset. srcDatasetDescriptorConfig = srcDatasetDescriptorConfig .withValue(DatasetDescriptorConfigKeys.IS_RETENTION_APPLIED_KEY, ConfigValueFactory.fromAnyRef(false)); } else { // Don't apply retention on source dataset. // // If ConfigurationKeys.FLOW_APPLY_RETENTION is true, isRetentionApplied is set to true for the source dataset. // The PathFinder will therefore treat the source dataset as one on which retention has already been // applied, preventing retention from running on the source dataset. // // On the other hand, if ConfigurationKeys.FLOW_APPLY_RETENTION is false // we do not apply retention - neither on the source dataset nor anywhere along the path to the destination. 
srcDatasetDescriptorConfig = srcDatasetDescriptorConfig .withValue(DatasetDescriptorConfigKeys.IS_RETENTION_APPLIED_KEY, ConfigValueFactory.fromAnyRef(shouldApplyRetention)); } destDatasetDescriptorConfig = destDatasetDescriptorConfig.withValue(DatasetDescriptorConfigKeys.IS_RETENTION_APPLIED_KEY, ConfigValueFactory.fromAnyRef(shouldApplyRetention)); //Add the retention configs to the FlowConfig flowConfig = flowConfig.withValue(ConfigurationKeys.FLOW_APPLY_RETENTION, ConfigValueFactory.fromAnyRef(shouldApplyRetention)); flowConfig = flowConfig.withValue(ConfigurationKeys.FLOW_APPLY_INPUT_RETENTION, ConfigValueFactory.fromAnyRef(shouldApplyRetentionOnInput)); srcDatasetDescriptorConfig = srcDatasetDescriptorConfig.withFallback(getDefaultConfig(this.srcNode)); if (this.destNodes != null) { destDatasetDescriptorConfig = destDatasetDescriptorConfig.withFallback(getDefaultConfig(this.destNodes.get(0))); } this.srcDatasetDescriptor = DatasetDescriptorUtils.constructDatasetDescriptor(srcDatasetDescriptorConfig); this.destDatasetDescriptor = DatasetDescriptorUtils.constructDatasetDescriptor(destDatasetDescriptorConfig); } public static Config getDefaultConfig(DataNode dataNode) { Config defaultConfig = ConfigFactory.empty(); if (dataNode.getDefaultDatasetDescriptorClass() != null) { defaultConfig = defaultConfig.withValue(DatasetDescriptorConfigKeys.CLASS_KEY, ConfigValueFactory.fromAnyRef(dataNode.getDefaultDatasetDescriptorClass())); } if (dataNode.getDefaultDatasetDescriptorPlatform() != null) { defaultConfig = defaultConfig.withValue(DatasetDescriptorConfigKeys.PLATFORM_KEY, ConfigValueFactory.fromAnyRef(dataNode.getDefaultDatasetDescriptorPlatform())); } return defaultConfig; } boolean isPathFound(DataNode currentNode, DataNode destNode, DatasetDescriptor currentDatasetDescriptor, DatasetDescriptor destDatasetDescriptor) { return (currentNode.equals(destNode)) && (currentDatasetDescriptor.equals(destDatasetDescriptor)); } /** * A helper method that sorts the {@link 
FlowEdge}s incident on srcNode based on whether the FlowEdge has an * output {@link DatasetDescriptor} that is compatible with the targetDatasetDescriptor. * @param dataNode the {@link DataNode} to be expanded for determining candidate edges. * @param currentDatasetDescriptor Output {@link DatasetDescriptor} of the current edge. * @param destDatasetDescriptor Target {@link DatasetDescriptor}. * @return prioritized list of {@link FlowEdge}s to be added to the edge queue for expansion. */ List<FlowEdgeContext> getNextEdges(DataNode dataNode, DatasetDescriptor currentDatasetDescriptor, DatasetDescriptor destDatasetDescriptor, int numberOfHops) { List<FlowEdgeContext> prioritizedEdgeList = new LinkedList<>(); List<String> edgeIds = ConfigUtils.getStringList(this.flowConfig, ConfigurationKeys.WHITELISTED_EDGE_IDS); for (FlowEdge flowEdge : this.flowGraph.getEdges(dataNode)) { if (!edgeIds.isEmpty() && !edgeIds.contains(flowEdge.getId())) { continue; } try { DataNode edgeDestination = this.flowGraph.getNode(flowEdge.getDest()); //Base condition: Skip this FLowEdge, if it is inactive or if the destination of this edge is inactive. if (!edgeDestination.isActive() || !flowEdge.isActive()) { continue; } boolean foundExecutor = false; //Iterate over all executors for this edge. Find the first one that resolves the underlying flow template. 
for (SpecExecutor specExecutor : flowEdge.getExecutors()) { Config mergedConfig = getMergedConfig(flowEdge); List<Pair<DatasetDescriptor, DatasetDescriptor>> datasetDescriptorPairs = flowEdge.getFlowTemplate().getDatasetDescriptors(mergedConfig, false); for (Pair<DatasetDescriptor, DatasetDescriptor> datasetDescriptorPair : datasetDescriptorPairs) { DatasetDescriptor inputDatasetDescriptor = datasetDescriptorPair.getLeft(); DatasetDescriptor outputDatasetDescriptor = datasetDescriptorPair.getRight(); HashMap<String, ArrayList<String>> errors = flowEdge.getFlowTemplate().tryResolving(mergedConfig, datasetDescriptorPair.getLeft(), datasetDescriptorPair.getRight()); HashMap<String, HashMap<String, ArrayList<String>>> edgeErrors = new HashMap<>(); HashMap<String, HashMap<String, ArrayList<String>>> templateErrors = new HashMap<>(); ObjectMapper mapper = new ObjectMapper(); edgeErrors.put(flowEdge.getId(), errors); if (errors.size() != 0) { try { flowSpec.addCompilationError(flowEdge.getSrc(), flowEdge.getDest(), mapper.writeValueAsString(edgeErrors)); } catch (JsonProcessingException e) { e.printStackTrace(); } continue; } ArrayList<String> datasetDescriptorErrors = inputDatasetDescriptor.contains(currentDatasetDescriptor); if (datasetDescriptorErrors.size() == 0) { DatasetDescriptor edgeOutputDescriptor = makeOutputDescriptorSpecific(currentDatasetDescriptor, outputDatasetDescriptor); FlowEdgeContext flowEdgeContext = new FlowEdgeContext(flowEdge, currentDatasetDescriptor, edgeOutputDescriptor, mergedConfig, specExecutor); if (destDatasetDescriptor.getFormatConfig().contains(outputDatasetDescriptor.getFormatConfig()).size() == 0) { /* Add to the front of the edge list if platform-independent properties of the output descriptor is compatible with those of destination dataset descriptor. In other words, we prioritize edges that perform data transformations as close to the source as possible. 
*/ prioritizedEdgeList.add(0, flowEdgeContext); } else { prioritizedEdgeList.add(flowEdgeContext); } foundExecutor = true; } else { HashMap<String, ArrayList<String>> templateError = new HashMap<>(); templateError.put("flowTemplateErrors", datasetDescriptorErrors); templateErrors.put(flowEdge.getId(), templateError); try { flowSpec.addCompilationError(flowEdge.getSrc(), flowEdge.getDest(), mapper.writeValueAsString(templateErrors), numberOfHops); } catch (JsonProcessingException e) { e.printStackTrace(); } } } // Found a SpecExecutor. Proceed to the next FlowEdge. // TODO: Choose the min-cost executor for the FlowEdge as opposed to the first one that resolves. if (foundExecutor) { break; } } } catch (IOException | ReflectiveOperationException | SpecNotFoundException | JobTemplate.TemplateException e) { //Skip the edge; and continue log.warn("Skipping edge {} with config {} due to exception: {}", flowEdge.getId(), flowConfig.toString(), e); } } return prioritizedEdgeList; } /** * A helper method to make the output {@link DatasetDescriptor} of a {@link FlowEdge} "specific". More precisely, * we replace any "placeholder" configurations in the output {@link DatasetDescriptor} with specific configuration * values obtained from the input {@link DatasetDescriptor}. A placeholder configuration is one which is not * defined or is set to {@link DatasetDescriptorConfigKeys#DATASET_DESCRIPTOR_CONFIG_ANY}. * * Example: Consider a {@link FlowEdge} that applies retention on an input dataset. Further assume that this edge * is applicable to datasets of all formats. 
The input and output descriptors of this edge may be described using the following * configs: * inputDescriptor = Config(SimpleConfigObject({"class":"org.apache.gobblin.service.modules.dataset.FSDatasetDescriptor", * "codec":"any","encrypt":{"algorithm":"any","keystore_encoding":"any","keystore_type":"any"},"format":"any", * "isRetentionApplied":false,"path":"/data/encrypted/testTeam/testDataset","platform":"hdfs"})) * * outputDescriptor = Config(SimpleConfigObject({"class":"org.apache.gobblin.service.modules.dataset.FSDatasetDescriptor", * "codec":"any","encrypt":{"algorithm":"any","keystore_encoding":"any","keystore_type":"any"},"format":"any", * "isRetentionApplied":true,"path":"/data/encrypted/testTeam/testDataset","platform":"hdfs"})) * * Let the intermediate dataset descriptor "arriving" at this edge be described using the following config: * currentDescriptor = Config(SimpleConfigObject({"class":"org.apache.gobblin.service.modules.dataset.FSDatasetDescriptor", * "codec":"gzip","encrypt":{"algorithm":"aes_rotating","keystore_encoding":"base64","keystore_type":"json"},"format":"json", * "isRetentionApplied":false,"path":"/data/encrypted/testTeam/testDataset","platform":"hdfs"})). * * This method replaces the placeholder configs in outputDescriptor with specific values from currentDescriptor to return: * returnedDescriptor = Config(SimpleConfigObject({"class":"org.apache.gobblin.service.modules.dataset.FSDatasetDescriptor", * "codec":"gzip","encrypt":{"algorithm":"aes_rotating","keystore_encoding":"base64","keystore_type":"json"},"format":"json", * "isRetentionApplied":<b>true</b>,"path":"/data/encrypted/testTeam/testDataset","platform":"hdfs"})). * * @param currentDescriptor intermediate {@link DatasetDescriptor} obtained during path finding. * @param outputDescriptor output {@link DatasetDescriptor} of a {@link FlowEdge}. 
* @return {@link DatasetDescriptor} with placeholder configs in outputDescriptor substituted with specific values * from the currentDescriptor. */ private DatasetDescriptor makeOutputDescriptorSpecific(DatasetDescriptor currentDescriptor, DatasetDescriptor outputDescriptor) throws ReflectiveOperationException { Config config = outputDescriptor.getRawConfig(); for (Map.Entry<String, ConfigValue> entry : currentDescriptor.getRawConfig().entrySet()) { String entryValue = entry.getValue().unwrapped().toString(); if (!isPlaceHolder(entryValue)) { String entryValueInOutputDescriptor = ConfigUtils.getString(config, entry.getKey(), StringUtils.EMPTY); if (isPlaceHolder(entryValueInOutputDescriptor)) { config = config.withValue(entry.getKey(), ConfigValueFactory.fromAnyRef(entryValue)); } } } return GobblinConstructorUtils.invokeLongestConstructor(outputDescriptor.getClass(), config); } /** * A placeholder configuration is one which is not defined or is set to {@link DatasetDescriptorConfigKeys#DATASET_DESCRIPTOR_CONFIG_ANY}. * @param value to be examined for determining if it is a placeholder. * @return true if the value is null or empty or equals {@link DatasetDescriptorConfigKeys#DATASET_DESCRIPTOR_CONFIG_ANY}. */ private boolean isPlaceHolder(String value) { return Strings.isNullOrEmpty(value) || value.equals(DatasetDescriptorConfigKeys.DATASET_DESCRIPTOR_CONFIG_ANY); } /** * Build the merged config for each {@link FlowEdge}, which is a combination of (in the precedence described below): * <ul> * <p> the user provided flow config </p> * <p> edge specific properties/overrides </p> * <p> source node config </p> * <p> destination node config </p> * </ul> * Each {@link JobTemplate}'s config will eventually be resolved against this merged config. * @param flowEdge An instance of {@link FlowEdge}. * @return the merged config derived as described above. 
*/ private Config getMergedConfig(FlowEdge flowEdge) { Config srcNodeConfig = this.flowGraph.getNode(flowEdge.getSrc()).getRawConfig().atPath(SOURCE_PREFIX); Config destNodeConfig = this.flowGraph.getNode(flowEdge.getDest()).getRawConfig().atPath(DESTINATION_PREFIX); Config mergedConfig = flowConfig.withFallback(flowEdge.getConfig()).withFallback(srcNodeConfig).withFallback(destNodeConfig); return mergedConfig; } /** * * @param flowEdgeContext of the last {@link FlowEdge} in the path. * @return a {@link Dag} of {@link JobExecutionPlan}s for the input {@link FlowSpec}. */ List<FlowEdgeContext> constructPath(FlowEdgeContext flowEdgeContext) { //Backtrace from the last edge using the path map and push each edge into a LIFO data structure. List<FlowEdgeContext> path = new LinkedList<>(); path.add(flowEdgeContext); FlowEdgeContext currentFlowEdgeContext = flowEdgeContext; //While we are not at the first edge in the path, add the edge to the path while (!this.pathMap.get(currentFlowEdgeContext).equals(currentFlowEdgeContext)) { path.add(0, this.pathMap.get(currentFlowEdgeContext)); currentFlowEdgeContext = this.pathMap.get(currentFlowEdgeContext); } return path; } @Override public FlowGraphPath findPath() throws PathFinderException { FlowGraphPath flowGraphPath = new FlowGraphPath(flowSpec, flowExecutionId); // Path computation must be thread-safe to guarantee read consistency. In other words, we prevent concurrent read/write access to the // flow graph. for (DataNode destNode : this.destNodes) { List<FlowEdgeContext> path = findPathUnicast(destNode); if (path != null) { log.info("Path to destination node {} found for flow {}. Path - {}", destNode.getId(), flowSpec.getUri(), path); flowGraphPath.addPath(path); } else { log.error("Path to destination node {} could not be found for flow {}.", destNode.getId(), flowSpec.getUri()); //No path to at least one of the destination nodes. 
return null; } } return flowGraphPath; } public abstract List<FlowEdgeContext> findPathUnicast(DataNode destNode) throws PathFinderException; }
3,852
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/flowgraph
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/flowgraph/pathfinder/BFSPathFinder.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.modules.flowgraph.pathfinder;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;

import lombok.extern.slf4j.Slf4j;

import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.runtime.api.FlowSpec;
import org.apache.gobblin.service.modules.dataset.DatasetDescriptor;
import org.apache.gobblin.service.modules.flow.FlowEdgeContext;
import org.apache.gobblin.service.modules.flowgraph.DataNode;
import org.apache.gobblin.service.modules.flowgraph.FlowEdge;
import org.apache.gobblin.service.modules.flowgraph.FlowGraph;


/**
 * An implementation of {@link PathFinder} that assumes an unweighted {@link FlowGraph} and computes the
 * shortest path using a variant of the BFS path-finding algorithm. This implementation has two key differences from the
 * traditional BFS implementations:
 * <ul>
 *   <p> the input graph is a multi-graph i.e. there could be multiple edges between each pair of nodes, and </p>
 *   <p> each edge has a label associated with it. In our case, the label corresponds to the set of input/output
 *   dataset descriptors that are accepted by the edge.</p>
 * </ul>
 * Given these differences, we maintain:
 * <p> a {@link HashMap} of list of visited edges, as opposed to list of visited
 * vertices as in the case of traditional BFS, and </p>
 * <p> for each edge, we maintain additional state that includes the input/output dataset descriptor
 * associated with the particular visitation of that edge. </p>
 * This additional information allows us to accurately mark edges as visited and guarantee termination of the algorithm.
 */
@Alpha
@Slf4j
public class BFSPathFinder extends AbstractPathFinder {
  /**
   * Constructor.
   * @param flowGraph the graph to search; delegates to the alias-map constructor with an empty map.
   */
  public BFSPathFinder(FlowGraph flowGraph, FlowSpec flowSpec)
      throws ReflectiveOperationException {
    this(flowGraph, flowSpec, new HashMap<>());
  }

  /**
   * Constructor.
   * @param dataNodeAliasMap maps alternate node names to canonical node ids (resolved in the superclass).
   */
  public BFSPathFinder(FlowGraph flowGraph, FlowSpec flowSpec, Map<String, String> dataNodeAliasMap)
      throws ReflectiveOperationException {
    super(flowGraph, flowSpec, dataNodeAliasMap);
  }

  /**
   * A simple path finding algorithm based on Breadth-First Search. At every step the algorithm adds the adjacent {@link FlowEdge}s
   * to a queue. The {@link FlowEdge}s whose output {@link DatasetDescriptor} matches the destDatasetDescriptor are
   * added first to the queue. This ensures that dataset transformations are always performed closest to the source.
   * @return a path of {@link FlowEdgeContext}s starting at the srcNode and ending at the destNode;
   *         an empty list if source and destination are already satisfied; {@code null} if no path exists.
   */
  public List<FlowEdgeContext> findPathUnicast(DataNode destNode) {
    //Initialization of auxiliary data structures used for path computation
    this.pathMap = new HashMap<>();
    // Hop counter: incremented once per BFS level; passed through so compilation errors can record depth.
    int numberOfHops = 1;
    // Holds the next BFS level while the current level (edgeQueue) is drained.
    LinkedList<FlowEdgeContext> childQueue = new LinkedList<>();

    //Path computation must be thread-safe to guarantee read consistency. In other words, we prevent concurrent read/write access to the
    // flow graph.
    //Base condition 1: Source Node or Dest Node is inactive; return null
    if (!srcNode.isActive() || !destNode.isActive()) {
      log.warn("Either source node {} or destination node {} is inactive; skipping path computation.",
          this.srcNode.getId(), destNode.getId());
      return null;
    }

    //Base condition 2: Check if we are already at the target. If so, return an empty path.
    if ((srcNode.equals(destNode)) && destDatasetDescriptor.contains(srcDatasetDescriptor).size() == 0) {
      return new ArrayList<>(0);
    }

    LinkedList<FlowEdgeContext> edgeQueue =
        new LinkedList<>(getNextEdges(srcNode, srcDatasetDescriptor, destDatasetDescriptor, numberOfHops));
    // Seed pathMap: a root edge maps to itself, which is the termination condition used by constructPath().
    for (FlowEdgeContext flowEdgeContext : edgeQueue) {
      this.pathMap.put(flowEdgeContext, flowEdgeContext);
    }

    //At every step, pop an edge E from the edge queue. Mark the edge E as visited. Generate the list of adjacent edges
    // to the edge E. For each adjacent edge E', do the following:
    //    1. check if the FlowTemplate described by E' is resolvable using the flowConfig, and
    //    2. check if the output dataset descriptor of edge E is compatible with the input dataset descriptor of the
    //    edge E'. If yes, add the edge E' to the edge queue.
    // If the edge E' satisfies 1 and 2, add it to the edge queue for further consideration.
    do {
      numberOfHops++;
      while (!edgeQueue.isEmpty()) {
        FlowEdgeContext flowEdgeContext = edgeQueue.pop();

        DataNode currentNode = this.flowGraph.getNode(flowEdgeContext.getEdge().getDest());
        DatasetDescriptor currentOutputDatasetDescriptor = flowEdgeContext.getOutputDatasetDescriptor();

        //Are we done?
        if (isPathFound(currentNode, destNode, currentOutputDatasetDescriptor, destDatasetDescriptor)) {
          return constructPath(flowEdgeContext);
        }

        //Expand the currentNode to its adjacent edges and add them to the queue.
        List<FlowEdgeContext> nextEdges =
            getNextEdges(currentNode, currentOutputDatasetDescriptor, destDatasetDescriptor, numberOfHops);
        for (FlowEdgeContext childFlowEdgeContext : nextEdges) {
          //Add a pointer from the child edge to the parent edge, if the child edge is not already in the
          // queue. The pathMap entry doubles as the "visited" marker, guaranteeing termination.
          if (!this.pathMap.containsKey(childFlowEdgeContext)) {
            childQueue.add(childFlowEdgeContext);
            this.pathMap.put(childFlowEdgeContext, flowEdgeContext);
          }
        }
      }
      // Promote the next BFS level into the working queue.
      if (!childQueue.isEmpty()) {
        edgeQueue.addAll(childQueue);
        childQueue.clear();
      }
    } while (!edgeQueue.isEmpty());
    //No path found. Return null.
    return null;
  }
}
3,853
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/flowgraph
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/flowgraph/pathfinder/PathFinder.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.service.modules.flowgraph.pathfinder; import org.apache.gobblin.annotation.Alpha; import org.apache.gobblin.service.modules.flow.FlowGraphPath; /** * An interface for computing a path in a {@link org.apache.gobblin.service.modules.flowgraph.FlowGraph}. Each * implementation of {@link PathFinder} implements a specific path finding algorithm such as Breadth-First Search (BFS), * Dijkstra's shortest-path algorithm etc. */ @Alpha public interface PathFinder { public FlowGraphPath findPath() throws PathFinderException; public static class PathFinderException extends Exception { public PathFinderException(String message, Throwable cause) { super(message, cause); } public PathFinderException(String message) { super(message); } } }
3,854
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/flowgraph
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/flowgraph/datanodes/HttpDataNode.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.service.modules.flowgraph.datanodes; import com.google.common.base.Preconditions; import com.typesafe.config.Config; import joptsimple.internal.Strings; import lombok.EqualsAndHashCode; import lombok.Getter; import org.apache.gobblin.service.modules.dataset.HttpDatasetDescriptor; import org.apache.gobblin.service.modules.flowgraph.BaseDataNode; import org.apache.gobblin.service.modules.flowgraph.DataNode; import org.apache.gobblin.service.modules.flowgraph.FlowGraphConfigurationKeys; import org.apache.gobblin.util.ConfigUtils; /** * Represents a HTTP source. Whether the source provides a REST complied API is not enforced. 
* Currently supports HTTPS with port default to 443 */ @EqualsAndHashCode (callSuper = true) public class HttpDataNode extends BaseDataNode { @Getter private String httpDomain; @Getter private String authenticationType; public static final String PLATFORM = "http"; public HttpDataNode(Config nodeProps) throws DataNode.DataNodeCreationException { super(nodeProps); try { this.httpDomain = ConfigUtils.getString(nodeProps, FlowGraphConfigurationKeys.DATA_NODE_HTTP_DOMAIN_KEY, ""); // Authentication details and credentials should reside in the Gobblin job payload this.authenticationType = ConfigUtils.getString( nodeProps, FlowGraphConfigurationKeys.DATA_NODE_HTTP_AUTHENTICATION_TYPE_KEY, ""); Preconditions.checkArgument(!Strings.isNullOrEmpty(httpDomain), FlowGraphConfigurationKeys.DATA_NODE_HTTP_DOMAIN_KEY + " cannot be null or empty."); Preconditions.checkArgument(!Strings.isNullOrEmpty(authenticationType), FlowGraphConfigurationKeys.DATA_NODE_HTTP_AUTHENTICATION_TYPE_KEY + " cannot be null or empty."); } catch (Exception e) { throw new DataNode.DataNodeCreationException(e); } } @Override public String getDefaultDatasetDescriptorClass() { return HttpDatasetDescriptor.class.getCanonicalName(); } @Override public String getDefaultDatasetDescriptorPlatform() { return PLATFORM; } }
3,855
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/flowgraph
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/flowgraph/datanodes/SqlDataNode.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.service.modules.flowgraph.datanodes; import com.google.common.base.Preconditions; import com.typesafe.config.Config; import joptsimple.internal.Strings; import lombok.EqualsAndHashCode; import lombok.Getter; import org.apache.gobblin.service.modules.dataset.SqlDatasetDescriptor; import org.apache.gobblin.service.modules.flowgraph.BaseDataNode; import org.apache.gobblin.service.modules.flowgraph.FlowGraphConfigurationKeys; import org.apache.gobblin.util.ConfigUtils; @EqualsAndHashCode (callSuper = true) public class SqlDataNode extends BaseDataNode { public static final String SQL_HOSTNAME = FlowGraphConfigurationKeys.DATA_NODE_PREFIX + "sql.hostname"; public static final String SQL_PORT = FlowGraphConfigurationKeys.DATA_NODE_PREFIX + "sql.port"; public static final String SQL_DRIVER = FlowGraphConfigurationKeys.DATA_NODE_PREFIX + "sql.driver"; @Getter private String hostName; @Getter private Integer port; @Getter private String jdbcDriver; public SqlDataNode(Config nodeProps) throws DataNodeCreationException { super(nodeProps); try { this.hostName = ConfigUtils.getString(nodeProps, SQL_HOSTNAME, ""); this.port = ConfigUtils.getInt(nodeProps, SQL_PORT, 0); this.jdbcDriver = 
ConfigUtils.getString(nodeProps, SQL_DRIVER, ""); Preconditions.checkArgument(!Strings.isNullOrEmpty(hostName), SQL_HOSTNAME + " cannot be null or empty."); Preconditions.checkArgument(port != 0, SQL_PORT + " cannot be empty."); Preconditions.checkArgument(!Strings.isNullOrEmpty(jdbcDriver), SQL_DRIVER + " cannot be null or empty."); } catch (Exception e) { throw new DataNodeCreationException(e); } } @Override public String getDefaultDatasetDescriptorClass() { return SqlDatasetDescriptor.class.getCanonicalName(); } }
3,856
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/flowgraph/datanodes
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/flowgraph/datanodes/iceberg/IcebergOnHiveDataNode.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.service.modules.flowgraph.datanodes.iceberg; import com.typesafe.config.Config; import lombok.EqualsAndHashCode; import org.apache.gobblin.annotation.Alpha; import org.apache.gobblin.data.management.copy.iceberg.IcebergHiveCatalog; import org.apache.gobblin.service.modules.dataset.IcebergDatasetDescriptor; import org.apache.gobblin.service.modules.flowgraph.datanodes.hive.HiveMetastoreUriDataNode; /** * In addition to the required properties of a {@link HiveMetastoreUriDataNode}, an {@link IcebergOnHiveDataNode} * must have a metastore URI specified. Specifies iceberg platform and uniquely identifies a hive catalog. * See {@link IcebergHiveCatalog} for more information */ @Alpha @EqualsAndHashCode(callSuper = true) public class IcebergOnHiveDataNode extends HiveMetastoreUriDataNode { public static final String PLATFORM = "iceberg"; /** * Constructor. 
An IcebergOnHiveDataNode must have hive.metastore.uri property specified to get {@link IcebergHiveCatalog} information * @param nodeProps */ public IcebergOnHiveDataNode(Config nodeProps) throws DataNodeCreationException { super(nodeProps); } @Override public String getDefaultDatasetDescriptorClass() { return IcebergDatasetDescriptor.class.getCanonicalName(); } @Override public String getDefaultDatasetDescriptorPlatform() { return PLATFORM; } }
3,857
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/flowgraph/datanodes
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/flowgraph/datanodes/iceberg/IcebergDataNode.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.service.modules.flowgraph.datanodes.iceberg; import com.google.common.base.Preconditions; import com.typesafe.config.Config; import joptsimple.internal.Strings; import lombok.EqualsAndHashCode; import lombok.Getter; import org.apache.gobblin.annotation.Alpha; import org.apache.gobblin.data.management.copy.iceberg.IcebergHiveCatalog; import org.apache.gobblin.service.modules.dataset.IcebergDatasetDescriptor; import org.apache.gobblin.service.modules.flowgraph.BaseDataNode; import org.apache.gobblin.service.modules.flowgraph.FlowGraphConfigurationKeys; import org.apache.gobblin.util.ConfigUtils; /** * Specifies iceberg platform and uniquely identifies an Iceberg Catalog based on the URI. * See {@link IcebergHiveCatalog} for more information */ @Alpha @EqualsAndHashCode(callSuper = true) public class IcebergDataNode extends BaseDataNode { public static final String CATALOG_URI_KEY = FlowGraphConfigurationKeys.DATA_NODE_PREFIX + "iceberg.catalog.uri"; public static final String PLATFORM = "iceberg"; @Getter private String catalogUri; /** * Constructor. An IcebergDataNode must have iceberg.catalog.uri property specified to get information about specific catalog. eg. 
{@link IcebergHiveCatalog} * @param nodeProps */ public IcebergDataNode(Config nodeProps) throws DataNodeCreationException { super(nodeProps); try { this.catalogUri = ConfigUtils.getString(nodeProps, CATALOG_URI_KEY, ""); Preconditions.checkArgument(!Strings.isNullOrEmpty(this.catalogUri), "iceberg.catalog.uri cannot be null or empty."); } catch (Exception e) { throw new DataNodeCreationException(e); } } @Override public String getDefaultDatasetDescriptorClass() { return IcebergDatasetDescriptor.class.getCanonicalName(); } @Override public String getDefaultDatasetDescriptorPlatform() { return PLATFORM; } }
3,858
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/flowgraph/datanodes
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/flowgraph/datanodes/hive/HiveMetastoreUriDataNode.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.modules.flowgraph.datanodes.hive;

import java.io.IOException;
import java.net.URI;

import com.google.common.base.Preconditions;
import com.typesafe.config.Config;

import joptsimple.internal.Strings;
import lombok.EqualsAndHashCode;
import lombok.Getter;

import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.service.modules.flowgraph.BaseDataNode;
import org.apache.gobblin.service.modules.flowgraph.FlowGraphConfigurationKeys;
import org.apache.gobblin.util.ConfigUtils;

/**
 * An {@link HiveMetastoreUriDataNode} implementation. In addition to the required properties of a
 * {@link BaseDataNode}, an {@link HiveMetastoreUriDataNode} must have a metastore URI specified.
 */
@Alpha
@EqualsAndHashCode(callSuper = true)
public class HiveMetastoreUriDataNode extends BaseDataNode {
  public static final String METASTORE_URI_KEY = FlowGraphConfigurationKeys.DATA_NODE_PREFIX + "hive.metastore.uri";

  @Getter
  private String metastoreUri;

  /**
   * Constructor. A HiveMetastoreUriDataNode must have the {@link #METASTORE_URI_KEY} property
   * specified in addition to a node Id and fs.uri.
   *
   * @param nodeProps node configuration
   * @throws DataNodeCreationException if the metastore URI is missing, empty or invalid
   */
  public HiveMetastoreUriDataNode(Config nodeProps) throws DataNodeCreationException {
    super(nodeProps);
    try {
      this.metastoreUri = ConfigUtils.getString(nodeProps, METASTORE_URI_KEY, "");
      // Fix: report the actual (prefixed) config key instead of the hardcoded
      // "hive.metastore.uri" so the error points at the key users must set.
      Preconditions.checkArgument(!Strings.isNullOrEmpty(this.metastoreUri),
          METASTORE_URI_KEY + " cannot be null or empty.");
      // Validate the metastore URI of the DataNode.
      if (!isMetastoreUriValid(new URI(this.metastoreUri))) {
        throw new IOException("Invalid hive metastore URI " + this.metastoreUri);
      }
    } catch (Exception e) {
      throw new DataNodeCreationException(e);
    }
  }

  /**
   * @param metastoreUri hive metastore URI
   * @return true if the scheme is "thrift" and authority is not empty.
   */
  public boolean isMetastoreUriValid(URI metastoreUri) {
    // Fix: constant-first comparison avoids an NPE when the URI has no scheme
    // (URI.getScheme() returns null for scheme-less URIs).
    if (!"thrift".equals(metastoreUri.getScheme())) {
      return false;
    }
    // Ensure that the authority is not empty
    if (com.google.common.base.Strings.isNullOrEmpty(metastoreUri.getAuthority())) {
      return false;
    }
    return true;
  }
}
3,859
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/flowgraph/datanodes
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/flowgraph/datanodes/hive/HiveDataNode.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.service.modules.flowgraph.datanodes.hive; import com.typesafe.config.Config; import lombok.EqualsAndHashCode; import org.apache.gobblin.annotation.Alpha; import org.apache.gobblin.service.modules.dataset.HiveDatasetDescriptor; import org.apache.gobblin.service.modules.flowgraph.BaseDataNode; /** * An {@link HiveDataNode} implementation. In addition to the required properties of a {@link BaseDataNode}, an {@link HiveDataNode} * specifies platform as hive. */ @Alpha @EqualsAndHashCode (callSuper = true) public class HiveDataNode extends HiveMetastoreUriDataNode { public static final String PLATFORM = "hive"; /** * Constructor. A HiveDataNode must have hive.metastore.uri property specified in addition to a node Id and fs.uri. */ public HiveDataNode(Config nodeProps) throws DataNodeCreationException { super(nodeProps); } @Override public String getDefaultDatasetDescriptorClass() { return HiveDatasetDescriptor.class.getCanonicalName(); } @Override public String getDefaultDatasetDescriptorPlatform() { return PLATFORM; } }
3,860
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/flowgraph/datanodes
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/flowgraph/datanodes/fs/SftpDataNode.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.modules.flowgraph.datanodes.fs;

import java.net.URI;

import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableMap;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;

import lombok.EqualsAndHashCode;
import lombok.Getter;

import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.service.modules.flowgraph.FlowGraphConfigurationKeys;
import org.apache.gobblin.util.ConfigUtils;

/**
 * A {@link FileSystemDataNode} for SFTP endpoints. Requires a hostname; the port falls back to
 * the default source connection port, and the fs.uri falls back to {@value #DEFAULT_SFTP_URI}.
 */
@EqualsAndHashCode(callSuper = true)
public class SftpDataNode extends FileSystemDataNode {
  public static final String SFTP_SCHEME = "sftp";
  public static final String DEFAULT_SFTP_URI = "sftp:///";
  public static final String PLATFORM = "sftpfs";
  public static final String SFTP_HOSTNAME = FlowGraphConfigurationKeys.DATA_NODE_PREFIX + "sftp.hostname";
  public static final String SFTP_PORT = FlowGraphConfigurationKeys.DATA_NODE_PREFIX + "sftp.port";

  @Getter
  private String hostName;
  @Getter
  private Integer port;
  @Getter
  private Config rawConfig;

  // Supplies the default SFTP port when the node config does not set one.
  private static final Config DEFAULT_FALLBACK =
      ConfigFactory.parseMap(ImmutableMap.<String, Object>builder()
          .put(SFTP_PORT, ConfigurationKeys.SOURCE_CONN_DEFAULT_PORT)
          .build());

  /**
   * Constructor. A SFTP DataNode must have {@link SftpDataNode#SFTP_HOSTNAME} configured.
   *
   * @param nodeProps node configuration
   * @throws DataNodeCreationException if the hostname is missing or the port is invalid
   */
  public SftpDataNode(Config nodeProps) throws DataNodeCreationException {
    // Fall back to a placeholder fs.uri so the FileSystemDataNode validation passes
    // even when the node config omits fs.uri.
    super(nodeProps.withFallback(ConfigFactory.empty().withValue(FileSystemDataNode.FS_URI_KEY,
        ConfigValueFactory.fromAnyRef(DEFAULT_SFTP_URI))));
    try {
      this.rawConfig = nodeProps.withFallback(DEFAULT_FALLBACK).withFallback(super.getRawConfig());
      this.hostName = ConfigUtils.getString(this.rawConfig, SFTP_HOSTNAME, "");
      Preconditions.checkArgument(!Strings.isNullOrEmpty(hostName), SFTP_HOSTNAME + " cannot be null or empty.");
      this.port = ConfigUtils.getInt(this.rawConfig, SFTP_PORT, -1);
      Preconditions.checkArgument(this.port > 0, "Invalid value for " + SFTP_PORT + ": " + this.port);
    } catch (Exception e) {
      throw new DataNodeCreationException(e);
    }
  }

  /**
   * @param fsUri FileSystem URI
   * @return true if the scheme is "sftp".
   */
  @Override
  public boolean isUriValid(URI fsUri) {
    // Fix: constant-first comparison avoids an NPE when the URI has no scheme
    // (URI.getScheme() returns null for scheme-less URIs).
    return SFTP_SCHEME.equals(fsUri.getScheme());
  }

  @Override
  public String getDefaultDatasetDescriptorPlatform() {
    return PLATFORM;
  }
}
3,861
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/flowgraph/datanodes
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/flowgraph/datanodes/fs/HDFSVolumeDataNode.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.service.modules.flowgraph.datanodes.fs; import com.typesafe.config.Config; import org.apache.gobblin.service.modules.dataset.FSVolumeDatasetDescriptor; /** * An implementation of {@link HdfsDataNode}. With default dataset descriptor class to be {@link FSVolumeDatasetDescriptor} which expect * fs.uri to be set in dataset as well */ public class HDFSVolumeDataNode extends HdfsDataNode{ public HDFSVolumeDataNode(Config nodeProps) throws DataNodeCreationException { super(nodeProps); } @Override public String getDefaultDatasetDescriptorClass() { return FSVolumeDatasetDescriptor.class.getCanonicalName(); } }
3,862
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/flowgraph/datanodes
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/flowgraph/datanodes/fs/FileSystemDataNode.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.modules.flowgraph.datanodes.fs;

import java.io.IOException;
import java.net.URI;

import com.google.common.base.Preconditions;
import com.typesafe.config.Config;

import joptsimple.internal.Strings;
import lombok.EqualsAndHashCode;
import lombok.Getter;

import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.service.modules.dataset.FSDatasetDescriptor;
import org.apache.gobblin.service.modules.flowgraph.BaseDataNode;
import org.apache.gobblin.service.modules.flowgraph.FlowGraphConfigurationKeys;
import org.apache.gobblin.util.ConfigUtils;

/**
 * An abstract {@link FileSystemDataNode} implementation. In addition to the required properties
 * of a {@link BaseDataNode}, a {@link FileSystemDataNode} must have a FS URI specified. Example
 * implementations include {@link HdfsDataNode} and {@link LocalFSDataNode}.
 */
@Alpha
@EqualsAndHashCode(callSuper = true)
public abstract class FileSystemDataNode extends BaseDataNode {
  public static final String FS_URI_KEY = FlowGraphConfigurationKeys.DATA_NODE_PREFIX + "fs.uri";

  @Getter
  private String fsUri;

  /**
   * Constructor. A FileSystemDataNode must have the {@link #FS_URI_KEY} property specified in
   * addition to a node Id. The URI is validated via the subclass-provided {@link #isUriValid(URI)}.
   *
   * @param nodeProps node configuration
   * @throws DataNodeCreationException if fs.uri is missing, empty, unparseable or rejected by
   *         {@link #isUriValid(URI)}
   */
  public FileSystemDataNode(Config nodeProps) throws DataNodeCreationException {
    super(nodeProps);
    try {
      String uri = ConfigUtils.getString(this.getResolvedConfig(), FS_URI_KEY, "");
      Preconditions.checkArgument(!Strings.isNullOrEmpty(uri), "fs.uri cannot be null or empty.");
      // Scheme/authority validation is delegated to the concrete subclass.
      if (!isUriValid(new URI(uri))) {
        throw new IOException("Invalid FS URI " + uri);
      }
      this.fsUri = uri;
    } catch (Exception e) {
      throw new DataNodeCreationException(e);
    }
  }

  /** Subclasses decide which FS URIs (scheme/authority) are acceptable for this node type. */
  public abstract boolean isUriValid(URI fsUri);

  @Override
  public String getDefaultDatasetDescriptorClass() {
    return FSDatasetDescriptor.class.getCanonicalName();
  }
}
3,863
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/flowgraph/datanodes
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/flowgraph/datanodes/fs/LocalFSDataNode.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.modules.flowgraph.datanodes.fs;

import java.net.URI;

import com.typesafe.config.Config;

import org.apache.gobblin.annotation.Alpha;

/**
 * An implementation of {@link LocalFSDataNode}. All the properties specific to a LocalFS based
 * data node (e.g. fs.uri) are validated here.
 */
@Alpha
public class LocalFSDataNode extends FileSystemDataNode {
  public static final String LOCAL_FS_SCHEME = "file";
  public static final String PLATFORM = "local";

  /**
   * Constructor.
   *
   * @param nodeProps node configuration
   * @throws DataNodeCreationException if fs.uri is missing or its scheme is not "file"
   */
  public LocalFSDataNode(Config nodeProps) throws DataNodeCreationException {
    super(nodeProps);
  }

  /**
   * @param fsUri FileSystem URI
   * @return true if the scheme of fsUri equals "file"
   */
  @Override
  public boolean isUriValid(URI fsUri) {
    // Fix: constant-first comparison avoids an NPE when the URI has no scheme
    // (URI.getScheme() returns null for scheme-less URIs).
    return LOCAL_FS_SCHEME.equals(fsUri.getScheme());
  }

  @Override
  public String getDefaultDatasetDescriptorPlatform() {
    return PLATFORM;
  }
}
3,864
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/flowgraph/datanodes
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/flowgraph/datanodes/fs/AdlsDataNode.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.modules.flowgraph.datanodes.fs;

import java.net.URI;

import com.google.common.base.Strings;
import com.typesafe.config.Config;

/**
 * An implementation of an ADL (Azure Data Lake) {@link org.apache.gobblin.service.modules.flowgraph.DataNode}.
 */
public class AdlsDataNode extends FileSystemDataNode {
  public static final String ADLS_SCHEME = "adl";
  public static final String ABFS_SCHEME = "abfs";

  /**
   * Constructor.
   *
   * @param nodeProps node configuration
   * @throws DataNodeCreationException if fs.uri is missing or not a valid adl/abfs URI
   */
  public AdlsDataNode(Config nodeProps) throws DataNodeCreationException {
    super(nodeProps);
  }

  /**
   * @param fsUri FileSystem URI
   * @return true if the scheme is "adl" or "abfs" and authority is not empty.
   */
  @Override
  public boolean isUriValid(URI fsUri) {
    // Fix: constant-first comparisons avoid an NPE when the URI has no scheme
    // (URI.getScheme() returns null for scheme-less URIs).
    String scheme = fsUri.getScheme();
    if (!ADLS_SCHEME.equals(scheme) && !ABFS_SCHEME.equals(scheme)) {
      return false;
    }
    // Ensure that the authority is not empty
    if (Strings.isNullOrEmpty(fsUri.getAuthority())) {
      return false;
    }
    return true;
  }

  @Override
  public String getDefaultDatasetDescriptorPlatform() {
    return ADLS_SCHEME;
  }
}
3,865
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/flowgraph/datanodes
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/flowgraph/datanodes/fs/HdfsDataNode.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.modules.flowgraph.datanodes.fs;

import java.net.URI;

import com.google.common.base.Strings;
import com.typesafe.config.Config;

import org.apache.gobblin.annotation.Alpha;

/**
 * An implementation of {@link HdfsDataNode}. All the properties specific to a HDFS based data
 * node (e.g. fs.uri) are validated here.
 */
@Alpha
public class HdfsDataNode extends FileSystemDataNode {
  public static final String HDFS_SCHEME = "hdfs";

  /**
   * Constructor.
   *
   * @param nodeProps node configuration
   * @throws DataNodeCreationException if fs.uri is missing or not a valid hdfs URI
   */
  public HdfsDataNode(Config nodeProps) throws DataNodeCreationException {
    super(nodeProps);
  }

  /**
   * @param fsUri FileSystem URI
   * @return true if the scheme is "hdfs" and authority is not empty.
   */
  @Override
  public boolean isUriValid(URI fsUri) {
    // Fix: compare against the HDFS_SCHEME constant directly (the original compared against the
    // overridable getDefaultDatasetDescriptorPlatform(), silently coupling URI validation to a
    // subclass's platform), and use a constant-first comparison to avoid an NPE when the URI has
    // no scheme (URI.getScheme() returns null for scheme-less URIs).
    if (!HDFS_SCHEME.equals(fsUri.getScheme())) {
      return false;
    }
    // Ensure that the authority is not empty
    if (Strings.isNullOrEmpty(fsUri.getAuthority())) {
      return false;
    }
    return true;
  }

  @Override
  public String getDefaultDatasetDescriptorPlatform() {
    return HDFS_SCHEME;
  }
}
3,866
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/dataset/FSDatasetDescriptor.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.modules.dataset;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.fs.GlobPattern;
import org.apache.hadoop.fs.Path;

import com.google.common.base.Splitter;
import com.google.common.collect.ImmutableMap;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;

import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.ToString;

import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.service.modules.flowgraph.DatasetDescriptorConfigKeys;
import org.apache.gobblin.service.modules.flowgraph.DatasetDescriptorErrorUtils;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.PathUtils;

/**
 * An implementation of {@link DatasetDescriptor} with FS-based storage.
 */
@Alpha
@ToString(callSuper = true, exclude = {"rawConfig"})
@EqualsAndHashCode(callSuper = true, exclude = {"rawConfig"})
public class FSDatasetDescriptor extends BaseDatasetDescriptor implements DatasetDescriptor {
  @Getter
  private final String path;
  @Getter
  private final String subPaths;
  @Getter
  private final boolean isCompacted;
  @Getter
  private final boolean isCompactedAndDeduped;
  @Getter
  private final FSDatasetPartitionConfig partitionConfig;
  @Getter
  private final Config rawConfig;

  private static final Config DEFAULT_FALLBACK =
      ConfigFactory.parseMap(ImmutableMap.<String, Object>builder()
          .put(DatasetDescriptorConfigKeys.IS_COMPACTED_KEY, false)
          .put(DatasetDescriptorConfigKeys.IS_COMPACTED_AND_DEDUPED_KEY, false)
          .build());

  /**
   * Constructor. Normalizes the configured path (scheme/authority stripped), and defaults the
   * compaction flags to false.
   *
   * @param config descriptor configuration
   * @throws IOException propagated from {@link BaseDatasetDescriptor} / partition config parsing
   */
  public FSDatasetDescriptor(Config config) throws IOException {
    super(config);
    this.path = PathUtils
        .getPathWithoutSchemeAndAuthority(new Path(ConfigUtils.getString(config, DatasetDescriptorConfigKeys.PATH_KEY,
            DatasetDescriptorConfigKeys.DATASET_DESCRIPTOR_CONFIG_ANY))).toString();
    this.subPaths = ConfigUtils.getString(config, DatasetDescriptorConfigKeys.SUBPATHS_KEY, null);
    this.isCompacted = ConfigUtils.getBoolean(config, DatasetDescriptorConfigKeys.IS_COMPACTED_KEY, false);
    this.isCompactedAndDeduped = ConfigUtils.getBoolean(config,
        DatasetDescriptorConfigKeys.IS_COMPACTED_AND_DEDUPED_KEY, false);
    this.partitionConfig = new FSDatasetPartitionConfig(config);
    this.rawConfig = config.withFallback(getPartitionConfig().getRawConfig()).withFallback(DEFAULT_FALLBACK)
        .withFallback(super.getRawConfig());
    this.isInputDataset = ConfigUtils.getBoolean(config, DatasetDescriptorConfigKeys.IS_INPUT_DATASET, false);
  }

  /**
   * If other descriptor has subpaths, this method checks that each concatenation of path + subpath
   * is matched by this path. Otherwise, it just checks the path.
   *
   * @param inputDatasetDescriptorConfig descriptor whose path/subpaths to check
   * @return empty list if all subpaths are matched by this {@link DatasetDescriptor}'s path, or if
   *         subpaths is null and the other's path matches this path; otherwise the accumulated errors.
   */
  @Override
  protected ArrayList<String> isPathContaining(DatasetDescriptor inputDatasetDescriptorConfig) {
    ArrayList<String> errors = new ArrayList<>();
    String otherPath = inputDatasetDescriptorConfig.getPath();
    String otherSubPaths = ((FSDatasetDescriptor) inputDatasetDescriptorConfig).getSubPaths();

    // This allows the special case where "other" is a glob, but is also an exact match with "this" path.
    if (getPath().equals(otherPath)) {
      return errors;
    }

    if (otherSubPaths != null) {
      // Subpaths arrive as "{a,b,c}"; strip the braces and check each concatenated path.
      List<String> subPaths =
          Splitter.on(",").splitToList(StringUtils.stripEnd(StringUtils.stripStart(otherSubPaths, "{"), "}"));
      for (String subPath : subPaths) {
        ArrayList<String> pathErrors = isPathContaining(new Path(otherPath, subPath).toString(),
            inputDatasetDescriptorConfig.getIsInputDataset());
        if (pathErrors.size() != 0) {
          // Fail fast on the first non-matching subpath.
          return pathErrors;
        }
      }
      return errors;
    } else {
      return isPathContaining(otherPath, inputDatasetDescriptorConfig.getIsInputDataset());
    }
  }

  /**
   * A helper to determine if the path description of this {@link DatasetDescriptor} is a superset
   * of paths accepted by the other {@link DatasetDescriptor}. If the path description of the other
   * {@link DatasetDescriptor} is a glob pattern, we return an error.
   *
   * @param inputDatasetDescriptorConfigPath a concrete path to test against this descriptor's glob.
   * @param inputDataset whether the other descriptor is the flow's input dataset (affects the
   *        error-message prefix only).
   * @return empty list if this descriptor's (possibly glob) path matches the given path; otherwise
   *         the accumulated errors.
   */
  private ArrayList<String> isPathContaining(String inputDatasetDescriptorConfigPath, Boolean inputDataset) {
    String datasetDescriptorPrefix = inputDataset ? DatasetDescriptorConfigKeys.FLOW_INPUT_DATASET_DESCRIPTOR_PREFIX
        : DatasetDescriptorConfigKeys.FLOW_OUTPUT_DATASET_DESCRIPTOR_PREFIX;
    ArrayList<String> errors = new ArrayList<>();
    DatasetDescriptorErrorUtils.populateErrorForDatasetDescriptorKey(errors, inputDataset,
        DatasetDescriptorConfigKeys.PATH_KEY, this.getPath(), inputDatasetDescriptorConfigPath, true);
    if (errors.size() != 0) {
      return errors;
    }
    // A wildcard path on this side accepts any path on the other side.
    if (DatasetDescriptorConfigKeys.DATASET_DESCRIPTOR_CONFIG_ANY.equals(this.getPath())) {
      return errors;
    }
    // The other side must be a concrete path: a glob cannot be contained in a glob.
    if (PathUtils.isGlob(new Path(inputDatasetDescriptorConfigPath))) {
      errors.add(String.format(DatasetDescriptorErrorUtils.DATASET_DESCRIPTOR_KEY_MISMATCH_ERROR_TEMPLATE_IS_GLOB_PATTERN,
          datasetDescriptorPrefix, DatasetDescriptorConfigKeys.PATH_KEY, inputDatasetDescriptorConfigPath));
      return errors;
    }

    GlobPattern globPattern = new GlobPattern(this.getPath());
    if (!globPattern.matches(inputDatasetDescriptorConfigPath)) {
      errors.add(String.format(DatasetDescriptorErrorUtils.DATASET_DESCRIPTOR_KEY_MISMATCH_ERROR_TEMPLATE_GLOB_PATTERN,
          datasetDescriptorPrefix, DatasetDescriptorConfigKeys.PATH_KEY, inputDatasetDescriptorConfigPath,
          this.getPath()));
    }
    return errors;
  }

  /**
   * {@inheritDoc}
   */
  @Override
  public ArrayList<String> contains(DatasetDescriptor inputDatasetDescriptorConfig) {
    // Fix: the original evaluated super.contains(...) twice (once for the size check and once for
    // the return); compute it once and reuse the result.
    ArrayList<String> superErrors = super.contains(inputDatasetDescriptorConfig);
    if (!superErrors.isEmpty()) {
      return superErrors;
    }

    ArrayList<String> errors = new ArrayList<>();
    FSDatasetDescriptor inputFSDatasetDescriptor = (FSDatasetDescriptor) inputDatasetDescriptorConfig;

    DatasetDescriptorErrorUtils.populateErrorForDatasetDescriptorKey(errors,
        inputDatasetDescriptorConfig.getIsInputDataset(), DatasetDescriptorConfigKeys.IS_COMPACTED_KEY,
        String.valueOf(this.isCompacted()), String.valueOf(inputFSDatasetDescriptor.isCompacted()), false);
    DatasetDescriptorErrorUtils.populateErrorForDatasetDescriptorKey(errors,
        inputDatasetDescriptorConfig.getIsInputDataset(), DatasetDescriptorConfigKeys.IS_COMPACTED_AND_DEDUPED_KEY,
        String.valueOf(this.isCompactedAndDeduped()), String.valueOf(inputFSDatasetDescriptor.isCompactedAndDeduped()),
        false);
    errors.addAll(this.getPartitionConfig().contains(inputFSDatasetDescriptor.getPartitionConfig()));
    return errors;
  }
}
3,867
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/dataset/FormatConfig.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.modules.dataset;

import java.io.IOException;

import com.google.common.collect.ImmutableMap;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;

import java.util.ArrayList;

import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.ToString;

import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.service.modules.flowgraph.DatasetDescriptorConfigKeys;
import org.apache.gobblin.service.modules.flowgraph.DatasetDescriptorErrorUtils;
import org.apache.gobblin.util.ConfigUtils;


/**
 * A location-independent description of a dataset's physical representation:
 * <ul>
 *   <p> Data format (e.g. Avro, CSV, JSON). </p>
 *   <p> Data encoding type (e.g. Gzip, Bzip2, Base64, Deflate). </p>
 *   <p> Encryption properties (e.g. aes_rotating, gpg). </p>
 * </ul>
 */
@Alpha
@ToString(exclude = {"rawConfig", "isInputDataset"})
@EqualsAndHashCode(exclude = {"rawConfig", "isInputDataset"})
public class FormatConfig {
  // Data format, lower-cased; defaults to the "any" wildcard.
  @Getter
  private final String format;
  // Codec/encoding type, lower-cased; defaults to the "any" wildcard.
  @Getter
  private final String codecType;
  @Getter
  private final EncryptionConfig encryptionConfig;
  @Getter
  private final Config rawConfig;
  @Getter
  protected Boolean isInputDataset;

  // Wildcards used when format/codec are not configured explicitly.
  private static final Config DEFAULT_FALLBACK =
      ConfigFactory.parseMap(ImmutableMap.<String, Object>builder()
          .put(DatasetDescriptorConfigKeys.FORMAT_KEY, DatasetDescriptorConfigKeys.DATASET_DESCRIPTOR_CONFIG_ANY)
          .put(DatasetDescriptorConfigKeys.CODEC_KEY, DatasetDescriptorConfigKeys.DATASET_DESCRIPTOR_CONFIG_ANY)
          .build());

  /**
   * Builds a {@link FormatConfig} from the given descriptor config.
   *
   * @param config descriptor config; format and codec default to the "any" wildcard when absent.
   * @throws IOException if the nested {@link EncryptionConfig} is invalid.
   */
  public FormatConfig(Config config) throws IOException {
    String declaredFormat = ConfigUtils.getString(config, DatasetDescriptorConfigKeys.FORMAT_KEY,
        DatasetDescriptorConfigKeys.DATASET_DESCRIPTOR_CONFIG_ANY);
    this.format = declaredFormat.toLowerCase();

    String declaredCodec = ConfigUtils.getString(config, DatasetDescriptorConfigKeys.CODEC_KEY,
        DatasetDescriptorConfigKeys.DATASET_DESCRIPTOR_CONFIG_ANY);
    this.codecType = declaredCodec.toLowerCase();

    // Encryption settings live under their own prefix; an empty sub-config yields the "any" defaults.
    Config encryptionSubConfig = ConfigUtils.getConfig(config, DatasetDescriptorConfigKeys.ENCYPTION_PREFIX,
        ConfigFactory.empty());
    this.encryptionConfig = new EncryptionConfig(encryptionSubConfig);

    this.rawConfig = config
        .withFallback(this.encryptionConfig.getRawConfig().atPath(DatasetDescriptorConfigKeys.ENCYPTION_PREFIX))
        .withFallback(DEFAULT_FALLBACK);
    this.isInputDataset = ConfigUtils.getBoolean(config, DatasetDescriptorConfigKeys.IS_INPUT_DATASET, false);
  }

  /**
   * Compares this format description against the user-supplied one and collects every mismatch.
   *
   * @param inputDatasetDescriptorConfig the flow-config side {@link FormatConfig} to validate.
   * @return all mismatch messages for format, codec, and encryption; empty when fully compatible.
   */
  public ArrayList<String> contains(FormatConfig inputDatasetDescriptorConfig) {
    ArrayList<String> mismatches = new ArrayList<>();
    Boolean isInput = inputDatasetDescriptorConfig.getIsInputDataset();

    // Format and codec checks are plain key comparisons via the shared error-population helper.
    DatasetDescriptorErrorUtils.populateErrorForDatasetDescriptorKey(mismatches, isInput,
        DatasetDescriptorConfigKeys.FORMAT_KEY, this.getFormat(), inputDatasetDescriptorConfig.getFormat(), false);
    DatasetDescriptorErrorUtils.populateErrorForDatasetDescriptorKey(mismatches, isInput,
        DatasetDescriptorConfigKeys.CODEC_KEY, this.getCodecType(), inputDatasetDescriptorConfig.getCodecType(), false);

    // Encryption comparison is delegated wholesale to the nested config.
    mismatches.addAll(this.getEncryptionConfig().contains(inputDatasetDescriptorConfig.getEncryptionConfig()));
    return mismatches;
  }
}
3,868
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/dataset/BaseDatasetDescriptor.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.modules.dataset;

import java.io.IOException;

import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableMap;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;

import java.util.ArrayList;

import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.ToString;

import org.apache.gobblin.service.modules.flowgraph.DatasetDescriptorConfigKeys;
import org.apache.gobblin.service.modules.flowgraph.DatasetDescriptorErrorUtils;
import org.apache.gobblin.util.ConfigUtils;


/**
 * Common base for {@link DatasetDescriptor} implementations: holds the platform, format config,
 * retention flag, and description, and implements the shared part of the containment check.
 */
@EqualsAndHashCode(exclude = {"description", "rawConfig", "isInputDataset"})
@ToString(exclude = {"description", "rawConfig", "isInputDataset"})
public abstract class BaseDatasetDescriptor implements DatasetDescriptor {
  // Storage platform backing the dataset (e.g. hdfs, adls); always lower-cased.
  @Getter
  private final String platform;
  @Getter
  private final FormatConfig formatConfig;
  @Getter
  private final boolean isRetentionApplied;
  @Getter
  private final String description;
  @Getter
  private final Config rawConfig;
  @Getter
  protected Boolean isInputDataset;

  // Defaults: wildcard path, retention not applied.
  private static final Config DEFAULT_FALLBACK =
      ConfigFactory.parseMap(ImmutableMap.<String, Object>builder()
          .put(DatasetDescriptorConfigKeys.PATH_KEY, DatasetDescriptorConfigKeys.DATASET_DESCRIPTOR_CONFIG_ANY)
          .put(DatasetDescriptorConfigKeys.IS_RETENTION_APPLIED_KEY, false)
          .build());

  /**
   * Builds the common descriptor state from the given config.
   *
   * @param config descriptor config; must contain the platform key.
   * @throws IOException if the nested {@link FormatConfig} is invalid.
   * @throws IllegalArgumentException if the platform key is missing.
   */
  public BaseDatasetDescriptor(Config config) throws IOException {
    Preconditions.checkArgument(config.hasPath(DatasetDescriptorConfigKeys.PLATFORM_KEY),
        "Dataset descriptor config must specify platform");

    String declaredPlatform = config.getString(DatasetDescriptorConfigKeys.PLATFORM_KEY);
    this.platform = declaredPlatform.toLowerCase();
    this.formatConfig = new FormatConfig(config);
    this.isRetentionApplied = ConfigUtils.getBoolean(config, DatasetDescriptorConfigKeys.IS_RETENTION_APPLIED_KEY,
        false);
    this.description = ConfigUtils.getString(config, DatasetDescriptorConfigKeys.DESCRIPTION_KEY, "");
    this.rawConfig = config.withFallback(this.formatConfig.getRawConfig()).withFallback(DEFAULT_FALLBACK);
    this.isInputDataset = ConfigUtils.getBoolean(config, DatasetDescriptorConfigKeys.IS_INPUT_DATASET, false);
  }

  /**
   * {@inheritDoc}
   */
  protected abstract ArrayList<String> isPathContaining(DatasetDescriptor other);

  /**
   * Checks whether the datasets described by this descriptor form a superset of those described by the
   * flow-config descriptor. This operation is non-commutative.
   *
   * @param inputDatasetDescriptorConfig the flow configuration sent from the user side, compared against
   *                                     the flowgraph edges.
   * @return all mismatch messages; empty when this descriptor contains the other.
   */
  @Override
  public ArrayList<String> contains(DatasetDescriptor inputDatasetDescriptorConfig) {
    ArrayList<String> mismatches = new ArrayList<>();

    // A descriptor trivially contains itself.
    if (this == inputDatasetDescriptorConfig) {
      return mismatches;
    }

    Boolean isInput = inputDatasetDescriptorConfig.getIsInputDataset();

    // Keep the comparison order stable: class, platform, retention, then path and format.
    DatasetDescriptorErrorUtils.populateErrorForDatasetDescriptorKey(mismatches, isInput,
        DatasetDescriptorConfigKeys.CLASS_KEY, this.getClass().toString(),
        inputDatasetDescriptorConfig.getClass().toString(), false);
    DatasetDescriptorErrorUtils.populateErrorForDatasetDescriptorKey(mismatches, isInput,
        DatasetDescriptorConfigKeys.PLATFORM_KEY, this.getPlatform(),
        inputDatasetDescriptorConfig.getPlatform(), false);
    DatasetDescriptorErrorUtils.populateErrorForDatasetDescriptorKey(mismatches, isInput,
        DatasetDescriptorConfigKeys.IS_RETENTION_APPLIED_KEY, String.valueOf(this.isRetentionApplied()),
        String.valueOf(inputDatasetDescriptorConfig.isRetentionApplied()), false);

    mismatches.addAll(isPathContaining(inputDatasetDescriptorConfig));
    mismatches.addAll(getFormatConfig().contains(inputDatasetDescriptorConfig.getFormatConfig()));
    return mismatches;
  }
}
3,869
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/dataset/FSDatasetPartitionConfig.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.modules.dataset;

import java.io.IOException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.regex.Pattern;
import java.util.regex.PatternSyntaxException;

import com.google.common.base.Enums;
import com.google.common.collect.ImmutableMap;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;

import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.ToString;
import lombok.extern.slf4j.Slf4j;

import org.apache.gobblin.service.modules.flowgraph.DatasetDescriptorConfigKeys;
import org.apache.gobblin.service.modules.flowgraph.DatasetDescriptorErrorUtils;
import org.apache.gobblin.util.ConfigUtils;


/**
 * Describes the partition layout of a filesystem-based dataset. Supported partition types include
 * "datetime" and "regex"; each type's partition pattern (e.g. a date format or regex) is validated
 * at construction time.
 */
@Slf4j
@ToString(exclude = {"rawConfig", "isInputDataset"})
@EqualsAndHashCode(exclude = {"rawConfig", "isInputDataset"})
public class FSDatasetPartitionConfig {
  // One of the PartitionType values, lower-cased.
  @Getter
  private final String partitionType;
  // Pattern matching the partition type: a SimpleDateFormat for datetime, a regex for regex,
  // or the none/any sentinel values.
  @Getter
  private final String partitionPattern;
  @Getter
  private final Config rawConfig;
  @Getter
  protected Boolean isInputDataset;

  /** The set of recognized partition schemes. */
  public enum PartitionType {
    DATETIME("datetime"),
    REGEX("regex"),
    NONE(DatasetDescriptorConfigKeys.DATASET_DESCRIPTOR_CONFIG_NONE),
    ANY(DatasetDescriptorConfigKeys.DATASET_DESCRIPTOR_CONFIG_ANY);

    private final String type;

    PartitionType(final String type) {
      this.type = type;
    }

    @Override
    public String toString() {
      return this.type;
    }
  }

  // Wildcards used when the partition type/pattern are not configured explicitly.
  private static final Config DEFAULT_FALLBACK =
      ConfigFactory.parseMap(ImmutableMap.<String, Object>builder()
          .put(DatasetDescriptorConfigKeys.PARTITION_TYPE_KEY,
              DatasetDescriptorConfigKeys.DATASET_DESCRIPTOR_CONFIG_ANY)
          .put(DatasetDescriptorConfigKeys.PARTITION_PATTERN_KEY,
              DatasetDescriptorConfigKeys.DATASET_DESCRIPTOR_CONFIG_ANY)
          .build());

  /**
   * Builds the partition config from the "partition" prefix of the given descriptor config.
   *
   * @param config descriptor config.
   * @throws IOException if the partition type is unknown or the pattern is invalid for the type.
   */
  public FSDatasetPartitionConfig(Config config) throws IOException {
    String configuredType = ConfigUtils.getString(config, DatasetDescriptorConfigKeys.PARTITION_PREFIX + "."
        + DatasetDescriptorConfigKeys.PARTITION_TYPE_KEY,
        DatasetDescriptorConfigKeys.DATASET_DESCRIPTOR_CONFIG_ANY).toLowerCase();
    String configuredPattern = ConfigUtils.getString(config, DatasetDescriptorConfigKeys.PARTITION_PREFIX + "."
        + DatasetDescriptorConfigKeys.PARTITION_PATTERN_KEY,
        DatasetDescriptorConfigKeys.DATASET_DESCRIPTOR_CONFIG_ANY);

    // For the none/any types the pattern is forced to the matching sentinel, regardless of what was configured.
    if (configuredType.equalsIgnoreCase(PartitionType.NONE.name())) {
      configuredPattern = DatasetDescriptorConfigKeys.DATASET_DESCRIPTOR_CONFIG_NONE;
    } else if (configuredType.equalsIgnoreCase(PartitionType.ANY.name())) {
      configuredPattern = DatasetDescriptorConfigKeys.DATASET_DESCRIPTOR_CONFIG_ANY;
    }

    validatePartitionConfig(configuredType, configuredPattern);

    this.partitionType = configuredType;
    this.partitionPattern = configuredPattern;
    this.rawConfig = ConfigUtils.getConfig(config, DatasetDescriptorConfigKeys.PARTITION_PREFIX, DEFAULT_FALLBACK);
    this.isInputDataset = ConfigUtils.getBoolean(config, DatasetDescriptorConfigKeys.IS_INPUT_DATASET, false);
  }

  /**
   * Verifies that the partition pattern is well-formed for the given partition type.
   *
   * @throws IOException on an unknown type or a type/pattern mismatch.
   */
  private void validatePartitionConfig(String partitionType, String partitionPattern) throws IOException {
    if (!Enums.getIfPresent(PartitionType.class, partitionType.toUpperCase()).isPresent()) {
      log.error("Invalid partition type {}", partitionType);
      throw new IOException("Invalid partition type");
    }

    PartitionType resolvedType = PartitionType.valueOf(partitionType.toUpperCase());
    if (resolvedType == PartitionType.DATETIME) {
      // A datetime pattern must parse as a SimpleDateFormat.
      try {
        new SimpleDateFormat(partitionPattern);
      } catch (Exception e) {
        log.error("Invalid datetime partition pattern {}", partitionPattern);
        throw new IOException(e);
      }
    } else if (resolvedType == PartitionType.REGEX) {
      // A regex pattern must compile.
      try {
        Pattern.compile(partitionPattern);
      } catch (PatternSyntaxException e) {
        log.error("Invalid regex partition pattern {}", partitionPattern);
        throw new IOException(e);
      }
    } else if (resolvedType == PartitionType.NONE) {
      if (!partitionPattern.equalsIgnoreCase(DatasetDescriptorConfigKeys.DATASET_DESCRIPTOR_CONFIG_NONE)) {
        log.error("Partition pattern {} incompatible with partition type {}", partitionPattern, partitionType);
        throw new IOException("Incompatible partition pattern/type");
      }
    } else if (resolvedType == PartitionType.ANY) {
      if (!partitionPattern.equalsIgnoreCase(DatasetDescriptorConfigKeys.DATASET_DESCRIPTOR_CONFIG_ANY)) {
        log.error("Partition pattern {} incompatible with partition type {}", partitionPattern, partitionType);
        throw new IOException("Incompatible partition pattern/type");
      }
    }
  }

  /**
   * Compares this partition config against the user-supplied one and collects every mismatch.
   *
   * @param inputDatasetDescriptorConfig the flow-config side partition config to validate.
   * @return mismatch messages for partition type and pattern; empty when compatible.
   */
  public ArrayList<String> contains(FSDatasetPartitionConfig inputDatasetDescriptorConfig) {
    ArrayList<String> mismatches = new ArrayList<>();
    Boolean isInput = inputDatasetDescriptorConfig.getIsInputDataset();

    DatasetDescriptorErrorUtils.populateErrorForDatasetDescriptorKeyPartition(mismatches, isInput,
        DatasetDescriptorConfigKeys.PARTITION_PREFIX, DatasetDescriptorConfigKeys.PARTITION_TYPE_KEY,
        this.getPartitionType(), inputDatasetDescriptorConfig.getPartitionType(), false);
    DatasetDescriptorErrorUtils.populateErrorForDatasetDescriptorKeyPartition(mismatches, isInput,
        DatasetDescriptorConfigKeys.PARTITION_PREFIX, DatasetDescriptorConfigKeys.PARTITION_PATTERN_KEY,
        this.getPartitionPattern(), inputDatasetDescriptorConfig.getPartitionPattern(), false);
    return mismatches;
  }
}
3,870
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/dataset/DatasetDescriptorUtils.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.service.modules.dataset; import com.typesafe.config.Config; import lombok.extern.slf4j.Slf4j; import org.apache.gobblin.service.modules.flowgraph.DatasetDescriptorConfigKeys; import org.apache.gobblin.util.reflection.GobblinConstructorUtils; @Slf4j public class DatasetDescriptorUtils { /** * Given dataset descriptor config, construct a {@link DatasetDescriptor} object */ public static DatasetDescriptor constructDatasetDescriptor(Config descriptorConfig) throws ReflectiveOperationException { Class datasetDescriptorClass = Class.forName(descriptorConfig.getString(DatasetDescriptorConfigKeys.CLASS_KEY)); return (DatasetDescriptor) GobblinConstructorUtils.invokeLongestConstructor(datasetDescriptorClass, descriptorConfig); } }
3,871
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/dataset/DatasetDescriptor.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.modules.dataset;

import com.typesafe.config.Config;

import java.util.ArrayList;

import org.apache.gobblin.annotation.Alpha;


/**
 * The interface for dataset descriptors. Each dataset is described in terms of the following attributes:
 * <ul>
 *   <p> platform (e.g. HDFS, ADLS, JDBC). </p>
 *   <p> path, which describes the fully qualified name of the dataset. </p>
 *   <p> a format descriptor, which encapsulates its representation (e.g. avro, csv), codec (e.g. gzip, deflate),
 *   and encryption config (e.g. aes_rotating, gpg). </p>
 * </ul>
 */
@Alpha
public interface DatasetDescriptor {
  /**
   * @return the dataset platform i.e. the storage system backing the dataset (e.g. HDFS, ADLS, JDBC etc.).
   */
  public String getPlatform();

  /**
   * Returns the fully qualified name of a dataset. The fully qualified name is the absolute directory path of a
   * dataset when the dataset is backed by a FileSystem. In the case of a database table, it is dbName.tableName.
   * @return dataset path.
   */
  public String getPath();

  /**
   * @return storage format of the dataset (representation, codec, and encryption settings).
   */
  public FormatConfig getFormatConfig();

  /**
   * @return true if retention has been applied to the dataset.
   */
  public boolean isRetentionApplied();

  /**
   * @return a human-readable description of the dataset.
   */
  public String getDescription();

  /**
   * Checks whether this descriptor contains the given one, i.e. whether the datasets described by {@code other}
   * are a subset of the datasets described by this descriptor. This operation is non-commutative.
   *
   * @param other the descriptor (typically built from user flow config) to compare against this one.
   * @return the list of mismatch messages found during the comparison; an empty list means {@code other}
   *         is contained by this descriptor.
   */
  public ArrayList<String> contains(DatasetDescriptor other);

  /**
   * @return the raw config this descriptor was built from (including any defaults applied as fallbacks).
   */
  public Config getRawConfig();

  /**
   * @return true if this descriptor represents the flow's input dataset (as opposed to its output).
   */
  public Boolean getIsInputDataset();
}
3,872
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/dataset/EncryptionConfig.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.modules.dataset;

import java.io.IOException;

import com.google.common.base.Enums;
import com.google.common.collect.ImmutableMap;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;

import java.util.ArrayList;

import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.ToString;
import lombok.extern.slf4j.Slf4j;

import org.apache.gobblin.service.modules.flowgraph.DatasetDescriptorConfigKeys;
import org.apache.gobblin.service.modules.flowgraph.DatasetDescriptorErrorUtils;
import org.apache.gobblin.util.ConfigUtils;


/**
 * Describes the encryption properties of a dataset: algorithm, level (file/row/field), the fields
 * encrypted (when field-level), and keystore type/encoding. Values default to the "any" wildcard;
 * an algorithm of "none" forces every other attribute to "none" as well.
 */
@Slf4j
@ToString(exclude = {"rawConfig", "isInputDataset"})
@EqualsAndHashCode(exclude = {"rawConfig", "isInputDataset"})
public class EncryptionConfig {
  // All values below are lower-cased; "any"/"none" act as wildcard/absent sentinels.
  @Getter
  private final String encryptionAlgorithm;
  @Getter
  private final String encryptionLevel;
  // Meaningful only for field-level encryption; presumably a list of field names — TODO confirm format.
  @Getter
  private final String encryptedFields;
  @Getter
  private final String keystoreType;
  @Getter
  private final String keystoreEncoding;
  @Getter
  private final Config rawConfig;
  @Getter
  protected Boolean isInputDataset;

  /** Granularity at which encryption is applied. */
  public enum EncryptionLevel {
    FILE("file"),
    ROW("row"),
    FIELD("field"),
    NONE(DatasetDescriptorConfigKeys.DATASET_DESCRIPTOR_CONFIG_NONE),
    ANY(DatasetDescriptorConfigKeys.DATASET_DESCRIPTOR_CONFIG_ANY);

    private final String level;

    EncryptionLevel(final String level) {
      this.level = level;
    }

    @Override
    public String toString() {
      return this.level;
    }
  }

  // Wildcard defaults applied for any encryption attribute the config omits.
  private static final Config DEFAULT_FALLBACK =
      ConfigFactory.parseMap(ImmutableMap.<String, Object>builder()
          .put(DatasetDescriptorConfigKeys.ENCRYPTION_ALGORITHM_KEY,
              DatasetDescriptorConfigKeys.DATASET_DESCRIPTOR_CONFIG_ANY)
          .put(DatasetDescriptorConfigKeys.ENCRYPTION_KEYSTORE_TYPE_KEY,
              DatasetDescriptorConfigKeys.DATASET_DESCRIPTOR_CONFIG_ANY)
          .put(DatasetDescriptorConfigKeys.ENCRYPTION_KEYSTORE_ENCODING_KEY,
              DatasetDescriptorConfigKeys.DATASET_DESCRIPTOR_CONFIG_ANY)
          .put(DatasetDescriptorConfigKeys.ENCRYPTION_LEVEL_KEY,
              DatasetDescriptorConfigKeys.DATASET_DESCRIPTOR_CONFIG_ANY)
          .put(DatasetDescriptorConfigKeys.ENCRYPTED_FIELDS,
              DatasetDescriptorConfigKeys.DATASET_DESCRIPTOR_CONFIG_ANY)
          .build());

  /**
   * Builds an {@link EncryptionConfig} from the encryption sub-config of a dataset descriptor.
   *
   * @param encryptionConfig the config under the encryption prefix; attributes default to "any".
   * @throws IOException if the encryption level is unknown or inconsistent with encryptedFields.
   */
  public EncryptionConfig(Config encryptionConfig) throws IOException {
    this.encryptionAlgorithm = ConfigUtils.getString(encryptionConfig,
        DatasetDescriptorConfigKeys.ENCRYPTION_ALGORITHM_KEY,
        DatasetDescriptorConfigKeys.DATASET_DESCRIPTOR_CONFIG_ANY).toLowerCase();
    if (this.encryptionAlgorithm.equalsIgnoreCase(DatasetDescriptorConfigKeys.DATASET_DESCRIPTOR_CONFIG_NONE)) {
      // No encryption: every other attribute is forced to "none"; configured values are ignored
      // and no validation is needed.
      this.keystoreType = DatasetDescriptorConfigKeys.DATASET_DESCRIPTOR_CONFIG_NONE;
      this.keystoreEncoding = DatasetDescriptorConfigKeys.DATASET_DESCRIPTOR_CONFIG_NONE;
      this.encryptionLevel = DatasetDescriptorConfigKeys.DATASET_DESCRIPTOR_CONFIG_NONE;
      this.encryptedFields = DatasetDescriptorConfigKeys.DATASET_DESCRIPTOR_CONFIG_NONE;
    } else {
      this.keystoreType = ConfigUtils.getString(encryptionConfig,
          DatasetDescriptorConfigKeys.ENCRYPTION_KEYSTORE_TYPE_KEY,
          DatasetDescriptorConfigKeys.DATASET_DESCRIPTOR_CONFIG_ANY).toLowerCase();
      this.keystoreEncoding = ConfigUtils.getString(encryptionConfig,
          DatasetDescriptorConfigKeys.ENCRYPTION_KEYSTORE_ENCODING_KEY,
          DatasetDescriptorConfigKeys.DATASET_DESCRIPTOR_CONFIG_ANY).toLowerCase();
      this.encryptionLevel = ConfigUtils.getString(encryptionConfig,
          DatasetDescriptorConfigKeys.ENCRYPTION_LEVEL_KEY,
          DatasetDescriptorConfigKeys.DATASET_DESCRIPTOR_CONFIG_ANY).toLowerCase();
      this.encryptedFields = ConfigUtils.getString(encryptionConfig,
          DatasetDescriptorConfigKeys.ENCRYPTED_FIELDS,
          DatasetDescriptorConfigKeys.DATASET_DESCRIPTOR_CONFIG_ANY).toLowerCase();
      validate(this.encryptionLevel, this.encryptedFields);
    }
    this.rawConfig = encryptionConfig.withFallback(DEFAULT_FALLBACK);
    this.isInputDataset = ConfigUtils.getBoolean(encryptionConfig,
        DatasetDescriptorConfigKeys.IS_INPUT_DATASET, false);
  }

  /**
   * Checks that encryptedFields is consistent with the encryption level: field-level encryption
   * requires concrete field names (not "any"/"none"), while level "none" requires fields "none".
   * Other levels (file/row/any) accept any value.
   *
   * @throws IOException on an unknown level or an inconsistent level/fields combination.
   */
  private void validate(String encryptionLevel, String encryptedFields) throws IOException {
    if (!Enums.getIfPresent(EncryptionLevel.class, encryptionLevel.toUpperCase()).isPresent()) {
      throw new IOException("Invalid encryption level " + encryptionLevel);
    }
    switch (EncryptionLevel.valueOf(encryptionLevel.toUpperCase())) {
      case FIELD:
        // Field-level encryption must name the fields; the sentinels are not acceptable here.
        if ((encryptedFields.equalsIgnoreCase(DatasetDescriptorConfigKeys.DATASET_DESCRIPTOR_CONFIG_ANY))
            || (encryptedFields.equalsIgnoreCase(DatasetDescriptorConfigKeys.DATASET_DESCRIPTOR_CONFIG_NONE))) {
          log.error("Invalid input for encryptedFields {}", encryptedFields);
          throw new IOException("Invalid encryptedFields");
        }
        break;
      case NONE:
        // With no encryption, no fields may be marked as encrypted.
        if (!encryptedFields.equalsIgnoreCase(DatasetDescriptorConfigKeys.DATASET_DESCRIPTOR_CONFIG_NONE)) {
          log.error("Invalid input for encryptedFields {}", encryptedFields);
          throw new IOException("Invalid encryptedFields");
        }
        break;
      default:
        break;
    }
    return;
  }

  /**
   * Compares this encryption config against the user-supplied one and collects every mismatch
   * (algorithm, keystore type/encoding, level, and encrypted fields).
   *
   * @param inputDatasetDescriptorConfig the flow-config side encryption config to validate.
   * @return all mismatch messages; empty when fully compatible.
   */
  public ArrayList<String> contains(EncryptionConfig inputDatasetDescriptorConfig) {
    ArrayList<String> errors = new ArrayList<>();
    DatasetDescriptorErrorUtils.populateErrorForDatasetDescriptorKey(errors,
        inputDatasetDescriptorConfig.getIsInputDataset(),
        DatasetDescriptorConfigKeys.ENCRYPTION_ALGORITHM_KEY, this.getEncryptionAlgorithm(),
        inputDatasetDescriptorConfig.getEncryptionAlgorithm(), false);
    DatasetDescriptorErrorUtils.populateErrorForDatasetDescriptorKey(errors,
        inputDatasetDescriptorConfig.getIsInputDataset(),
        DatasetDescriptorConfigKeys.ENCRYPTION_KEYSTORE_TYPE_KEY, this.getKeystoreType(),
        inputDatasetDescriptorConfig.getKeystoreType(), false);
    DatasetDescriptorErrorUtils.populateErrorForDatasetDescriptorKey(errors,
        inputDatasetDescriptorConfig.getIsInputDataset(),
        DatasetDescriptorConfigKeys.ENCRYPTION_KEYSTORE_ENCODING_KEY, this.getKeystoreEncoding(),
        inputDatasetDescriptorConfig.getKeystoreEncoding(), false);
    DatasetDescriptorErrorUtils.populateErrorForDatasetDescriptorKey(errors,
        inputDatasetDescriptorConfig.getIsInputDataset(),
        DatasetDescriptorConfigKeys.ENCRYPTION_LEVEL_KEY, this.getEncryptionLevel(),
        inputDatasetDescriptorConfig.getEncryptionLevel(), false);
    DatasetDescriptorErrorUtils.populateErrorForDatasetDescriptorKey(errors,
        inputDatasetDescriptorConfig.getIsInputDataset(),
        DatasetDescriptorConfigKeys.ENCRYPTED_FIELDS, this.getEncryptedFields(),
        inputDatasetDescriptorConfig.getEncryptedFields(), false);
    return errors;
  }
}
3,873
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/dataset/IcebergDatasetDescriptor.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.modules.dataset;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import com.google.common.base.Joiner;
import com.google.common.base.Splitter;
import com.typesafe.config.Config;

import lombok.EqualsAndHashCode;
import lombok.Getter;

import org.apache.gobblin.service.modules.flowgraph.DatasetDescriptorConfigKeys;
import org.apache.gobblin.service.modules.flowgraph.DatasetDescriptorErrorUtils;
import org.apache.gobblin.util.ConfigUtils;


/**
 * {@link IcebergDatasetDescriptor} is a dataset descriptor for an Iceberg-based table, independent of the type
 * of Iceberg catalog. Fields {@link IcebergDatasetDescriptor#databaseName} and
 * {@link IcebergDatasetDescriptor#tableName} identify the iceberg table; its path is the pair joined by
 * {@link IcebergDatasetDescriptor#SEPARATION_CHAR}.
 */
@EqualsAndHashCode (callSuper = true)
public class IcebergDatasetDescriptor extends BaseDatasetDescriptor {
  // Separator between database and table name in the synthesized path ("db;table").
  protected static final String SEPARATION_CHAR = ";";

  protected final String databaseName;
  protected final String tableName;
  // Fully qualified name: databaseName + SEPARATION_CHAR + tableName.
  @Getter
  private final String path;

  /**
   * Constructor for {@link IcebergDatasetDescriptor}.
   *
   * @param config descriptor config; must specify a non-empty database and table name, and a platform
   *               accepted by {@link #isPlatformValid()}.
   * @throws IOException on an invalid platform or missing database/table name.
   */
  public IcebergDatasetDescriptor(Config config) throws IOException {
    super(config);
    if (!isPlatformValid()) {
      throw new IOException("Invalid platform specified for IcebergDatasetDescriptor: " + getPlatform());
    }
    // setting defaults to empty; later used to throw as IO Exception
    this.databaseName = ConfigUtils.getString(config, DatasetDescriptorConfigKeys.DATABASE_KEY, "");
    this.tableName = ConfigUtils.getString(config, DatasetDescriptorConfigKeys.TABLE_KEY, "");
    if (this.databaseName.isEmpty() || this.tableName.isEmpty()) {
      throw new IOException("Invalid iceberg database or table name: " + this.databaseName + ":" + this.tableName);
    }
    this.path = fullyQualifiedTableName(this.databaseName, this.tableName);
    this.isInputDataset = ConfigUtils.getBoolean(config, DatasetDescriptorConfigKeys.IS_INPUT_DATASET, false);
  }

  /**
   * @return true when the configured platform is "iceberg". Subclasses (e.g. catalog-specific descriptors)
   * may override to accept a different platform string.
   */
  protected boolean isPlatformValid() {
    return "iceberg".equalsIgnoreCase(getPlatform());
  }

  // Joins database and table into the "db;table" form used as this descriptor's path.
  private String fullyQualifiedTableName(String databaseName, String tableName) {
    return Joiner.on(SEPARATION_CHAR).join(databaseName, tableName);
  }

  /**
   * Compares this descriptor's database/table identity against the other descriptor's path.
   * The other path must be of the form "db;table"; its components are compared individually.
   * NOTE(review): the exact wording/behavior of the populate* helpers lives in
   * DatasetDescriptorErrorUtils (not visible here); they appear to append a message on mismatch.
   *
   * @param inputDatasetDescriptorConfig the flow-config side descriptor.
   * @return mismatch messages; empty when the database and table names match.
   */
  @Override
  protected ArrayList<String> isPathContaining(DatasetDescriptor inputDatasetDescriptorConfig) {
    ArrayList<String> errors = new ArrayList<>();
    String otherPath = inputDatasetDescriptorConfig.getPath();
    DatasetDescriptorErrorUtils.populateErrorForDatasetDescriptorKey(errors,
        inputDatasetDescriptorConfig.getIsInputDataset(), DatasetDescriptorConfigKeys.PATH_KEY,
        this.getPath(), otherPath, true);
    if (errors.size() != 0) {
      return errors;
    }

    //Extract the dbName and tableName from otherPath
    List<String> parts = Splitter.on(SEPARATION_CHAR).splitToList(otherPath);
    // The other path must split into exactly two components (database and table).
    DatasetDescriptorErrorUtils.populateErrorForDatasetDescriptorKeySize(errors,
        inputDatasetDescriptorConfig.getIsInputDataset(), DatasetDescriptorConfigKeys.PATH_KEY,
        parts, otherPath, SEPARATION_CHAR, 2);
    if (errors.size() != 0) {
      return errors;
    }
    String otherDbName = parts.get(0);
    String otherTableName = parts.get(1);

    DatasetDescriptorErrorUtils.populateErrorForDatasetDescriptorKey(errors,
        inputDatasetDescriptorConfig.getIsInputDataset(), DatasetDescriptorConfigKeys.DATABASE_KEY,
        this.databaseName, otherDbName, false);
    DatasetDescriptorErrorUtils.populateErrorForDatasetDescriptorKey(errors,
        inputDatasetDescriptorConfig.getIsInputDataset(), DatasetDescriptorConfigKeys.TABLE_KEY,
        this.tableName, otherTableName, false);
    return errors;
  }
}
3,874
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/dataset/FSVolumeDatasetDescriptor.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.modules.dataset;

import com.typesafe.config.Config;

import java.io.IOException;
import java.util.ArrayList;

import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.ToString;

import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.service.modules.flowgraph.DatasetDescriptorConfigKeys;
import org.apache.gobblin.service.modules.flowgraph.DatasetDescriptorErrorUtils;
import org.apache.gobblin.util.ConfigUtils;


/**
 * An implementation of {@link FSVolumeDatasetDescriptor} with fs.uri specified.
 */
@Alpha
@ToString(callSuper = true)
@EqualsAndHashCode(callSuper = true)
public class FSVolumeDatasetDescriptor extends FSDatasetDescriptor {
  // Filesystem URI this descriptor is bound to; defaults to the "any" wildcard.
  @Getter
  private final String fsUri;

  /**
   * @param config descriptor configuration; {@code fs.uri} defaults to the "any" wildcard when absent
   * @throws IOException propagated from {@link FSDatasetDescriptor}
   */
  public FSVolumeDatasetDescriptor(Config config) throws IOException {
    super(config);
    this.fsUri = ConfigUtils.getString(config, DatasetDescriptorConfigKeys.FS_URI_KEY,
        DatasetDescriptorConfigKeys.DATASET_DESCRIPTOR_CONFIG_ANY);
    this.isInputDataset = ConfigUtils.getBoolean(config, DatasetDescriptorConfigKeys.IS_INPUT_DATASET, false);
  }

  /**
   * Runs the superclass containment checks and additionally validates that the other
   * descriptor's fs.uri is accepted by this descriptor's fs.uri.
   * @param inputDatasetDescriptorConfig the descriptor being checked for containment
   * @return accumulated error messages; empty when contained
   */
  @Override
  public ArrayList<String> contains(DatasetDescriptor inputDatasetDescriptorConfig) {
    // Fix: evaluate super.contains() exactly once. The original called it twice
    // (once for the emptiness check and once for the return), re-running all
    // parent-level validation work for no benefit.
    ArrayList<String> errors = super.contains(inputDatasetDescriptorConfig);
    if (errors.size() != 0) {
      return errors;
    }
    // Safe cast: super.contains() succeeding implies a compatible descriptor class here,
    // consistent with the original implementation's assumption.
    FSVolumeDatasetDescriptor other = (FSVolumeDatasetDescriptor) inputDatasetDescriptorConfig;
    DatasetDescriptorErrorUtils.populateErrorForDatasetDescriptorKey(errors,
        inputDatasetDescriptorConfig.getIsInputDataset(), DatasetDescriptorConfigKeys.FS_URI_KEY,
        this.getFsUri(), other.getFsUri(), false);
    return errors;
  }
}
3,875
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/dataset/HiveDatasetDescriptor.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.modules.dataset;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.gobblin.service.modules.flowgraph.DatasetDescriptorConfigKeys;
import org.apache.gobblin.service.modules.flowgraph.DatasetDescriptorErrorUtils;
import org.apache.hadoop.fs.GlobPattern;

import com.google.common.base.Splitter;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigValueFactory;

import lombok.EqualsAndHashCode;

import org.apache.gobblin.data.management.copy.hive.HiveCopyEntityHelper;
import org.apache.gobblin.data.management.copy.hive.HiveDatasetFinder;
import org.apache.gobblin.data.management.copy.hive.WhitelistBlacklist;
import org.apache.gobblin.data.management.version.finder.DatePartitionHiveVersionFinder;
import org.apache.gobblin.util.ConfigUtils;


/**
 * As of now, {@link HiveDatasetDescriptor} has same implementation as that of {@link SqlDatasetDescriptor}.
 * Fields {@link HiveDatasetDescriptor#isPartitioned}, {@link HiveDatasetDescriptor#partitionColumn} and
 * {@link HiveDatasetDescriptor#partitionFormat} are used for methods 'equals' and 'hashCode'.
 */
@EqualsAndHashCode (exclude = {"whitelistBlacklist"}, callSuper = true)
public class HiveDatasetDescriptor extends SqlDatasetDescriptor {
  // Config keys understood by this descriptor (nested under the dataset descriptor config).
  static final String IS_PARTITIONED_KEY = "isPartitioned";
  static final String PARTITION_COLUMN = "partition.column";
  static final String PARTITION_FORMAT = "partition.format";
  static final String CONFLICT_POLICY = "conflict.policy";

  private final boolean isPartitioned;
  private final String partitionColumn;
  private final String partitionFormat;
  // Copy-conflict policy derived from partitioning: REPLACE_PARTITIONS vs REPLACE_TABLE.
  private final String conflictPolicy;
  // Whitelist built from db/table names; used to match candidate paths in isPathContaining().
  WhitelistBlacklist whitelistBlacklist;

  /**
   * @param config descriptor configuration; defaults to a partitioned table when
   *        {@link #IS_PARTITIONED_KEY} is absent
   * @throws IOException propagated from {@link SqlDatasetDescriptor} (e.g. invalid platform)
   */
  public HiveDatasetDescriptor(Config config) throws IOException {
    super(config);
    this.isPartitioned = ConfigUtils.getBoolean(config, IS_PARTITIONED_KEY, true);
    if (isPartitioned) {
      partitionColumn = ConfigUtils.getString(config, PARTITION_COLUMN,
          DatePartitionHiveVersionFinder.DEFAULT_PARTITION_KEY_NAME);
      partitionFormat = ConfigUtils.getString(config, PARTITION_FORMAT,
          DatePartitionHiveVersionFinder.DEFAULT_PARTITION_VALUE_DATE_TIME_PATTERN);
      conflictPolicy = HiveCopyEntityHelper.ExistingEntityPolicy.REPLACE_PARTITIONS.name();
    } else {
      // Unpartitioned tables carry empty partition metadata and replace the whole table on conflict.
      partitionColumn = "";
      partitionFormat = "";
      conflictPolicy = HiveCopyEntityHelper.ExistingEntityPolicy.REPLACE_TABLE.name();
    }
    whitelistBlacklist = new WhitelistBlacklist(config.withValue(WhitelistBlacklist.WHITELIST,
        ConfigValueFactory.fromAnyRef(createHiveDatasetWhitelist())));
    // Push the derived values back into the raw config so downstream consumers
    // (e.g. HiveDatasetFinder) see the resolved policy, partition info and whitelist.
    this.setRawConfig(this.getRawConfig()
        .withValue(CONFLICT_POLICY, ConfigValueFactory.fromAnyRef(conflictPolicy))
        .withValue(PARTITION_COLUMN, ConfigValueFactory.fromAnyRef(partitionColumn))
        .withValue(PARTITION_FORMAT, ConfigValueFactory.fromAnyRef(partitionFormat))
        .withValue(HiveDatasetFinder.HIVE_DATASET_PREFIX + "." + WhitelistBlacklist.WHITELIST,
            ConfigValueFactory.fromAnyRef(createHiveDatasetWhitelist())
        ));
    this.isInputDataset = ConfigUtils.getBoolean(config, DatasetDescriptorConfigKeys.IS_INPUT_DATASET, false);
  }

  // Using Hadoop's GlobPattern instead of java.util.regex, because could not find any API in java.util.regex
  // which tells if the string is a plain string or contains special characters.
  // If the db name contains wildcards, whitelist is created as <regex_db>.*
  // Otherwise, whitelist is created as <db>.tables.
  // This is the format which HiveDatasetFinder understands.
  // e.g. db=testDb, table=foo*,bar*, whitelist will be testDb.foo*|bar*
  String createHiveDatasetWhitelist() {
    if (new GlobPattern(this.databaseName).hasWildcard()) {
      return this.databaseName + ".*";
    } else {
      return this.databaseName + "." + this.tableName.replace(',', '|');
    }
  }

  // Only the literal platform name "hive" (case-insensitive) is accepted.
  @Override
  protected boolean isPlatformValid() {
    return "hive".equalsIgnoreCase(getPlatform());
  }

  /**
   * Validates that the other descriptor's path ("db;table1,table2,...") is accepted by this
   * descriptor's whitelist, and that partitioning types agree.
   * NOTE(review): this casts the input to {@link HiveDatasetDescriptor} for the partition check —
   * presumably callers guarantee a matching descriptor class; confirm at the call site.
   * @param inputDatasetDescriptorConfig the descriptor whose path is checked
   * @return accumulated error messages; empty when contained
   */
  @Override
  protected ArrayList<String> isPathContaining(DatasetDescriptor inputDatasetDescriptorConfig) {
    ArrayList<String> errors = new ArrayList<>();
    String otherPath = inputDatasetDescriptorConfig.getPath();
    DatasetDescriptorErrorUtils.populateErrorForDatasetDescriptorKey(errors,
        inputDatasetDescriptorConfig.getIsInputDataset(), DatasetDescriptorConfigKeys.PATH_KEY,
        this.getPath(), otherPath, true);
    if (errors.size() != 0){
      return errors;
    }
    // Partitioned-ness must match between the two descriptors.
    DatasetDescriptorErrorUtils.populateErrorForDatasetDescriptorKeyPartition(errors,
        inputDatasetDescriptorConfig.getIsInputDataset(), DatasetDescriptorConfigKeys.PARTITION_PREFIX,
        DatasetDescriptorConfigKeys.PARTITION_TYPE_KEY, String.valueOf(this.isPartitioned),
        String.valueOf(((HiveDatasetDescriptor) inputDatasetDescriptorConfig).isPartitioned), false);
    //Extract the dbName and tableName from otherPath
    List<String> parts = Splitter.on(SEPARATION_CHAR).splitToList(otherPath);
    // Path must split into exactly 2 components: database and comma-separated table list.
    DatasetDescriptorErrorUtils.populateErrorForDatasetDescriptorKeySize(errors,
        inputDatasetDescriptorConfig.getIsInputDataset(), DatasetDescriptorConfigKeys.PATH_KEY,
        parts, otherPath, SEPARATION_CHAR, 2);
    if (errors.size() != 0) {
      return errors;
    }
    String otherDbName = parts.get(0);
    String otherTableNames = parts.get(1);
    // Database checked alone first (table = null), then each table against the whitelist.
    DatasetDescriptorErrorUtils.populateErrorForDatasetDescriptorKeyBlacklist(errors,
        inputDatasetDescriptorConfig.getIsInputDataset(), "database", DatasetDescriptorConfigKeys.DATABASE_KEY,
        whitelistBlacklist, otherDbName, null);
    List<String> otherTables = Splitter.on(",").splitToList(otherTableNames);
    for (String otherTable : otherTables) {
      DatasetDescriptorErrorUtils.populateErrorForDatasetDescriptorKeyBlacklist(errors,
          inputDatasetDescriptorConfig.getIsInputDataset(), "table", DatasetDescriptorConfigKeys.TABLE_KEY,
          whitelistBlacklist, otherDbName, otherTable);
    }
    return errors;
  }
}
3,876
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/dataset/HttpDatasetDescriptor.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.modules.dataset;

import com.google.common.base.Enums;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigValueFactory;

import java.io.IOException;
import java.util.ArrayList;

import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.ToString;

import org.apache.gobblin.service.modules.flowgraph.DatasetDescriptorConfigKeys;
import org.apache.gobblin.service.modules.flowgraph.DatasetDescriptorErrorUtils;
import org.apache.gobblin.util.ConfigUtils;


/**
 * Describes a dataset behind a HTTP scheme.
 * path refers to the HTTP path of a given dataset.
 * e.g, https://some-api:443/user/123/names, where /user/123/names is the path
 * query string is not supported
 */
@ToString (exclude = {"rawConfig"})
@EqualsAndHashCode (exclude = {"rawConfig"}, callSuper = true)
public class HttpDatasetDescriptor extends BaseDatasetDescriptor implements DatasetDescriptor {
  @Getter
  private final String path;
  @Getter
  private final Config rawConfig;

  /** Platforms recognized by this descriptor; {@code toString()} yields the lowercase scheme. */
  public enum Platform {
    HTTP("http"),
    HTTPS("https");

    private final String label;

    Platform(final String label) {
      this.label = label;
    }

    @Override
    public String toString() {
      return this.label;
    }
  }

  /**
   * Builds an HTTP dataset descriptor from the supplied config.
   * @param config descriptor configuration; the path key carries the full HTTP url
   * @throws IOException when the configured platform is neither http nor https
   */
  public HttpDatasetDescriptor(Config config) throws IOException {
    super(config);
    if (!isPlatformValid()) {
      throw new IOException("Invalid platform specified for HttpDatasetDescriptor: " + getPlatform());
    }
    // refers to the full HTTP url
    this.path = ConfigUtils.getString(config, DatasetDescriptorConfigKeys.PATH_KEY, "");
    this.rawConfig = config
        .withValue(DatasetDescriptorConfigKeys.PATH_KEY, ConfigValueFactory.fromAnyRef(this.path))
        .withFallback(super.getRawConfig());
    this.isInputDataset = ConfigUtils.getBoolean(config, DatasetDescriptorConfigKeys.IS_INPUT_DATASET, false);
  }

  /**
   * @return true when the configured platform maps onto a {@link Platform} constant
   */
  private boolean isPlatformValid() {
    String candidate = getPlatform().toUpperCase();
    return Enums.getIfPresent(Platform.class, candidate).isPresent();
  }

  /**
   * Compares this descriptor's HTTP path against the other descriptor's path for equality,
   * recording any mismatch as an error message.
   *
   * @param inputDatasetDescriptorConfig whose path should be in the format of a HTTP path
   * @return list of mismatch errors; empty means the paths agree
   */
  @Override
  protected ArrayList<String> isPathContaining(DatasetDescriptor inputDatasetDescriptorConfig) {
    ArrayList<String> mismatches = new ArrayList<>();
    // Might be null
    String candidatePath = inputDatasetDescriptorConfig.getPath();
    DatasetDescriptorErrorUtils.populateErrorForDatasetDescriptorKey(mismatches,
        inputDatasetDescriptorConfig.getIsInputDataset(), DatasetDescriptorConfigKeys.PATH_KEY,
        this.getPath(), candidatePath, false);
    return mismatches;
  }
}
3,877
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/dataset/SqlDatasetDescriptor.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.modules.dataset;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import com.google.common.base.Enums;
import com.google.common.base.Joiner;
import com.google.common.base.Splitter;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigValueFactory;

import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.Setter;
import lombok.ToString;

import org.apache.gobblin.service.modules.flowgraph.DatasetDescriptorConfigKeys;
import org.apache.gobblin.service.modules.flowgraph.DatasetDescriptorErrorUtils;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.PathUtils;


/**
 * A dataset descriptor for SQL-style tables, identified by a "db;table" path.
 */
@ToString (exclude = {"rawConfig"})
@EqualsAndHashCode (exclude = {"rawConfig"}, callSuper = true)
public class SqlDatasetDescriptor extends BaseDatasetDescriptor implements DatasetDescriptor {
  // Separator between the database and table components of {@link #path}.
  protected static final String SEPARATION_CHAR = ";";

  protected final String databaseName;
  protected final String tableName;
  // Fully qualified identifier "<database>;<table>"; either part may be a regex.
  @Getter
  private final String path;
  @Getter
  @Setter
  private Config rawConfig;

  /** SQL platforms recognized by this descriptor; {@code toString()} yields the lowercase name. */
  public enum Platform {
    SQLSERVER("sqlserver"),
    MYSQL("mysql"),
    ORACLE("oracle"),
    POSTGRES("postgres"),
    // Bug fix: constant was misspelled TERADARA. isPlatformValid() resolves the enum via
    // Enums.getIfPresent(Platform.class, "teradata".toUpperCase()) == "TERADATA", so the
    // misspelled constant could never match and the teradata platform was always rejected.
    TERADATA("teradata");

    private final String platform;

    Platform(final String platform) {
      this.platform = platform;
    }

    @Override
    public String toString() {
      return this.platform;
    }
  }

  /**
   * @param config descriptor configuration; database and table names default to the ".*" regex
   * @throws IOException when the configured platform is not a recognized SQL platform
   */
  public SqlDatasetDescriptor(Config config) throws IOException {
    super(config);
    if (!isPlatformValid()) {
      throw new IOException("Invalid platform specified for SqlDatasetDescriptor: " + getPlatform());
    }
    this.databaseName = ConfigUtils.getString(config, DatasetDescriptorConfigKeys.DATABASE_KEY, ".*");
    this.tableName = ConfigUtils.getString(config, DatasetDescriptorConfigKeys.TABLE_KEY, ".*");
    this.path = fullyQualifiedTableName(this.databaseName, this.tableName);
    this.rawConfig = config
        .withValue(DatasetDescriptorConfigKeys.PATH_KEY, ConfigValueFactory.fromAnyRef(this.path))
        .withFallback(super.getRawConfig());
    this.isInputDataset = ConfigUtils.getBoolean(config, DatasetDescriptorConfigKeys.IS_INPUT_DATASET, false);
  }

  // Joins database and table with {@link #SEPARATION_CHAR}, e.g. "db;table".
  private String fullyQualifiedTableName(String databaseName, String tableName) {
    return Joiner.on(SEPARATION_CHAR).join(databaseName, tableName);
  }

  // Valid iff the configured platform (upper-cased) names a {@link Platform} constant.
  protected boolean isPlatformValid() {
    return Enums.getIfPresent(Platform.class, getPlatform().toUpperCase()).isPresent();
  }

  /**
   * Check if the dbName and tableName specified in {@param other}'s path are accepted by the set of dbName.tableName
   * combinations defined by the current {@link SqlDatasetDescriptor}. For example, let:
   * this.path = "test_.*;test_table_.*". Then:
   * isPathContaining("test_db1;test_table_1") = true
   * isPathContaining("testdb1;test_table_2") = false
   *
   * NOTE: otherPath cannot be a globPattern. So:
   * isPathContaining("test_db.*;test_table_*") = false
   *
   * @param inputDatasetDescriptorConfig whose path should be in the format of dbName.tableName
   * @return accumulated error messages; empty when the path is contained
   */
  @Override
  protected ArrayList<String> isPathContaining(DatasetDescriptor inputDatasetDescriptorConfig) {
    ArrayList<String> errors = new ArrayList<>();
    String otherPath = inputDatasetDescriptorConfig.getPath();
    DatasetDescriptorErrorUtils.populateErrorForDatasetDescriptorKey(errors,
        inputDatasetDescriptorConfig.getIsInputDataset(), DatasetDescriptorConfigKeys.PATH_KEY,
        this.getPath(), otherPath, true);
    if (errors.size() != 0) {
      return errors;
    }
    // Glob patterns in the candidate path are not matchable here; treat as accepted (no errors).
    if (PathUtils.GLOB_TOKENS.matcher(otherPath).find()) {
      return errors;
    }
    //Extract the dbName and tableName from otherPath
    List<String> parts = Splitter.on(SEPARATION_CHAR).splitToList(otherPath);
    DatasetDescriptorErrorUtils.populateErrorForDatasetDescriptorKeySize(errors,
        inputDatasetDescriptorConfig.getIsInputDataset(), DatasetDescriptorConfigKeys.PATH_KEY,
        parts, otherPath, SEPARATION_CHAR, 2);
    if (errors.size() != 0) {
      return errors;
    }
    String otherDbName = parts.get(0);
    String otherTableName = parts.get(1);
    // This descriptor's db/table act as regexes matched against the candidate's concrete names.
    DatasetDescriptorErrorUtils.populateErrorForDatasetDescriptorKeyRegex(errors,
        inputDatasetDescriptorConfig.getIsInputDataset(), DatasetDescriptorConfigKeys.DATABASE_KEY,
        this.databaseName, otherDbName);
    DatasetDescriptorErrorUtils.populateErrorForDatasetDescriptorKeyRegex(errors,
        inputDatasetDescriptorConfig.getIsInputDataset(), DatasetDescriptorConfigKeys.TABLE_KEY,
        this.tableName, otherTableName);
    return errors;
  }
}
3,878
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/template/FlowTemplate.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.modules.template;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;

import org.apache.commons.lang3.tuple.Pair;

import com.typesafe.config.Config;

import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.runtime.api.JobTemplate;
import org.apache.gobblin.runtime.api.Spec;
import org.apache.gobblin.runtime.api.SpecNotFoundException;
import org.apache.gobblin.service.modules.dataset.DatasetDescriptor;


/**
 * An interface primarily for representing a flow of {@link JobTemplate}s. It also has
 * method for retrieving required configs for every {@link JobTemplate} in the flow.
 */
@Alpha
public interface FlowTemplate extends Spec {

  /**
   * @return the {@link Collection} of {@link JobTemplate}s that belong to this {@link FlowTemplate}.
   */
  List<JobTemplate> getJobTemplates();

  /**
   *
   * @return all configuration inside pre-written template.
   */
  Config getRawTemplateConfig();

  /**
   * @param userConfig a list of user customized attributes.
   * @param resolvable if true, only return descriptors that resolve the {@link FlowTemplate}
   * @return list of input/output {@link DatasetDescriptor}s corresponding to the provided userConfig.
   */
  List<Pair<DatasetDescriptor, DatasetDescriptor>> getDatasetDescriptors(Config userConfig, boolean resolvable)
      throws IOException, ReflectiveOperationException, SpecNotFoundException, JobTemplate.TemplateException;

  /**
   * Try to resolve the {@link FlowTemplate} using the provided {@link Config} object. A {@link FlowTemplate}
   * is resolvable only if each of the {@link JobTemplate}s in the flow is resolvable. Throws an exception if the flow is
   * not resolvable.
   * @param userConfig User supplied Config
   * @param inputDescriptor input {@link DatasetDescriptor}
   * @param outputDescriptor output {@link DatasetDescriptor}
   * @return map from job template name to the list of resolution errors for that template;
   *         an empty map indicates the flow resolved successfully
   */
  HashMap<String, ArrayList<String>> tryResolving(Config userConfig, DatasetDescriptor inputDescriptor,
      DatasetDescriptor outputDescriptor);

  /**
   * Resolves the {@link JobTemplate}s underlying this {@link FlowTemplate} and returns a {@link List} of resolved
   * job {@link Config}s.
   * @param userConfig User supplied Config
   * @param inputDescriptor input {@link DatasetDescriptor}
   * @param outputDescriptor output {@link DatasetDescriptor}
   * @return a list of resolved job {@link Config}s.
   */
  List<Config> getResolvedJobConfigs(Config userConfig, DatasetDescriptor inputDescriptor,
      DatasetDescriptor outputDescriptor) throws SpecNotFoundException, JobTemplate.TemplateException;
}
3,879
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/template/StaticFlowTemplate.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.modules.template;

import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;

import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.commons.lang3.tuple.Pair;

import com.google.common.base.Joiner;
import com.google.common.collect.Lists;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigException;
import com.typesafe.config.ConfigResolveOptions;
import com.typesafe.config.ConfigValueFactory;

import lombok.Getter;
import lombok.extern.slf4j.Slf4j;

import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.runtime.api.JobSpec;
import org.apache.gobblin.runtime.api.JobTemplate;
import org.apache.gobblin.runtime.api.SpecNotFoundException;
import org.apache.gobblin.runtime.job_spec.JobSpecResolver;
import org.apache.gobblin.runtime.job_spec.ResolvedJobSpec;
import org.apache.gobblin.service.modules.dataset.DatasetDescriptor;
import org.apache.gobblin.service.modules.dataset.DatasetDescriptorUtils;
import org.apache.gobblin.service.modules.flowgraph.DatasetDescriptorConfigKeys;
import org.apache.gobblin.service.modules.template_catalog.FlowCatalogWithTemplates;


/**
 * A {@link FlowTemplate} using a static {@link Config} as the raw configuration for the template.
 */
@Alpha
@Slf4j
public class StaticFlowTemplate implements FlowTemplate {
  private static final long serialVersionUID = 84641624233978L;

  // Regex used to split a ConfigException message and extract the unresolved-variable portion
  // (the text after the "Reader:" segment) when a job template fails to resolve.
  private static final String VARIABLE_SUBSTITUTION_PATTERN = ":[\\s]*Reader:[a-zA-Z\\d\\s]*:[\\s]";
  // Marker in a job template URI; the substring after it is used as the job's display name.
  private static final String JOB_TEMPLATE_PATTERN = "/jobs/";

  @Getter
  private URI uri;
  @Getter
  private String version;
  @Getter
  private String description;
  @Getter
  private transient FlowCatalogWithTemplates catalog;
  @Getter
  private List<JobTemplate> jobTemplates;

  private transient Config rawConfig;
  private final transient JobSpecResolver jobSpecResolver;

  /**
   * @param flowTemplateDirUri URI of the flow template directory; job templates are loaded from it via the catalog
   * @param version template version
   * @param description human-readable template description
   * @param config raw template config
   * @param catalog catalog used to locate the job templates of this flow
   */
  public StaticFlowTemplate(URI flowTemplateDirUri, String version, String description, Config config,
      FlowCatalogWithTemplates catalog)
      throws IOException, SpecNotFoundException, JobTemplate.TemplateException, URISyntaxException {
    this.uri = flowTemplateDirUri;
    this.version = version;
    this.description = description;
    this.rawConfig = config;
    this.catalog = catalog;
    this.jobTemplates = this.catalog.getJobTemplatesForFlow(flowTemplateDirUri);
    this.jobSpecResolver = JobSpecResolver.builder(config).build();
  }

  //Constructor for testing purposes
  public StaticFlowTemplate(URI uri, String version, String description, Config config,
      FlowCatalogWithTemplates catalog, List<JobTemplate> jobTemplates) {
    this.uri = uri;
    this.version = version;
    this.description = description;
    this.rawConfig = config;
    this.catalog = catalog;
    this.jobTemplates = jobTemplates;
    try {
      this.jobSpecResolver = JobSpecResolver.builder(config).build();
    } catch (IOException ioe) {
      // Field is final; surface builder failure as unchecked in the test-only constructor.
      throw new RuntimeException(ioe);
    }
  }

  /**
   * Generate the input/output dataset descriptors for the {@link FlowTemplate}.
   * Descriptor pairs are read from indexed config sections (input.0 / output.0, input.1 / output.1, ...)
   * until the next input index is absent.
   * @param userConfig User supplied Config
   * @param resolvable Whether to return only resolvable dataset descriptors
   * @return a List of Input/Output DatasetDescriptors that correspond to this {@link FlowTemplate}. If resolvable is true,
   * only return descriptors that fully resolve it.
   * @throws IOException if no input/output descriptor section is present at all
   */
  @Override
  public List<Pair<DatasetDescriptor, DatasetDescriptor>> getDatasetDescriptors(Config userConfig, boolean resolvable)
      throws IOException {
    // Allow unresolved substitutions here; full resolution is checked later per job template.
    Config config = this.getResolvedFlowConfig(userConfig)
        .resolve(ConfigResolveOptions.defaults().setAllowUnresolved(true));
    if (!config.hasPath(DatasetDescriptorConfigKeys.FLOW_EDGE_INPUT_DATASET_DESCRIPTOR_PREFIX)
        || !config.hasPath(DatasetDescriptorConfigKeys.FLOW_EDGE_OUTPUT_DATASET_DESCRIPTOR_PREFIX)) {
      throw new IOException("Flow template must specify at least one input/output dataset descriptor");
    }
    int i = 0;
    String inputPrefix = Joiner.on(".").join(DatasetDescriptorConfigKeys.FLOW_EDGE_INPUT_DATASET_DESCRIPTOR_PREFIX,
        Integer.toString(i));
    List<Pair<DatasetDescriptor, DatasetDescriptor>> result = Lists.newArrayList();
    while (config.hasPath(inputPrefix)) {
      try {
        Config inputDescriptorConfig = config.getConfig(inputPrefix);
        DatasetDescriptor inputDescriptor = DatasetDescriptorUtils.constructDatasetDescriptor(inputDescriptorConfig);
        String outputPrefix = Joiner.on(".")
            .join(DatasetDescriptorConfigKeys.FLOW_EDGE_OUTPUT_DATASET_DESCRIPTOR_PREFIX, Integer.toString(i));
        Config outputDescriptorConfig = config.getConfig(outputPrefix);
        DatasetDescriptor outputDescriptor = DatasetDescriptorUtils.constructDatasetDescriptor(outputDescriptorConfig);
        if (resolvable) {
          // Keep the pair only when every job template resolves with it (no errors reported).
          HashMap<String, ArrayList<String>> errors = tryResolving(userConfig, inputDescriptor, outputDescriptor);
          if (errors.size() == 0) {
            result.add(ImmutablePair.of(inputDescriptor, outputDescriptor));
          }
        } else {
          result.add(ImmutablePair.of(inputDescriptor, outputDescriptor));
        }
      } catch (ReflectiveOperationException e) {
        //Cannot instantiate I/O dataset descriptor due to missing config; skip and try the next one.
      }
      inputPrefix = Joiner.on(".").join(DatasetDescriptorConfigKeys.FLOW_EDGE_INPUT_DATASET_DESCRIPTOR_PREFIX,
          Integer.toString(++i));
    }
    return result;
  }

  @Override
  public Config getRawTemplateConfig() {
    return this.rawConfig;
  }

  @Override
  public List<JobTemplate> getJobTemplates() {
    return this.jobTemplates;
  }

  // User-supplied config wins over the template's raw config.
  private Config getResolvedFlowConfig(Config userConfig) {
    return userConfig.withFallback(this.rawConfig);
  }

  /**
   * Try to resolve the {@link FlowTemplate} using the provided {@link Config} object. A {@link FlowTemplate}
   * is resolvable only if each of the {@link JobTemplate}s in the flow is resolvable. Throws an exception if the flow is
   * not resolvable.
   * @param userConfig User supplied Config
   * @return errors through attempting to resolve job templates
   */
  @Override
  public HashMap<String, ArrayList<String>> tryResolving(Config userConfig, DatasetDescriptor inputDescriptor,
      DatasetDescriptor outputDescriptor) {
    Config inputDescriptorConfig =
        inputDescriptor.getRawConfig().atPath(DatasetDescriptorConfigKeys.FLOW_EDGE_INPUT_DATASET_DESCRIPTOR_PREFIX);
    Config outputDescriptorConfig =
        outputDescriptor.getRawConfig().atPath(DatasetDescriptorConfigKeys.FLOW_EDGE_OUTPUT_DATASET_DESCRIPTOR_PREFIX);
    userConfig = userConfig.withFallback(inputDescriptorConfig).withFallback(outputDescriptorConfig);
    JobSpec.Builder jobSpecBuilder = JobSpec.builder().withConfig(userConfig);
    HashMap<String, ArrayList<String>> resolutionErrors = new HashMap<>();
    for (JobTemplate template: this.jobTemplates) {
      ArrayList<String> errors = new ArrayList<>();
      try {
        this.jobSpecResolver.resolveJobSpec(jobSpecBuilder.withTemplate(template).build());
      } catch (ConfigException e) {
        // Extract the unresolved-variable portion of the message.
        // NOTE(review): assumes the exception message matches VARIABLE_SUBSTITUTION_PATTERN;
        // split(...)[1] would throw ArrayIndexOutOfBoundsException otherwise — confirm message format.
        errors.add(e.toString().split(VARIABLE_SUBSTITUTION_PATTERN)[1]);
      } catch (Exception e) {
        // Non-config failures are logged but deliberately not recorded as resolution errors.
        log.error("Encountered exception during resolving job templates", e);
      }
      // Only insert into dictionary if errors exist
      if (errors.size() != 0) {
        String jobName = template.getUri().toString().split(JOB_TEMPLATE_PATTERN)[1];
        resolutionErrors.put(jobName, errors);
      }
    }
    return resolutionErrors;
  }

  /**
   * Resolves every job template against the user config merged with the input/output descriptor
   * configs, tagging each resolved job config with its template path.
   */
  @Override
  public List<Config> getResolvedJobConfigs(Config userConfig, DatasetDescriptor inputDescriptor,
      DatasetDescriptor outputDescriptor)
      throws SpecNotFoundException, JobTemplate.TemplateException {
    Config inputDescriptorConfig =
        inputDescriptor.getRawConfig().atPath(DatasetDescriptorConfigKeys.FLOW_EDGE_INPUT_DATASET_DESCRIPTOR_PREFIX);
    Config outputDescriptorConfig =
        outputDescriptor.getRawConfig().atPath(DatasetDescriptorConfigKeys.FLOW_EDGE_OUTPUT_DATASET_DESCRIPTOR_PREFIX);
    userConfig = userConfig.withFallback(inputDescriptorConfig).withFallback(outputDescriptorConfig);
    List<Config> resolvedJobConfigs = new ArrayList<>(getJobTemplates().size());
    JobSpec.Builder jobSpecBuilder = JobSpec.builder().withConfig(userConfig);
    for (JobTemplate jobTemplate: getJobTemplates()) {
      ResolvedJobSpec resolvedJobSpec =
          this.jobSpecResolver.resolveJobSpec(jobSpecBuilder.withTemplate(jobTemplate).build());
      // Record which template produced this job config.
      Config resolvedJobConfig = resolvedJobSpec.getConfig().withValue(
          ConfigurationKeys.JOB_TEMPLATE_PATH, ConfigValueFactory.fromAnyRef(jobTemplate.getUri().toString()));
      resolvedJobConfigs.add(resolvedJobConfig);
    }
    return resolvedJobConfigs;
  }
}
3,880
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/template/HOCONInputStreamFlowTemplate.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.service.modules.template; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.net.URI; import java.net.URISyntaxException; import com.google.common.base.Charsets; import com.typesafe.config.Config; import com.typesafe.config.ConfigFactory; import com.typesafe.config.ConfigResolveOptions; import org.apache.gobblin.configuration.ConfigurationKeys; import org.apache.gobblin.runtime.api.JobTemplate; import org.apache.gobblin.runtime.api.SpecNotFoundException; import org.apache.gobblin.service.modules.template_catalog.FlowCatalogWithTemplates; import org.apache.gobblin.util.ConfigUtils; /** * A {@link FlowTemplate} that loads a HOCON file as a {@link StaticFlowTemplate}. 
*/ public class HOCONInputStreamFlowTemplate extends StaticFlowTemplate { private static final String VERSION_KEY = "gobblin.flow.template.version"; private static final String DEFAULT_VERSION = "1"; public HOCONInputStreamFlowTemplate(InputStream inputStream, URI flowTemplateDirUri, FlowCatalogWithTemplates catalog) throws SpecNotFoundException, IOException, JobTemplate.TemplateException, URISyntaxException { this(ConfigFactory.parseReader(new InputStreamReader(inputStream, Charsets.UTF_8)).resolve( ConfigResolveOptions.defaults().setAllowUnresolved(true)), flowTemplateDirUri, catalog); } public HOCONInputStreamFlowTemplate(Config config, URI flowTemplateDirUri, FlowCatalogWithTemplates catalog) throws SpecNotFoundException, IOException, JobTemplate.TemplateException, URISyntaxException { super(flowTemplateDirUri, ConfigUtils.getString(config, VERSION_KEY, DEFAULT_VERSION), config.hasPath(ConfigurationKeys.FLOW_DESCRIPTION_KEY) ? config .getString(ConfigurationKeys.FLOW_DESCRIPTION_KEY) : "", config, catalog); } }
3,881
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/spec/JobExecutionPlanListSerializer.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.service.modules.spec; import java.lang.reflect.Type; import java.util.List; import java.util.concurrent.ExecutionException; import com.google.gson.JsonArray; import com.google.gson.JsonElement; import com.google.gson.JsonObject; import com.google.gson.JsonSerializationContext; import com.google.gson.JsonSerializer; import com.typesafe.config.Config; import com.typesafe.config.ConfigRenderOptions; import lombok.extern.slf4j.Slf4j; import org.apache.gobblin.runtime.api.JobSpec; @Slf4j public class JobExecutionPlanListSerializer implements JsonSerializer<List<JobExecutionPlan>> { /** * Gson invokes this call-back method during serialization when it encounters a field of the * specified type. * * <p>In the implementation of this call-back method, you should consider invoking * {@link JsonSerializationContext#serialize(Object, Type)} method to create JsonElements for any * non-trivial field of the {@code src} object. However, you should never invoke it on the * {@code src} object itself since that will cause an infinite loop (Gson will call your * call-back method again).</p> * * @param src the object that needs to be converted to Json. 
* @param typeOfSrc the actual type (fully genericized version) of the source object. * @param context * @return a JsonElement corresponding to the specified object. */ @Override public JsonElement serialize(List<JobExecutionPlan> src, Type typeOfSrc, JsonSerializationContext context) { JsonArray jsonArray = new JsonArray(); for (JobExecutionPlan jobExecutionPlan: src) { JsonObject jobExecutionPlanJson = new JsonObject(); JsonObject jobSpecJson = new JsonObject(); JobSpec jobSpec = jobExecutionPlan.getJobSpec(); String uri = (jobSpec.getUri() != null) ? jobSpec.getUri().toString() : null; jobSpecJson.addProperty(SerializationConstants.JOB_SPEC_URI_KEY, uri); jobSpecJson.addProperty(SerializationConstants.JOB_SPEC_VERSION_KEY, jobSpec.getVersion()); jobSpecJson.addProperty(SerializationConstants.JOB_SPEC_DESCRIPTION_KEY, jobSpec.getDescription()); String jobSpecTemplateURI = (jobSpec.getTemplateURI().isPresent()) ? jobSpec.getTemplateURI().get().toString() : null; jobSpecJson.addProperty(SerializationConstants.JOB_SPEC_TEMPLATE_URI_KEY, jobSpecTemplateURI); jobSpecJson.addProperty(SerializationConstants.JOB_SPEC_CONFIG_KEY, jobSpec.getConfig().root().render(ConfigRenderOptions.concise())); jobExecutionPlanJson.add(SerializationConstants.JOB_SPEC_KEY, jobSpecJson); Config specExecutorConfig; try { specExecutorConfig = jobExecutionPlan.getSpecExecutor().getConfig().get(); } catch (InterruptedException | ExecutionException e) { log.error("Error serializing JobExecutionPlan {}", jobExecutionPlan.toString()); throw new RuntimeException(e); } JsonObject specExecutorJson = new JsonObject(); specExecutorJson.addProperty(SerializationConstants.SPEC_EXECUTOR_CONFIG_KEY, specExecutorConfig.root().render(ConfigRenderOptions.concise())); specExecutorJson.addProperty(SerializationConstants.SPEC_EXECUTOR_CLASS_KEY, jobExecutionPlan.getSpecExecutor().getClass().getName()); jobExecutionPlanJson.add(SerializationConstants.SPEC_EXECUTOR_KEY, specExecutorJson); String executionStatus = 
jobExecutionPlan.getExecutionStatus().name(); jobExecutionPlanJson.addProperty(SerializationConstants.EXECUTION_STATUS_KEY, executionStatus); jobExecutionPlanJson.addProperty(SerializationConstants.FLOW_START_TIME_KEY, jobExecutionPlan.getFlowStartTime()); try { String jobExecutionFuture = jobExecutionPlan.getSpecExecutor().getProducer().get() .serializeAddSpecResponse(jobExecutionPlan.getJobFuture().orNull()); jobExecutionPlanJson.addProperty(SerializationConstants.JOB_EXECUTION_FUTURE, jobExecutionFuture); } catch (InterruptedException | ExecutionException e) { log.warn("Error during serialization of JobExecutionFuture."); throw new RuntimeException(e); } jsonArray.add(jobExecutionPlanJson); } return jsonArray; } }
3,882
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/spec/SerializationConstants.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.service.modules.spec; public class SerializationConstants { public static final String JOB_SPEC_URI_KEY= "uri"; public static final String JOB_SPEC_VERSION_KEY = "version"; public static final String JOB_SPEC_DESCRIPTION_KEY = "description"; public static final String JOB_SPEC_TEMPLATE_URI_KEY = "templateURI"; public static final String JOB_SPEC_CONFIG_KEY = "config"; public static final String JOB_SPEC_KEY = "jobSpec"; public static final String SPEC_EXECUTOR_CONFIG_KEY = "config"; public static final String SPEC_EXECUTOR_CLASS_KEY = "class"; public static final String SPEC_EXECUTOR_KEY = "specExecutor"; public static final String SPEC_EXECUTOR_URI_KEY = "uri"; public static final String EXECUTION_STATUS_KEY = "executionStatus"; public static final String JOB_EXECUTION_FUTURE = "jobExecutionFuture"; public static final String FLOW_START_TIME_KEY = "flowStartTime"; }
3,883
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/spec/JobExecutionPlanListDeserializer.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.service.modules.spec; import java.lang.reflect.Type; import java.net.URI; import java.net.URISyntaxException; import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; import com.google.common.base.Optional; import com.google.gson.JsonArray; import com.google.gson.JsonDeserializationContext; import com.google.gson.JsonDeserializer; import com.google.gson.JsonElement; import com.google.gson.JsonObject; import com.google.gson.JsonParseException; import com.typesafe.config.Config; import com.typesafe.config.ConfigFactory; import lombok.extern.slf4j.Slf4j; import org.apache.gobblin.runtime.api.JobSpec; import org.apache.gobblin.runtime.api.SpecExecutor; import org.apache.gobblin.runtime.api.TopologySpec; import org.apache.gobblin.service.ExecutionStatus; @Slf4j public class JobExecutionPlanListDeserializer implements JsonDeserializer<List<JobExecutionPlan>> { private final Map<URI,TopologySpec> topologySpecMap; public JobExecutionPlanListDeserializer(Map<URI, TopologySpec> topologySpecMap) { this.topologySpecMap = topologySpecMap; } /** * Gson invokes this call-back method during 
deserialization when it encounters a field of the * specified type. * <p>In the implementation of this call-back method, you should consider invoking * {@link JsonDeserializationContext#deserialize(JsonElement, Type)} method to create objects * for any non-trivial field of the returned object. However, you should never invoke it on the * the same type passing {@code json} since that will cause an infinite loop (Gson will call your * call-back method again). * * @param json The Json data being deserialized * @param typeOfT The type of the Object to deserialize to * @param context * @return a deserialized object of the specified type typeOfT which is a subclass of {@code T} * @throws JsonParseException if json is not in the expected format of {@code typeofT} */ @Override public List<JobExecutionPlan> deserialize(JsonElement json, Type typeOfT, JsonDeserializationContext context) throws JsonParseException { List<JobExecutionPlan> jobExecutionPlans = new ArrayList<>(); JsonArray jsonArray = json.getAsJsonArray(); for (JsonElement jsonElement: jsonArray) { JsonObject serializedJobExecutionPlan = (JsonObject) jsonElement; JsonObject jobSpecJson = (JsonObject) serializedJobExecutionPlan.get(SerializationConstants.JOB_SPEC_KEY); JsonObject specExecutorJson = (JsonObject) serializedJobExecutionPlan.get(SerializationConstants.SPEC_EXECUTOR_KEY); ExecutionStatus executionStatus = ExecutionStatus.valueOf(serializedJobExecutionPlan. 
get(SerializationConstants.EXECUTION_STATUS_KEY).getAsString()); String uri = jobSpecJson.get(SerializationConstants.JOB_SPEC_URI_KEY).getAsString(); String version = jobSpecJson.get(SerializationConstants.JOB_SPEC_VERSION_KEY).getAsString(); String description = jobSpecJson.get(SerializationConstants.JOB_SPEC_DESCRIPTION_KEY).getAsString(); String templateURI = jobSpecJson.get(SerializationConstants.JOB_SPEC_TEMPLATE_URI_KEY).getAsString(); String config = jobSpecJson.get(SerializationConstants.JOB_SPEC_CONFIG_KEY).getAsString(); Config jobConfig = ConfigFactory.parseString(config); JobSpec jobSpec; try { JobSpec.Builder builder = (uri == null) ? JobSpec.builder() : JobSpec.builder(uri); builder = (templateURI == null) ? builder : builder.withTemplate(new URI(templateURI)); builder = (version == null) ? builder : builder.withVersion(version); builder = (description == null) ? builder : builder.withDescription(description); jobSpec = builder.withConfig(jobConfig).build(); } catch (URISyntaxException e) { log.error("Error deserializing JobSpec {}", config); throw new RuntimeException(e); } Config specExecutorConfig = ConfigFactory.parseString(specExecutorJson.get(SerializationConstants.SPEC_EXECUTOR_CONFIG_KEY).getAsString()); SpecExecutor specExecutor; try { URI specExecutorUri = new URI(specExecutorConfig.getString(SerializationConstants.SPEC_EXECUTOR_URI_KEY)); specExecutor = this.topologySpecMap.get(specExecutorUri).getSpecExecutor(); } catch (Exception e) { log.error("Error deserializing specExecutor {}", specExecutorConfig); throw new RuntimeException(e); } JobExecutionPlan jobExecutionPlan = new JobExecutionPlan(jobSpec, specExecutor); jobExecutionPlan.setExecutionStatus(executionStatus); JsonElement flowStartTime = serializedJobExecutionPlan.get(SerializationConstants.FLOW_START_TIME_KEY); if (flowStartTime != null) { jobExecutionPlan.setFlowStartTime(flowStartTime.getAsLong()); } try { String jobExecutionFuture = 
serializedJobExecutionPlan.get(SerializationConstants.JOB_EXECUTION_FUTURE).getAsString(); Future future = specExecutor.getProducer().get().deserializeAddSpecResponse(jobExecutionFuture); jobExecutionPlan.setJobFuture(Optional.fromNullable(future)); } catch (ExecutionException | InterruptedException e) { log.warn("Error during deserialization of JobExecutionFuture."); throw new RuntimeException(e); } jobExecutionPlans.add(jobExecutionPlan); } return jobExecutionPlans; } }
3,884
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/spec/JobExecutionPlan.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.modules.spec;

import java.net.URI;
import java.net.URISyntaxException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;

import org.apache.commons.lang3.StringUtils;

import com.google.common.base.Joiner;
import com.google.common.base.Optional;
import com.google.common.base.Strings;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigRenderOptions;
import com.typesafe.config.ConfigValueFactory;

import lombok.Data;
import lombok.EqualsAndHashCode;

import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.DynamicConfigGenerator;
import org.apache.gobblin.kafka.schemareg.KafkaSchemaRegistryConfigurationKeys;
import org.apache.gobblin.metrics.kafka.KafkaSchemaRegistry;
import org.apache.gobblin.runtime.DynamicConfigGeneratorFactory;
import org.apache.gobblin.runtime.api.FlowSpec;
import org.apache.gobblin.runtime.api.JobSpec;
import org.apache.gobblin.runtime.api.SpecExecutor;
import org.apache.gobblin.service.ExecutionStatus;
import org.apache.gobblin.service.modules.flowgraph.DatasetDescriptorConfigKeys;
import org.apache.gobblin.service.modules.flowgraph.FlowGraphConfigurationKeys;
import org.apache.gobblin.service.modules.orchestration.DagManager;
import org.apache.gobblin.util.ConfigUtils;

import static org.apache.gobblin.runtime.AbstractJobLauncher.GOBBLIN_JOB_TEMPLATE_KEY;


/**
 * A data class that encapsulates information for executing a job. This includes a {@link JobSpec} and a {@link SpecExecutor}
 * where the {@link JobSpec} will be executed.
 */
@Data
// Mutable execution-tracking state is excluded so plan identity is defined by the job itself.
@EqualsAndHashCode(exclude = {"executionStatus", "currentAttempts", "jobFuture", "flowStartTime"})
public class JobExecutionPlan {
  public static final String JOB_MAX_ATTEMPTS = "job.maxAttempts";
  public static final String JOB_PROPS_KEY = "job.props";
  private static final int MAX_JOB_NAME_LENGTH = 128;

  private final JobSpec jobSpec;
  private final SpecExecutor specExecutor;
  // Mutable tracking state for the lifecycle of this plan; starts PENDING.
  private ExecutionStatus executionStatus = ExecutionStatus.PENDING;
  private final int maxAttempts;
  private int currentGeneration = 1;
  private int currentAttempts = 0;
  // Handle to the in-flight execution, when one has been launched.
  private Optional<Future> jobFuture = Optional.absent();
  private long flowStartTime = 0L;

  /** Builds {@link JobExecutionPlan}s from a {@link FlowSpec} plus a resolved job config. */
  public static class Factory {
    public static final String JOB_NAME_COMPONENT_SEPARATION_CHAR = "_";

    public JobExecutionPlan createPlan(FlowSpec flowSpec, Config jobConfig, SpecExecutor specExecutor, Long flowExecutionId, Config sysConfig)
        throws URISyntaxException {
      try {
        JobSpec jobSpec = buildJobSpec(flowSpec, jobConfig, flowExecutionId, sysConfig, specExecutor.getConfig().get());
        return new JobExecutionPlan(jobSpec, specExecutor);
      } catch (InterruptedException | ExecutionException e) {
        // Failure to obtain the executor config is unrecoverable for plan construction.
        throw new RuntimeException(e);
      }
    }

    /**
     * Given a resolved job config, this helper method converts the config to a {@link JobSpec}.
     * @param flowSpec input FlowSpec.
     * @param jobConfig resolved job config.
     * @param flowExecutionId flow execution id for the flow
     * @param sysConfig gobblin service level configs
     * @param specExecutorConfig configs for the {@link SpecExecutor} of this {@link JobExecutionPlan}
     * @return a {@link JobSpec} corresponding to the resolved job config.
     * @throws URISyntaxException if creation of {@link JobSpec} URI fails
     */
    private static JobSpec buildJobSpec(FlowSpec flowSpec, Config jobConfig, Long flowExecutionId, Config sysConfig, Config specExecutorConfig)
        throws URISyntaxException {
      Config flowConfig = flowSpec.getConfig();

      String flowName = ConfigUtils.getString(flowConfig, ConfigurationKeys.FLOW_NAME_KEY, "");
      String flowGroup = ConfigUtils.getString(flowConfig, ConfigurationKeys.FLOW_GROUP_KEY, "");
      String flowFailureOption = ConfigUtils.getString(flowConfig, ConfigurationKeys.FLOW_FAILURE_OPTION, DagManager.DEFAULT_FLOW_FAILURE_OPTION);
      String flowInputPath = ConfigUtils.getString(flowConfig,
          DatasetDescriptorConfigKeys.FLOW_INPUT_DATASET_DESCRIPTOR_PREFIX + "." + DatasetDescriptorConfigKeys.PATH_KEY, "");
      Long flowModTime = ConfigUtils.getLong(flowConfig, FlowSpec.MODIFICATION_TIME_KEY, 0L);

      String jobName = ConfigUtils.getString(jobConfig, ConfigurationKeys.JOB_NAME_KEY, "");
      String edgeId = ConfigUtils.getString(jobConfig, FlowGraphConfigurationKeys.FLOW_EDGE_ID_KEY, "");

      // Modify the job name to include the flow group, flow name, edge id, and a random string to avoid collisions since
      // job names are assumed to be unique within a dag. The input path hash disambiguates jobs
      // for different datasets flowing through the same edge.
      int hash = flowInputPath.hashCode();
      jobName = Joiner.on(JOB_NAME_COMPONENT_SEPARATION_CHAR).join(flowGroup, flowName, jobName, edgeId, hash);
      // jobNames are commonly used as a directory name, which is limited to 255 characters (account for potential prefixes added/file name lengths)
      if (jobName.length() >= MAX_JOB_NAME_LENGTH) {
        // shorten job length but make it uniquely identifiable in multihop flows or concurrent jobs, max length 139 characters (128 flow group + hash)
        jobName = Joiner.on(JOB_NAME_COMPONENT_SEPARATION_CHAR).join(flowGroup, jobName.hashCode());
      }
      JobSpec.Builder jobSpecBuilder = JobSpec.builder(jobSpecURIGenerator(flowGroup, jobName, flowSpec)).withConfig(jobConfig)
          .withDescription(flowSpec.getDescription()).withVersion(flowSpec.getVersion());

      //Get job template uri
      URI jobTemplateUri = new URI(jobConfig.getString(ConfigurationKeys.JOB_TEMPLATE_PATH));
      JobSpec jobSpec = jobSpecBuilder.withTemplate(jobTemplateUri).build();

      jobSpec.setConfig(jobSpec.getConfig()
          //Add flowGroup to job spec
          .withValue(ConfigurationKeys.FLOW_GROUP_KEY, ConfigValueFactory.fromAnyRef(flowGroup))
          //Add flowName to job spec
          .withValue(ConfigurationKeys.FLOW_NAME_KEY, ConfigValueFactory.fromAnyRef(flowName))
          //Add flow execution id
          .withValue(ConfigurationKeys.FLOW_EXECUTION_ID_KEY, ConfigValueFactory.fromAnyRef(flowExecutionId))
          // Remove schedule due to namespace conflict with azkaban schedule key, but still keep track if flow is scheduled or not
          .withValue(ConfigurationKeys.GOBBLIN_OUTPUT_JOB_LEVEL_METRICS,
              ConfigValueFactory.fromAnyRef(jobSpec.getConfig().hasPath(ConfigurationKeys.JOB_SCHEDULE_KEY)))
          .withoutPath(ConfigurationKeys.JOB_SCHEDULE_KEY)
          //Remove template uri
          .withoutPath(GOBBLIN_JOB_TEMPLATE_KEY)
          // Add job.name and job.group
          .withValue(ConfigurationKeys.JOB_NAME_KEY, ConfigValueFactory.fromAnyRef(jobName))
          .withValue(ConfigurationKeys.JOB_GROUP_KEY, ConfigValueFactory.fromAnyRef(flowGroup))
          //Add flow failure option
          .withValue(ConfigurationKeys.FLOW_FAILURE_OPTION, ConfigValueFactory.fromAnyRef(flowFailureOption))
          .withValue(ConfigurationKeys.FLOW_EDGE_ID_KEY, ConfigValueFactory.fromAnyRef(edgeId))
          .withValue(FlowSpec.MODIFICATION_TIME_KEY, ConfigValueFactory.fromAnyRef(flowModTime))
      );

      //Add tracking config to JobSpec.
      addTrackingEventConfig(jobSpec, sysConfig);
      addAdditionalConfig(jobSpec, sysConfig, specExecutorConfig);

      // Add dynamic config to jobSpec if a dynamic config generator is specified in sysConfig
      DynamicConfigGenerator dynamicConfigGenerator = DynamicConfigGeneratorFactory.createDynamicConfigGenerator(sysConfig);
      Config dynamicConfig = dynamicConfigGenerator.generateDynamicConfig(jobSpec.getConfig().withFallback(sysConfig));
      // Dynamic config takes precedence over the job config built so far.
      jobSpec.setConfig(dynamicConfig.withFallback(jobSpec.getConfig()));

      // Reset properties in Spec from Config
      jobSpec.setConfigAsProperties(ConfigUtils.configToProperties(jobSpec.getConfig()));
      return jobSpec;
    }

    /**
     * A method to add any additional configurations to a JobSpec which need to be passed to the {@link SpecExecutor}.
     * This enables {@link org.apache.gobblin.metrics.GobblinTrackingEvent}s to be emitted from each Gobblin job
     * orchestrated by Gobblin-as-a-Service, which will then be used for tracking the execution status of the job.
     * @param jobSpec representing a fully resolved {@link JobSpec}.
     * @param sysConfig gobblin service level configs
     * @param specExecutorConfig configs for the {@link SpecExecutor} of this {@link JobExecutionPlan}
     */
    private static void addAdditionalConfig(JobSpec jobSpec, Config sysConfig, Config specExecutorConfig) {
      // Bail out unless sysConfig both names a non-empty prefix key and contains config under that prefix.
      if (!(sysConfig.hasPath(ConfigurationKeys.SPECEXECUTOR_CONFIGS_PREFIX_KEY)
          && !Strings.isNullOrEmpty(ConfigUtils.getString(sysConfig, ConfigurationKeys.SPECEXECUTOR_CONFIGS_PREFIX_KEY, ""))
          && sysConfig.hasPath(sysConfig.getString(ConfigurationKeys.SPECEXECUTOR_CONFIGS_PREFIX_KEY)))) {
        return;
      }
      String additionalConfigsPrefix = sysConfig.getString(ConfigurationKeys.SPECEXECUTOR_CONFIGS_PREFIX_KEY);
      // Precedence: job config > sysConfig extras > specExecutor extras.
      Config config = jobSpec.getConfig().withFallback(ConfigUtils.getConfigOrEmpty(sysConfig, additionalConfigsPrefix));
      config = config.withFallback(ConfigUtils.getConfigOrEmpty(specExecutorConfig, additionalConfigsPrefix));
      if (!config.isEmpty()) {
        jobSpec.setConfig(config);
      }
    }

    /**
     * A method to add tracking event configurations to a JobSpec.
     * This enables {@link org.apache.gobblin.metrics.GobblinTrackingEvent}s
     * to be emitted from each Gobblin job orchestrated by Gobblin-as-a-Service, which will then be used for tracking the
     * execution status of the job.
     * @param jobSpec representing a fully resolved {@link JobSpec}.
     */
    private static void addTrackingEventConfig(JobSpec jobSpec, Config sysConfig) {
      Config reportingConfig = ConfigUtils.getConfig(sysConfig, ConfigurationKeys.METRICS_REPORTING_CONFIGURATIONS_PREFIX, ConfigFactory.empty());
      if (!reportingConfig.isEmpty()) {
        Config jobConfig = jobSpec.getConfig().withFallback(reportingConfig.atPath(ConfigurationKeys.METRICS_REPORTING_CONFIGURATIONS_PREFIX));
        boolean isSchemaRegistryEnabled = ConfigUtils.getBoolean(sysConfig, ConfigurationKeys.METRICS_REPORTING_KAFKA_USE_SCHEMA_REGISTRY, false);
        if (isSchemaRegistryEnabled) {
          // Propagate schema registry url/namespace from sysConfig so the job can emit tracking events.
          String schemaRegistryUrl = ConfigUtils.getString(sysConfig, KafkaSchemaRegistry.KAFKA_SCHEMA_REGISTRY_URL, "");
          if (!Strings.isNullOrEmpty(schemaRegistryUrl)) {
            jobConfig = jobConfig.withValue(KafkaSchemaRegistry.KAFKA_SCHEMA_REGISTRY_URL, ConfigValueFactory.fromAnyRef(schemaRegistryUrl));
          }
          String schemaOverrideNamespace = ConfigUtils
              .getString(sysConfig, KafkaSchemaRegistryConfigurationKeys.KAFKA_SCHEMA_REGISTRY_OVERRIDE_NAMESPACE, "");
          if (!Strings.isNullOrEmpty(schemaOverrideNamespace)) {
            jobConfig = jobConfig.withValue(KafkaSchemaRegistryConfigurationKeys.KAFKA_SCHEMA_REGISTRY_OVERRIDE_NAMESPACE,
                ConfigValueFactory.fromAnyRef(schemaOverrideNamespace));
          }
        }
        jobSpec.setConfig(jobConfig);
      }
    }

    /**
     * A naive implementation of generating a jobSpec's URI within a multi-hop flow that follows the convention:
     * <JOB_CATALOG_SCHEME>/{@link ConfigurationKeys#JOB_GROUP_KEY}/{@link ConfigurationKeys#JOB_NAME_KEY}.
     */
    private static URI jobSpecURIGenerator(String jobGroup, String jobName, FlowSpec flowSpec)
        throws URISyntaxException {
      return new URI(JobSpec.Builder.DEFAULT_JOB_CATALOG_SCHEME, flowSpec.getUri().getAuthority(),
          StringUtils.appendIfMissing(StringUtils.prependIfMissing(flowSpec.getUri().getPath(), "/"), "/") + jobGroup
              + "/" + jobName, null);
    }
  }

  public JobExecutionPlan(JobSpec jobSpec, SpecExecutor specExecutor) {
    this.jobSpec = jobSpec;
    this.specExecutor = specExecutor;
    // Retries default to a single attempt unless the job config overrides JOB_MAX_ATTEMPTS.
    this.maxAttempts = ConfigUtils.getInt(jobSpec.getConfig(), JOB_MAX_ATTEMPTS, 1);
  }

  /**
   * Render the JobSpec into a JSON string.
   * @return a valid JSON string representation of the JobSpec.
   */
  @Override
  public String toString() {
    return jobSpec.getConfig().root().render(ConfigRenderOptions.concise());
  }
}
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/spec/JobExecutionPlanDagFactory.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.service.modules.spec; import com.google.common.collect.Maps; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.List; import java.util.Map; import com.typesafe.config.Config; import lombok.extern.slf4j.Slf4j; import org.apache.gobblin.annotation.Alpha; import org.apache.gobblin.configuration.ConfigurationKeys; import org.apache.gobblin.runtime.api.JobSpec; import org.apache.gobblin.service.modules.flowgraph.Dag; import org.apache.gobblin.service.modules.orchestration.DagManagerUtils; /** * A Factory class used for constructing a {@link Dag} of {@link JobExecutionPlan}s from * a {@link List} of {@link JobExecutionPlan}s. */ @Alpha @Slf4j public class JobExecutionPlanDagFactory { public Dag<JobExecutionPlan> createDag(List<JobExecutionPlan> jobExecutionPlans) { //Maintain a mapping between job name and the corresponding JobExecutionPlan. 
Map<String, Dag.DagNode<JobExecutionPlan>> jobExecutionPlanMap = Maps.newHashMapWithExpectedSize(jobExecutionPlans.size()); List<Dag.DagNode<JobExecutionPlan>> dagNodeList = new ArrayList<>(jobExecutionPlans.size()); /** * Create a {@link Dag.DagNode<JobExecutionPlan>} for every {@link JobSpec} in the flow. Add this node * to a HashMap. */ for (JobExecutionPlan jobExecutionPlan : jobExecutionPlans) { Dag.DagNode<JobExecutionPlan> dagNode = new Dag.DagNode<>(jobExecutionPlan); dagNodeList.add(dagNode); String jobName = getJobName(jobExecutionPlan); if (jobName != null) { jobExecutionPlanMap.put(jobName, dagNode); } } /** * Iterate over each {@link JobSpec} to get the dependencies of each {@link JobSpec}. * For each {@link JobSpec}, get the corresponding {@link Dag.DagNode} and * set the {@link Dag.DagNode}s corresponding to its dependencies as its parent nodes. * * TODO: we likely do not need 2 for loops and we can do this in 1 pass. */ List<String> jobNames = new ArrayList<>(); for (JobExecutionPlan jobExecutionPlan : jobExecutionPlans) { String jobName = getJobName(jobExecutionPlan); if (jobName == null) { continue; } jobNames.add(jobName); Dag.DagNode<JobExecutionPlan> node = jobExecutionPlanMap.get(jobName); Collection<String> dependencies = getDependencies(jobExecutionPlan.getJobSpec().getConfig()); for (String dependency : dependencies) { Dag.DagNode<JobExecutionPlan> parentNode = jobExecutionPlanMap.get(dependency); node.addParentNode(parentNode); } } Dag<JobExecutionPlan> dag = new Dag<>(dagNodeList); if (!dagNodeList.isEmpty()) { log.info("Dag plan created with id {} and jobs: {}", DagManagerUtils.generateDagId(dag), jobNames); } else { log.info("Empty dag plan created for execution plans {}", jobExecutionPlans); } return dag; } /** * Get job dependencies of a given job from its config. * @param config of a job. * @return a list of dependencies of the job. 
*/ private static List<String> getDependencies(Config config) { return config.hasPath(ConfigurationKeys.JOB_DEPENDENCIES) ? Arrays .asList(config.getString(ConfigurationKeys.JOB_DEPENDENCIES).split(",")) : new ArrayList<>(0); } /** * The job name is derived from the {@link ConfigurationKeys#JOB_NAME_KEY} config. It is assumed to be unique * across all jobs in a {@link Dag}. * @param jobExecutionPlan * @return the name of the job. */ private static String getJobName(JobExecutionPlan jobExecutionPlan) { return jobExecutionPlan.getJobSpec().getConfig().getString(ConfigurationKeys.JOB_NAME_KEY); } }
3,886
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/scheduler/GobblinServiceJobScheduler.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.service.modules.scheduler; import com.codahale.metrics.MetricFilter; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Optional; import com.google.common.collect.Maps; import com.typesafe.config.Config; import com.typesafe.config.ConfigFactory; import java.io.IOException; import java.net.URI; import java.text.ParseException; import java.time.LocalDateTime; import java.time.ZoneId; import java.time.ZoneOffset; import java.time.ZonedDateTime; import java.util.Calendar; import java.util.Collection; import java.util.Date; import java.util.HashSet; import java.util.Iterator; import java.util.Map; import java.util.Properties; import java.util.TimeZone; import javax.inject.Inject; import javax.inject.Named; import javax.inject.Singleton; import lombok.Getter; import lombok.extern.slf4j.Slf4j; import org.apache.commons.lang.StringUtils; import org.apache.gobblin.annotation.Alpha; import org.apache.gobblin.configuration.ConfigurationKeys; import org.apache.gobblin.instrumented.Instrumented; import org.apache.gobblin.metrics.ContextAwareGauge; import org.apache.gobblin.metrics.ContextAwareMeter; import org.apache.gobblin.metrics.MetricContext; 
import org.apache.gobblin.metrics.ServiceMetricNames; import org.apache.gobblin.runtime.JobException; import org.apache.gobblin.runtime.api.FlowSpec; import org.apache.gobblin.runtime.api.Spec; import org.apache.gobblin.runtime.api.SpecCatalogListener; import org.apache.gobblin.runtime.api.SpecNotFoundException; import org.apache.gobblin.runtime.listeners.JobListener; import org.apache.gobblin.runtime.metrics.RuntimeMetrics; import org.apache.gobblin.runtime.spec_catalog.AddSpecResponse; import org.apache.gobblin.runtime.spec_catalog.FlowCatalog; import org.apache.gobblin.runtime.spec_catalog.TopologyCatalog; import org.apache.gobblin.runtime.util.InjectionNames; import org.apache.gobblin.scheduler.BaseGobblinJob; import org.apache.gobblin.scheduler.JobScheduler; import org.apache.gobblin.scheduler.SchedulerService; import org.apache.gobblin.service.ServiceConfigKeys; import org.apache.gobblin.service.modules.flowgraph.Dag; import org.apache.gobblin.service.modules.orchestration.DagManager; import org.apache.gobblin.service.modules.orchestration.FlowTriggerHandler; import org.apache.gobblin.service.modules.orchestration.Orchestrator; import org.apache.gobblin.service.modules.orchestration.UserQuotaManager; import org.apache.gobblin.service.modules.spec.JobExecutionPlan; import org.apache.gobblin.service.modules.utils.SharedFlowMetricsSingleton; import org.apache.gobblin.service.monitoring.FlowStatusGenerator; import org.apache.gobblin.util.ConfigUtils; import org.apache.gobblin.util.PropertiesUtils; import org.apache.helix.HelixManager; import org.quartz.CronExpression; import org.quartz.DisallowConcurrentExecution; import org.quartz.InterruptableJob; import org.quartz.JobDataMap; import org.quartz.JobDetail; import org.quartz.JobExecutionContext; import org.quartz.JobExecutionException; import org.quartz.SchedulerException; import org.quartz.Trigger; import org.quartz.UnableToInterruptJobException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import 
static org.apache.gobblin.service.ServiceConfigKeys.GOBBLIN_SERVICE_PREFIX;

/**
 * An extension to {@link JobScheduler} that is also a {@link SpecCatalogListener}.
 * {@link GobblinServiceJobScheduler} listens for new / updated {@link FlowSpec} and schedules
 * and runs them via {@link Orchestrator}.
 */
@Alpha
@Singleton
@Slf4j
public class GobblinServiceJobScheduler extends JobScheduler implements SpecCatalogListener {
  // Scheduler related configuration
  // A boolean function indicating if current instance will handle DR traffic or not.
  public static final String GOBBLIN_SERVICE_SCHEDULER_DR_NOMINATED = GOBBLIN_SERVICE_PREFIX + "drNominatedInstance";

  protected final Logger _log;
  protected final Optional<FlowCatalog> flowCatalog;
  protected final Optional<HelixManager> helixManager;
  protected final Orchestrator orchestrator;
  protected final Boolean warmStandbyEnabled;
  protected final Optional<UserQuotaManager> quotaManager;
  protected final Optional<FlowTriggerHandler> flowTriggerHandler;
  // Specs currently known to the scheduler, keyed by flow-spec URI string.
  @Getter
  protected final Map<String, Spec> scheduledFlowSpecs;
  // Last-seen modification time per flow-spec URI; used to drop stale updates in onAddSpec().
  @Getter
  protected final Map<String, Long> lastUpdatedTimeForFlowSpec;
  protected volatile int loadSpecsBatchSize = -1;
  protected int skipSchedulingFlowsAfterNumDays;
  @Getter
  private volatile boolean isActive;
  private String serviceName;

  // Backing values for the startup/latency gauges below; -1 means "not yet measured".
  // Written by the scheduling code paths and read lazily by the registered gauges.
  private volatile Long perSpecGetRateValue = -1L;
  private volatile Long timeToInitializeSchedulerValue = -1L;
  private volatile Long timeToObtainSpecUrisValue = -1L;
  private volatile Long individualGetSpecSpeedValue = -1L;
  private volatile Long eachCompleteAddSpecValue = -1L;
  private volatile Long eachSpecCompilationValue = -1L;
  private volatile Long eachScheduleJobValue = -1L;
  private volatile Long totalGetSpecTimeValue = -1L;
  private volatile Long totalAddSpecTimeValue = -1L;
  private volatile int numJobsScheduledDuringStartupValue = -1;

  private final ContextAwareGauge getSpecsPerSpecRateNanos =
      metricContext.newContextAwareGauge(RuntimeMetrics.GOBBLIN_JOB_SCHEDULER_GET_SPECS_DURING_STARTUP_PER_SPEC_RATE_NANOS, () -> this.perSpecGetRateValue);
  private final ContextAwareGauge batchSize =
      metricContext.newContextAwareGauge(RuntimeMetrics.GOBBLIN_JOB_SCHEDULER_LOAD_SPECS_BATCH_SIZE, () -> this.loadSpecsBatchSize);
  private final ContextAwareGauge timeToInitalizeSchedulerNanos =
      metricContext.newContextAwareGauge(RuntimeMetrics.GOBBLIN_JOB_SCHEDULER_TIME_TO_INITIALIZE_SCHEDULER_NANOS, () -> this.timeToInitializeSchedulerValue);
  private final ContextAwareGauge timeToObtainSpecUrisNanos =
      metricContext.newContextAwareGauge(RuntimeMetrics.GOBBLIN_JOB_SCHEDULER_TIME_TO_OBTAIN_SPEC_URIS_NANOS, () -> timeToObtainSpecUrisValue);
  private final ContextAwareGauge individualGetSpecSpeedNanos =
      metricContext.newContextAwareGauge(RuntimeMetrics.GOBBLIN_JOB_SCHEDULER_INDIVIDUAL_GET_SPEC_SPEED_NANOS, () -> individualGetSpecSpeedValue);
  private final ContextAwareGauge eachCompleteAddSpecNanos =
      metricContext.newContextAwareGauge(RuntimeMetrics.GOBBLIN_JOB_SCHEDULER_EACH_COMPLETE_ADD_SPEC_NANOS, () -> eachCompleteAddSpecValue);
  private final ContextAwareGauge eachSpecCompilationNanos =
      metricContext.newContextAwareGauge(RuntimeMetrics.GOBBLIN_JOB_SCHEDULER_EACH_SPEC_COMPILATION_NANOS, () -> eachSpecCompilationValue);
  private final ContextAwareGauge eachScheduleJobNanos =
      metricContext.newContextAwareGauge(RuntimeMetrics.GOBBLIN_JOB_SCHEDULER_EACH_SCHEDULE_JOB_NANOS, () -> eachScheduleJobValue);
  private final ContextAwareGauge totalGetSpecTimeNanos =
      metricContext.newContextAwareGauge(RuntimeMetrics.GOBBLIN_JOB_SCHEDULER_TOTAL_GET_SPEC_TIME_NANOS, () -> totalGetSpecTimeValue);
  private final ContextAwareGauge totalAddSpecTimeNanos =
      metricContext.newContextAwareGauge(RuntimeMetrics.GOBBLIN_JOB_SCHEDULER_TOTAL_ADD_SPEC_TIME_NANOS, () -> totalAddSpecTimeValue);
  private final ContextAwareGauge numJobsScheduledDuringStartup =
      metricContext.newContextAwareGauge(RuntimeMetrics.GOBBLIN_JOB_SCHEDULER_NUM_JOBS_SCHEDULED_DURING_STARTUP, () -> numJobsScheduledDuringStartupValue);

  private static final MetricContext metricContext =
      Instrumented.getMetricContext(new org.apache.gobblin.configuration.State(), GobblinServiceJobScheduler.class);
  private static final ContextAwareMeter scheduledFlows = metricContext.contextAwareMeter(ServiceMetricNames.SCHEDULED_FLOW_METER);
  private static final ContextAwareMeter nonScheduledFlows = metricContext.contextAwareMeter(ServiceMetricNames.NON_SCHEDULED_FLOW_METER);

  /**
   * If current instances is nominated as a handler for DR traffic from down GaaS-Instance.
   * Note this is, currently, different from leadership change/fail-over handling, where the traffic could come
   * from GaaS instance out of current GaaS Cluster:
   * e.g. There are multi-datacenter deployment of GaaS Cluster. Intra-datacenter fail-over could be handled by
   * leadership change mechanism, while inter-datacenter fail-over would be handled by DR handling mechanism.
   */
  private boolean isNominatedDRHandler;

  /**
   * Use this to tag all DR-applicable FlowSpec entries in {@link org.apache.gobblin.runtime.api.SpecStore}
   * so only they would be loaded during DR handling.
   */
  public static final String DR_FILTER_TAG = "dr";

  @Inject
  public GobblinServiceJobScheduler(@Named(InjectionNames.SERVICE_NAME) String serviceName, Config config,
      Optional<HelixManager> helixManager, Optional<FlowCatalog> flowCatalog, Optional<TopologyCatalog> topologyCatalog,
      Orchestrator orchestrator, SchedulerService schedulerService, Optional<UserQuotaManager> quotaManager, Optional<Logger> log,
      @Named(InjectionNames.WARM_STANDBY_ENABLED) boolean warmStandbyEnabled, Optional<FlowTriggerHandler> flowTriggerHandler) throws Exception {
    super(ConfigUtils.configToProperties(config), schedulerService);

    _log = log.isPresent() ?
log.get() : LoggerFactory.getLogger(getClass()); this.serviceName = serviceName; this.flowCatalog = flowCatalog; this.helixManager = helixManager; this.orchestrator = orchestrator; this.scheduledFlowSpecs = Maps.newHashMap(); this.lastUpdatedTimeForFlowSpec = Maps.newHashMap(); this.loadSpecsBatchSize = Integer.parseInt(ConfigUtils.configToProperties(config).getProperty(ConfigurationKeys.LOAD_SPEC_BATCH_SIZE, String.valueOf(ConfigurationKeys.DEFAULT_LOAD_SPEC_BATCH_SIZE))); this.skipSchedulingFlowsAfterNumDays = Integer.parseInt(ConfigUtils.configToProperties(config).getProperty(ConfigurationKeys.SKIP_SCHEDULING_FLOWS_AFTER_NUM_DAYS, String.valueOf(ConfigurationKeys.DEFAULT_NUM_DAYS_TO_SKIP_AFTER))); this.isNominatedDRHandler = config.hasPath(GOBBLIN_SERVICE_SCHEDULER_DR_NOMINATED) && config.hasPath(GOBBLIN_SERVICE_SCHEDULER_DR_NOMINATED); this.warmStandbyEnabled = warmStandbyEnabled; this.quotaManager = quotaManager; this.flowTriggerHandler = flowTriggerHandler; // Check that these metrics do not exist before adding, mainly for testing purpose which creates multiple instances // of the scheduler. If one metric exists, then the others should as well. 
MetricFilter filter = MetricFilter.contains(RuntimeMetrics.GOBBLIN_JOB_SCHEDULER_GET_SPECS_DURING_STARTUP_PER_SPEC_RATE_NANOS); if (metricContext.getGauges(filter).isEmpty()) { metricContext.register(this.getSpecsPerSpecRateNanos); metricContext.register(this.batchSize); metricContext.register(this.timeToInitalizeSchedulerNanos); metricContext.register(this.timeToObtainSpecUrisNanos); metricContext.register(this.individualGetSpecSpeedNanos); metricContext.register(this.eachCompleteAddSpecNanos); metricContext.register(this.eachSpecCompilationNanos); metricContext.register(this.eachScheduleJobNanos); metricContext.register(this.totalGetSpecTimeNanos); metricContext.register(this.totalAddSpecTimeNanos); metricContext.register(this.numJobsScheduledDuringStartup); } } public GobblinServiceJobScheduler(String serviceName, Config config, FlowStatusGenerator flowStatusGenerator, Optional<HelixManager> helixManager, Optional<FlowCatalog> flowCatalog, Optional<TopologyCatalog> topologyCatalog, Optional<DagManager> dagManager, Optional<UserQuotaManager> quotaManager, SchedulerService schedulerService, Optional<Logger> log, boolean warmStandbyEnabled, Optional <FlowTriggerHandler> flowTriggerHandler, SharedFlowMetricsSingleton sharedFlowMetricsSingleton) throws Exception { this(serviceName, config, helixManager, flowCatalog, topologyCatalog, new Orchestrator(config, flowStatusGenerator, topologyCatalog, dagManager, log, flowTriggerHandler, sharedFlowMetricsSingleton), schedulerService, quotaManager, log, warmStandbyEnabled, flowTriggerHandler); } public synchronized void setActive(boolean isActive) { if (this.isActive == isActive) { // No-op if already in correct state return; } // Since we are going to change status to isActive=true, schedule all flows if (isActive) { // Need to set active=true first; otherwise in the onAddSpec(), node will forward specs to active node, which is itself. 
this.isActive = isActive; if (this.flowCatalog.isPresent()) { // Load spec asynchronously and make scheduler be aware of that. Thread scheduleSpec = new Thread(new Runnable() { @Override public void run() { // Ensure compiler is healthy before attempting to schedule flows try { GobblinServiceJobScheduler.this.orchestrator.getSpecCompiler().awaitHealthy(); } catch (InterruptedException e) { throw new RuntimeException(e); } scheduleSpecsFromCatalog(); } }); scheduleSpec.start(); } } else { // Since we are going to change status to isActive=false, unschedule all flows try { this.scheduledFlowSpecs.clear(); unscheduleAllJobs(); } catch (SchedulerException e) { _log.error(String.format("Not all jobs were unscheduled"), e); // We want to avoid duplicate flow execution, so fail loudly throw new RuntimeException(e); } // Need to set active=false at the end; otherwise in the onDeleteSpec(), node will forward specs to active node, which is itself. this.isActive = isActive; } } /** Return true if a spec should be scheduled and if it is, modify the spec of an adhoc flow before adding to * scheduler. Return false otherwise. 
*/ private boolean addSpecHelperMethod(Spec spec) { // Adhoc flows will not have any job schedule key, but we should schedule them if (spec instanceof FlowSpec) { FlowSpec flowSpec = (FlowSpec) spec; if (!flowSpec.getConfig().hasPath(ConfigurationKeys.JOB_SCHEDULE_KEY) || isWithinRange( flowSpec.getConfig().getString(ConfigurationKeys.JOB_SCHEDULE_KEY), this.skipSchedulingFlowsAfterNumDays)) { // Disable FLOW_RUN_IMMEDIATELY on service startup or leadership change if the property is set to true if (PropertiesUtils.getPropAsBoolean(flowSpec.getConfigAsProperties(), ConfigurationKeys.FLOW_RUN_IMMEDIATELY, "false")) { Spec modifiedSpec = disableFlowRunImmediatelyOnStart((FlowSpec) spec); onAddSpec(modifiedSpec); } else { onAddSpec(spec); } return true; } }else { _log.debug("Not scheduling spec {} during startup as next job to schedule is outside of threshold.", spec); } return false; } /** * Returns true if next run for the given cron schedule is sooner than the threshold to skip scheduling after, false * otherwise. If the cron expression cannot be parsed and the next run cannot be calculated returns true to schedule. * @param cronExpression * @return num days until next run, max integer in the case it cannot be calculated */ @VisibleForTesting public static boolean isWithinRange(String cronExpression, int maxNumDaysToScheduleWithin) { if (cronExpression.trim().isEmpty()) { // If the cron expression is empty return true to capture adhoc flows return true; } CronExpression cron = null; Calendar cal = Calendar.getInstance(TimeZone.getTimeZone("UTC")); double numMillisInADay = 86400000; try { cron = new CronExpression(cronExpression); cron.setTimeZone(TimeZone.getTimeZone("UTC")); Date nextValidTimeAfter = cron.getNextValidTimeAfter(new Date()); if (nextValidTimeAfter == null) { log.warn("Next valid time doesn't exist since it's out of range for expression: {}. 
", cronExpression); // nextValidTimeAfter will be null in cases only when CronExpression is outdated for a given range // this will cause NullPointerException while scheduling FlowSpecs from FlowCatalog // Hence, returning false to avoid expired flows from being scheduled return false; } cal.setTime(nextValidTimeAfter); long diff = cal.getTimeInMillis() - System.currentTimeMillis(); return (int) Math.round(diff / numMillisInADay) < maxNumDaysToScheduleWithin; } catch (ParseException e) { e.printStackTrace(); // Return false when a parsing exception occurs due to invalid cron return false; } } /** * Load all {@link FlowSpec}s from {@link FlowCatalog} as one of the initialization step, * and make schedulers be aware of that. * * If it is newly brought up as the DR handler, will load additional FlowSpecs and handle transition properly. */ private void scheduleSpecsFromCatalog() { int numSpecs = this.flowCatalog.get().getSize(); int actualNumFlowsScheduled = 0; _log.info("Scheduling specs from catalog: {} flows in the catalog, will skip scheduling flows with next run after " + "{} days", numSpecs, this.skipSchedulingFlowsAfterNumDays); long startTime = System.nanoTime(); long totalGetTime = 0; long totalAddSpecTime = 0; Iterator<URI> uriIterator; HashSet<URI> urisLeftToSchedule = new HashSet<>(); try { uriIterator = this.flowCatalog.get().getSpecURIs(); while (uriIterator.hasNext()) { urisLeftToSchedule.add(uriIterator.next()); } } catch (IOException e) { throw new RuntimeException(e); } this.timeToObtainSpecUrisValue = System.nanoTime() - startTime; try { // If current instances nominated as DR handler, will take additional URIS from FlowCatalog. if (isNominatedDRHandler) { // Synchronously cleaning the execution state for DR-applicable FlowSpecs // before rescheduling the again in nominated DR-Hanlder. 
Iterator<URI> drUris = this.flowCatalog.get().getSpecURISWithTag(DR_FILTER_TAG); clearRunningFlowState(drUris); } } catch (IOException e) { throw new RuntimeException("Failed to get Spec URIs with tag to clear running flow state", e); } int startOffset = 0; long batchGetStartTime; long batchGetEndTime; while (startOffset < numSpecs) { batchGetStartTime = System.nanoTime(); Collection<Spec> batchOfSpecs = this.flowCatalog.get().getSpecsPaginated(startOffset, this.loadSpecsBatchSize); Iterator<Spec> batchOfSpecsIterator = batchOfSpecs.iterator(); batchGetEndTime = System.nanoTime(); while (batchOfSpecsIterator.hasNext()) { Spec spec = batchOfSpecsIterator.next(); try { if (addSpecHelperMethod(spec)) { totalAddSpecTime += this.eachCompleteAddSpecValue; // this is updated by each call to onAddSpec actualNumFlowsScheduled += 1; } } catch (Exception e) { // If there is an uncaught error thrown during compilation, log it and continue adding flows _log.error("Could not schedule spec {} from flowCatalog due to ", spec, e); } urisLeftToSchedule.remove(spec.getUri()); } startOffset += this.loadSpecsBatchSize; totalGetTime += batchGetEndTime - batchGetStartTime; // Don't skew the average get spec time value with the last batch that may be very small if (startOffset == 0 || batchOfSpecs.size() >= Math.round(0.75 * this.loadSpecsBatchSize)) { perSpecGetRateValue = (batchGetEndTime - batchGetStartTime) / batchOfSpecs.size(); } } // Ensure we did not miss any specs due to ordering changing (deletions/insertions) while loading Iterator<URI> urisLeft = urisLeftToSchedule.iterator(); long individualGetSpecStartTime; while (urisLeft.hasNext()) { URI uri = urisLeft.next(); try { individualGetSpecStartTime = System.nanoTime(); Spec spec = this.flowCatalog.get().getSpecWrapper(uri); this.individualGetSpecSpeedValue = System.nanoTime() - individualGetSpecStartTime; totalGetTime += this.individualGetSpecSpeedValue; if (addSpecHelperMethod(spec)) { totalAddSpecTime += 
this.eachCompleteAddSpecValue; // this is updated by each call to onAddSpec actualNumFlowsScheduled += 1; } } catch (Exception e) { // If there is an uncaught error thrown during compilation, log it and continue adding flows _log.error("Could not schedule spec uri {} from flowCatalog due to {}", uri, e); } } // Reset value after its last value to get an accurate reading this.perSpecGetRateValue = -1L; this.individualGetSpecSpeedValue = -1L; this.totalGetSpecTimeValue = totalGetTime; this.totalAddSpecTimeValue = totalAddSpecTime; this.numJobsScheduledDuringStartupValue = actualNumFlowsScheduled; this.flowCatalog.get().getMetrics().updateGetSpecTime(startTime); this.timeToInitializeSchedulerValue = System.nanoTime() - startTime; } /** * In DR-mode, the running {@link FlowSpec} will all be cancelled and rescheduled. * We will need to make sure that running {@link FlowSpec}s' state are cleared, and corresponding running jobs are * killed before rescheduling them. * @param drUris The uris that applicable for DR discovered from FlowCatalog. */ private void clearRunningFlowState(Iterator<URI> drUris) { while (drUris.hasNext()) { // TODO: Instead of simply call onDeleteSpec, a callback when FlowSpec is deleted from FlowCatalog, should also kill Azkaban Flow from AzkabanSpecProducer. onDeleteSpec(drUris.next(), FlowSpec.Builder.DEFAULT_VERSION); } } @VisibleForTesting protected static Spec disableFlowRunImmediatelyOnStart(FlowSpec spec) { Properties properties = spec.getConfigAsProperties(); properties.setProperty(ConfigurationKeys.FLOW_RUN_IMMEDIATELY, "false"); Config config = ConfigFactory.parseProperties(properties); return new FlowSpec(spec.getUri(), spec.getVersion(), spec.getDescription(), config, properties, spec.getTemplateURIs(), spec.getChildSpecs()); } @Override protected void startUp() throws Exception { super.startUp(); } /** * Synchronize the job scheduling because the same flowSpec can be scheduled by different threads. 
   */
  @Override
  public synchronized void scheduleJob(Properties jobProps, JobListener jobListener) throws JobException {
    Map<String, Object> additionalJobDataMap = Maps.newHashMap();
    additionalJobDataMap.put(ServiceConfigKeys.GOBBLIN_SERVICE_FLOWSPEC,
        this.scheduledFlowSpecs.get(jobProps.getProperty(ConfigurationKeys.JOB_NAME_KEY)));
    try {
      scheduleJob(jobProps, jobListener, additionalJobDataMap, GobblinServiceJob.class);
    } catch (Exception e) {
      throw new JobException("Failed to schedule job " + jobProps.getProperty(ConfigurationKeys.JOB_NAME_KEY), e);
    }
  }

  @Override
  protected void logNewlyScheduledJob(JobDetail job, Trigger trigger) {
    Properties jobProps = (Properties) job.getJobDataMap().get(PROPERTIES_KEY);
    log.info(jobSchedulerTracePrefixBuilder(jobProps) + "nextTriggerTime: {} - Job newly scheduled",
        utcDateAsUTCEpochMillis(trigger.getNextFireTime()));
  }

  // Builds the common "[flowName: x flowGroup: y]" prefix used by the scheduler trigger trace log lines.
  protected static String jobSchedulerTracePrefixBuilder(Properties jobProps) {
    return String.format("Scheduler trigger tracing (in epoch-ms UTC): [flowName: %s flowGroup: %s] - ",
        jobProps.getProperty(ConfigurationKeys.FLOW_NAME_KEY, "<<no flow name>>"),
        jobProps.getProperty(ConfigurationKeys.FLOW_GROUP_KEY, "<<no flow group>>"));
  }

  /**
   * Takes a Date object in system default time zone, converts it to UTC before returning the number of milliseconds
   * since epoch
   * @param date
   */
  public static long systemDefaultZoneDateAsUTCEpochMillis(Date date) {
    return ZonedDateTime.of(LocalDateTime.ofInstant(date.toInstant(), ZoneId.systemDefault()), ZoneOffset.UTC)
        .toInstant().toEpochMilli();
  }

  /**
   * Takes a Date object in UTC and returns the number of milliseconds since epoch
   * @param date
   */
  public static long utcDateAsUTCEpochMillis(Date date) {
    return date.toInstant().toEpochMilli();
  }

  @Override
  public void runJob(Properties jobProps, JobListener jobListener) throws JobException {
    try {
      Spec flowSpec = this.scheduledFlowSpecs.get(jobProps.getProperty(ConfigurationKeys.JOB_NAME_KEY));
      // We always expect the trigger event time to be set so the flow will be skipped by the orchestrator if it is not
      String triggerTimestampMillis = jobProps.getProperty(ConfigurationKeys.ORCHESTRATOR_TRIGGER_EVENT_TIME_MILLIS_KEY,
          ConfigurationKeys.ORCHESTRATOR_TRIGGER_EVENT_TIME_NEVER_SET_VAL);
      boolean isReminderEvent = Boolean.parseBoolean(jobProps.getProperty(ConfigurationKeys.FLOW_IS_REMINDER_EVENT_KEY, "false"));
      this.orchestrator.orchestrate(flowSpec, jobProps, Long.parseLong(triggerTimestampMillis), isReminderEvent);
    } catch (Exception e) {
      String exceptionPrefix = "Failed to run Spec: " + jobProps.getProperty(ConfigurationKeys.JOB_NAME_KEY);
      log.warn(exceptionPrefix + " because", e);
      throw new JobException(exceptionPrefix, e);
    }
  }

  /**
   *
   * @param addedSpec spec to be added
   * @return add spec response, which contains <code>null</code> if there is an error
   */
  @Override
  public AddSpecResponse onAddSpec(Spec addedSpec) {
    long startTime = System.nanoTime();
    if (this.helixManager.isPresent() && !this.helixManager.get().isConnected()) {
      // Specs in store will be notified when Scheduler is added as listener to FlowCatalog, so ignore
      // .. Specs if in cluster mode and Helix is not yet initialized
      _log.info("System not yet initialized. Skipping Spec Addition: " + addedSpec);
      return null;
    }
    _log.info("New Flow Spec detected: " + addedSpec);
    if (!(addedSpec instanceof FlowSpec)) {
      return null;
    }
    FlowSpec flowSpec = (FlowSpec) addedSpec;
    URI flowSpecUri = flowSpec.getUri();
    Properties jobConfig = createJobConfig(flowSpec);
    boolean isExplain = flowSpec.isExplain();
    String response = null;
    long compileStartTime = System.nanoTime();
    // always try to compile the flow to verify if it is compilable
    Dag<JobExecutionPlan> dag = this.orchestrator.getSpecCompiler().compileFlow(flowSpec);
    this.eachSpecCompilationValue = System.nanoTime() - compileStartTime;
    // If dag is null then a compilation error has occurred
    if (dag != null && !dag.isEmpty()) {
      response = dag.toString();
    }
    boolean compileSuccess = FlowCatalog.isCompileSuccessful(response);
    if (isExplain || !compileSuccess || !this.isActive) {
      // todo: in case of a scheduled job, we should also check if the job schedule is a valid cron schedule
      //  so it can be scheduled
      _log.info("Ignoring the spec {}. isExplain: {}, compileSuccess: {}, master: {}",
          addedSpec, isExplain, compileSuccess, this.isActive);
      return new AddSpecResponse<>(response);
    }
    // Check quota limits against adhoc flows before saving the schedule
    // In warm standby mode, this quota check will happen on restli API layer when we accept the flow
    if (!this.warmStandbyEnabled && !jobConfig.containsKey(ConfigurationKeys.JOB_SCHEDULE_KEY)) {
      // This block should be reachable only for the execution for the adhoc flows
      // For flow that has scheduler but run-immediately set to be true, we won't check the quota as we will use a different execution id later
      if (quotaManager.isPresent()) {
        // QuotaManager has idempotent checks for a dagNode, so this check won't double add quotas for a flow in the DagManager
        try {
          quotaManager.get().checkQuota(dag.getStartNodes());
        } catch (IOException e) {
          throw new RuntimeException(e);
        }
      }
    }
    // Compare the modification timestamp of the spec being added if the scheduler is being initialized, ideally we
    // don't even want to do the same update twice as it will kill the existing flow and reschedule it unnecessarily
    Long modificationTime = Long.valueOf(flowSpec.getConfigAsProperties().getProperty(FlowSpec.MODIFICATION_TIME_KEY, "0"));
    String uriString = flowSpec.getUri().toString();
    Boolean isRunImmediately = PropertiesUtils.getPropAsBoolean(jobConfig, ConfigurationKeys.FLOW_RUN_IMMEDIATELY, "false");
    // If the modification time is 0 (which means the original API was used to retrieve spec or warm standby mode is not
    // enabled), spec not in scheduler, or have a modification time associated with it assume it's the most recent
    if (modificationTime != 0L && this.scheduledFlowSpecs.containsKey(uriString)
        && this.lastUpdatedTimeForFlowSpec.containsKey(uriString)) {
      // For run-immediately flows with a schedule the modified_time would remain the same
      if (this.lastUpdatedTimeForFlowSpec.get(uriString).compareTo(modificationTime) > 0 ||
(this.lastUpdatedTimeForFlowSpec.get(uriString).equals(modificationTime) && !isRunImmediately)) { _log.warn("Ignoring the spec {} modified at time {} because we have a more updated version from time {}", addedSpec, modificationTime,this.lastUpdatedTimeForFlowSpec.get(uriString)); this.eachCompleteAddSpecValue = System.nanoTime() - startTime; return new AddSpecResponse(response); } } // todo : we should probably not schedule a flow if it is a runOnce flow this.scheduledFlowSpecs.put(flowSpecUri.toString(), addedSpec); this.lastUpdatedTimeForFlowSpec.put(flowSpecUri.toString(), modificationTime); if (jobConfig.containsKey(ConfigurationKeys.JOB_SCHEDULE_KEY)) { _log.info("{} Scheduling flow spec: {} ", this.serviceName, addedSpec); try { long scheduleStartTime = System.nanoTime(); scheduleJob(jobConfig, null); this.eachScheduleJobValue = System.nanoTime() - scheduleStartTime; } catch (JobException je) { _log.error("{} Failed to schedule or run FlowSpec {}", serviceName, addedSpec, je); this.scheduledFlowSpecs.remove(addedSpec.getUri().toString()); this.lastUpdatedTimeForFlowSpec.remove(flowSpecUri.toString()); this.eachCompleteAddSpecValue = System.nanoTime() - startTime; return null; } if (PropertiesUtils.getPropAsBoolean(jobConfig, ConfigurationKeys.FLOW_RUN_IMMEDIATELY, "false")) { _log.info("RunImmediately requested, hence executing FlowSpec: " + addedSpec); this.jobExecutor.execute(new NonScheduledJobRunner(flowSpecUri, false, jobConfig, null)); } } else { _log.info("No FlowSpec schedule found, so running FlowSpec: " + addedSpec); // Use 0 for trigger event time of an adhoc flow jobConfig.setProperty(ConfigurationKeys.ORCHESTRATOR_TRIGGER_EVENT_TIME_MILLIS_KEY, "0"); this.jobExecutor.execute(new NonScheduledJobRunner(flowSpecUri, true, jobConfig, null)); } this.eachCompleteAddSpecValue = System.nanoTime() - startTime; return new AddSpecResponse<>(response); } /** * Remove a flowSpec from schedule * Unlike onDeleteSpec, we want to avoid deleting the flowSpec on 
the executor
   * and we still want to unschedule if we cannot connect to zookeeper as the current node cannot be the master
   * @param specURI
   * @param specVersion
   */
  private void unscheduleSpec(URI specURI, String specVersion) throws JobException {
    if (this.scheduledFlowSpecs.containsKey(specURI.toString())) {
      _log.info("Unscheduling flowSpec " + specURI + "/" + specVersion);
      this.scheduledFlowSpecs.remove(specURI.toString());
      this.lastUpdatedTimeForFlowSpec.remove(specURI.toString());
      unscheduleJob(specURI.toString());
      try {
        // Best-effort trace logging only; the spec may legitimately already be gone from the catalog.
        FlowSpec spec = (FlowSpec) this.flowCatalog.get().getSpecs(specURI);
        Properties properties = spec.getConfigAsProperties();
        _log.info(jobSchedulerTracePrefixBuilder(properties) + "Unscheduled Spec");
      } catch (SpecNotFoundException e) {
        _log.warn("Unable to retrieve spec for URI {}", specURI);
      }
    } else {
      throw new JobException(String.format(
          "Spec with URI: %s was not found in cache. May be it was cleaned, if not please clean it manually", specURI));
    }
  }

  public void onDeleteSpec(URI deletedSpecURI, String deletedSpecVersion) {
    onDeleteSpec(deletedSpecURI, deletedSpecVersion, new Properties());
  }

  /** {@inheritDoc} */
  @Override
  public void onDeleteSpec(URI deletedSpecURI, String deletedSpecVersion, Properties headers) {
    if (this.helixManager.isPresent() && !this.helixManager.get().isConnected()) {
      // Specs in store will be notified when Scheduler is added as listener to FlowCatalog, so ignore
      // .. Specs if in cluster mode and Helix is not yet initialized
      _log.info("System not yet initialized. Skipping Spec Deletion: " + deletedSpecURI);
      return;
    }
    _log.info("Spec deletion detected: " + deletedSpecURI + "/" + deletedSpecVersion);
    if (!this.isActive) {
      _log.info("Skipping deletion of this spec {}/{} for non-leader host", deletedSpecURI, deletedSpecVersion);
      return;
    }
    try {
      Spec deletedSpec = this.scheduledFlowSpecs.get(deletedSpecURI.toString());
      unscheduleSpec(deletedSpecURI, deletedSpecVersion);
      this.orchestrator.remove(deletedSpec, headers);
    } catch (JobException | IOException e) {
      _log.warn(String.format("Spec with URI: %s was not unscheduled cleaning", deletedSpecURI), e);
    }
  }

  /** {@inheritDoc} */
  @Override
  public void onUpdateSpec(Spec updatedSpec) {
    if (this.helixManager.isPresent() && !this.helixManager.get().isConnected()) {
      // Specs in store will be notified when Scheduler is added as listener to FlowCatalog, so ignore
      // .. Specs if in cluster mode and Helix is not yet initialized
      _log.info("System not yet initialized. Skipping Spec Update: " + updatedSpec);
      return;
    }
    _log.info("Spec changed: " + updatedSpec);
    if (!(updatedSpec instanceof FlowSpec)) {
      return;
    }
    // An update is handled as a (re-)add; onAddSpec replaces the cached spec and reschedules it.
    try {
      onAddSpec(updatedSpec);
    } catch (Exception e) {
      _log.error("Failed to update Spec: " + updatedSpec, e);
    }
  }

  // Builds the Quartz job Properties for a flow spec: base scheduler properties plus the flow's
  // name/group, schedule (if any) and run-immediately flag.
  private Properties createJobConfig(FlowSpec flowSpec) {
    Properties jobConfig = new Properties();
    Properties flowSpecProperties = flowSpec.getConfigAsProperties();
    jobConfig.putAll(this.properties);
    jobConfig.setProperty(ConfigurationKeys.JOB_NAME_KEY, flowSpec.getUri().toString());
    jobConfig.setProperty(ConfigurationKeys.JOB_GROUP_KEY,
        flowSpec.getConfig().getValue(ConfigurationKeys.FLOW_GROUP_KEY).toString());
    jobConfig.setProperty(ConfigurationKeys.FLOW_RUN_IMMEDIATELY,
        ConfigUtils.getString((flowSpec).getConfig(), ConfigurationKeys.FLOW_RUN_IMMEDIATELY, "false"));
    // todo : we should check if the job schedule is a valid cron schedule
    if (flowSpecProperties.containsKey(ConfigurationKeys.JOB_SCHEDULE_KEY) && StringUtils.isNotBlank(
flowSpecProperties.getProperty(ConfigurationKeys.JOB_SCHEDULE_KEY))) { jobConfig.setProperty(ConfigurationKeys.JOB_SCHEDULE_KEY, flowSpecProperties.getProperty(ConfigurationKeys.JOB_SCHEDULE_KEY)); } // Note: the default values for missing flow name/group are different than the ones above to easily identify where // the values are not present initially jobConfig.setProperty(ConfigurationKeys.FLOW_NAME_KEY, flowSpecProperties.getProperty(ConfigurationKeys.FLOW_NAME_KEY, "<<missing flow name>>")); jobConfig.setProperty(ConfigurationKeys.FLOW_GROUP_KEY, flowSpecProperties.getProperty(ConfigurationKeys.FLOW_GROUP_KEY, "<<missing flow group>>")); return jobConfig; } /** * A Gobblin job to be scheduled. */ @DisallowConcurrentExecution @Slf4j public static class GobblinServiceJob extends BaseGobblinJob implements InterruptableJob { private static final Logger _log = LoggerFactory.getLogger(GobblinServiceJob.class); @Override public void executeImpl(JobExecutionContext context) throws JobExecutionException { try { // TODO: move this out of the try clause after location NPE source JobDetail jobDetail = context.getJobDetail(); _log.info("Starting FlowSpec " + jobDetail.getKey()); JobDataMap dataMap = jobDetail.getJobDataMap(); JobScheduler jobScheduler = (JobScheduler) dataMap.get(JOB_SCHEDULER_KEY); Properties jobProps = (Properties) dataMap.get(PROPERTIES_KEY); JobListener jobListener = (JobListener) dataMap.get(JOB_LISTENER_KEY); // Obtain trigger timestamp from trigger to pass to jobProps Trigger trigger = context.getTrigger(); // THIS current event has already fired if this method is called, so it now exists in <previousFireTime> long triggerTimeMillis = utcDateAsUTCEpochMillis(trigger.getPreviousFireTime()); // If the trigger is a reminder type event then utilize the trigger time saved in job properties rather than the // actual firing time if (jobDetail.getKey().getName().contains("reminder")) { String preservedConsensusEventTime = jobProps.getProperty( 
ConfigurationKeys.SCHEDULER_PRESERVED_CONSENSUS_EVENT_TIME_MILLIS_KEY, "0"); String expectedReminderTime = jobProps.getProperty( ConfigurationKeys.SCHEDULER_EXPECTED_REMINDER_TIME_MILLIS_KEY, "0"); _log.info(jobSchedulerTracePrefixBuilder(jobProps) + "triggerTime: {} expectedReminderTime: {} - Reminder job" + " triggered by scheduler at {}", preservedConsensusEventTime, expectedReminderTime, triggerTimeMillis); // TODO: add a metric if expected reminder time far exceeds system time jobProps.setProperty(ConfigurationKeys.ORCHESTRATOR_TRIGGER_EVENT_TIME_MILLIS_KEY, preservedConsensusEventTime); } else { jobProps.setProperty(ConfigurationKeys.ORCHESTRATOR_TRIGGER_EVENT_TIME_MILLIS_KEY, String.valueOf(triggerTimeMillis)); _log.info(jobSchedulerTracePrefixBuilder(jobProps) + "triggerTime: {} nextTriggerTime: {} - Job triggered by " + "scheduler", triggerTimeMillis, utcDateAsUTCEpochMillis(trigger.getNextFireTime())); } jobScheduler.runJob(jobProps, jobListener); } catch (Throwable t) { if (t instanceof NullPointerException) { log.warn("NullPointerException encountered while trying to execute flow. Message: " + t.getMessage(), t); } throw new JobExecutionException(t); } finally { scheduledFlows.mark(); } } @Override public void interrupt() throws UnableToInterruptJobException { log.info("Job was interrupted"); } } /** * This class is responsible for running non-scheduled jobs. 
*/ class NonScheduledJobRunner implements Runnable { private final URI specUri; private final Properties jobConfig; private final JobListener jobListener; private final boolean removeSpec; public NonScheduledJobRunner(URI uri, boolean removeSpec, Properties jobConfig, JobListener jobListener) { this.specUri = uri; this.jobConfig = jobConfig; this.jobListener = jobListener; this.removeSpec = removeSpec; } @Override public void run() { try { GobblinServiceJobScheduler.this.runJob(this.jobConfig, this.jobListener); if (flowCatalog.isPresent() && removeSpec) { Object syncObject = GobblinServiceJobScheduler.this.flowCatalog.get().getSyncObject(specUri.toString()); if (syncObject != null) { // if the sync object does not exist, this job must be set to run due to job submission at service restart synchronized (syncObject) { while (!GobblinServiceJobScheduler.this.flowCatalog.get().exists(specUri)) { syncObject.wait(); } } } GobblinServiceJobScheduler.this.flowCatalog.get().remove(specUri, new Properties(), false); GobblinServiceJobScheduler.this.scheduledFlowSpecs.remove(specUri.toString()); GobblinServiceJobScheduler.this.lastUpdatedTimeForFlowSpec.remove(specUri.toString()); } } catch (JobException je) { _log.error("Failed to run job " + this.jobConfig.getProperty(ConfigurationKeys.JOB_NAME_KEY), je); } catch (InterruptedException e) { _log.error("Failed to delete the spec " + specUri, e); } finally { nonScheduledFlows.mark(); } } } }
3,887
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/utils/HelixUtils.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.modules.utils;

import java.io.UnsupportedEncodingException;
import java.net.InetAddress;
import java.net.URLDecoder;
import java.net.URLEncoder;
import java.net.UnknownHostException;

import org.apache.helix.Criteria;
import org.apache.helix.HelixManager;
import org.apache.helix.HelixManagerFactory;
import org.apache.helix.InstanceType;
import org.apache.helix.PropertyKey;
import org.apache.helix.manager.zk.ZKHelixManager;
import org.apache.helix.model.HelixConfigScope;
import org.apache.helix.model.LiveInstance;
import org.apache.helix.model.Message;
import org.apache.helix.tools.ClusterSetup;
import org.slf4j.Logger;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableMap;
import com.linkedin.data.DataMap;
import com.linkedin.restli.common.HttpStatus;
import com.linkedin.restli.server.RestLiServiceException;
import com.typesafe.config.Config;

import lombok.extern.slf4j.Slf4j;

import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.service.ServiceConfigKeys;
import org.apache.gobblin.util.ConfigUtils;


/**
 * Static helpers for Helix cluster setup, leader discovery, and instance naming used by the
 * Gobblin service.
 */
@Alpha
@Slf4j
public class HelixUtils {
  // Separator between the configured instance-name prefix and the URL-encoded service URL suffix.
  public static final String HELIX_INSTANCE_NAME_SEPARATOR = "@";

  /***
   * Build a Helix Manager (Helix Controller instance).
   *
   * @param helixInstanceName the Helix Instance name.
   * @param helixClusterName the Helix Cluster name.
   * @param zkConnectionString the ZooKeeper connection string.
   * @return HelixManager
   */
  public static HelixManager buildHelixManager(String helixInstanceName, String helixClusterName,
      String zkConnectionString) {
    return HelixManagerFactory.getZKHelixManager(helixClusterName, helixInstanceName, InstanceType.CONTROLLER,
        zkConnectionString);
  }

  /**
   * Create a Helix cluster for the Gobblin Cluster application.
   *
   * @param zkConnectionString the ZooKeeper connection string
   * @param clusterName the Helix cluster name
   */
  public static void createGobblinHelixCluster(String zkConnectionString, String clusterName) {
    createGobblinHelixCluster(zkConnectionString, clusterName, true);
  }

  /**
   * Create a Helix cluster for the Gobblin Cluster application.
   *
   * @param zkConnectionString the ZooKeeper connection string
   * @param clusterName the Helix cluster name
   * @param overwrite true to overwrite exiting cluster, false to reuse existing cluster
   */
  public static void createGobblinHelixCluster(String zkConnectionString, String clusterName, boolean overwrite) {
    ClusterSetup clusterSetup = new ClusterSetup(zkConnectionString);
    // Create the cluster and overwrite if it already exists
    clusterSetup.addCluster(clusterName, overwrite);
    // Helix 0.6.x requires a configuration property to have the form key=value.
    String autoJoinConfig = ZKHelixManager.ALLOW_PARTICIPANT_AUTO_JOIN + "=true";
    clusterSetup.setConfig(HelixConfigScope.ConfigScopeProperty.CLUSTER, clusterName, autoJoinConfig);
  }

  /**
   * Get a Helix instance name.
   *
   * @param namePrefix a prefix of Helix instance names
   * @param instanceId an integer instance ID
   * @return a Helix instance name that is a concatenation of the given prefix and instance ID
   */
  public static String getHelixInstanceName(String namePrefix, int instanceId) {
    return namePrefix + "_" + instanceId;
  }

  /**
   * Broadcast a user-defined message to all session-specific recipients of the given instance type.
   * Logs an error if no participant received the message.
   */
  @VisibleForTesting
  public static void sendUserDefinedMessage(String messageSubType, String messageVal, String messageId,
      InstanceType instanceType, HelixManager helixManager, Logger logger) {
    Criteria criteria = new Criteria();
    criteria.setInstanceName("%");
    criteria.setResource("%");
    criteria.setPartition("%");
    criteria.setPartitionState("%");
    criteria.setRecipientInstanceType(instanceType);
    criteria.setSessionSpecific(true);

    Message message = new Message(Message.MessageType.USER_DEFINE_MSG.toString(), messageId);
    message.setMsgSubType(messageSubType);
    message.setAttribute(Message.Attributes.INNER_MESSAGE, messageVal);
    message.setMsgState(Message.MessageState.NEW);
    message.setTgtSessionId("*");

    int messagesSent = helixManager.getMessagingService().send(criteria, message);
    if (messagesSent == 0) {
      logger.error(String.format("Failed to send the %s message to the participants", message));
    }
  }

  /**
   * Extract and URL-decode the service URL embedded in a Helix instance name built by
   * {@link #buildHelixInstanceName(Config, String)}, or return null if the name carries no URL.
   */
  private static String getUrlFromHelixInstanceName(String helixInstanceName) {
    if (!helixInstanceName.contains(HELIX_INSTANCE_NAME_SEPARATOR)) {
      return null;
    } else {
      String url = helixInstanceName.substring(helixInstanceName.indexOf(HELIX_INSTANCE_NAME_SEPARATOR) + 1);
      try {
        return URLDecoder.decode(url, "UTF-8");
      } catch (UnsupportedEncodingException e) {
        throw new RuntimeException("Failed to decode URL from helix instance name", e);
      }
    }
  }

  /**
   * Look up the current controller leader and return the service URL embedded in its instance name,
   * or null if no leader is currently elected or its name carries no URL.
   */
  private static String getLeaderUrl(HelixManager helixManager) {
    PropertyKey key = helixManager.getHelixDataAccessor().keyBuilder().controllerLeader();
    LiveInstance leader = helixManager.getHelixDataAccessor().getProperty(key);
    // getProperty returns null when no leader is elected (e.g. during startup or a leadership
    // transition); guard here so callers see a null URL instead of an NPE.
    if (leader == null) {
      return null;
    }
    return getUrlFromHelixInstanceName(leader.getInstanceName());
  }

  /**
   * If this host is not the leader, throw a {@link RestLiServiceException}, and include the URL of the leader host in
   * the message and in the errorDetails under the key {@link ServiceConfigKeys#LEADER_URL}.
   */
  public static void throwErrorIfNotLeader(Optional<HelixManager> helixManager) {
    if (helixManager.isPresent() && !helixManager.get().isLeader()) {
      String leaderUrl = getLeaderUrl(helixManager.get());
      if (leaderUrl == null) {
        throw new RuntimeException("Request sent to slave node but could not get leader node URL");
      }
      RestLiServiceException exception = new RestLiServiceException(HttpStatus.S_400_BAD_REQUEST,
          "Request must be sent to leader node at URL " + leaderUrl);
      exception.setErrorDetails(new DataMap(ImmutableMap.of(ServiceConfigKeys.LEADER_URL, leaderUrl)));
      throw exception;
    }
  }

  /**
   * Build helix instance name by getting {@link org.apache.gobblin.service.ServiceConfigKeys#HELIX_INSTANCE_NAME_KEY}
   * and appending the host, port, and service name with a separator
   */
  public static String buildHelixInstanceName(Config config, String defaultInstanceName) {
    String helixInstanceName = ConfigUtils
        .getString(config, ServiceConfigKeys.HELIX_INSTANCE_NAME_KEY, defaultInstanceName);

    String url = "";
    try {
      url = ConfigUtils.getString(config, ServiceConfigKeys.SERVICE_URL_PREFIX, "https://")
          + InetAddress.getLocalHost().getHostName() + ":"
          + ConfigUtils.getString(config, ServiceConfigKeys.SERVICE_PORT, "") + "/"
          + ConfigUtils.getString(config, ServiceConfigKeys.SERVICE_NAME, "");
      url = HELIX_INSTANCE_NAME_SEPARATOR + URLEncoder.encode(url, "UTF-8");
    } catch (UnknownHostException | UnsupportedEncodingException e) {
      // Best effort: fall back to the bare instance name when host/encoding lookup fails.
      log.warn("Failed to construct helix instance name", e);
    }

    return helixInstanceName + url;
  }
}
3,888
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/utils/SharedFlowMetricsSingleton.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.modules.utils;

import com.codahale.metrics.Meter;
import com.codahale.metrics.MetricRegistry;
import com.google.common.base.Optional;
import com.google.common.collect.Maps;
import com.typesafe.config.Config;
import java.net.URI;
import java.util.Map;
import javax.inject.Inject;
import javax.inject.Singleton;
import lombok.Data;
import lombok.Setter;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.metrics.ContextAwareGauge;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.RootMetricContext;
import org.apache.gobblin.metrics.ServiceMetricNames;
import org.apache.gobblin.runtime.api.Spec;
import org.apache.gobblin.service.modules.orchestration.Orchestrator;
import org.apache.gobblin.util.ConfigUtils;


/**
 * Class to store flow related metrics shared between the {@link Orchestrator} and {@link DagManager} so we can easily
 * track all flow compilations and skipped flows handled between the two in a common place.
 */
@Singleton
@Data
public class SharedFlowMetricsSingleton {
  protected final MetricContext metricContext;
  // Concurrent map: the singleton is documented as shared between the Orchestrator and DagManager,
  // which may read/update gauge state from different threads.
  private Map<URI, FlowCompiledState> flowGaugeStateBySpecUri = Maps.newConcurrentMap();
  private Optional<Meter> skippedFlowsMeter;

  /** Mutable holder for the last compilation outcome of one flow spec; read by a registered gauge. */
  @Setter
  public static class FlowCompiledState {
    private CompiledState state = CompiledState.UNKNOWN;
  }

  /** Numeric encoding of a compilation outcome, exported as the gauge value. */
  public enum CompiledState {
    FAILED(-1),
    UNKNOWN(0),
    SUCCESSFUL(1),
    SKIPPED(2);

    public int value;

    CompiledState(int value) {
      this.value = value;
    }
  }

  @Inject
  public SharedFlowMetricsSingleton(Config config) {
    this.metricContext = Instrumented.getMetricContext(ConfigUtils.configToState(config),
        SharedFlowMetricsSingleton.class);
    this.skippedFlowsMeter = Optional.of(metricContext.contextAwareMeter(ServiceMetricNames.SKIPPED_FLOWS));
  }

  /**
   * Adds a new FlowGauge to the metric context if one does not already exist for this flow spec
   */
  public void addFlowGauge(Spec spec, Config flowConfig, String flowGroup, String flowName) {
    // Only register the metric of flows that are scheduled, run once flows should not be tracked indefinitely
    if (!flowGaugeStateBySpecUri.containsKey(spec.getUri())
        && flowConfig.hasPath(ConfigurationKeys.JOB_SCHEDULE_KEY)) {
      String flowCompiledGaugeName =
          MetricRegistry.name(ServiceMetricNames.GOBBLIN_SERVICE_PREFIX, flowGroup, flowName,
              ServiceMetricNames.COMPILED);
      flowGaugeStateBySpecUri.put(spec.getUri(), new FlowCompiledState());
      ContextAwareGauge<Integer> gauge = RootMetricContext.get()
          .newContextAwareGauge(flowCompiledGaugeName,
              () -> flowGaugeStateBySpecUri.get(spec.getUri()).state.value);
      RootMetricContext.get().register(flowCompiledGaugeName, gauge);
    }
  }

  /**
   * Updates the flowgauge related to the spec if the gauge is being tracked for the flow
   * @param spec FlowSpec to be updated
   * @param state desired state to set the gauge
   */
  public void conditionallyUpdateFlowGaugeSpecState(Spec spec, CompiledState state) {
    // Single lookup instead of containsKey + get; safe against concurrent removal as well.
    FlowCompiledState gaugeState = flowGaugeStateBySpecUri.get(spec.getUri());
    if (gaugeState != null) {
      gaugeState.setState(state);
    }
  }
}
3,889
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/utils/FlowCompilationValidationHelper.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.modules.utils;

import com.google.common.base.Optional;
import com.typesafe.config.Config;
import java.io.IOException;
import java.util.Map;
import lombok.Data;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.metrics.event.EventSubmitter;
import org.apache.gobblin.metrics.event.TimingEvent;
import org.apache.gobblin.runtime.api.FlowSpec;
import org.apache.gobblin.runtime.api.Spec;
import org.apache.gobblin.service.modules.flow.SpecCompiler;
import org.apache.gobblin.service.modules.flowgraph.Dag;
import org.apache.gobblin.service.modules.orchestration.TimingEventUtils;
import org.apache.gobblin.service.modules.orchestration.UserQuotaManager;
import org.apache.gobblin.service.modules.spec.JobExecutionPlan;
import org.apache.gobblin.service.monitoring.FlowStatusGenerator;
import org.apache.gobblin.util.ConfigUtils;


/**
 * Helper class with functionality meant to be re-used between the DagManager and Orchestrator when launching
 * executions of a flow spec. In the common case, the Orchestrator receives a flow to orchestrate, performs necessary
 * validations, and forwards the execution responsibility to the DagManager. The DagManager's responsibility is to
 * carry out any flow action requests. However, with launch executions now being stored in the DagActionStateStore, on
 * restart or leadership change the DagManager has to perform validations before executing any launch actions the
 * previous leader was unable to complete. Rather than duplicating the code or introducing a circular dependency between
 * the DagManager and Orchestrator, this class is utilized to store the common functionality. It is stateful,
 * requiring all stateful pieces to be passed as input from the caller upon instantiating the helper.
 * Note: We expect further refactoring to be done to the DagManager in later stage of multi-active development, so we do
 * not attempt major reorganization as abstractions may change.
 */
@Slf4j
@Data
public final class FlowCompilationValidationHelper {
  private final SharedFlowMetricsSingleton sharedFlowMetricsSingleton;
  private final SpecCompiler specCompiler;
  private final UserQuotaManager quotaManager;
  private final Optional<EventSubmitter> eventSubmitter;
  private final FlowStatusGenerator flowStatusGenerator;
  private final boolean isFlowConcurrencyEnabled;

  /**
   * For a given a flowSpec, verifies that an execution is allowed (in case there is an ongoing execution) and the
   * flowspec can be compiled. If the pre-conditions hold, then a JobExecutionPlan is constructed and returned to the
   * caller.
   * @param flowSpec
   * @param optionalFlowExecutionId for scheduled (non-ad-hoc) flows, to pass the ID "laundered" via the DB;
   *                                see: {@link MysqlMultiActiveLeaseArbiter javadoc section titled
   *                                `Database event_timestamp laundering`}
   * @return jobExecutionPlan dag if one can be constructed for the given flowSpec
   */
  public Optional<Dag<JobExecutionPlan>> createExecutionPlanIfValid(FlowSpec flowSpec,
      Optional<String> optionalFlowExecutionId) throws IOException, InterruptedException {
    Config flowConfig = flowSpec.getConfig();
    String flowGroup = flowConfig.getString(ConfigurationKeys.FLOW_GROUP_KEY);
    String flowName = flowConfig.getString(ConfigurationKeys.FLOW_NAME_KEY);

    //Wait for the SpecCompiler to become healthy.
    specCompiler.awaitHealthy();

    Optional<TimingEvent> flowCompilationTimer =
        this.eventSubmitter.transform(submitter -> new TimingEvent(submitter, TimingEvent.FlowTimings.FLOW_COMPILED));
    Optional<Dag<JobExecutionPlan>> jobExecutionPlanDagOptional =
        validateAndHandleConcurrentExecution(flowConfig, flowSpec, flowGroup, flowName);
    Map<String, String> flowMetadata = TimingEventUtils.getFlowMetadata(flowSpec);

    // Absent: another instance is running and concurrency is disallowed (FLOW_FAILED already emitted).
    if (!jobExecutionPlanDagOptional.isPresent()) {
      return Optional.absent();
    }

    // Present-but-empty (or null) dag: compilation failed; emit the compile-failed event.
    if (jobExecutionPlanDagOptional.get() == null || jobExecutionPlanDagOptional.get().isEmpty()) {
      populateFlowCompilationFailedEventMessage(eventSubmitter, flowSpec, flowMetadata);
      return Optional.absent();
    }

    addFlowExecutionIdIfAbsent(flowMetadata, optionalFlowExecutionId, jobExecutionPlanDagOptional.get());
    if (flowCompilationTimer.isPresent()) {
      flowCompilationTimer.get().stop(flowMetadata);
    }
    return jobExecutionPlanDagOptional;
  }

  /**
   * Checks if flowSpec disallows concurrent executions, and if so then checks if another instance of the flow is
   * already running and emits a FLOW FAILED event. Otherwise, this check passes.
   * @return Optional<Dag<JobExecutionPlan>> if caller allowed to execute flow and compile spec, else absent Optional
   * @throws IOException
   */
  public Optional<Dag<JobExecutionPlan>> validateAndHandleConcurrentExecution(Config flowConfig, Spec spec,
      String flowGroup, String flowName) throws IOException {
    boolean allowConcurrentExecution = ConfigUtils.getBoolean(flowConfig,
        ConfigurationKeys.FLOW_ALLOW_CONCURRENT_EXECUTION, isFlowConcurrencyEnabled);

    Dag<JobExecutionPlan> jobExecutionPlanDag = specCompiler.compileFlow(spec);

    if (isExecutionPermitted(flowStatusGenerator, flowName, flowGroup, allowConcurrentExecution)) {
      return Optional.of(jobExecutionPlanDag);
    } else {
      log.warn("Another instance of flowGroup: {}, flowName: {} running; Skipping flow execution since "
          + "concurrent executions are disabled for this flow.", flowGroup, flowName);
      sharedFlowMetricsSingleton.conditionallyUpdateFlowGaugeSpecState(spec,
          SharedFlowMetricsSingleton.CompiledState.SKIPPED);
      Instrumented.markMeter(sharedFlowMetricsSingleton.getSkippedFlowsMeter());
      if (!isScheduledFlow((FlowSpec) spec)) {
        // For ad-hoc flow, we might already increase quota, we need to decrease here
        // Parameterized node type (was a raw Dag.DagNode) to avoid an unchecked call to releaseQuota.
        for (Dag.DagNode<JobExecutionPlan> dagNode : jobExecutionPlanDag.getStartNodes()) {
          quotaManager.releaseQuota(dagNode);
        }
      }

      // Send FLOW_FAILED event
      Map<String, String> flowMetadata = TimingEventUtils.getFlowMetadata((FlowSpec) spec);
      flowMetadata.put(TimingEvent.METADATA_MESSAGE, "Flow failed because another instance is running and concurrent "
          + "executions are disabled. Set flow.allowConcurrentExecution to true in the flow spec to change this behaviour.");
      if (eventSubmitter.isPresent()) {
        new TimingEvent(eventSubmitter.get(), TimingEvent.FlowTimings.FLOW_FAILED).stop(flowMetadata);
      }
      return Optional.absent();
    }
  }

  /**
   * Check if a FlowSpec instance is allowed to run.
   *
   * @param flowName
   * @param flowGroup
   * @param allowConcurrentExecution
   * @return true if the {@link FlowSpec} allows concurrent executions or if no other instance of the flow is currently RUNNING.
   */
  private boolean isExecutionPermitted(FlowStatusGenerator flowStatusGenerator, String flowName, String flowGroup,
      boolean allowConcurrentExecution) {
    return allowConcurrentExecution || !flowStatusGenerator.isFlowRunning(flowName, flowGroup);
  }

  /**
   * Abstraction used to populate the message of and emit a FlowCompileFailed event for the Orchestrator.
   * @param spec
   * @param flowMetadata
   */
  public static void populateFlowCompilationFailedEventMessage(Optional<EventSubmitter> eventSubmitter,
      Spec spec, Map<String, String> flowMetadata) {
    // For scheduled flows, we do not insert the flowExecutionId into the FlowSpec. As a result, if the flow
    // compilation fails (i.e. we are unable to find a path), the metadata will not have flowExecutionId.
    // In this case, the current time is used as the flow executionId.
    flowMetadata.putIfAbsent(TimingEvent.FlowEventConstants.FLOW_EXECUTION_ID_FIELD,
        Long.toString(System.currentTimeMillis()));

    String message = "Flow was not compiled successfully.";
    if (!((FlowSpec) spec).getCompilationErrors().isEmpty()) {
      message = message + " Compilation errors encountered: " + ((FlowSpec) spec).getCompilationErrors();
    }
    flowMetadata.put(TimingEvent.METADATA_MESSAGE, message);

    Optional<TimingEvent> flowCompileFailedTimer = eventSubmitter.transform(submitter ->
        new TimingEvent(submitter, TimingEvent.FlowTimings.FLOW_COMPILE_FAILED));

    if (flowCompileFailedTimer.isPresent()) {
      flowCompileFailedTimer.get().stop(flowMetadata);
    }
  }

  /**
   * If it is a scheduled flow (which does not have flowExecutionId in the FlowSpec) and the flow compilation is
   * successful, retrieve flowExecutionId from the JobSpec.
   */
  public static void addFlowExecutionIdIfAbsent(Map<String,String> flowMetadata,
      Dag<JobExecutionPlan> jobExecutionPlanDag) {
    addFlowExecutionIdIfAbsent(flowMetadata, Optional.absent(), jobExecutionPlanDag);
  }

  /**
   * If it is a scheduled flow (which does not have flowExecutionId in the FlowSpec) and the flow compilation is
   * successful, add a flowExecutionId using the optional parameter if it exists otherwise retrieve it from the JobSpec.
   */
  public static void addFlowExecutionIdIfAbsent(Map<String,String> flowMetadata,
      Optional<String> optionalFlowExecutionId, Dag<JobExecutionPlan> jobExecutionPlanDag) {
    if (optionalFlowExecutionId.isPresent()) {
      flowMetadata.putIfAbsent(TimingEvent.FlowEventConstants.FLOW_EXECUTION_ID_FIELD, optionalFlowExecutionId.get());
    }
    flowMetadata.putIfAbsent(TimingEvent.FlowEventConstants.FLOW_EXECUTION_ID_FIELD,
        jobExecutionPlanDag.getNodes().get(0).getValue().getJobSpec().getConfigAsProperties().getProperty(
            ConfigurationKeys.FLOW_EXECUTION_ID_KEY));
  }

  /**
   * Return true if the spec contains a schedule, false otherwise.
   */
  public static boolean isScheduledFlow(FlowSpec spec) {
    return spec.getConfigAsProperties().containsKey(ConfigurationKeys.JOB_SCHEDULE_KEY);
  }
}
3,890
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/template_catalog/UpdatableFSFlowTemplateCatalog.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.modules.template_catalog;

import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.locks.ReadWriteLock;

import com.typesafe.config.Config;

import org.apache.gobblin.runtime.api.JobTemplate;
import org.apache.gobblin.runtime.api.SpecNotFoundException;
import org.apache.gobblin.service.modules.template.FlowTemplate;


/**
 * {@link FSFlowTemplateCatalog} that keeps a cache of flow and job templates. It provides a public method clearTemplates()
 * for other classes to invoke, so that other classes can reload the job templates before they make a change. E.g. The
 * {@link org.apache.gobblin.service.monitoring.FsFlowGraphMonitor} has a configuration to clear the template cache before updating the flowgraph.
 */
public class UpdatableFSFlowTemplateCatalog extends FSFlowTemplateCatalog {
  // Caches keyed by template directory URI; concurrent so reads do not need the shared lock.
  private final Map<URI, FlowTemplate> flowTemplateMap = new ConcurrentHashMap<>();
  private final Map<URI, List<JobTemplate>> jobTemplateMap = new ConcurrentHashMap<>();
  // Lock shared with the flowgraph monitor so cache clearing can be coordinated with graph updates.
  private final ReadWriteLock rwLock;

  public UpdatableFSFlowTemplateCatalog(Config sysConfig, ReadWriteLock rwLock) throws IOException {
    super(sysConfig);
    this.rwLock = rwLock;
  }

  /**
   * Returns the cached {@link FlowTemplate} for the given directory URI, loading and caching it on a miss.
   */
  @Override
  public FlowTemplate getFlowTemplate(URI flowTemplateDirURI)
      throws SpecNotFoundException, JobTemplate.TemplateException, IOException, URISyntaxException {
    // Not computeIfAbsent: the loader throws checked exceptions. A racing duplicate load is benign.
    FlowTemplate flowTemplate = flowTemplateMap.get(flowTemplateDirURI);
    if (flowTemplate == null) {
      flowTemplate = super.getFlowTemplate(flowTemplateDirURI);
      flowTemplateMap.put(flowTemplateDirURI, flowTemplate);
    }
    return flowTemplate;
  }

  /**
   * Returns the cached {@link JobTemplate}s for the given flow directory URI, loading and caching them on a miss.
   */
  @Override
  public List<JobTemplate> getJobTemplatesForFlow(URI flowTemplateDirURI)
      throws IOException, SpecNotFoundException, JobTemplate.TemplateException, URISyntaxException {
    List<JobTemplate> jobTemplates = jobTemplateMap.get(flowTemplateDirURI);
    if (jobTemplates == null) {
      jobTemplates = super.getJobTemplatesForFlow(flowTemplateDirURI);
      log.info("Loading flow template directly from {} and caching it.", flowTemplateDirURI);
      jobTemplateMap.put(flowTemplateDirURI, jobTemplates);
    }
    return jobTemplates;
  }

  /**
   * Clear cached templates so they will be reloaded next time {@link #getFlowTemplate(URI)} is called.
   */
  public void clearTemplates() {
    this.rwLock.writeLock().lock();
    try {
      log.info("Change detected, clearing flow template cache.");
      flowTemplateMap.clear();
      jobTemplateMap.clear();
    } finally {
      // Always release the write lock, even if clearing throws; otherwise the shared lock would leak.
      this.rwLock.writeLock().unlock();
    }
  }
}
3,891
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/template_catalog/ObservingFSFlowEdgeTemplateCatalog.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.modules.template_catalog;

import java.io.IOException;
import java.net.URI;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.ReadWriteLock;

import com.typesafe.config.Config;

import lombok.extern.slf4j.Slf4j;

import org.apache.gobblin.util.filesystem.PathAlterationListener;
import org.apache.gobblin.util.filesystem.PathAlterationListenerAdaptor;


/**
 * An {@link UpdatableFSFlowTemplateCatalog} that watches its root path with a
 * {@link org.apache.gobblin.util.filesystem.PathAlterationListener}. Whenever a change is detected the
 * template cache is invalidated, so the next {@link #getFlowTemplate(URI)} call re-reads the files,
 * and the flowgraph is flagged for refresh.
 */
@Slf4j
public class ObservingFSFlowEdgeTemplateCatalog extends UpdatableFSFlowTemplateCatalog {
  // Set when the path watcher observes a change; consumed (and reset) via getAndSetShouldRefreshFlowGraph.
  private AtomicBoolean flowGraphRefreshRequested = new AtomicBoolean(false);

  public ObservingFSFlowEdgeTemplateCatalog(Config sysConfig, ReadWriteLock rwLock) throws IOException {
    super(sysConfig, rwLock);
  }

  @Override
  protected PathAlterationListener getListener() {
    return new FlowCatalogPathAlterationListener();
  }

  @Override
  protected void startUp() throws IOException {
    // Guard clause: nothing to start when no detector was configured.
    if (this.pathAlterationDetector == null) {
      return;
    }
    this.pathAlterationDetector.start();
  }

  @Override
  public boolean getAndSetShouldRefreshFlowGraph(boolean value) {
    return this.flowGraphRefreshRequested.getAndSet(value);
  }

  /**
   * Path listener that, on any detected create/update, drops the cached flow/job templates and
   * marks the flowgraph as needing a refresh.
   */
  private class FlowCatalogPathAlterationListener extends PathAlterationListenerAdaptor {
    @Override
    public void onCheckDetectedChange() {
      clearTemplates();
      getAndSetShouldRefreshFlowGraph(true);
    }
  }
}
3,892
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/template_catalog/FlowCatalogWithTemplates.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.modules.template_catalog;

import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.List;

import org.apache.gobblin.runtime.api.JobTemplate;
import org.apache.gobblin.runtime.api.SpecNotFoundException;
import org.apache.gobblin.service.modules.template.FlowTemplate;


/**
 * A catalog that supports loading {@link FlowTemplate}s.
 */
public interface FlowCatalogWithTemplates {
  /**
   * Resolve the {@link FlowTemplate} registered under the given {@link URI}.
   *
   * @param uri identifier of the flow template to load.
   * @throws SpecNotFoundException if no {@link JobTemplate} with the given {@link URI} can be found.
   */
  FlowTemplate getFlowTemplate(URI uri)
      throws SpecNotFoundException, IOException, JobTemplate.TemplateException, URISyntaxException;

  /**
   * Resolve every {@link JobTemplate} belonging to the flow stored under the given directory.
   *
   * @param flowTemplateDirURI URI of the flow template directory.
   * @return the {@link JobTemplate}s of the flow identified by {@code flowTemplateDirURI}.
   * @throws IOException
   * @throws SpecNotFoundException
   * @throws JobTemplate.TemplateException
   */
  List<JobTemplate> getJobTemplatesForFlow(URI flowTemplateDirURI)
      throws IOException, SpecNotFoundException, JobTemplate.TemplateException, URISyntaxException;
}
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/template_catalog/FSFlowTemplateCatalog.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.service.modules.template_catalog; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.net.URI; import java.net.URISyntaxException; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; import com.google.common.base.Charsets; import com.typesafe.config.Config; import com.typesafe.config.ConfigFactory; import org.apache.gobblin.annotation.Alpha; import org.apache.gobblin.configuration.ConfigurationKeys; import org.apache.gobblin.runtime.api.JobTemplate; import org.apache.gobblin.runtime.api.SpecNotFoundException; import org.apache.gobblin.runtime.job_catalog.FSJobCatalog; import org.apache.gobblin.runtime.template.HOCONInputStreamJobTemplate; import org.apache.gobblin.service.ServiceConfigKeys; import org.apache.gobblin.service.modules.template.FlowTemplate; import org.apache.gobblin.service.modules.template.HOCONInputStreamFlowTemplate; import org.apache.gobblin.util.PathUtils; import static 
org.apache.gobblin.runtime.AbstractJobLauncher.GOBBLIN_JOB_TEMPLATE_KEY; /** * An implementation of a catalog for {@link FlowTemplate}s. Provides basic API for retrieving a {@link FlowTemplate} * from the catalog and for retrieving {@link JobTemplate}s that are part of a {@link FlowTemplate}. * The flow and job configuration files are assumed to have the following path structure: * <p> /path/to/template/catalog/flowName/flow.conf </p> * <p> /path/to/template/catalog/flowName/jobs/job1.(job|template) </p> * <p> /path/to/template/catalog/flowName/jobs/job2.(job|template) </p> * * Avoid confusing with {@link org.apache.gobblin.runtime.spec_catalog.FlowCatalog} which is a catalog for * {@link org.apache.gobblin.runtime.api.FlowSpec}. * * Note that any exceptions thrown here should be propagated into called level for handling, since the handling * of exceptions while loading/resolving template is subject to caller logic. */ @Alpha public class FSFlowTemplateCatalog extends FSJobCatalog implements FlowCatalogWithTemplates { public static final String JOBS_DIR_NAME = "jobs"; public static final String FLOW_CONF_FILE_NAME = "flow.conf"; public static final List<String> JOB_FILE_EXTENSIONS = Arrays.asList(".job", ".template"); protected static final String FS_SCHEME = "FS"; /** * Initialize the FlowCatalog * @param sysConfig that must contain the fully qualified path of the flow template catalog * @throws IOException */ public FSFlowTemplateCatalog(Config sysConfig) throws IOException { super(sysConfig.withValue(ConfigurationKeys.JOB_CONFIG_FILE_GENERAL_PATH_KEY, sysConfig.getValue(ServiceConfigKeys.TEMPLATE_CATALOGS_FULLY_QUALIFIED_PATH_KEY))); } /** * * @param flowTemplateDirURI URI of the flow template directory * @return a {@link FlowTemplate} * @throws SpecNotFoundException * @throws JobTemplate.TemplateException * @throws IOException */ public FlowTemplate getFlowTemplate(URI flowTemplateDirURI) throws SpecNotFoundException, JobTemplate.TemplateException, IOException, 
URISyntaxException { if (!validateTemplateURI(flowTemplateDirURI)) { throw new JobTemplate.TemplateException(String.format("The FlowTemplate %s is not valid", flowTemplateDirURI)); } String templateCatalogDir = this.sysConfig.getString(ServiceConfigKeys.TEMPLATE_CATALOGS_FULLY_QUALIFIED_PATH_KEY); // path of uri is location of template file relative to the job configuration root directory Path templateDirPath = PathUtils.mergePaths(new Path(templateCatalogDir), new Path(flowTemplateDirURI.getPath())); Path templateFullPath = PathUtils.mergePaths(templateDirPath, new Path(FLOW_CONF_FILE_NAME)); FileSystem fs = FileSystem.get(templateFullPath.toUri(), new Configuration()); try (InputStream is = fs.open(templateFullPath)) { return new HOCONInputStreamFlowTemplate(is, flowTemplateDirURI, this); } } /** * * @param flowTemplateDirURI Relative URI of the flow template directory * @return a list of {@link JobTemplate}s for a given flow identified by its {@link URI}. * @throws IOException * @throws SpecNotFoundException * @throws JobTemplate.TemplateException */ public List<JobTemplate> getJobTemplatesForFlow(URI flowTemplateDirURI) throws IOException, SpecNotFoundException, JobTemplate.TemplateException, URISyntaxException { PathFilter extensionFilter = file -> { for (String extension : JOB_FILE_EXTENSIONS) { if (file.getName().endsWith(extension)) { return true; } } return false; }; if (!validateTemplateURI(flowTemplateDirURI)) { throw new JobTemplate.TemplateException(String.format("The FlowTemplate %s is not valid", flowTemplateDirURI)); } List<JobTemplate> jobTemplates = new ArrayList<>(); String templateCatalogDir = this.sysConfig.getString(ServiceConfigKeys.TEMPLATE_CATALOGS_FULLY_QUALIFIED_PATH_KEY); //Flow templates are located under templateCatalogDir/flowEdgeTemplates Path flowTemplateDirPath = PathUtils.mergePaths(new Path(templateCatalogDir), new Path(flowTemplateDirURI)); //Job files (with extension .job) are located under 
templateCatalogDir/flowEdgeTemplates/jobs directory. Path jobFilePath = new Path(flowTemplateDirPath, JOBS_DIR_NAME); FileSystem fs = FileSystem.get(jobFilePath.toUri(), new Configuration()); for (FileStatus fileStatus : fs.listStatus(jobFilePath, extensionFilter)) { Config jobConfig = loadHoconFileAtPath(fileStatus.getPath()); //Check if the .job file has an underlying job template if (jobConfig.hasPath(GOBBLIN_JOB_TEMPLATE_KEY)) { URI jobTemplateRelativeUri = new URI(jobConfig.getString(GOBBLIN_JOB_TEMPLATE_KEY)); if (!jobTemplateRelativeUri.getScheme().equals(FS_SCHEME)) { throw new RuntimeException( "Expected scheme " + FS_SCHEME + " got unsupported scheme " + flowTemplateDirURI.getScheme()); } Path fullJobTemplatePath = PathUtils.mergePaths(new Path(templateCatalogDir), new Path(jobTemplateRelativeUri)); jobConfig = jobConfig.withFallback(loadHoconFileAtPath(fullJobTemplatePath)); } jobTemplates.add(new HOCONInputStreamJobTemplate(jobConfig, fileStatus.getPath().toUri(), this)); } return jobTemplates; } private Config loadHoconFileAtPath(Path filePath) throws IOException { try (InputStream is = fs.open(filePath)) { return ConfigFactory.parseReader(new InputStreamReader(is, Charsets.UTF_8)); } } /** * Determine if an URI of a jobTemplate or a FlowTemplate is valid. * @param flowURI The given job/flow template * @return true if the URI is valid. */ private boolean validateTemplateURI(URI flowURI) { if (!this.sysConfig.hasPath(ServiceConfigKeys.TEMPLATE_CATALOGS_FULLY_QUALIFIED_PATH_KEY)) { log.error("Missing config " + ServiceConfigKeys.TEMPLATE_CATALOGS_FULLY_QUALIFIED_PATH_KEY); return false; } if (!flowURI.getScheme().equals(FS_SCHEME)) { log.error( "Expected scheme " + FS_SCHEME + " got unsupported scheme " + flowURI.getScheme()); return false; } return true; } public boolean getAndSetShouldRefreshFlowGraph(boolean value) { return false; } }
3,894
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/troubleshooter/MySqlMultiContextIssueRepository.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.modules.troubleshooter;

import java.lang.reflect.Type;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Timestamp;
import java.time.Duration;
import java.time.Instant;
import java.time.ZoneOffset;
import java.time.ZonedDateTime;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Objects;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Stopwatch;
import com.google.common.util.concurrent.AbstractIdleService;
import com.google.gson.reflect.TypeToken;
import com.typesafe.config.Config;

import javax.inject.Inject;
import javax.inject.Singleton;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Getter;
import lombok.NoArgsConstructor;
import lombok.extern.slf4j.Slf4j;

import org.apache.gobblin.runtime.troubleshooter.Issue;
import org.apache.gobblin.runtime.troubleshooter.IssueSeverity;
import org.apache.gobblin.runtime.troubleshooter.MultiContextIssueRepository;
import org.apache.gobblin.runtime.troubleshooter.TroubleshooterException;
import org.apache.gobblin.runtime.util.GsonUtils;
import org.apache.gobblin.service.ServiceConfigKeys;
import org.apache.gobblin.service.modules.db.ServiceDatabaseProvider;


/**
 * A {@link MultiContextIssueRepository} backed by a MySQL {@code issues} table.
 *
 * <p>Issues are keyed by {@code (context_id, code)} and stored with a table-wide auto-increment
 * {@code position} column that preserves insertion order. A background task, started by
 * {@link #startUp()}, periodically prunes issues that are too old or exceed a configured count.
 *
 * <p>All timestamps are persisted as epoch-millis-derived {@link Timestamp}s and read back as UTC.
 */
@Singleton
@Slf4j
public class MySqlMultiContextIssueRepository extends AbstractIdleService implements MultiContextIssueRepository {

  // Supplies the pooled DataSource for all repository operations.
  private final ServiceDatabaseProvider databaseProvider;
  // Cleanup cadence and retention limits; see nested Configuration class.
  private final Configuration configuration;
  // Single-threaded scheduler driving the periodic cleanup; created in startUp().
  private ScheduledExecutorService scheduledExecutor;

  /** Convenience constructor using default {@link Configuration} values. */
  public MySqlMultiContextIssueRepository(ServiceDatabaseProvider databaseProvider) {
    this(databaseProvider, MySqlMultiContextIssueRepository.Configuration.builder().build());
  }

  /**
   * @param databaseProvider source of JDBC connections; must not be null
   * @param configuration cleanup/retention settings; must not be null
   */
  @Inject
  public MySqlMultiContextIssueRepository(ServiceDatabaseProvider databaseProvider, Configuration configuration) {
    this.databaseProvider = Objects.requireNonNull(databaseProvider);
    this.configuration = Objects.requireNonNull(configuration);
  }

  /**
   * Returns all issues for the given context, ordered by insertion order ({@code position}).
   *
   * @param contextId identifier of the context whose issues to fetch; must not be null
   * @throws TroubleshooterException if the database read fails
   */
  @Override
  public List<Issue> getAll(String contextId)
      throws TroubleshooterException {
    Objects.requireNonNull(contextId, "contextId should not be null");

    String querySql = "select code, time, severity, summary, details, source_class, exception_class, properties "
        + "from issues where context_id = ? order by position";

    try (Connection connection = databaseProvider.getDatasource().getConnection();
        PreparedStatement statement = connection.prepareStatement(querySql)) {
      statement.setString(1, contextId);

      ArrayList<Issue> issues = new ArrayList<>();

      try (ResultSet results = statement.executeQuery()) {
        while (results.next()) {
          Issue.IssueBuilder issue = Issue.builder();
          issue.code(results.getString(1));
          // Stored as a SQL timestamp; re-interpret in UTC regardless of server/session zone.
          issue.time(ZonedDateTime.ofInstant(Instant.ofEpochMilli(results.getTimestamp(2).getTime()), ZoneOffset.UTC));
          issue.severity(IssueSeverity.valueOf(results.getString(3)));
          issue.summary(results.getString(4));
          issue.details(results.getString(5));
          issue.sourceClass(results.getString(6));
          issue.exceptionClass(results.getString(7));

          // Properties are persisted as a JSON map; null column means no properties were saved.
          String serializedProperties = results.getString(8);
          if (serializedProperties != null) {
            Type mapType = new TypeToken<HashMap<String, String>>() {
            }.getType();
            HashMap<String, String> properties = GsonUtils.GSON_WITH_DATE_HANDLING.fromJson(serializedProperties, mapType);
            issue.properties(properties);
          }

          issues.add(issue.build());
        }
      }
      return issues;
    } catch (SQLException e) {
      throw new TroubleshooterException("Cannot read issues from the database", e);
    }
  }

  /**
   * Upserts a single issue for the given context. Delegates to {@link #put(String, List)}.
   */
  @Override
  public void put(String contextId, Issue issue)
      throws TroubleshooterException {
    Objects.requireNonNull(contextId, "contextId should not be null");
    Objects.requireNonNull(issue, "issue should not be null");
    put(contextId, Collections.singletonList(issue));
  }

  /**
   * Upserts a batch of issues for the given context in one transaction.
   *
   * <p>Uses MySQL {@code replace into}, so an existing row with the same
   * {@code (context_id, code)} key is replaced (and gets a new {@code position}).
   *
   * <p>NOTE(review): on SQLException the transaction is abandoned without an explicit rollback;
   * this relies on connection close / pool reset to discard uncommitted work — confirm pool
   * behavior if connections are reused with autoCommit left disabled.
   *
   * @throws TroubleshooterException if the database write fails
   */
  @Override
  public void put(String contextId, List<Issue> issues)
      throws TroubleshooterException {
    Objects.requireNonNull(contextId, "contextId should not be null");
    Objects.requireNonNull(issues, "issues should not be null");

    String statementSql = "replace into issues (context_id, code, time, severity,summary,details,source_class,exception_class,properties) "
        + "values (?,?,?,?,?,?,?,?,?)";

    try (Connection connection = databaseProvider.getDatasource().getConnection();
        PreparedStatement statement = connection.prepareStatement(statementSql)) {
      // All rows commit atomically at the end of the loop.
      connection.setAutoCommit(false);

      for (Issue issue : issues) {
        statement.setString(1, contextId);
        statement.setString(2, issue.getCode());
        statement.setTimestamp(3, new Timestamp(issue.getTime().toInstant().toEpochMilli()));
        statement.setString(4, issue.getSeverity().toString());
        statement.setString(5, issue.getSummary());
        statement.setString(6, issue.getDetails());
        statement.setString(7, issue.getSourceClass());
        statement.setString(8, issue.getExceptionClass());

        // Serialize the optional properties map to JSON; null is stored as SQL NULL.
        String serializedProperties = null;
        if (issue.getProperties() != null) {
          serializedProperties = GsonUtils.GSON_WITH_DATE_HANDLING.toJson(issue.getProperties());
        }
        statement.setString(9, serializedProperties);
        statement.executeUpdate();
      }
      connection.commit();
    } catch (SQLException e) {
      throw new TroubleshooterException("Cannot save issue to the database", e);
    }
  }

  /**
   * Deletes the issue with the given code from the given context, if present.
   *
   * @throws TroubleshooterException if the database delete fails
   */
  @Override
  public void remove(String contextId, String issueCode)
      throws TroubleshooterException {
    Objects.requireNonNull(contextId, "contextId should not be null");
    Objects.requireNonNull(issueCode, "issueCode should not be null");

    String statementSql = "delete from issues where context_id=? and code=?";

    try (Connection connection = databaseProvider.getDatasource().getConnection();
        PreparedStatement statement = connection.prepareStatement(statementSql)) {
      statement.setString(1, contextId);
      statement.setString(2, issueCode);
      statement.executeUpdate();
    } catch (SQLException e) {
      throw new TroubleshooterException("Cannot remove issue from the database", e);
    }
  }

  /** Starts the periodic cleanup task (AbstractIdleService lifecycle hook). */
  @Override
  protected void startUp()
      throws Exception {
    scheduledExecutor = Executors.newScheduledThreadPool(1);
    // First run is delayed by one full interval; no cleanup happens at startup itself.
    scheduledExecutor.scheduleAtFixedRate(this::cleanupOldIssues, configuration.cleanupInterval.toMillis(),
        configuration.cleanupInterval.toMillis(), TimeUnit.MILLISECONDS);
  }

  /** Stops the periodic cleanup task (AbstractIdleService lifecycle hook). */
  @Override
  protected void shutDown()
      throws Exception {
    scheduledExecutor.shutdown();
  }

  // Applies both retention policies; failures are logged and swallowed so the scheduled
  // task is not cancelled by an exception.
  private void cleanupOldIssues() {
    try {
      deleteIssuesOlderThan(ZonedDateTime.now().minus(configuration.deleteIssuesOlderThan));
      deleteOldIssuesOverTheCount(configuration.maxIssuesToKeep);
    } catch (Exception ex) {
      log.warn("Failed to cleanup old issues", ex);
    }
  }

  /**
   * Deletes all issues whose timestamp is strictly before the given date (compared in UTC).
   */
  @VisibleForTesting
  public void deleteIssuesOlderThan(ZonedDateTime olderThanDate)
      throws SQLException {
    try (Connection connection = databaseProvider.getDatasource().getConnection();
        PreparedStatement statement = connection.prepareStatement("delete from issues where time < ?")) {

      Instant deleteBefore = olderThanDate.withZoneSameInstant(ZoneOffset.UTC).toInstant();
      statement.setTimestamp(1, new Timestamp(deleteBefore.toEpochMilli()));

      Stopwatch stopwatch = Stopwatch.createStarted();
      int deletedIssues = statement.executeUpdate();
      log.info("Deleted {} issues that are older than {} in {} ms", deletedIssues, deleteBefore,
          stopwatch.elapsed(TimeUnit.MILLISECONDS));
    }
  }

  /**
   * If the total issue count exceeds {@code maxIssuesToKeep}, deletes the oldest surplus
   * issues (smallest {@code position} values first).
   */
  @VisibleForTesting
  public void deleteOldIssuesOverTheCount(long maxIssuesToKeep)
      throws SQLException {
    try (Connection connection = databaseProvider.getDatasource().getConnection();
        PreparedStatement countQuery = connection.prepareStatement("select count(*) from issues");
        ResultSet resultSet = countQuery.executeQuery()) {
      resultSet.next();
      long totalIssueCount = resultSet.getLong(1);

      long issuesToRemove = totalIssueCount - maxIssuesToKeep;
      if (issuesToRemove <= 0) {
        return;
      }

      // position is a table-wide auto-increment field. older issues will have smaller position.
      try (PreparedStatement deleteStatement = connection
          .prepareStatement("delete from issues order by position limit ?")) {
        deleteStatement.setLong(1, issuesToRemove);

        Stopwatch stopwatch = Stopwatch.createStarted();
        int deletedIssues = deleteStatement.executeUpdate();
        log.info("Deleted {} issues to keep the total issue count under {} in {} ms", deletedIssues, maxIssuesToKeep,
            stopwatch.elapsed(TimeUnit.MILLISECONDS));
      }
    }
  }

  /**
   * Retention settings for the repository. Each field falls back to a
   * {@link ServiceConfigKeys} default when not present in the injected {@link Config}.
   */
  @Builder
  @Getter
  @AllArgsConstructor
  @NoArgsConstructor
  public static class Configuration {
    // How often the cleanup task runs.
    @Builder.Default
    private Duration cleanupInterval = ServiceConfigKeys.DEFAULT_MYSQL_ISSUE_REPO_CLEANUP_INTERVAL;
    // Hard cap on total rows kept in the issues table.
    @Builder.Default
    private long maxIssuesToKeep = ServiceConfigKeys.DEFAULT_MYSQL_ISSUE_REPO_MAX_ISSUES_TO_KEEP;
    // Age-based retention window.
    @Builder.Default
    private Duration deleteIssuesOlderThan = ServiceConfigKeys.DEFAULT_MYSQL_ISSUE_REPO_DELETE_ISSUES_OLDER_THAN;

    /** Builds a Configuration from the service config, overriding only keys that are present. */
    @Inject
    public Configuration(Config innerConfig) {
      this(); // see https://github.com/projectlombok/lombok/issues/1347

      if (innerConfig.hasPath(ServiceConfigKeys.MYSQL_ISSUE_REPO_CLEANUP_INTERVAL)) {
        cleanupInterval = innerConfig.getDuration(ServiceConfigKeys.MYSQL_ISSUE_REPO_CLEANUP_INTERVAL);
      }

      if (innerConfig.hasPath(ServiceConfigKeys.MYSQL_ISSUE_REPO_MAX_ISSUES_TO_KEEP)) {
        maxIssuesToKeep = innerConfig.getLong(ServiceConfigKeys.MYSQL_ISSUE_REPO_MAX_ISSUES_TO_KEEP);
      }

      if (innerConfig.hasPath(ServiceConfigKeys.MYSQL_ISSUE_REPO_DELETE_ISSUES_OLDER_THAN)) {
        deleteIssuesOlderThan = innerConfig.getDuration(ServiceConfigKeys.MYSQL_ISSUE_REPO_DELETE_ISSUES_OLDER_THAN);
      }
    }
  }
}
3,895
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/orchestration/MysqlUserQuotaManager.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.service.modules.orchestration; import com.codahale.metrics.Meter; import com.codahale.metrics.Timer; import java.io.IOException; import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; import java.util.Collection; import java.util.List; import com.google.common.annotations.VisibleForTesting; import com.google.inject.Inject; import com.typesafe.config.Config; import com.zaxxer.hikari.HikariDataSource; import javax.inject.Singleton; import javax.sql.DataSource; import lombok.extern.slf4j.Slf4j; import org.apache.gobblin.broker.SharedResourcesBrokerFactory; import org.apache.gobblin.configuration.ConfigurationKeys; import org.apache.gobblin.exception.QuotaExceededException; import org.apache.gobblin.metastore.MysqlDataSourceFactory; import org.apache.gobblin.runtime.metrics.RuntimeMetrics; import org.apache.gobblin.service.ServiceConfigKeys; import org.apache.gobblin.service.modules.flowgraph.Dag; import org.apache.gobblin.service.modules.spec.JobExecutionPlan; import org.apache.gobblin.util.ConfigUtils; /** * An implementation of {@link UserQuotaManager} that stores quota usage in mysql. 
*/ @Slf4j @Singleton public class MysqlUserQuotaManager extends AbstractUserQuotaManager { public final static String CONFIG_PREFIX= "MysqlUserQuotaManager"; public final MysqlQuotaStore quotaStore; public final RunningDagIdsStore runningDagIds; private Meter quotaExceedsRequests; private Meter failedQuotaCheck; @Inject public MysqlUserQuotaManager(Config config) throws IOException { super(config); log.info("Going to initialize mysqlUserQuotaManager"); Config quotaStoreConfig; if (config.hasPath(CONFIG_PREFIX)) { quotaStoreConfig = config.getConfig(CONFIG_PREFIX).withFallback(config); } else { throw new IOException("Please specify the config for MysqlUserQuotaManager"); } this.quotaStore = createQuotaStore(quotaStoreConfig); this.runningDagIds = createRunningDagStore(quotaStoreConfig); this.failedQuotaCheck = this.metricContext.contextAwareMeter(RuntimeMetrics.GOBBLIN_MYSQL_QUOTA_MANAGER_UNEXPECTED_ERRORS); this.quotaExceedsRequests = this.metricContext.contextAwareMeter(RuntimeMetrics.GOBBLIN_MYSQL_QUOTA_MANAGER_QUOTA_REQUESTS_EXCEEDED); } void addDagId(Connection connection, String dagId) throws IOException { this.runningDagIds.add(connection, dagId); } @Override boolean containsDagId(String dagId) throws IOException { return this.runningDagIds.contains(dagId); } boolean removeDagId(Connection connection, String dagId) throws IOException { return this.runningDagIds.remove(connection, dagId); } // This implementation does not need to update quota usage when the service restarts or it's leadership status changes public void init(Collection<Dag<JobExecutionPlan>> dags) { } @Override public void checkQuota(Collection<Dag.DagNode<JobExecutionPlan>> dagNodes) throws IOException { try (Connection connection = this.quotaStore.dataSource.getConnection(); Timer.Context context = metricContext.timer(RuntimeMetrics.GOBBLIN_MYSQL_QUOTA_MANAGER_TIME_TO_CHECK_QUOTA).time()) { connection.setAutoCommit(false); for (Dag.DagNode<JobExecutionPlan> dagNode : dagNodes) { QuotaCheck 
quotaCheck = increaseAndCheckQuota(connection, dagNode); if ((!quotaCheck.proxyUserCheck || !quotaCheck.requesterCheck || !quotaCheck.flowGroupCheck)) { connection.rollback(); quotaExceedsRequests.mark(); throw new QuotaExceededException(quotaCheck.requesterMessage); } } connection.commit(); } catch (SQLException e) { this.failedQuotaCheck.mark(); throw new IOException(e); } } int incrementJobCount(Connection connection, String user, CountType countType) throws IOException, SQLException { return this.quotaStore.increaseCount(connection, user, countType); } void decrementJobCount(Connection connection,String user, CountType countType) throws IOException, SQLException { this.quotaStore.decreaseCount(connection, user, countType); } protected QuotaCheck increaseAndCheckQuota(Connection connection, Dag.DagNode<JobExecutionPlan> dagNode) throws SQLException, IOException { QuotaCheck quotaCheck = new QuotaCheck(true, true, true, ""); StringBuilder requesterMessage = new StringBuilder(); // Dag is already being tracked, no need to double increment for retries and multihop flows if (containsDagId(DagManagerUtils.generateDagId(dagNode).toString())) { return quotaCheck; } else { addDagId(connection, DagManagerUtils.generateDagId(dagNode).toString()); } String proxyUser = ConfigUtils.getString(dagNode.getValue().getJobSpec().getConfig(), AzkabanProjectConfig.USER_TO_PROXY, null); String flowGroup = ConfigUtils.getString(dagNode.getValue().getJobSpec().getConfig(), ConfigurationKeys.FLOW_GROUP_KEY, ""); String specExecutorUri = DagManagerUtils.getSpecExecutorUri(dagNode); boolean proxyUserCheck; if (proxyUser != null && dagNode.getValue().getCurrentAttempts() <= 1) { int proxyQuotaIncrement = incrementJobCountAndCheckQuota(connection, DagManagerUtils.getUserQuotaKey(proxyUser, dagNode), getQuotaForUser(proxyUser), CountType.USER_COUNT); proxyUserCheck = proxyQuotaIncrement >= 0; // proxy user quota check succeeds quotaCheck.setProxyUserCheck(proxyUserCheck); if 
(!proxyUserCheck) { // add 1 to proxyUserIncrement since proxyQuotaIncrement is the count before the increment requesterMessage.append(String.format( "Quota exceeded for proxy user %s on executor %s : quota=%s, requests above quota=%d%n", proxyUser, specExecutorUri, getQuotaForUser(proxyUser), Math.abs(proxyQuotaIncrement) + 1 - getQuotaForUser(proxyUser))); } } String serializedRequesters = DagManagerUtils.getSerializedRequesterList(dagNode); boolean requesterCheck = true; if (dagNode.getValue().getCurrentAttempts() <= 1) { List<String> uniqueRequesters = DagManagerUtils.getDistinctUniqueRequesters(serializedRequesters); for (String requester : uniqueRequesters) { int userQuotaIncrement = incrementJobCountAndCheckQuota(connection, DagManagerUtils.getUserQuotaKey(requester, dagNode), getQuotaForUser(requester), CountType.REQUESTER_COUNT); boolean thisRequesterCheck = userQuotaIncrement >= 0; // user quota check succeeds requesterCheck = requesterCheck && thisRequesterCheck; quotaCheck.setRequesterCheck(requesterCheck); if (!thisRequesterCheck) { requesterMessage.append(String.format("Quota exceeded for requester %s on executor %s : quota=%s, requests above quota=%d%n. 
", requester, specExecutorUri, getQuotaForUser(requester), Math.abs(userQuotaIncrement) + 1 - getQuotaForUser(requester))); } } } boolean flowGroupCheck; if (dagNode.getValue().getCurrentAttempts() <= 1) { int flowGroupQuotaIncrement = incrementJobCountAndCheckQuota(connection, DagManagerUtils.getFlowGroupQuotaKey(flowGroup, dagNode), getQuotaForFlowGroup(flowGroup), CountType.FLOWGROUP_COUNT); flowGroupCheck = flowGroupQuotaIncrement >= 0; quotaCheck.setFlowGroupCheck(flowGroupCheck); if (!flowGroupCheck) { requesterMessage.append(String.format("Quota exceeded for flowgroup %s on executor %s : quota=%s, requests above quota=%d%n", flowGroup, specExecutorUri, getQuotaForFlowGroup(flowGroup), Math.abs(flowGroupQuotaIncrement) + 1 - getQuotaForFlowGroup(flowGroup))); } } quotaCheck.setRequesterMessage(requesterMessage.toString()); return quotaCheck; } protected int incrementJobCountAndCheckQuota(Connection connection, String key, int keyQuota, CountType countType) throws IOException, SQLException { int currentCount = incrementJobCount(connection, key, countType); if (currentCount >= keyQuota) { return -currentCount; } else { return currentCount; } } /** * Decrement the quota by one for the proxy user and requesters corresponding to the provided {@link Dag.DagNode}. 
* Returns true if the dag existed in the set of running dags and was removed successfully */ public boolean releaseQuota(Dag.DagNode<JobExecutionPlan> dagNode) throws IOException { Connection connection; try { connection = this.quotaStore.dataSource.getConnection(); connection.setAutoCommit(false); } catch (SQLException e) { throw new IOException(e); } try { boolean val = removeDagId(connection, DagManagerUtils.generateDagId(dagNode).toString()); if (!val) { return false; } String proxyUser = ConfigUtils.getString(dagNode.getValue().getJobSpec().getConfig(), AzkabanProjectConfig.USER_TO_PROXY, null); if (proxyUser != null) { String proxyUserKey = DagManagerUtils.getUserQuotaKey(proxyUser, dagNode); decrementJobCount(connection, proxyUserKey, CountType.USER_COUNT); } String serializedRequesters = DagManagerUtils.getSerializedRequesterList(dagNode); try { for (String requester : DagManagerUtils.getDistinctUniqueRequesters(serializedRequesters)) { String requesterKey = DagManagerUtils.getUserQuotaKey(requester, dagNode); decrementJobCount(connection, requesterKey, CountType.REQUESTER_COUNT); } } catch (IOException e) { log.error("Failed to release quota for requester list " + serializedRequesters, e); return false; } String flowGroup = ConfigUtils.getString(dagNode.getValue().getJobSpec().getConfig(), ConfigurationKeys.FLOW_GROUP_KEY, ""); decrementJobCount(connection, DagManagerUtils.getFlowGroupQuotaKey(flowGroup, dagNode), CountType.FLOWGROUP_COUNT); connection.commit(); } catch (SQLException ex) { throw new IOException(ex); } finally { try { connection.close(); } catch (SQLException ex) { throw new IOException(ex); } } return true; } @VisibleForTesting int getCount(String name, CountType countType) throws IOException { return this.quotaStore.getCount(name, countType); } /** * Creating an instance of MysqlQuotaStore. 
*/
  /**
   * Builds the MySQL-backed quota store using the table name from config
   * ({@link ServiceConfigKeys#QUOTA_STORE_DB_TABLE_KEY}) and the shared-broker data source.
   *
   * @param config service configuration supplying the table name and connection settings
   * @return a ready {@link MysqlQuotaStore} (its constructor creates the table if absent)
   * @throws IOException if the backing table cannot be created
   */
  protected MysqlQuotaStore createQuotaStore(Config config) throws IOException {
    String quotaStoreTableName = ConfigUtils.getString(config, ServiceConfigKeys.QUOTA_STORE_DB_TABLE_KEY,
        ServiceConfigKeys.DEFAULT_QUOTA_STORE_DB_TABLE);
    DataSource dataSource = MysqlDataSourceFactory.get(config, SharedResourcesBrokerFactory.getImplicitBroker());
    return new MysqlQuotaStore(dataSource, quotaStoreTableName);
  }

  /**
   * Builds the MySQL-backed store of currently-running dag IDs using the table name from config
   * ({@link ServiceConfigKeys#RUNNING_DAG_IDS_DB_TABLE_KEY}).
   *
   * @param config service configuration supplying the table name and connection settings
   * @return a ready {@link RunningDagIdsStore} (its constructor creates the table if absent)
   * @throws IOException if the backing table cannot be created
   */
  protected RunningDagIdsStore createRunningDagStore(Config config) throws IOException {
    String quotaStoreTableName = ConfigUtils.getString(config, ServiceConfigKeys.RUNNING_DAG_IDS_DB_TABLE_KEY,
        ServiceConfigKeys.DEFAULT_RUNNING_DAG_IDS_DB_TABLE);
    // Fix: removed a stray empty statement (double semicolon) that previously followed this call.
    DataSource dataSource = MysqlDataSourceFactory.get(config, SharedResourcesBrokerFactory.getImplicitBroker());
    return new RunningDagIdsStore(dataSource, quotaStoreTableName);
  }

  /**
   * MySQL-backed store of per-name (user / requester / flowGroup) running-job counts.
   * Each row keys a name to three integer counters. Reads use {@code SELECT ... FOR UPDATE}
   * so callers that pass an open transaction get row-level locking across the
   * read-then-modify sequence.
   */
  static class MysqlQuotaStore {
    protected final DataSource dataSource;
    final String tableName;
    // Per-instance SQL text: built in the constructor because the table name is configurable.
    private final String GET_USER_COUNT;
    private final String GET_REQUESTER_COUNT;
    private final String GET_FLOWGROUP_COUNT;
    private final String INCREASE_USER_COUNT_SQL;
    private final String INCREASE_REQUESTER_COUNT_SQL;
    private final String INCREASE_FLOW_COUNT_SQL;
    private final String DECREASE_USER_COUNT_SQL;
    private final String DECREASE_REQUESTER_COUNT_SQL;
    private final String DECREASE_FLOWGROUP_COUNT_SQL;
    private final String DELETE_USER_SQL;

    @edu.umd.cs.findbugs.annotations.SuppressFBWarnings("SQL_PREPARED_STATEMENT_GENERATED_FROM_NONCONSTANT_STRING")
    public MysqlQuotaStore(DataSource dataSource, String tableName) throws IOException {
      this.dataSource = dataSource;
      this.tableName = tableName;

      // FOR UPDATE: lock the row while a caller-managed transaction adjusts the count.
      GET_USER_COUNT = "SELECT user_count FROM " + tableName + " WHERE name = ? FOR UPDATE";
      GET_REQUESTER_COUNT = "SELECT requester_count FROM " + tableName + " WHERE name = ? FOR UPDATE";
      GET_FLOWGROUP_COUNT = "SELECT flowgroup_count FROM " + tableName + " WHERE name = ? FOR UPDATE";
      // Upsert: first occurrence of a name inserts the row with count 1, later ones increment.
      INCREASE_USER_COUNT_SQL = "INSERT INTO " + tableName + " (name, user_count) VALUES (?, 1) "
          + "ON DUPLICATE KEY UPDATE user_count=user_count+1";
      INCREASE_REQUESTER_COUNT_SQL = "INSERT INTO " + tableName + " (name, requester_count) VALUES (?, 1) "
          + "ON DUPLICATE KEY UPDATE requester_count=requester_count+1";
      INCREASE_FLOW_COUNT_SQL = "INSERT INTO " + tableName + " (name, flowgroup_count) VALUES (?, 1) "
          + "ON DUPLICATE KEY UPDATE flowgroup_count=flowgroup_count+1";
      // GREATEST(0, ...) clamps user/requester counts at zero; note the flowgroup decrement is
      // NOT clamped — presumably intentional so imbalances surface, but worth confirming.
      DECREASE_USER_COUNT_SQL = "UPDATE " + tableName + " SET user_count=GREATEST(0, user_count-1) WHERE name = ?";
      DECREASE_REQUESTER_COUNT_SQL = "UPDATE " + tableName + " SET requester_count=GREATEST(0, requester_count-1) WHERE name = ?";
      DECREASE_FLOWGROUP_COUNT_SQL = "UPDATE " + tableName + " SET flowgroup_count=flowgroup_count-1 WHERE name = ?";
      // Garbage-collects a row once both user and flowgroup counts have drained below 1.
      DELETE_USER_SQL = "DELETE FROM " + tableName + " WHERE name = ? AND user_count<1 AND flowgroup_count<1";

      //Increase the length of name as we include the executor uri in it
      String createQuotaTable = "CREATE TABLE IF NOT EXISTS " + tableName
          + " (name VARCHAR(500) CHARACTER SET latin1 NOT NULL, "
          + "user_count INT NOT NULL DEFAULT 0, requester_count INT NOT NULL DEFAULT 0, flowgroup_count INT NOT NULL DEFAULT 0, "
          + "PRIMARY KEY (name), " + "UNIQUE INDEX ind (name))";
      try (Connection connection = dataSource.getConnection();
          PreparedStatement createStatement = connection.prepareStatement(createQuotaTable)) {
        createStatement.executeUpdate();
      } catch (SQLException e) {
        // TODO: revisit use of connection test query following verification of successful connection pool migration:
        // If your driver supports JDBC4 we strongly recommend not setting this property. This is for "legacy" drivers
        // that do not support the JDBC4 Connection.isValid() API; see:
        // https://github.com/brettwooldridge/HikariCP#gear-configuration-knobs-baby
        log.warn("Failure in creating table {}. Validation query is set to {} Exception is {}", tableName,
            ((HikariDataSource) this.dataSource).getConnectionTestQuery(), e);
        throw new IOException(e);
      }
    }

    /**
     * Returns the count of {@code countType} for {@code name}, or -1 if the row does not exist.
     * NOTE(review): REQUESTER_COUNT is not handled here — it falls through to the
     * flowgroup query; confirm callers never request it via this method.
     */
    @VisibleForTesting
    int getCount(String name, CountType countType) throws IOException {
      String selectStatement = countType == CountType.USER_COUNT ? GET_USER_COUNT : GET_FLOWGROUP_COUNT;
      try (Connection connection = dataSource.getConnection();
          PreparedStatement queryStatement = connection.prepareStatement(selectStatement)) {
        queryStatement.setString(1, name);
        try (ResultSet rs = queryStatement.executeQuery()) {
          if (rs.next()) {
            return rs.getInt(1);
          } else {
            return -1;
          }
        }
      } catch (Exception e) {
        throw new IOException("failure retrieving count from user/flowGroup " + name, e);
      }
    }

    /**
     * Increments the counter of {@code countType} for {@code name} on the caller-supplied
     * connection (caller owns the transaction), returning the count observed BEFORE the
     * increment (0 if the row did not yet exist).
     *
     * @throws IOException if {@code countType} is unrecognized
     * @throws SQLException on database errors (propagated so the caller can roll back)
     */
    public int increaseCount(Connection connection, String name, CountType countType) throws IOException, SQLException {
      String selectStatement;
      String increaseStatement;
      switch (countType) {
        case USER_COUNT:
          selectStatement = GET_USER_COUNT;
          increaseStatement = INCREASE_USER_COUNT_SQL;
          break;
        case REQUESTER_COUNT:
          selectStatement = GET_REQUESTER_COUNT;
          increaseStatement = INCREASE_REQUESTER_COUNT_SQL;
          break;
        case FLOWGROUP_COUNT:
          selectStatement = GET_FLOWGROUP_COUNT;
          increaseStatement = INCREASE_FLOW_COUNT_SQL;
          break;
        default:
          throw new IOException("Invalid count type " + countType);
      }
      ResultSet rs = null;
      try (PreparedStatement statement1 = connection.prepareStatement(selectStatement);
          PreparedStatement statement2 = connection.prepareStatement(increaseStatement)) {
        statement1.setString(1, name);
        statement2.setString(1, name);
        // The SELECT ... FOR UPDATE locks the row before the upsert runs in the same transaction.
        rs = statement1.executeQuery();
        statement2.executeUpdate();
        if (rs != null && rs.next()) {
          return rs.getInt(1);
        } else {
          return 0;
        }
      } finally {
        if (rs != null) {
          rs.close();
        }
      }
    }

    /**
     * Decrements the counter of {@code countType} for {@code name} on the caller-supplied
     * connection and deletes the row when both user and flowgroup counts have drained.
     * Logs a warning if the pre-decrement count was already zero (indicates double-release).
     *
     * @throws IOException if {@code countType} is unrecognized
     * @throws SQLException on database errors (propagated so the caller can roll back)
     */
    public void decreaseCount(Connection connection, String name, CountType countType) throws IOException, SQLException {
      String selectStatement;
      String decreaseStatement;
      switch (countType) {
        case USER_COUNT:
          selectStatement = GET_USER_COUNT;
          decreaseStatement = DECREASE_USER_COUNT_SQL;
          break;
        case REQUESTER_COUNT:
          selectStatement = GET_REQUESTER_COUNT;
          decreaseStatement = DECREASE_REQUESTER_COUNT_SQL;
          break;
        case FLOWGROUP_COUNT:
          selectStatement = GET_FLOWGROUP_COUNT;
          decreaseStatement = DECREASE_FLOWGROUP_COUNT_SQL;
          break;
        default:
          throw new IOException("Invalid count type " + countType);
      }
      ResultSet rs = null;
      try (PreparedStatement statement1 = connection.prepareStatement(selectStatement);
          PreparedStatement statement2 = connection.prepareStatement(decreaseStatement);
          PreparedStatement statement3 = connection.prepareStatement(DELETE_USER_SQL)) {
        statement1.setString(1, name);
        statement2.setString(1, name);
        statement3.setString(1, name);
        rs = statement1.executeQuery();
        statement2.executeUpdate();
        statement3.executeUpdate();
        if (rs != null && rs.next() && rs.getInt(1) == 0) {
          log.warn("Decrement job count was called for " + name + " when the count was already zero/absent.");
        }
      } finally {
        if (rs != null) {
          rs.close();
        }
      }
    }
  }

  /**
   * MySQL-backed set of dag IDs currently running, used to deduplicate concurrent
   * executions across hosts. The dagId column is the primary key, so membership is exact.
   */
  static class RunningDagIdsStore {
    protected final DataSource dataSource;
    final String tableName;
    // Per-instance SQL text: built in the constructor because the table name is configurable.
    private final String CONTAINS_DAG_ID;
    private final String ADD_DAG_ID;
    private final String REMOVE_DAG_ID;

    @edu.umd.cs.findbugs.annotations.SuppressFBWarnings("SQL_PREPARED_STATEMENT_GENERATED_FROM_NONCONSTANT_STRING")
    public RunningDagIdsStore(DataSource dataSource, String tableName) throws IOException {
      this.dataSource = dataSource;
      this.tableName = tableName;

      CONTAINS_DAG_ID = "SELECT EXISTS(SELECT * FROM " + tableName + " WHERE dagId = ?)";
      ADD_DAG_ID = "INSERT INTO " + tableName + " (dagId) VALUES (?) ";
      REMOVE_DAG_ID = "DELETE FROM " + tableName + " WHERE dagId = ?";
      String createQuotaTable = "CREATE TABLE IF NOT EXISTS " + tableName
          + " (dagId VARCHAR(500) CHARACTER SET latin1 NOT NULL, "
          + "PRIMARY KEY (dagId), UNIQUE INDEX ind (dagId))";
      try (Connection connection = dataSource.getConnection();
          PreparedStatement createStatement = connection.prepareStatement(createQuotaTable)) {
        createStatement.executeUpdate();
      } catch (SQLException e) {
        throw new IOException("Failure creation table " + tableName, e);
      }
    }

    /**
     * Returns true if the DagID is already present in the running dag store.
     */
    @VisibleForTesting
    boolean contains(String dagId) throws IOException {
      try (Connection connection = dataSource.getConnection();
          PreparedStatement queryStatement = connection.prepareStatement(CONTAINS_DAG_ID)) {
        queryStatement.setString(1, dagId);
        try (ResultSet rs = queryStatement.executeQuery()) {
          // SELECT EXISTS(...) always yields exactly one row containing 0 or 1.
          rs.next();
          return rs.getBoolean(1);
        }
      } catch (Exception e) {
        throw new IOException("Could not find if the dag " + dagId + " is already running.", e);
      }
    }

    /**
     * Records {@code dagId} as running, on the caller-supplied connection/transaction.
     * A duplicate insert surfaces as an IOException (primary-key violation).
     */
    public void add(Connection connection, String dagId) throws IOException {
      try (PreparedStatement statement = connection.prepareStatement(ADD_DAG_ID)) {
        statement.setString(1, dagId);
        statement.executeUpdate();
      } catch (SQLException e) {
        throw new IOException("Failure adding dag " + dagId, e);
      }
    }

    /**
     * Removes {@code dagId} from the running set.
     *
     * @return true iff exactly one row was deleted (i.e. the dag was present)
     */
    public boolean remove(Connection connection, String dagId) throws IOException {
      try (PreparedStatement statement = connection.prepareStatement(REMOVE_DAG_ID)) {
        statement.setString(1, dagId);
        int count = statement.executeUpdate();
        return count == 1;
      } catch (SQLException e) {
        throw new IOException("Could not remove dag " + dagId, e);
      }
    }
  }
}
3,896
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/orchestration/DagManagerMetrics.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.modules.orchestration;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

import org.apache.commons.lang3.StringUtils;

import com.codahale.metrics.MetricRegistry;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.Maps;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;

import lombok.extern.slf4j.Slf4j;

import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.instrumented.GobblinMetricsKeys;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.metrics.ContextAwareCounter;
import org.apache.gobblin.metrics.ContextAwareGauge;
import org.apache.gobblin.metrics.ContextAwareMeter;
import org.apache.gobblin.metrics.GobblinMetrics;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.MetricTagNames;
import org.apache.gobblin.metrics.RootMetricContext;
import org.apache.gobblin.metrics.ServiceMetricNames;
import org.apache.gobblin.metrics.Tag;
import org.apache.gobblin.metrics.metric.filter.MetricNameRegexFilter;
import org.apache.gobblin.service.FlowId;
import org.apache.gobblin.service.RequesterService;
import org.apache.gobblin.service.ServiceRequester;
import org.apache.gobblin.service.modules.flowgraph.Dag;
import org.apache.gobblin.service.modules.spec.JobExecutionPlan;
import org.apache.gobblin.util.ConfigUtils;


/**
 * Metrics emitter for the DagManager: tracks flow outcomes (success/failure/SLA breaches)
 * globally, per flow group, and per spec executor, plus per-flow running-status gauges.
 */
@Slf4j
public class DagManagerMetrics {
  // Flow-state gauges keyed by FlowId.toString(); static so the gauge values survive
  // re-instantiation of this class (the gauges themselves live on the RootMetricContext).
  private static final Map<String, DagManager.FlowState> flowGauges = Maps.newConcurrentMap();

  // Meters representing the total number of flows in a given state
  private ContextAwareMeter allSuccessfulMeter;
  private ContextAwareMeter allFailedMeter;
  private ContextAwareMeter allRunningMeter;
  private ContextAwareMeter allSlaExceededMeter;
  private ContextAwareMeter allStartSlaExceededMeter;

  // Meters representing the flows in a given state per flowgroup
  private final Map<String, ContextAwareMeter> groupSuccessfulMeters = Maps.newConcurrentMap();
  private final Map<String, ContextAwareMeter> groupFailureMeters = Maps.newConcurrentMap();
  private final Map<String, ContextAwareMeter> groupStartSlaExceededMeters = Maps.newConcurrentMap();
  private final Map<String, ContextAwareMeter> groupSlaExceededMeters = Maps.newConcurrentMap();

  // Meters representing the jobs in a given state per executor
  // These metrics need to be invoked differently to account for automated retries and multihop scenarios.
  private final Map<String, ContextAwareMeter> executorSuccessMeters = Maps.newConcurrentMap();
  private final Map<String, ContextAwareMeter> executorFailureMeters = Maps.newConcurrentMap();
  private final Map<String, ContextAwareMeter> executorStartSlaExceededMeters = Maps.newConcurrentMap();
  private final Map<String, ContextAwareMeter> executorSlaExceededMeters = Maps.newConcurrentMap();
  private final Map<String, ContextAwareMeter> executorJobSentMeters = Maps.newConcurrentMap();

  // Metrics for unexpected flow handling failures
  private ContextAwareCounter failedLaunchEventsOnActivationCount;

  MetricContext metricContext;

  public DagManagerMetrics(MetricContext metricContext) {
    this.metricContext = metricContext;
  }

  public DagManagerMetrics() {
    // Create a new metric context for the DagManagerMetrics tagged appropriately
    List<Tag<?>> tags = new ArrayList<>();
    tags.add(new Tag<>(MetricTagNames.METRIC_BACKEND_REPRESENTATION, GobblinMetrics.MetricType.COUNTER));
    this.metricContext = Instrumented.getMetricContext(ConfigUtils.configToState(ConfigFactory.empty()), this.getClass(), tags);
  }

  /**
   * Registers the aggregate (service-wide) meters. Must be called before any of the
   * emit/increment methods that mark the "all*" meters.
   */
  public void activate() {
    if (this.metricContext != null) {
      allSuccessfulMeter = metricContext.contextAwareMeter(MetricRegistry.name(
          ServiceMetricNames.GOBBLIN_SERVICE_PREFIX, ServiceMetricNames.SUCCESSFUL_FLOW_METER));
      allFailedMeter = metricContext.contextAwareMeter(MetricRegistry.name(
          ServiceMetricNames.GOBBLIN_SERVICE_PREFIX, ServiceMetricNames.FAILED_FLOW_METER));
      allStartSlaExceededMeter = metricContext.contextAwareMeter(MetricRegistry.name(
          ServiceMetricNames.GOBBLIN_SERVICE_PREFIX, ServiceMetricNames.START_SLA_EXCEEDED_FLOWS_METER));
      allSlaExceededMeter = metricContext.contextAwareMeter(MetricRegistry.name(
          ServiceMetricNames.GOBBLIN_SERVICE_PREFIX, ServiceMetricNames.SLA_EXCEEDED_FLOWS_METER));
      // NOTE(review): the "running" meter is registered under the JOBS_SENT_TO_SPEC_EXECUTOR
      // name — looks intentional (marked from incrementJobsSentToExecutor) but worth confirming.
      allRunningMeter = metricContext.contextAwareMeter(MetricRegistry.name(
          ServiceMetricNames.GOBBLIN_SERVICE_PREFIX, ServiceMetricNames.JOBS_SENT_TO_SPEC_EXECUTOR));
      failedLaunchEventsOnActivationCount = metricContext.contextAwareCounter(
          MetricRegistry.name(ServiceMetricNames.GOBBLIN_SERVICE_PREFIX,
              ServiceMetricNames.DAG_MANAGER_FAILED_LAUNCH_EVENTS_ON_STARTUP_COUNT));
    }
  }

  /**
   * Registers a RunningStatus gauge for the flow on the RootMetricContext, initialized to
   * RUNNING. Skipped for flows that should not emit metrics (e.g. adhoc flows) and for
   * flows already registered.
   */
  public void registerFlowMetric(FlowId flowId, Dag<JobExecutionPlan> dag) {
    // Do not register flow-specific metrics for an adhoc flow
    if (!flowGauges.containsKey(flowId.toString()) && DagManagerUtils.shouldFlowOutputMetrics(dag)) {
      String flowStateGaugeName = MetricRegistry.name(ServiceMetricNames.GOBBLIN_SERVICE_PREFIX,
          flowId.getFlowGroup(), flowId.getFlowName(), ServiceMetricNames.RUNNING_STATUS);
      flowGauges.put(flowId.toString(), DagManager.FlowState.RUNNING);
      ContextAwareGauge<Integer> gauge = RootMetricContext
          .get().newContextAwareGauge(flowStateGaugeName, () -> flowGauges.get(flowId.toString()).value);
      RootMetricContext.get().register(flowStateGaugeName, gauge);
    }
  }

  /** Increments the running-jobs counters for the node's executor and its requesting users. */
  public void incrementRunningJobMetrics(Dag.DagNode<JobExecutionPlan> dagNode) {
    if (this.metricContext != null) {
      this.getRunningJobsCounterForExecutor(dagNode).inc();
      this.getRunningJobsCounterForUser(dagNode).forEach(ContextAwareCounter::inc);
    }
  }

  /** Decrements the running-jobs counters for the node's executor and its requesting users. */
  public void decrementRunningJobMetrics(Dag.DagNode<JobExecutionPlan> dagNode) {
    if (this.metricContext != null) {
      this.getRunningJobsCounterForExecutor(dagNode).dec();
      this.getRunningJobsCounterForUser(dagNode).forEach(ContextAwareCounter::dec);
    }
  }

  /**
   * Updates flowGauges with the appropriate state if the gauge is being tracked for the flow
   * @param flowId
   * @param state
   */
  public void conditionallyMarkFlowAsState(FlowId flowId, DagManager.FlowState state) {
    if (flowGauges.containsKey(flowId.toString())) {
      flowGauges.put(flowId.toString(), state);
    }
  }

  /** Marks the flow SUCCESSFUL on its gauge and marks the global and per-group success meters. */
  public void emitFlowSuccessMetrics(FlowId flowId) {
    if (this.metricContext != null) {
      this.conditionallyMarkFlowAsState(flowId, DagManager.FlowState.SUCCESSFUL);
      this.allSuccessfulMeter.mark();
      this.getGroupMeterForDag(flowId.getFlowGroup(), ServiceMetricNames.SUCCESSFUL_FLOW_METER, groupSuccessfulMeters).mark();
    }
  }

  /** Marks the flow FAILED on its gauge and marks the global and per-group failure meters. */
  public void emitFlowFailedMetrics(FlowId flowId) {
    if (this.metricContext != null) {
      this.conditionallyMarkFlowAsState(flowId, DagManager.FlowState.FAILED);
      this.allFailedMeter.mark();
      this.getGroupMeterForDag(flowId.getFlowGroup(), ServiceMetricNames.FAILED_FLOW_METER, groupFailureMeters).mark();
    }
  }

  /** Marks the flow FAILED on its gauge and marks the global and per-group SLA-exceeded meters. */
  public void emitFlowSlaExceededMetrics(FlowId flowId) {
    if (this.metricContext != null) {
      this.conditionallyMarkFlowAsState(flowId, DagManager.FlowState.FAILED);
      this.allSlaExceededMeter.mark();
      this.getGroupMeterForDag(flowId.getFlowGroup(), ServiceMetricNames.SLA_EXCEEDED_FLOWS_METER, groupSlaExceededMeters).mark();
    }
  }

  /** Marks the per-executor success meter for the node's spec executor. */
  public void incrementExecutorSuccess(Dag.DagNode<JobExecutionPlan> node) {
    if (this.metricContext != null) {
      this.getExecutorMeterForDag(node, ServiceMetricNames.SUCCESSFUL_FLOW_METER, executorSuccessMeters).mark();
    }
  }

  /** Marks the per-executor failure meter for the node's spec executor. */
  public void incrementExecutorFailed(Dag.DagNode<JobExecutionPlan> node) {
    if (this.metricContext != null) {
      this.getExecutorMeterForDag(node, ServiceMetricNames.FAILED_FLOW_METER, executorFailureMeters).mark();
    }
  }

  /** Marks the per-executor SLA-exceeded meter for the node's spec executor. */
  public void incrementExecutorSlaExceeded(Dag.DagNode<JobExecutionPlan> node) {
    if (this.metricContext != null) {
      this.getExecutorMeterForDag(node, ServiceMetricNames.SLA_EXCEEDED_FLOWS_METER, executorSlaExceededMeters).mark();
    }
  }

  /** Marks the per-executor jobs-sent meter and the aggregate running meter. */
  public void incrementJobsSentToExecutor(Dag.DagNode<JobExecutionPlan> node) {
    if (this.metricContext != null) {
      this.getExecutorMeterForDag(node, ServiceMetricNames.JOBS_SENT_TO_SPEC_EXECUTOR, executorJobSentMeters).mark();
      this.allRunningMeter.mark();
    }
  }

  // Increment the counts for start sla during the flow submission rather than cleanup to account for retries obfuscating this metric
  public void incrementCountsStartSlaExceeded(Dag.DagNode<JobExecutionPlan> node) {
    String flowGroup = node.getValue().getJobSpec().getConfig().getString(ConfigurationKeys.FLOW_GROUP_KEY);
    if (this.metricContext != null) {
      // Fix: the per-group meter was previously obtained but never marked (the returned meter
      // was discarded), so the group-level start-SLA metric never advanced. Mark it like the
      // aggregate and per-executor meters below.
      this.getGroupMeterForDag(flowGroup, ServiceMetricNames.START_SLA_EXCEEDED_FLOWS_METER, groupStartSlaExceededMeters).mark();
      this.allStartSlaExceededMeter.mark();
      this.getExecutorMeterForDag(node, ServiceMetricNames.START_SLA_EXCEEDED_FLOWS_METER, executorStartSlaExceededMeters).mark();
    }
  }

  // Increment the count for num of failed launches during leader activation
  public void incrementFailedLaunchCount() {
    if (this.metricContext != null) {
      this.failedLaunchEventsOnActivationCount.inc();
    }
  }

  /**
   * Returns the running-jobs counters for every user associated with the node: the proxy
   * user (if configured) plus each deserialized requester. Deserialization failures are
   * logged and the partial list is returned (best-effort).
   */
  private List<ContextAwareCounter> getRunningJobsCounterForUser(Dag.DagNode<JobExecutionPlan> dagNode) {
    Config configs = dagNode.getValue().getJobSpec().getConfig();
    String proxy = ConfigUtils.getString(configs, AzkabanProjectConfig.USER_TO_PROXY, null);
    List<ContextAwareCounter> counters = new ArrayList<>();
    if (StringUtils.isNotEmpty(proxy)) {
      counters.add(this.metricContext.contextAwareCounter(
          MetricRegistry.name(
              ServiceMetricNames.GOBBLIN_SERVICE_PREFIX, ServiceMetricNames.SERVICE_USERS, proxy)));
    }

    try {
      String serializedRequesters = DagManagerUtils.getSerializedRequesterList(dagNode);
      if (StringUtils.isNotEmpty(serializedRequesters)) {
        List<ServiceRequester> requesters = RequesterService.deserialize(serializedRequesters);
        for (ServiceRequester requester : requesters) {
          counters.add(this.metricContext.contextAwareCounter(MetricRegistry
              .name(ServiceMetricNames.GOBBLIN_SERVICE_PREFIX, ServiceMetricNames.SERVICE_USERS, requester.getName())));
        }
      }
    } catch (IOException e) {
      log.error("Error while fetching requester list.", e);
    }

    return counters;
  }

  /** Returns the running-flows counter named after the node's spec executor. */
  private ContextAwareCounter getRunningJobsCounterForExecutor(Dag.DagNode<JobExecutionPlan> dagNode) {
    return this.metricContext.contextAwareCounter(
        MetricRegistry.name(
            ServiceMetricNames.GOBBLIN_SERVICE_PREFIX,
            DagManagerUtils.getSpecExecutorName(dagNode),
            ServiceMetricNames.RUNNING_FLOWS_COUNTER));
  }

  /** Lazily creates and caches the per-flowgroup meter named {@code <prefix>.<group>.<meterName>}. */
  private ContextAwareMeter getGroupMeterForDag(String flowGroup, String meterName, Map<String, ContextAwareMeter> meterMap) {
    return meterMap.computeIfAbsent(flowGroup,
        group -> metricContext.contextAwareMeter(MetricRegistry.name(ServiceMetricNames.GOBBLIN_SERVICE_PREFIX, group, meterName)));
  }

  /**
   * Used to track metrics for different specExecutors to detect issues with the specExecutor itself
   * @param dagNode
   * @param meterName
   * @param meterMap
   * @return
   */
  private ContextAwareMeter getExecutorMeterForDag(Dag.DagNode<JobExecutionPlan> dagNode, String meterName, Map<String, ContextAwareMeter> meterMap) {
    String executorName = DagManagerUtils.getSpecExecutorName(dagNode);
    return meterMap.computeIfAbsent(executorName,
        executorUri -> metricContext.contextAwareMeter(MetricRegistry.name(ServiceMetricNames.GOBBLIN_SERVICE_PREFIX, executorUri, meterName)));
  }

  /** Regex filter matching the per-flow RunningStatus gauges registered by this class. */
  @VisibleForTesting
  protected static MetricNameRegexFilter getMetricsFilterForDagManager() {
    return new MetricNameRegexFilter(ServiceMetricNames.GOBBLIN_SERVICE_PREFIX + "\\..*\\." + ServiceMetricNames.RUNNING_STATUS);
  }

  public void cleanup() {
    // Add null check so that unit test will not affect each other when we de-active non-instrumented DagManager
    if (this.metricContext != null && this.metricContext.getTagMap().get(GobblinMetricsKeys.CLASS_META).equals(DagManager.class.getSimpleName())) {
      // The DMThread's metrics mappings follow the lifecycle of the DMThread itself and so are lost by DM deactivation-reactivation but the RootMetricContext is a (persistent) singleton.
      // To avoid IllegalArgumentException by the RMC preventing (re-)add of a metric already known, remove all metrics that a new DMThread thread would attempt to add (in DagManagerThread::initialize) whenever running post-re-enablement
      RootMetricContext.get().removeMatching(getMetricsFilterForDagManager());
    }
  }
}
3,897
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/orchestration/FlowTriggerHandler.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.modules.orchestration;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.typesafe.config.Config;
import java.io.IOException;
import java.time.LocalDateTime;
import java.time.ZoneId;
import java.time.format.DateTimeFormatter;
import java.time.temporal.ChronoUnit;
import java.util.Date;
import java.util.Locale;
import java.util.Properties;
import java.util.Random;
import javax.inject.Inject;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.metrics.ContextAwareCounter;
import org.apache.gobblin.metrics.ContextAwareMeter;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.ServiceMetricNames;
import org.apache.gobblin.runtime.api.DagActionStore;
import org.apache.gobblin.runtime.api.MultiActiveLeaseArbiter;
import org.apache.gobblin.runtime.api.MysqlMultiActiveLeaseArbiter;
import org.apache.gobblin.scheduler.JobScheduler;
import org.apache.gobblin.scheduler.SchedulerService;
import org.apache.gobblin.service.modules.scheduler.GobblinServiceJobScheduler;
import org.apache.gobblin.util.ConfigUtils;
import org.quartz.JobDataMap;
import org.quartz.JobDetail;
import org.quartz.JobKey;
import org.quartz.SchedulerException;
import org.quartz.Trigger;
import org.quartz.impl.JobDetailImpl;


/**
 * Handler used to coordinate multiple hosts with enabled schedulers to respond to flow action events. It uses the
 * {@link MysqlMultiActiveLeaseArbiter} to determine a single lease owner at a given time
 * for a flow action event. After acquiring the lease, it persists the flow action event to the {@link DagActionStore}
 * to be eventually acted upon by the host with the active DagManager. Once it has completed this action, it will mark
 * the lease as completed by calling the
 * {@link MysqlMultiActiveLeaseArbiter.recordLeaseSuccess()} method. Hosts that do not gain the lease for the event,
 * instead schedule a reminder using the {@link SchedulerService} to check back in on the previous lease owner's
 * completion status after the lease should expire to ensure the event is handled in failure cases.
 */
@Slf4j
public class FlowTriggerHandler {
  // Upper bound for the random jitter added to reminder wait times (see updatePropsInJobDataMap).
  private final int schedulerMaxBackoffMillis;
  // Fix: made final — shared static RNG should not be reassignable.
  private static final Random random = new Random();
  protected Optional<MultiActiveLeaseArbiter> multiActiveLeaseArbiter;
  protected SchedulerService schedulerService;
  protected Optional<DagActionStore> dagActionStore;
  private MetricContext metricContext;
  private ContextAwareMeter numFlowsSubmitted;

  // Lease-attempt outcome metrics
  private ContextAwareCounter leaseObtainedCount;
  private ContextAwareCounter leasedToAnotherStatusCount;
  private ContextAwareCounter noLongerLeasingStatusCount;
  private ContextAwareCounter jobDoesNotExistInSchedulerCount;
  private ContextAwareCounter failedToSetEventReminderCount;
  private ContextAwareMeter leasesObtainedDueToReminderCount;
  private ContextAwareMeter failedToRecordLeaseSuccessCount;
  private ContextAwareMeter recordedLeaseSuccessCount;

  @Inject
  public FlowTriggerHandler(Config config, Optional<MultiActiveLeaseArbiter> leaseDeterminationStore,
      SchedulerService schedulerService, Optional<DagActionStore> dagActionStore) {
    this.schedulerMaxBackoffMillis = ConfigUtils.getInt(config, ConfigurationKeys.SCHEDULER_MAX_BACKOFF_MILLIS_KEY,
        ConfigurationKeys.DEFAULT_SCHEDULER_MAX_BACKOFF_MILLIS);
    this.multiActiveLeaseArbiter = leaseDeterminationStore;
    this.schedulerService = schedulerService;
    this.dagActionStore = dagActionStore;
    this.metricContext = Instrumented.getMetricContext(new org.apache.gobblin.configuration.State(ConfigUtils.configToProperties(config)),
        this.getClass());
    this.numFlowsSubmitted = metricContext.contextAwareMeter(ServiceMetricNames.GOBBLIN_FLOW_TRIGGER_HANDLER_NUM_FLOWS_SUBMITTED);
    this.leaseObtainedCount = this.metricContext.contextAwareCounter(ServiceMetricNames.FLOW_TRIGGER_HANDLER_LEASE_OBTAINED_COUNT);
    this.leasedToAnotherStatusCount = this.metricContext.contextAwareCounter(ServiceMetricNames.FLOW_TRIGGER_HANDLER_LEASED_TO_ANOTHER_COUNT);
    this.noLongerLeasingStatusCount = this.metricContext.contextAwareCounter(ServiceMetricNames.FLOW_TRIGGER_HANDLER_NO_LONGER_LEASING_COUNT);
    this.jobDoesNotExistInSchedulerCount = this.metricContext.contextAwareCounter(ServiceMetricNames.FLOW_TRIGGER_HANDLER_JOB_DOES_NOT_EXIST_COUNT);
    this.failedToSetEventReminderCount = this.metricContext.contextAwareCounter(ServiceMetricNames.FLOW_TRIGGER_HANDLER_FAILED_TO_SET_REMINDER_COUNT);
    this.leasesObtainedDueToReminderCount = this.metricContext.contextAwareMeter(ServiceMetricNames.FLOW_TRIGGER_HANDLER_LEASES_OBTAINED_DUE_TO_REMINDER_COUNT);
    this.failedToRecordLeaseSuccessCount = this.metricContext.contextAwareMeter(ServiceMetricNames.FLOW_TRIGGER_HANDLER_FAILED_TO_RECORD_LEASE_SUCCESS_COUNT);
    this.recordedLeaseSuccessCount = this.metricContext.contextAwareMeter(ServiceMetricNames.FLOW_TRIGGER_HANDLER_RECORDED_LEASE_SUCCESS_COUNT);
  }

  /**
   * This method is used in the multi-active scheduler case for one or more hosts to respond to a flow action event
   * by attempting a lease for the flow event and processing the result depending on the status of the attempt.
   * @param jobProps
   * @param flowAction
   * @param eventTimeMillis
   * @param isReminderEvent
   * @throws IOException
   */
  public void handleTriggerEvent(Properties jobProps, DagActionStore.DagAction flowAction, long eventTimeMillis,
      boolean isReminderEvent) throws IOException {
    if (multiActiveLeaseArbiter.isPresent()) {
      MultiActiveLeaseArbiter.LeaseAttemptStatus leaseAttemptStatus = multiActiveLeaseArbiter.get().tryAcquireLease(
          flowAction, eventTimeMillis, isReminderEvent);
      // The flow action contained in the`LeaseAttemptStatus` from the lease arbiter contains an updated flow execution
      // id. From this point onwards, always use the newer version of the flow action to easily track the action through
      // orchestration and execution.
      if (leaseAttemptStatus instanceof MultiActiveLeaseArbiter.LeaseObtainedStatus) {
        if (isReminderEvent) {
          this.leasesObtainedDueToReminderCount.mark();
        }
        MultiActiveLeaseArbiter.LeaseObtainedStatus leaseObtainedStatus = (MultiActiveLeaseArbiter.LeaseObtainedStatus) leaseAttemptStatus;
        this.leaseObtainedCount.inc();
        if (persistFlowAction(leaseObtainedStatus)) {
          log.info("Successfully persisted lease: [{}, eventTimestamp: {}] ", leaseObtainedStatus.getFlowAction(),
              leaseObtainedStatus.getEventTimeMillis());
          this.recordedLeaseSuccessCount.mark();
          return;
        }
        this.failedToRecordLeaseSuccessCount.mark();
        // If persisting the flow action failed, then we set another trigger for this event to occur immediately to
        // re-attempt handling the event
        scheduleReminderForEvent(jobProps,
            new MultiActiveLeaseArbiter.LeasedToAnotherStatus(leaseObtainedStatus.getFlowAction(), 0L), eventTimeMillis);
        return;
      } else if (leaseAttemptStatus instanceof MultiActiveLeaseArbiter.LeasedToAnotherStatus) {
        this.leasedToAnotherStatusCount.inc();
        scheduleReminderForEvent(jobProps, (MultiActiveLeaseArbiter.LeasedToAnotherStatus) leaseAttemptStatus,
            eventTimeMillis);
        return;
      } else if (leaseAttemptStatus instanceof MultiActiveLeaseArbiter.NoLongerLeasingStatus) {
        this.noLongerLeasingStatusCount.inc();
        log.debug("Received type of leaseAttemptStatus: [{}, eventTimestamp: {}] ", leaseAttemptStatus.getClass().getName(),
            eventTimeMillis);
        return;
      }
      throw new RuntimeException(String.format("Received type of leaseAttemptStatus: %s not handled by this method",
          leaseAttemptStatus.getClass().getName()));
    } else {
      // Fix: dropped a no-op String.format call that had no format specifiers or arguments.
      throw new RuntimeException("Multi-active scheduler is not enabled so trigger event should not be "
          + "handled with this method.");
    }
  }

  // Called after obtaining a lease to persist the flow action to {@link DagActionStore} and mark the lease as done
  private boolean persistFlowAction(MultiActiveLeaseArbiter.LeaseObtainedStatus leaseStatus) {
    if (this.dagActionStore.isPresent() && this.multiActiveLeaseArbiter.isPresent()) {
      try {
        DagActionStore.DagAction flowAction = leaseStatus.getFlowAction();
        this.dagActionStore.get().addDagAction(flowAction.getFlowGroup(), flowAction.getFlowName(),
            flowAction.getFlowExecutionId(), flowAction.getFlowActionType());
        // If the flow action has been persisted to the {@link DagActionStore} we can close the lease
        this.numFlowsSubmitted.mark();
        return this.multiActiveLeaseArbiter.get().recordLeaseSuccess(leaseStatus);
      } catch (IOException e) {
        throw new RuntimeException(e);
      }
    } else {
      throw new RuntimeException("DagActionStore is " + (this.dagActionStore.isPresent() ? "" : "NOT") + " present. "
          + "Multi-Active scheduler is " + (this.multiActiveLeaseArbiter.isPresent() ? "" : "NOT") + " present. Both "
          + "should be enabled if this method is called.");
    }
  }

  /**
   * This method is used by {@link FlowTriggerHandler.handleTriggerEvent} to schedule a self-reminder to check on
   * the other participant's progress to finish acting on a flow action after the time the lease should expire.
   * @param jobProps
   * @param status used to extract event to be reminded for and the minimum time after which reminder should occur
   * @param triggerEventTimeMillis the event timestamp we were originally handling
   */
  private void scheduleReminderForEvent(Properties jobProps, MultiActiveLeaseArbiter.LeasedToAnotherStatus status,
      long triggerEventTimeMillis) {
    DagActionStore.DagAction flowAction = status.getFlowAction();
    JobKey origJobKey = new JobKey(jobProps.getProperty(ConfigurationKeys.JOB_NAME_KEY, "<<no job name>>"),
        jobProps.getProperty(ConfigurationKeys.JOB_GROUP_KEY, "<<no job group>>"));
    try {
      if (!this.schedulerService.getScheduler().checkExists(origJobKey)) {
        log.warn("Skipping setting a reminder for a job that does not exist in the scheduler. Key: {}", origJobKey);
        this.jobDoesNotExistInSchedulerCount.inc();
        return;
      }
      Trigger reminderTrigger = createAndScheduleReminder(origJobKey, status, triggerEventTimeMillis);
      // NOTE(review): the last log arg is the trigger's next fire time (a Date), not a millis
      // duration — the "in {} millis" wording of the message looks inaccurate; confirm intent.
      log.info("Flow Trigger Handler - [{}, eventTimestamp: {}] - SCHEDULED REMINDER for event {} in {} millis",
          flowAction, triggerEventTimeMillis, status.getEventTimeMillis(), reminderTrigger.getNextFireTime());
    } catch (SchedulerException e) {
      log.warn("Failed to add job reminder due to SchedulerException for job {} trigger event {}. Exception: {}",
          origJobKey, status.getEventTimeMillis(), e);
      this.failedToSetEventReminderCount.inc();
    }
  }

  /**
   * Create a new trigger with a `reminder` suffix that is set to fire at the minimum reminder wait time calculated from
   * the LeasedToAnotherStatus provided by the caller. The new trigger and job will store the original
   * triggerEventTimeMillis to revisit upon firing.
   * @param origJobKey
   * @param status
   * @param triggerEventTimeMillis
   * @return Trigger for reminder
   * @throws SchedulerException
   */
  protected Trigger createAndScheduleReminder(JobKey origJobKey, MultiActiveLeaseArbiter.LeasedToAnotherStatus status,
      long triggerEventTimeMillis) throws SchedulerException {
    // Generate a suffix to differentiate the reminder Job and Trigger from the original JobKey and Trigger, so we can
    // allow us to keep track of additional properties needed for reminder events (all triggers associated with one job
    // refer to the same set of jobProperties)
    String reminderSuffix = createSuffixForJobTrigger(status);
    JobKey reminderJobKey = new JobKey(origJobKey.getName() + reminderSuffix, origJobKey.getGroup());
    JobDetailImpl jobDetail = createJobDetailForReminderEvent(origJobKey, reminderJobKey, status);
    Trigger reminderTrigger = JobScheduler.createTriggerForJob(reminderJobKey, getJobPropertiesFromJobDetail(jobDetail),
        Optional.of(reminderSuffix));
    log.debug("Flow Trigger Handler - [{}, eventTimestamp: {}] - attempting to schedule reminder for event {} with "
        + "reminderJobKey {} and reminderTriggerKey {}", status.getFlowAction(), triggerEventTimeMillis,
        status.getEventTimeMillis(), reminderJobKey, reminderTrigger.getKey());
    this.schedulerService.getScheduler().scheduleJob(jobDetail, reminderTrigger);
    return reminderTrigger;
  }

  /**
   * Create suffix to add to end of flow name to differentiate reminder triggers from the original job schedule trigger
   * and ensure they are added to the scheduler.
   * @param leasedToAnotherStatus
   * @return
   */
  @VisibleForTesting
  public static String createSuffixForJobTrigger(MultiActiveLeaseArbiter.LeasedToAnotherStatus leasedToAnotherStatus) {
    return "reminder_for_" + leasedToAnotherStatus.getEventTimeMillis();
  }

  /**
   * Helper function used to extract JobDetail for job identified by the originalKey and update it be associated with
   * the event to revisit. It will update the jobKey to the reminderKey provides and the Properties map to
   * contain the cron scheduler for the reminder event and information about the event to revisit
   * @param originalKey
   * @param reminderKey
   * @param status
   * @return
   * @throws SchedulerException
   */
  protected JobDetailImpl createJobDetailForReminderEvent(JobKey originalKey, JobKey reminderKey,
      MultiActiveLeaseArbiter.LeasedToAnotherStatus status) throws SchedulerException {
    JobDetailImpl jobDetail = (JobDetailImpl) this.schedulerService.getScheduler().getJobDetail(originalKey);
    jobDetail.setKey(reminderKey);
    JobDataMap jobDataMap = jobDetail.getJobDataMap();
    jobDataMap = updatePropsInJobDataMap(jobDataMap, status, schedulerMaxBackoffMillis);
    jobDetail.setJobDataMap(jobDataMap);
    return jobDetail;
  }

  /** Extracts the job {@link Properties} stored in the JobDetail's data map. */
  public static Properties getJobPropertiesFromJobDetail(JobDetail jobDetail) {
    return (Properties) jobDetail.getJobDataMap().get(GobblinServiceJobScheduler.PROPERTIES_KEY);
  }

  /**
   * Updates the cronExpression, reminderTimestamp, originalEventTime values in the properties map of a JobDataMap
   * provided returns the updated JobDataMap to the user
   * @param jobDataMap
   * @param leasedToAnotherStatus
   * @param schedulerMaxBackoffMillis
   * @return
   */
  @VisibleForTesting
  public static JobDataMap updatePropsInJobDataMap(JobDataMap jobDataMap,
      MultiActiveLeaseArbiter.LeasedToAnotherStatus leasedToAnotherStatus, int schedulerMaxBackoffMillis) {
    Properties prevJobProps = (Properties) jobDataMap.get(GobblinServiceJobScheduler.PROPERTIES_KEY);
    // Add a small randomization to the minimum reminder wait time to avoid 'thundering herd' issue
    long delayPeriodMillis = leasedToAnotherStatus.getMinimumLingerDurationMillis()
        + random.nextInt(schedulerMaxBackoffMillis);
    String cronExpression = createCronFromDelayPeriod(delayPeriodMillis);
    prevJobProps.setProperty(ConfigurationKeys.JOB_SCHEDULE_KEY, cronExpression);
    // Saves the following properties in jobProps to retrieve when the trigger fires
    prevJobProps.setProperty(ConfigurationKeys.SCHEDULER_EXPECTED_REMINDER_TIME_MILLIS_KEY,
        String.valueOf(getUTCTimeFromDelayPeriod(delayPeriodMillis)));
    // Use the db laundered timestamp for the reminder to ensure consensus between hosts. Participant trigger timestamps
    // can differ between participants and be interpreted as a reminder for a distinct flow trigger which will cause
    // excess flows to be triggered by the reminder functionality.
    prevJobProps.setProperty(ConfigurationKeys.SCHEDULER_PRESERVED_CONSENSUS_EVENT_TIME_MILLIS_KEY,
        String.valueOf(leasedToAnotherStatus.getEventTimeMillis()));
    // Use this boolean to indicate whether this is a reminder event
    prevJobProps.setProperty(ConfigurationKeys.FLOW_IS_REMINDER_EVENT_KEY, String.valueOf(true));
    // Update job data map and reset it in jobDetail
    jobDataMap.put(GobblinServiceJobScheduler.PROPERTIES_KEY, prevJobProps);
    return jobDataMap;
  }

  /**
   * Create a cron expression for the time that is delay milliseconds in the future
   * @param delayPeriodMillis
   * @return String representing cron schedule
   */
  protected static String createCronFromDelayPeriod(long delayPeriodMillis) {
    LocalDateTime timeToScheduleReminder = getLocalDateTimeFromDelayPeriod(delayPeriodMillis);
    // TODO: investigate potentially better way of generating cron expression that does not make it US dependent
    DateTimeFormatter formatter = DateTimeFormatter.ofPattern("ss mm HH dd MM ? yyyy", Locale.US);
    return timeToScheduleReminder.format(formatter);
  }

  /**
   * Returns a LocalDateTime in UTC timezone that is delay milliseconds in the future
   */
  protected static LocalDateTime getLocalDateTimeFromDelayPeriod(long delayPeriodMillis) {
    LocalDateTime now = LocalDateTime.now(ZoneId.of("UTC"));
    return now.plus(delayPeriodMillis, ChronoUnit.MILLIS);
  }

  /**
   * Takes a given delay period in milliseconds and returns the number of millseconds since epoch from current time
   */
  protected static long getUTCTimeFromDelayPeriod(long delayPeriodMillis) {
    LocalDateTime localDateTime = getLocalDateTimeFromDelayPeriod(delayPeriodMillis);
    Date date = Date.from(localDateTime.atZone(ZoneId.of("UTC")).toInstant());
    return GobblinServiceJobScheduler.utcDateAsUTCEpochMillis(date);
  }
}
3,898
0
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/orchestration/DagStateStore.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.modules.orchestration;

import java.io.IOException;
import java.util.List;
import java.util.Set;

import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.service.modules.flowgraph.Dag;
import org.apache.gobblin.service.modules.spec.JobExecutionPlan;


/**
 * An interface for storing and retrieving currently running {@link Dag<JobExecutionPlan>}s. In case of a leadership
 * change in the {@link org.apache.gobblin.service.modules.core.GobblinServiceManager}, the corresponding
 * {@link DagManager} loads the running {@link Dag}s from the {@link DagStateStore} to resume their execution.
 */
@Alpha
public interface DagStateStore {
  /**
   * Persist the {@link Dag} to the backing store.
   * This is not an actual checkpoint but more like a Write-ahead log, where uncommitted job will be persisted
   * and be picked up again when leader transition happens.
   * @param dag The dag submitted to {@link DagManager}
   * @throws IOException if the dag cannot be written to the backing store
   */
  void writeCheckpoint(Dag<JobExecutionPlan> dag) throws IOException;

  /**
   * Delete the {@link Dag} from the backing store, typically upon completion of execution.
   * @param dag The dag completed/cancelled from execution on {@link org.apache.gobblin.runtime.api.SpecExecutor}.
   * @throws IOException if the dag cannot be removed from the backing store
   */
  void cleanUp(Dag<JobExecutionPlan> dag) throws IOException;

  /**
   * Delete the {@link Dag} identified by the given ID from the backing store, typically upon completion of execution.
   * @param dagId The ID of the dag to clean up.
   * @throws IOException if the dag cannot be removed from the backing store
   */
  void cleanUp(String dagId) throws IOException;

  /**
   * Load all currently running {@link Dag}s from the underlying store. Typically, invoked when a new
   * {@link DagManager} takes over or on restart of service.
   * @return a {@link List} of currently running {@link Dag}s.
   * @throws IOException if the dags cannot be read from the backing store
   */
  List<Dag<JobExecutionPlan>> getDags() throws IOException;

  /**
   * Return a single dag from the dag state store.
   * NOTE(review): behavior when {@code dagId} is absent is not specified here (null vs. exception) -- confirm
   * against the implementations before relying on either.
   * @param dagId The ID of the dag to load.
   * @throws IOException if the dag cannot be read from the backing store
   */
  Dag<JobExecutionPlan> getDag(String dagId) throws IOException;

  /**
   * Return a list of all dag IDs contained in the dag state store.
   * @throws IOException if the IDs cannot be read from the backing store
   */
  Set<String> getDagIds() throws IOException;
}
3,899