index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/gobblin/gobblin-yarn/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-yarn/src/test/java/org/apache/gobblin/yarn/GobblinYarnAppLauncherTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.yarn;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.io.PrintWriter;
import java.lang.reflect.Field;
import java.net.URL;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.TimeoutException;
import org.apache.avro.Schema;
import org.apache.commons.io.FileUtils;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.test.TestingServer;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.MiniYARNCluster;
import org.apache.helix.HelixAdmin;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.HelixManager;
import org.apache.helix.HelixManagerFactory;
import org.apache.helix.HelixProperty;
import org.apache.helix.InstanceType;
import org.apache.helix.PropertyKey;
import org.apache.helix.model.Message;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Charsets;
import com.google.common.base.Predicate;
import com.google.common.collect.ImmutableMap;
import com.google.common.eventbus.EventBus;
import com.google.common.io.Closer;
import com.google.common.util.concurrent.Service;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;
import org.apache.gobblin.cluster.GobblinClusterConfigurationKeys;
import org.apache.gobblin.cluster.GobblinHelixConstants;
import org.apache.gobblin.cluster.GobblinHelixMultiManager;
import org.apache.gobblin.cluster.HelixMessageTestBase;
import org.apache.gobblin.cluster.HelixUtils;
import org.apache.gobblin.cluster.TestHelper;
import org.apache.gobblin.cluster.TestShutdownMessageHandlerFactory;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.DynamicConfigGenerator;
import org.apache.gobblin.metrics.kafka.KafkaAvroSchemaRegistry;
import org.apache.gobblin.metrics.kafka.KafkaSchemaRegistry;
import org.apache.gobblin.runtime.app.ServiceBasedAppLauncher;
import org.apache.gobblin.testing.AssertWithBackoff;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.when;
/**
 * Unit tests for {@link GobblinYarnAppLauncher}.
 *
 * <p>
 * This class uses a {@link TestingServer} as an embedded ZooKeeper server for testing. The Curator
 * framework is used to provide a ZooKeeper client. This class also uses the {@link HelixManager} to
 * act as a testing Helix controller to receive the ApplicationMaster shutdown request message. It
 * also starts a {@link MiniYARNCluster} so submission of a Gobblin Yarn application can be tested.
 * A {@link YarnClient} is used to work with the {@link MiniYARNCluster}.
 * </p>
 *
 * @author Yinan Li
 */
@Test(groups = { "gobblin.yarn" }, singleThreaded=true)
public class GobblinYarnAppLauncherTest implements HelixMessageTestBase {
  // Declared as String (was Object): the value is only ever used as a Helix cluster name string.
  private static final String MANAGED_HELIX_CLUSTER_NAME = "GobblinYarnAppLauncherTestManagedHelix";
  public static final String TEST_HELIX_INSTANCE_NAME_MANAGED = HelixUtils.getHelixInstanceName("TestInstance", 1);
  // File the test writes dynamically-discovered settings (ZK connect string, job conf dir) into.
  public static final String DYNAMIC_CONF_PATH = "dynamic.conf";
  // File the MiniYARNCluster configuration is serialized to so other processes can pick it up.
  public static final String YARN_SITE_XML_PATH = "yarn-site.xml";

  // SLF4J convention: one private static final logger per class.
  private static final Logger LOG = LoggerFactory.getLogger(GobblinYarnAppLauncherTest.class);

  private YarnClient yarnClient;
  private CuratorFramework curatorFramework;
  private Config config;
  private Config configManagedHelix;
  private HelixManager helixManager;
  private HelixManager helixManagerManagedHelix;
  private GobblinYarnAppLauncher gobblinYarnAppLauncher;
  private GobblinYarnAppLauncher gobblinYarnAppLauncherManagedHelix;
  private ApplicationId applicationId;

  // Registers every closeable resource created in setUp(); closed once in tearDown().
  private final Closer closer = Closer.create();

  /**
   * Forcibly sets an environment variable in the current JVM by reflecting into the
   * backing map of the unmodifiable map returned by {@link System#getenv()}.
   *
   * <p>NOTE(review): this depends on a JDK-internal field name ("m") and may break under
   * stronger module encapsulation on newer JDKs — confirm on the target JDK.</p>
   *
   * @throws IllegalStateException if reflective access fails
   */
  @SuppressWarnings("unchecked")
  private static void setEnv(String key, String value) {
    try {
      Map<String, String> env = System.getenv();
      Class<?> cl = env.getClass();
      Field field = cl.getDeclaredField("m");
      field.setAccessible(true);
      Map<String, String> writableEnv = (Map<String, String>) field.get(env);
      writableEnv.put(key, value);
    } catch (Exception e) {
      throw new IllegalStateException("Failed to set environment variable", e);
    }
  }

  /**
   * Starts an embedded YARN cluster and ZooKeeper server, then builds two launcher fixtures:
   * one against a Gobblin-owned Helix cluster and one against an externally managed Helix cluster.
   */
  @BeforeClass
  public void setUp() throws Exception {
    // Set java home in environment since it isn't set on some systems
    String javaHome = System.getProperty("java.home");
    setEnv("JAVA_HOME", javaHome);

    final YarnConfiguration clusterConf = new YarnConfiguration();
    clusterConf.set("yarn.resourcemanager.connect.max-wait.ms", "10000");

    MiniYARNCluster miniYARNCluster = this.closer.register(new MiniYARNCluster("TestCluster", 1, 1, 1));
    miniYARNCluster.init(clusterConf);
    miniYARNCluster.start();

    // YARN client should not be started before the Resource Manager is up;
    // the RM address stops containing port ":0" once it has bound a real port.
    AssertWithBackoff.create().logger(LOG).timeoutMs(10000)
        .assertTrue(new Predicate<Void>() {
          @Override public boolean apply(Void input) {
            return !clusterConf.get(YarnConfiguration.RM_ADDRESS).contains(":0");
          }
        }, "Waiting for RM");

    this.yarnClient = this.closer.register(YarnClient.createYarnClient());
    this.yarnClient.init(clusterConf);
    this.yarnClient.start();

    // Use a random ZK port
    TestingServer testingZKServer = this.closer.register(new TestingServer(-1));
    LOG.info("Testing ZK Server listening on: " + testingZKServer.getConnectString());

    // the zk port is dynamically configured
    try (PrintWriter pw = new PrintWriter(DYNAMIC_CONF_PATH)) {
      File dir = new File("target/dummydir");
      // dummy directory specified in configuration
      dir.mkdir();
      pw.println("gobblin.cluster.zk.connection.string=\"" + testingZKServer.getConnectString() + "\"");
      pw.println("jobconf.fullyQualifiedPath=\"" + dir.getAbsolutePath() + "\"");
    }

    // YARN config is dynamic and needs to be passed to other processes
    try (OutputStream os = new FileOutputStream(new File(YARN_SITE_XML_PATH))) {
      clusterConf.writeXml(os);
    }

    this.curatorFramework = TestHelper.createZkClient(testingZKServer, this.closer);

    URL url = GobblinYarnAppLauncherTest.class.getClassLoader()
        .getResource(GobblinYarnAppLauncherTest.class.getSimpleName() + ".conf");
    Assert.assertNotNull(url, "Could not find resource " + url);

    // Fixture 1: launcher against the default (Gobblin-managed) Helix cluster.
    this.config = ConfigFactory.parseURL(url)
        .withValue("gobblin.cluster.zk.connection.string",
            ConfigValueFactory.fromAnyRef(testingZKServer.getConnectString()))
        .resolve();
    String zkConnectionString = this.config.getString(GobblinClusterConfigurationKeys.ZK_CONNECTION_STRING_KEY);
    this.helixManager = HelixManagerFactory.getZKHelixManager(
        this.config.getString(GobblinClusterConfigurationKeys.HELIX_CLUSTER_NAME_KEY), TestHelper.TEST_HELIX_INSTANCE_NAME,
        InstanceType.CONTROLLER, zkConnectionString);
    this.gobblinYarnAppLauncher = new GobblinYarnAppLauncher(this.config, clusterConf);
    this.gobblinYarnAppLauncher.initializeYarnClients(this.config);

    // Fixture 2: launcher against an externally managed Helix cluster (participant instance).
    this.configManagedHelix = ConfigFactory.parseURL(url)
        .withValue("gobblin.cluster.zk.connection.string",
            ConfigValueFactory.fromAnyRef(testingZKServer.getConnectString()))
        .withValue(GobblinClusterConfigurationKeys.HELIX_CLUSTER_NAME_KEY, ConfigValueFactory.fromAnyRef(MANAGED_HELIX_CLUSTER_NAME))
        .withValue(GobblinClusterConfigurationKeys.HELIX_INSTANCE_NAME_KEY, ConfigValueFactory.fromAnyRef(TEST_HELIX_INSTANCE_NAME_MANAGED))
        .withValue(GobblinClusterConfigurationKeys.IS_HELIX_CLUSTER_MANAGED, ConfigValueFactory.fromAnyRef("true"))
        .resolve();
    this.helixManagerManagedHelix = HelixManagerFactory.getZKHelixManager(
        this.configManagedHelix.getString(GobblinClusterConfigurationKeys.HELIX_CLUSTER_NAME_KEY), TEST_HELIX_INSTANCE_NAME_MANAGED,
        InstanceType.PARTICIPANT, zkConnectionString);
    this.gobblinYarnAppLauncherManagedHelix = new GobblinYarnAppLauncher(this.configManagedHelix, clusterConf);
    this.gobblinYarnAppLauncherManagedHelix.initializeYarnClients(this.configManagedHelix);
  }

  /** Verifies the generated AM launch command contains the expected heap size, class and log files. */
  @Test
  public void testBuildApplicationMasterCommand() {
    String command = this.gobblinYarnAppLauncher.buildApplicationMasterCommand("application_1234_3456", 64);
    // 41 is from 64 * 0.8 - 10
    Assert.assertTrue(command.contains("-Xmx41"));
    Assert.assertTrue(command.contains("org.apache.gobblin.yarn.GobblinApplicationMaster"));
    Assert.assertTrue(command.contains("GobblinApplicationMaster.stdout"));
    Assert.assertTrue(command.contains("GobblinApplicationMaster.stderr"));
  }

  /** Creates both Helix clusters in ZK and checks the expected znodes exist. */
  @Test
  public void testCreateHelixCluster() throws Exception {
    // This is tested here instead of in HelixUtilsTest to avoid setting up yet another testing ZooKeeper server.
    HelixUtils.createGobblinHelixCluster(
        this.config.getString(GobblinClusterConfigurationKeys.ZK_CONNECTION_STRING_KEY),
        this.config.getString(GobblinClusterConfigurationKeys.HELIX_CLUSTER_NAME_KEY));
    Assert.assertEquals(this.curatorFramework.checkExists()
        .forPath(String.format("/%s", GobblinYarnAppLauncherTest.class.getSimpleName())).getVersion(), 0);
    Assert.assertEquals(
        this.curatorFramework.checkExists()
            .forPath(String.format("/%s/CONTROLLER", GobblinYarnAppLauncherTest.class.getSimpleName())).getVersion(),
        0);

    //Create managed Helix cluster and test it is created successfully
    HelixUtils.createGobblinHelixCluster(
        this.configManagedHelix.getString(GobblinClusterConfigurationKeys.ZK_CONNECTION_STRING_KEY),
        this.configManagedHelix.getString(GobblinClusterConfigurationKeys.HELIX_CLUSTER_NAME_KEY));
    // NOTE(review): the two assertions below check the same "/<cluster>/CONTROLLER" path; the
    // first was likely intended to check the cluster root path, mirroring the block above.
    Assert.assertEquals(this.curatorFramework.checkExists()
        .forPath(String.format("/%s/CONTROLLER", MANAGED_HELIX_CLUSTER_NAME)).getVersion(), 0);
    Assert.assertEquals(
        this.curatorFramework.checkExists()
            .forPath(String.format("/%s/CONTROLLER", MANAGED_HELIX_CLUSTER_NAME)).getVersion(),
        0);
  }

  /**
   * For some yet unknown reason, hostname resolution for the ResourceManager in {@link MiniYARNCluster}
   * has some issue that causes the {@link YarnClient} not be able to connect and submit the Gobblin Yarn
   * application successfully. This works fine on local machine though. So disabling this and the test
   * below that depends on it on Travis-CI.
   */
  @Test(enabled=false, groups = { "disabledOnCI" }, dependsOnMethods = "testCreateHelixCluster")
  public void testSetupAndSubmitApplication() throws Exception {
    HelixUtils.createGobblinHelixCluster(
        this.config.getString(GobblinClusterConfigurationKeys.ZK_CONNECTION_STRING_KEY),
        this.config.getString(GobblinClusterConfigurationKeys.HELIX_CLUSTER_NAME_KEY));
    this.gobblinYarnAppLauncher.startYarnClient();
    this.applicationId = this.gobblinYarnAppLauncher.setupAndSubmitApplication();

    int i;
    // wait for application to come up (poll up to 120s)
    for (i = 0; i < 120; i++) {
      if (yarnClient.getApplicationReport(applicationId).getYarnApplicationState() ==
          YarnApplicationState.RUNNING) {
        break;
      }
      Thread.sleep(1000);
    }
    Assert.assertTrue(i < 120, "timed out waiting for RUNNING state");

    // wait another 10 seconds and check state again to make sure that the application stays up
    Thread.sleep(10000);
    Assert.assertEquals(yarnClient.getApplicationReport(applicationId).getYarnApplicationState(),
        YarnApplicationState.RUNNING, "Application may have aborted");
  }

  /** Verifies the launcher can rediscover a running application, then kills it. */
  @Test(enabled=false, groups = { "disabledOnCI" }, dependsOnMethods = "testSetupAndSubmitApplication")
  public void testGetReconnectableApplicationId() throws Exception {
    Assert.assertEquals(this.gobblinYarnAppLauncher.getReconnectableApplicationId().get(), this.applicationId);
    this.yarnClient.killApplication(this.applicationId);

    Assert.assertEquals(yarnClient.getApplicationReport(applicationId).getYarnApplicationState(),
        YarnApplicationState.KILLED, "Application not killed");

    // takes some time for kill to take effect and app master to go down
    Thread.sleep(5000);
  }

  /**
   * Verifies the shutdown request message reaches the Helix controller (unmanaged cluster)
   * and the participant instance (managed cluster), and is eventually consumed.
   */
  @Test(dependsOnMethods = "testCreateHelixCluster")
  public void testSendShutdownRequest() throws Exception {
    this.helixManager.connect();
    this.helixManager.getMessagingService().registerMessageHandlerFactory(GobblinHelixConstants.SHUTDOWN_MESSAGE_TYPE,
        new TestShutdownMessageHandlerFactory(this));

    this.gobblinYarnAppLauncher.connectHelixManager();
    this.gobblinYarnAppLauncher.sendShutdownRequest();

    Assert.assertEquals(this.curatorFramework.checkExists()
        .forPath(String.format("/%s/CONTROLLER/MESSAGES", GobblinYarnAppLauncherTest.class.getSimpleName()))
        .getVersion(), 0);
    YarnSecurityManagerTest.GetHelixMessageNumFunc getCtrlMessageNum =
        new YarnSecurityManagerTest.GetHelixMessageNumFunc(GobblinYarnAppLauncherTest.class.getSimpleName(), InstanceType.CONTROLLER, "",
            this.curatorFramework);
    AssertWithBackoff assertWithBackoff =
        AssertWithBackoff.create().logger(LoggerFactory.getLogger("testSendShutdownRequest")).timeoutMs(20000);
    assertWithBackoff.assertEquals(getCtrlMessageNum, 1, "1 controller message queued");

    // Give Helix sometime to handle the message
    assertWithBackoff.assertEquals(getCtrlMessageNum, 0, "all controller messages processed");

    // Repeat for the managed-Helix fixture: the message goes to the participant instance instead.
    this.helixManagerManagedHelix.connect();
    this.helixManagerManagedHelix.getMessagingService().registerMessageHandlerFactory(GobblinHelixConstants.SHUTDOWN_MESSAGE_TYPE,
        new TestShutdownMessageHandlerFactory(this));

    this.gobblinYarnAppLauncherManagedHelix.connectHelixManager();
    this.gobblinYarnAppLauncherManagedHelix.sendShutdownRequest();

    Assert.assertEquals(this.curatorFramework.checkExists()
        .forPath(String.format("/%s/INSTANCES/%s/MESSAGES", this.configManagedHelix.getString(GobblinClusterConfigurationKeys.HELIX_CLUSTER_NAME_KEY), TEST_HELIX_INSTANCE_NAME_MANAGED))
        .getVersion(), 0);
    YarnSecurityManagerTest.GetHelixMessageNumFunc getInstanceMessageNum =
        new YarnSecurityManagerTest.GetHelixMessageNumFunc(this.configManagedHelix.getString(
            GobblinClusterConfigurationKeys.HELIX_CLUSTER_NAME_KEY),
            InstanceType.PARTICIPANT, TEST_HELIX_INSTANCE_NAME_MANAGED, this.curatorFramework);
    assertWithBackoff =
        AssertWithBackoff.create().logger(LoggerFactory.getLogger("testSendShutdownRequest")).timeoutMs(20000);
    assertWithBackoff.assertEquals(getInstanceMessageNum, 1, "1 controller message queued");

    // Give Helix sometime to handle the message
    assertWithBackoff.assertEquals(getInstanceMessageNum, 0, "all controller messages processed");
  }

  /** Cleans up generated files, Helix connections and YARN clients for both launcher fixtures. */
  @AfterClass
  public void tearDown() throws IOException, TimeoutException {
    try {
      Files.deleteIfExists(Paths.get(DYNAMIC_CONF_PATH));
      Files.deleteIfExists(Paths.get(YARN_SITE_XML_PATH));
      this.gobblinYarnAppLauncher.stopYarnClient();
      // Fix: also stop the managed-Helix launcher's YARN client (was previously leaked).
      this.gobblinYarnAppLauncherManagedHelix.stopYarnClient();

      if (this.helixManager.isConnected()) {
        this.helixManager.disconnect();
      }
      if (this.helixManagerManagedHelix.isConnected()) {
        this.helixManagerManagedHelix.disconnect();
      }

      this.gobblinYarnAppLauncher.disconnectHelixManager();
      // Fix: also disconnect the managed-Helix launcher's Helix manager (was previously leaked).
      this.gobblinYarnAppLauncherManagedHelix.disconnectHelixManager();

      if (applicationId != null) {
        this.gobblinYarnAppLauncher.cleanUpAppWorkDirectory(applicationId);
      }
    } finally {
      this.closer.close();
    }
  }

  /** Callback used by {@link TestShutdownMessageHandlerFactory}; not a TestNG test itself. */
  @Test(enabled = false)
  @Override
  public void assertMessageReception(Message message) {
    Assert.assertEquals(message.getMsgType(), GobblinHelixConstants.SHUTDOWN_MESSAGE_TYPE);
    Assert.assertEquals(message.getMsgSubType(), HelixMessageSubTypes.APPLICATION_MASTER_SHUTDOWN.toString());
  }

  /**
   * Test that the dynamic config is added to the config specified when the {@link GobblinApplicationMaster}
   * is instantiated.
   */
  @Test
  public void testDynamicConfig() throws Exception {
    Config config = this.config.withFallback(
        ConfigFactory.parseMap(
            ImmutableMap.of(ConfigurationKeys.DYNAMIC_CONFIG_GENERATOR_CLASS_KEY,
                TestDynamicConfigGenerator.class.getName())));

    ContainerId containerId = ContainerId.newInstance(
        ApplicationAttemptId.newInstance(ApplicationId.newInstance(0, 0), 0), 0);
    TestApplicationMaster
        appMaster = new TestApplicationMaster("testApp", containerId, config,
        new YarnConfiguration());

    // The AM's own config must carry the dynamically generated key plus the generator class.
    Assert.assertEquals(appMaster.getConfig().getString("dynamicKey1"), "dynamicValue1");
    Assert.assertEquals(appMaster.getConfig().getString(ConfigurationKeys.DYNAMIC_CONFIG_GENERATOR_CLASS_KEY),
        TestDynamicConfigGenerator.class.getName());

    // Reflect into the app launcher to verify the dynamic config propagated into the YarnService.
    ServiceBasedAppLauncher appLauncher = appMaster.getAppLauncher();
    Field servicesField = ServiceBasedAppLauncher.class.getDeclaredField("services");
    servicesField.setAccessible(true);
    List<Service> services = (List<Service>) servicesField.get(appLauncher);

    Optional<Service> yarnServiceOptional = services.stream().filter(e -> e instanceof YarnService).findFirst();
    Assert.assertTrue(yarnServiceOptional.isPresent());

    YarnService yarnService = (YarnService) yarnServiceOptional.get();
    Field configField = YarnService.class.getDeclaredField("config");
    configField.setAccessible(true);
    Config yarnServiceConfig = (Config) configField.get(yarnService);

    Assert.assertEquals(yarnServiceConfig.getString("dynamicKey1"), "dynamicValue1");
    Assert.assertEquals(yarnServiceConfig.getString(ConfigurationKeys.DYNAMIC_CONFIG_GENERATOR_CLASS_KEY),
        TestDynamicConfigGenerator.class.getName());
  }

  /**
   * Test that the job cleanup call is called
   */
  @Test
  public void testJobCleanup() throws Exception {
    ContainerId containerId = ContainerId.newInstance(
        ApplicationAttemptId.newInstance(ApplicationId.newInstance(0, 0), 0), 0);
    TestApplicationMaster
        appMaster = Mockito.spy(new TestApplicationMaster("testApp", containerId, config,
        new YarnConfiguration()));

    // Stub out the whole Helix access chain so start() can run without a live cluster.
    GobblinHelixMultiManager mockMultiManager = Mockito.mock(GobblinHelixMultiManager.class);
    appMaster.setMultiManager(mockMultiManager);
    HelixManager mockHelixManager = Mockito.mock(HelixManager.class);
    when(mockMultiManager.getJobClusterHelixManager()).thenReturn(mockHelixManager);
    HelixAdmin mockHelixAdmin = Mockito.mock(HelixAdmin.class);
    when(mockHelixManager.getClusterManagmentTool()).thenReturn(mockHelixAdmin);
    HelixDataAccessor mockAccessor = Mockito.mock(HelixDataAccessor.class);
    when(mockHelixManager.getHelixDataAccessor()).thenReturn(mockAccessor);
    PropertyKey.Builder mockBuilder = Mockito.mock(PropertyKey.Builder.class);
    when(mockAccessor.keyBuilder()).thenReturn(mockBuilder);
    PropertyKey mockLiveInstancesKey = Mockito.mock(PropertyKey.class);
    when(mockBuilder.liveInstances()).thenReturn(mockLiveInstancesKey);
    Map<String, HelixProperty> mockChildValues = new HashMap<>();
    when(mockAccessor.getChildValuesMap(mockLiveInstancesKey)).thenReturn(mockChildValues);

    appMaster.start();
    Mockito.verify(mockMultiManager, times(1)).cleanUpJobs();
  }

  /** Verifies {@link GobblinYarnAppLauncher#outputConfigToFile(Config)} writes the config to disk. */
  @Test
  public void testOutputConfig() throws IOException {
    File tmpTestDir = com.google.common.io.Files.createTempDir();
    try {
      Path outputPath = Paths.get(tmpTestDir.toString(), "application.conf");
      Config config = ConfigFactory.empty()
          .withValue(ConfigurationKeys.FS_URI_KEY, ConfigValueFactory.fromAnyRef("file:///"))
          .withValue(GobblinYarnAppLauncher.GOBBLIN_YARN_CONFIG_OUTPUT_PATH,
              ConfigValueFactory.fromAnyRef(outputPath.toString()));

      GobblinYarnAppLauncher.outputConfigToFile(config);

      String configString = com.google.common.io.Files.toString(outputPath.toFile(), Charsets.UTF_8);
      Assert.assertTrue(configString.contains("fs"));
    } finally {
      FileUtils.deleteDirectory(tmpTestDir);
    }
  }

  /**
   * Verifies that when Kafka metric reporting with a schema registry is enabled, the events
   * schema id is injected into the config while the metrics schema id is left unset.
   */
  @Test
  public void testAddMetricReportingDynamicConfig()
      throws IOException {
    KafkaAvroSchemaRegistry schemaRegistry = Mockito.mock(KafkaAvroSchemaRegistry.class);
    Mockito.when(schemaRegistry.register(Mockito.any(Schema.class), Mockito.anyString())).thenAnswer(new Answer<String>() {
      @Override
      public String answer(InvocationOnMock invocation) {
        return "testId";
      }
    });
    Config config = ConfigFactory.empty().withValue(ConfigurationKeys.METRICS_KAFKA_TOPIC_EVENTS, ConfigValueFactory.fromAnyRef("topic"))
        .withValue(ConfigurationKeys.METRICS_REPORTING_KAFKA_ENABLED_KEY, ConfigValueFactory.fromAnyRef(true))
        .withValue(ConfigurationKeys.METRICS_REPORTING_KAFKA_USE_SCHEMA_REGISTRY, ConfigValueFactory.fromAnyRef(true))
        .withValue(KafkaSchemaRegistry.KAFKA_SCHEMA_REGISTRY_URL, ConfigValueFactory.fromAnyRef("http://testSchemaReg:0000"));
    config = GobblinYarnAppLauncher.addMetricReportingDynamicConfig(config, schemaRegistry);

    Assert.assertEquals(config.getString(ConfigurationKeys.METRICS_REPORTING_EVENTS_KAFKA_AVRO_SCHEMA_ID), "testId");
    Assert.assertFalse(config.hasPath(ConfigurationKeys.METRICS_REPORTING_METRICS_KAFKA_AVRO_SCHEMA_ID));
  }

  /**
   * An application master for accessing protected fields in {@link GobblinApplicationMaster}
   * for testing.
   */
  private static class TestApplicationMaster extends GobblinApplicationMaster {
    public TestApplicationMaster(String applicationName, ContainerId containerId, Config config,
        YarnConfiguration yarnConfiguration)
        throws Exception {
      super(applicationName, containerId.getApplicationAttemptId().getApplicationId().toString(), containerId, config, yarnConfiguration);
    }

    @Override
    protected YarnService buildYarnService(Config config, String applicationName, String applicationId,
        YarnConfiguration yarnConfiguration, FileSystem fs) throws Exception {
      // Substitute a no-op YarnService so no real containers are requested.
      YarnService testYarnService = new TestYarnService(config, applicationName, applicationId, yarnConfiguration, fs,
          new EventBus("GobblinYarnAppLauncherTest"));
      return testYarnService;
    }

    public Config getConfig() {
      return this.config;
    }

    public ServiceBasedAppLauncher getAppLauncher() {
      return this.applicationLauncher;
    }

    public void setMultiManager(GobblinHelixMultiManager multiManager) {
      this.multiManager = multiManager;
    }
  }

  /**
   * Class for testing that dynamic config is injected
   */
  @VisibleForTesting
  public static class TestDynamicConfigGenerator implements DynamicConfigGenerator {
    public TestDynamicConfigGenerator() {
    }

    @Override
    public Config generateDynamicConfig(Config config) {
      return ConfigFactory.parseMap(ImmutableMap.of("dynamicKey1", "dynamicValue1"));
    }
  }

  /**
   * Test class for mocking out YarnService. Need to use this instead of Mockito because of final methods.
   */
  private static class TestYarnService extends YarnService {
    public TestYarnService(Config config, String applicationName, String applicationId, YarnConfiguration yarnConfiguration,
        FileSystem fs, EventBus eventBus) throws Exception {
      super(config, applicationName, applicationId, yarnConfiguration, fs, eventBus, null, null);
    }

    @Override
    protected void startUp()
        throws Exception {
      // do nothing
    }
  }
}
| 1,900 |
0 | Create_ds/gobblin/gobblin-yarn/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-yarn/src/test/java/org/apache/gobblin/yarn/HelixInstancePurgerWithMetricsTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.yarn;
import com.google.common.base.Stopwatch;
import java.util.Collections;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import org.apache.gobblin.metrics.event.EventSubmitter;
import org.apache.gobblin.metrics.event.GobblinEventBuilder;
import org.mockito.ArgumentCaptor;
import org.mockito.Captor;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.MockitoAnnotations;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
import static org.mockito.Mockito.times;
import static org.testng.Assert.assertEquals;
/**
 * Unit tests for {@link HelixInstancePurgerWithMetrics#waitForPurgeCompletion}, verifying the
 * GobblinTrackingEvents emitted for the completed, lagging and failed purge paths.
 */
public class HelixInstancePurgerWithMetricsTest {
  @Mock EventSubmitter eventSubmitter;
  @Mock Stopwatch stopwatch;
  @Mock CompletableFuture<Void> mockTask;
  @Captor ArgumentCaptor<GobblinEventBuilder> gteCaptor;

  // System under test, rebuilt before every method.
  HelixInstancePurgerWithMetrics sut;

  private static final long LAGGING_PURGE_THRESHOLD_MS = 100;
  private static final long PURGE_STATUS_POLLING_RATE_MS = 10;

  @BeforeMethod
  private void init() {
    MockitoAnnotations.openMocks(this);
    sut = new HelixInstancePurgerWithMetrics(eventSubmitter, PURGE_STATUS_POLLING_RATE_MS);
  }

  /** Purge finishes within the threshold: exactly one "completed" event is emitted. */
  @Test
  public void testPurgeOfflineInstances() throws ExecutionException, InterruptedException {
    Mockito.when(stopwatch.start()).thenReturn(stopwatch);
    Mockito.when(stopwatch.elapsed(TimeUnit.MILLISECONDS)).thenReturn(LAGGING_PURGE_THRESHOLD_MS);
    Mockito.when(mockTask.isDone())
        .thenReturn(false)
        .thenReturn(true);
    Mockito.when(mockTask.get()).thenReturn(null);
    Mockito.doNothing().when(eventSubmitter).submit(Mockito.any(GobblinEventBuilder.class));

    long elapsedTime = sut.waitForPurgeCompletion(mockTask, LAGGING_PURGE_THRESHOLD_MS, stopwatch, Collections.emptyMap());

    assertEquals(elapsedTime, LAGGING_PURGE_THRESHOLD_MS);
    Mockito.verify(stopwatch, times(1)).start();
    Mockito.verify(mockTask, times(1)).get();
    Mockito.verify(eventSubmitter, times(1)).submit(gteCaptor.capture());
    assertEquals(gteCaptor.getValue().getName(), HelixInstancePurgerWithMetrics.PURGE_COMPLETED_EVENT);
  }

  /** Lagging purge that needed one poll before completing emits a warning then a completed event. */
  @Test
  public void testPurgeOfflineInstancesSendsWarningEventWhenWaiting() throws ExecutionException, InterruptedException {
    Mockito.when(mockTask.isDone()).thenReturn(false).thenReturn(true);
    testPurgeOfflineInstancesSendsWarningEventHelper();
  }

  /** Lagging purge that was already done on the first poll still emits the warning event. */
  @Test
  public void testPurgeOfflineInstancesSendsWarningEventIfTaskFinishedImmediately() throws ExecutionException, InterruptedException {
    Mockito.when(mockTask.isDone()).thenReturn(true);
    testPurgeOfflineInstancesSendsWarningEventHelper();
  }

  /**
   * Shared body for the two lagging-purge tests above. Callers are responsible for stubbing
   * {@code mockTask.isDone()}; the previous version re-stubbed it here with false->true, which
   * silently overrode the caller's stubbing and made both tests exercise the same path.
   */
  private void testPurgeOfflineInstancesSendsWarningEventHelper() throws ExecutionException, InterruptedException {
    Mockito.when(stopwatch.start()).thenReturn(stopwatch);
    // Elapsed time one past the threshold, which should trigger the lagging warning.
    Mockito.when(stopwatch.elapsed(TimeUnit.MILLISECONDS)).thenReturn(LAGGING_PURGE_THRESHOLD_MS + 1);
    Mockito.when(mockTask.get()).thenReturn(null);
    Mockito.doNothing().when(eventSubmitter).submit(Mockito.any(GobblinEventBuilder.class));

    long elapsedTime = sut.waitForPurgeCompletion(mockTask, LAGGING_PURGE_THRESHOLD_MS, stopwatch, Collections.emptyMap());

    assertEquals(elapsedTime, LAGGING_PURGE_THRESHOLD_MS + 1);
    Mockito.verify(stopwatch, times(1)).start();
    Mockito.verify(mockTask, times(1)).get();
    Mockito.verify(eventSubmitter, times(2)).submit(gteCaptor.capture());
    assertEquals(gteCaptor.getAllValues().get(0).getName(), HelixInstancePurgerWithMetrics.PURGE_LAGGING_EVENT);
    assertEquals(gteCaptor.getAllValues().get(1).getName(), HelixInstancePurgerWithMetrics.PURGE_COMPLETED_EVENT);
  }

  /** A purge task that throws results in a lagging warning followed by a failure event. */
  @Test
  public void testPurgeOfflineInstancesSendsFailureEvent() throws ExecutionException, InterruptedException {
    Mockito.when(stopwatch.start()).thenReturn(stopwatch);
    Mockito.when(stopwatch.elapsed(TimeUnit.MILLISECONDS)).thenReturn(LAGGING_PURGE_THRESHOLD_MS + 1);
    Mockito.when(mockTask.isDone()).thenReturn(true);
    Mockito.when(mockTask.get()).thenThrow(new ExecutionException("Throwing exception to emulate helix failure", new RuntimeException()));
    Mockito.doNothing().when(eventSubmitter).submit(Mockito.any(GobblinEventBuilder.class));

    long elapsedTime = sut.waitForPurgeCompletion(mockTask, LAGGING_PURGE_THRESHOLD_MS, stopwatch, Collections.emptyMap());

    assertEquals(elapsedTime, LAGGING_PURGE_THRESHOLD_MS + 1);
    Mockito.verify(stopwatch, times(1)).start();
    Mockito.verify(mockTask, times(1)).get();
    Mockito.verify(eventSubmitter, times(2)).submit(gteCaptor.capture());
    assertEquals(gteCaptor.getAllValues().get(0).getName(), HelixInstancePurgerWithMetrics.PURGE_LAGGING_EVENT);
    assertEquals(gteCaptor.getAllValues().get(1).getName(), HelixInstancePurgerWithMetrics.PURGE_FAILURE_EVENT);
  }
}
| 1,901 |
0 | Create_ds/gobblin/gobblin-yarn/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-yarn/src/test/java/org/apache/gobblin/yarn/YarnServiceIT.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.yarn;
import java.io.IOException;
import java.lang.reflect.Field;
import java.net.URL;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.TimeoutException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.client.api.AMRMClient;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.server.MiniYARNCluster;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;
import org.apache.helix.HelixAdmin;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.HelixManager;
import org.apache.helix.PropertyKey;
import org.mockito.Mockito;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.google.common.base.Predicate;
import com.google.common.eventbus.EventBus;
import com.google.common.io.Closer;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;
import org.apache.gobblin.cluster.GobblinClusterConfigurationKeys;
import org.apache.gobblin.testing.AssertWithBackoff;
/**
* Tests for {@link YarnService}.
*/
/**
 * Integration tests for {@link YarnService}, run against an in-process {@link MiniYARNCluster}.
 *
 * <p>The scale-up/scale-down tests share the single {@link TestYarnService} instance created in
 * {@link #setUp()} and mutate its running container count, so they are explicitly ordered with
 * {@code dependsOnMethods}.</p>
 */
@Test(groups = {"gobblin.yarn", "disabledOnCI"}, singleThreaded=true)
public class YarnServiceIT {
  private static final Logger LOG = LoggerFactory.getLogger(YarnServiceIT.class);

  private YarnClient yarnClient;
  private MiniYARNCluster yarnCluster;
  private TestYarnService yarnService;
  private Config config;
  private YarnConfiguration clusterConf;
  private ApplicationId applicationId;
  private ApplicationAttemptId applicationAttemptId;
  private final EventBus eventBus = new EventBus("YarnServiceIT");
  private final Closer closer = Closer.create();

  /**
   * Injects an environment variable into this JVM's process environment via reflection on the
   * backing map of {@link System#getenv()}, which is otherwise immutable.
   */
  @SuppressWarnings("unchecked")
  private static void setEnv(String key, String value) {
    try {
      Map<String, String> env = System.getenv();
      Class<?> cl = env.getClass();
      Field field = cl.getDeclaredField("m");
      field.setAccessible(true);
      Map<String, String> writableEnv = (Map<String, String>) field.get(env);
      writableEnv.put(key, value);
    } catch (Exception e) {
      throw new IllegalStateException("Failed to set environment variable", e);
    }
  }

  @BeforeClass
  public void setUp() throws Exception {
    // Set java home in environment since it isn't set on some systems
    String javaHome = System.getProperty("java.home");
    setEnv("JAVA_HOME", javaHome);

    this.clusterConf = new YarnConfiguration();
    this.clusterConf.set(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS, "100");
    this.clusterConf.set(YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS, "10000");
    this.clusterConf.set(YarnConfiguration.YARN_CLIENT_APPLICATION_CLIENT_PROTOCOL_POLL_TIMEOUT_MS, "60000");

    this.yarnCluster =
        this.closer.register(new MiniYARNCluster("YarnServiceTestCluster", 4, 1, 1));
    this.yarnCluster.init(this.clusterConf);
    this.yarnCluster.start();

    // YARN client should not be started before the Resource Manager is up
    AssertWithBackoff.create().logger(LOG).timeoutMs(10000)
        .assertTrue(new Predicate<Void>() {
          @Override public boolean apply(Void input) {
            // The RM address keeps a ":0" port placeholder until it has fully bound.
            return !clusterConf.get(YarnConfiguration.RM_ADDRESS).contains(":0");
          }
        }, "Waiting for RM");

    this.yarnClient = this.closer.register(YarnClient.createYarnClient());
    this.yarnClient.init(this.clusterConf);
    this.yarnClient.start();

    String resourceName = YarnServiceIT.class.getSimpleName() + ".conf";
    URL url = YarnServiceIT.class.getClassLoader().getResource(resourceName);
    // Report the resource name on failure; the URL itself is null in that case.
    Assert.assertNotNull(url, "Could not find resource " + resourceName);

    this.config = ConfigFactory.parseURL(url).resolve();

    // Start a dummy application manager so that the YarnService can use the AM-RM token.
    startApp();

    // create and start the test yarn service
    this.yarnService = new TestYarnService(this.config, "testApp", "appId",
        this.clusterConf,
        FileSystem.getLocal(new Configuration()), this.eventBus);

    this.yarnService.startUp();
  }

  /**
   * Submits a minimal dummy application, waits for it to be ACCEPTED, and installs its AM-RM
   * token into the current UGI so the {@link YarnService} under test can register with the RM.
   */
  private void startApp() throws Exception {
    // submit a dummy app
    ApplicationSubmissionContext appSubmissionContext =
        yarnClient.createApplication().getApplicationSubmissionContext();
    this.applicationId = appSubmissionContext.getApplicationId();

    ContainerLaunchContext containerLaunchContext =
        BuilderUtils.newContainerLaunchContext(Collections.emptyMap(), Collections.emptyMap(),
            Arrays.asList("sleep", "100"), Collections.emptyMap(), null, Collections.emptyMap());

    // Setup the application submission context
    appSubmissionContext.setApplicationName("TestApp");
    appSubmissionContext.setResource(Resource.newInstance(128, 1));
    appSubmissionContext.setPriority(Priority.newInstance(0));
    appSubmissionContext.setAMContainerSpec(containerLaunchContext);

    this.yarnClient.submitApplication(appSubmissionContext);

    // wait for application to be accepted
    int i;
    RMAppAttempt attempt = null;
    for (i = 0; i < 120; i++) {
      ApplicationReport appReport = yarnClient.getApplicationReport(applicationId);

      if (appReport.getYarnApplicationState() == YarnApplicationState.ACCEPTED) {
        this.applicationAttemptId = appReport.getCurrentApplicationAttemptId();
        attempt = yarnCluster.getResourceManager().getRMContext().getRMApps()
            .get(appReport.getCurrentApplicationAttemptId().getApplicationId()).getCurrentAppAttempt();
        break;
      }
      Thread.sleep(1000);
    }

    Assert.assertTrue(i < 120, "timed out waiting for ACCEPTED state");

    // Set the AM-RM token in the UGI for access during testing
    UserGroupInformation.setLoginUser(UserGroupInformation.createRemoteUser(UserGroupInformation.getCurrentUser()
        .getUserName()));
    UserGroupInformation.getCurrentUser().addToken(attempt.getAMRMToken());
  }

  @AfterClass
  public void tearDown() throws IOException, TimeoutException, YarnException {
    try {
      this.yarnClient.killApplication(this.applicationAttemptId.getApplicationId());
      this.yarnService.shutDown();
    } finally {
      // Closes the YARN client and the mini cluster even if kill/shutdown fails.
      this.closer.close();
    }
  }

  /**
   * Test that the dynamic config is added to the config specified when the {@link GobblinApplicationMaster}
   * is instantiated.
   */
  @Test(groups = {"gobblin.yarn", "disabledOnCI"})
  public void testScaleUp() {
    Resource resource = Resource.newInstance(64, 1);

    this.yarnService.requestTargetNumberOfContainers(
        GobblinYarnTestUtils.createYarnContainerRequest(10, resource), Collections.emptySet());

    Assert.assertFalse(this.yarnService.getMatchingRequestsList(resource).isEmpty());
    Assert.assertTrue(this.yarnService.waitForContainerCount(10, 60000));
    Assert.assertEquals(this.yarnService.getContainerMap().size(), 10);

    // container request list that had entries earlier should now be empty
    Assert.assertEquals(this.yarnService.getMatchingRequestsList(resource).size(), 0);
  }

  @Test(groups = {"gobblin.yarn", "disabledOnCI"}, dependsOnMethods = "testScaleUp")
  public void testScaleDownWithInUseInstances() {
    Set<String> inUseInstances = new HashSet<>();
    for (int i = 1; i <= 8; i++) {
      inUseInstances.add("GobblinYarnTaskRunner_" + i);
    }
    Resource resource = Resource.newInstance(64, 1);

    this.yarnService.requestTargetNumberOfContainers(
        GobblinYarnTestUtils.createYarnContainerRequest(6, resource), inUseInstances);

    // will only be able to shrink to 8
    Assert.assertTrue(this.yarnService.waitForContainerCount(8, 60000));
    // will not be able to shrink to 6 due to 8 in-use instances
    Assert.assertFalse(this.yarnService.waitForContainerCount(6, 10000));
    Assert.assertEquals(this.yarnService.getContainerMap().size(), 8);
  }

  @Test(groups = {"gobblin.yarn", "disabledOnCI"}, dependsOnMethods = "testScaleDownWithInUseInstances")
  public void testScaleDown() throws Exception {
    Resource resource = Resource.newInstance(64, 1);

    this.yarnService.requestTargetNumberOfContainers(
        GobblinYarnTestUtils.createYarnContainerRequest(4, resource), Collections.emptySet());

    Assert.assertTrue(this.yarnService.waitForContainerCount(4, 60000));
    Assert.assertEquals(this.yarnService.getContainerMap().size(), 4);
  }

  // Keep this test last since it interferes with the container counts in the prior tests.
  @Test(groups = {"gobblin.yarn", "disabledOnCI"}, dependsOnMethods = "testScaleDown")
  public void testReleasedContainerCache() throws Exception {
    Config modifiedConfig = this.config
        .withValue(GobblinYarnConfigurationKeys.RELEASED_CONTAINERS_CACHE_EXPIRY_SECS, ConfigValueFactory.fromAnyRef("2"));

    // A dedicated (never started) service instance so the short cache expiry does not affect
    // the shared service used by the other tests.
    TestYarnService cacheTestService =
        new TestYarnService(modifiedConfig, "testApp1", "appId1",
            this.clusterConf, FileSystem.getLocal(new Configuration()), this.eventBus);

    ContainerId containerId1 = ContainerId.newInstance(ApplicationAttemptId.newInstance(ApplicationId.newInstance(1, 0),
        0), 0);

    cacheTestService.getReleasedContainerCache().put(containerId1, "");
    Assert.assertNotNull(cacheTestService.getReleasedContainerCache().getIfPresent(containerId1));

    // give some time for element to expire
    Thread.sleep(4000);
    Assert.assertNull(cacheTestService.getReleasedContainerCache().getIfPresent(containerId1));
  }

  /**
   * Test if requested resource exceed the resource limit, yarnService should fail.
   */
  @Test(groups = {"gobblin.yarn", "disabledOnCI"}, expectedExceptions = IllegalArgumentException.class)
  public void testExceedResourceLimit() {
    Resource resource = Resource.newInstance(204800, 10240);
    this.yarnService.requestTargetNumberOfContainers(
        GobblinYarnTestUtils.createYarnContainerRequest(10, resource), Collections.emptySet());
  }

  /**
   * A {@link YarnService} variant for testing: Helix interactions are mocked out and launched
   * containers simply run {@code sleep} so no real work is scheduled.
   */
  static class TestYarnService extends YarnService {
    public TestYarnService(Config config, String applicationName, String applicationId, YarnConfiguration yarnConfiguration,
        FileSystem fs, EventBus eventBus) throws Exception {
      super(config, applicationName, applicationId, yarnConfiguration, fs, eventBus, getMockHelixManager(config), getMockHelixAdmin());
    }

    private static HelixManager getMockHelixManager(Config config) {
      HelixManager helixManager = Mockito.mock(HelixManager.class);
      HelixDataAccessor helixDataAccessor = Mockito.mock(HelixDataAccessor.class);
      PropertyKey propertyKey = Mockito.mock(PropertyKey.class);
      PropertyKey.Builder propertyKeyBuilder = Mockito.mock(PropertyKey.Builder.class);

      Mockito.when(helixManager.getInstanceName()).thenReturn("helixInstance1");
      Mockito.when(helixManager.getClusterName()).thenReturn(config.getString(GobblinClusterConfigurationKeys.HELIX_CLUSTER_NAME_KEY));
      Mockito.when(helixManager.getHelixDataAccessor()).thenReturn(helixDataAccessor);
      Mockito.when(helixManager.getMetadataStoreConnectionString()).thenReturn("stub");
      Mockito.when(helixDataAccessor.keyBuilder()).thenReturn(propertyKeyBuilder);
      Mockito.when(propertyKeyBuilder.liveInstance(Mockito.anyString())).thenReturn(propertyKey);
      Mockito.when(helixDataAccessor.getProperty(propertyKey)).thenReturn(null);

      return helixManager;
    }

    private static HelixAdmin getMockHelixAdmin() {
      HelixAdmin helixAdmin = Mockito.mock(HelixAdmin.class);
      Mockito.doNothing().when(helixAdmin).purgeOfflineInstances(Mockito.anyString(), Mockito.anyLong());
      Mockito.doNothing().when(helixAdmin).enableInstance(Mockito.anyString(), Mockito.anyString(), Mockito.anyBoolean());
      return helixAdmin;
    }

    /** Launches containers that just sleep instead of starting a real Gobblin task runner. */
    protected ContainerLaunchContext newContainerLaunchContext(ContainerInfo containerInfo)
        throws IOException {
      return BuilderUtils.newContainerLaunchContext(Collections.emptyMap(), Collections.emptyMap(),
          Arrays.asList("sleep", "60000"), Collections.emptyMap(), null, Collections.emptyMap());
    }

    /**
     * Get the list of matching container requests for the specified resource memory and cores.
     */
    public List<? extends Collection<AMRMClient.ContainerRequest>> getMatchingRequestsList(Resource resource) {
      Priority priority = Priority.newInstance(0);
      return getAmrmClientAsync().getMatchingRequests(priority, ResourceRequest.ANY, resource);
    }

    /**
     * Wait to reach the expected count.
     *
     * @param expectedCount the expected count
     * @param waitMillis amount of time in milliseconds to wait
     * @return true if the count was reached within the allowed wait time
     */
    public boolean waitForContainerCount(int expectedCount, int waitMillis) {
      final int waitInterval = 1000;
      int waitedMillis = 0;

      while (waitedMillis < waitMillis) {
        try {
          Thread.sleep(waitInterval);
          waitedMillis += waitInterval;
        } catch (InterruptedException e) {
          // Restore the interrupt status and report failure rather than spinning on.
          Thread.currentThread().interrupt();
          break;
        }

        if (expectedCount == getContainerMap().size()) {
          return true;
        }
      }

      return false;
    }
  }
}
| 1,902 |
0 | Create_ds/gobblin/gobblin-yarn/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-yarn/src/main/java/org/apache/gobblin/yarn/GobblinYarnMetricTagNames.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.yarn;
/**
* A central place for constants of {@link org.apache.gobblin.metrics.MetricContext} tag names for Gobblin on Yarn.
*
* @author Yinan Li
*/
public class GobblinYarnMetricTagNames {

  /** Tag name carrying the YARN application attempt id. */
  public static final String YARN_APPLICATION_ATTEMPT_ID = "yarn.application.attempt.id";

  /** Tag name carrying the YARN container id. */
  public static final String CONTAINER_ID = "yarn.container.id";

  /** Constants holder; not meant to be instantiated. */
  private GobblinYarnMetricTagNames() {
  }
}
| 1,903 |
0 | Create_ds/gobblin/gobblin-yarn/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-yarn/src/main/java/org/apache/gobblin/yarn/YarnContainerSecurityManager.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.yarn;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Throwables;
import com.google.common.eventbus.EventBus;
import com.google.common.eventbus.Subscribe;
import com.google.common.util.concurrent.AbstractIdleService;
import com.typesafe.config.Config;
import java.io.IOException;
import org.apache.gobblin.util.logs.LogCopier;
import org.apache.gobblin.yarn.event.DelegationTokenUpdatedEvent;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A class for managing token renewing in the containers including the container for the
* {@link GobblinApplicationMaster}.
*
* <p>
* This class implements a simple monitor for modifications on the token file and reloads tokens
* in the token file if the file has been modified and adds the tokens to the credentials of the
* current login user.
* </p>
*
* @author Yinan Li
*/
public class YarnContainerSecurityManager extends AbstractIdleService {

  private static final Logger LOGGER = LoggerFactory.getLogger(YarnContainerSecurityManager.class);

  private final FileSystem fs;
  private final Path tokenFilePath;
  private final EventBus eventBus;
  // Optional; when present, its source/destination FileSystems are refreshed after a token update.
  private final LogCopier logCopier;

  public YarnContainerSecurityManager(Config config, FileSystem fs, EventBus eventBus) {
    this(config, fs, eventBus, null);
  }

  public YarnContainerSecurityManager(Config config, FileSystem fs, EventBus eventBus, LogCopier logCopier) {
    this.fs = fs;
    // The token file lives at <home dir>/<application name>/<token file name> on the given FileSystem.
    this.tokenFilePath = new Path(this.fs.getHomeDirectory(),
        config.getString(GobblinYarnConfigurationKeys.APPLICATION_NAME_KEY) + Path.SEPARATOR
            + GobblinYarnConfigurationKeys.TOKEN_FILE_NAME);
    this.eventBus = eventBus;
    this.logCopier = logCopier;
  }

  /**
   * Reloads the credentials from the token file and adds them to the current login user upon a
   * {@link DelegationTokenUpdatedEvent} posted on the {@link EventBus}.
   *
   * @throws RuntimeException wrapping the {@link IOException} if reading the token file fails
   */
  @SuppressWarnings("unused")
  @Subscribe
  public void handleTokenFileUpdatedEvent(DelegationTokenUpdatedEvent delegationTokenUpdatedEvent) {
    try {
      addCredentials(readCredentials(this.tokenFilePath));
      if (this.logCopier != null) {
        // Force the log copier to re-create its FileSystems so they pick up the new tokens.
        this.logCopier.setNeedToUpdateDestFs(true);
        this.logCopier.setNeedToUpdateSrcFs(true);
      }
    } catch (IOException ioe) {
      // Equivalent to the deprecated Throwables.propagate(ioe): wrap the checked exception.
      throw new RuntimeException(ioe);
    }
  }

  @Override
  protected void startUp() throws Exception {
    // Subscribe this instance to token-file-updated events.
    this.eventBus.register(this);
  }

  @Override
  protected void shutDown() throws Exception {
    // Nothing to do
    LOGGER.info("Attempt to shut down YarnContainerSecurityManager");
  }

  /**
   * Read the {@link Token}s stored in the token file.
   */
  @VisibleForTesting
  Credentials readCredentials(Path tokenFilePath) throws IOException {
    LOGGER.info("Reading updated credentials from token file: " + tokenFilePath);
    return Credentials.readTokenStorageFile(tokenFilePath, this.fs.getConf());
  }

  /** Adds the given credentials to the current login user, logging each token for traceability. */
  @VisibleForTesting
  void addCredentials(Credentials credentials) throws IOException {
    for (Token<? extends TokenIdentifier> token : credentials.getAllTokens()) {
      LOGGER.info("updating " + token.toString());
    }
    UserGroupInformation.getCurrentUser().addCredentials(credentials);
  }
}
| 1,904 |
0 | Create_ds/gobblin/gobblin-yarn/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-yarn/src/main/java/org/apache/gobblin/yarn/YarnAppSecurityManagerWithKeytabs.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.yarn;
import java.io.File;
import java.io.IOException;
import java.util.Arrays;
import java.util.concurrent.ScheduledFuture;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.hadoop.TokenUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.helix.HelixManager;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Strings;
import com.typesafe.config.Config;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
/**
* A class for managing Kerberos login and token renewing on the client side that has access to
* the keytab file.
*
* <p>
* This class works with {@link YarnContainerSecurityManager} to manage renewing of delegation
* tokens across the application. This class is responsible for login through a Kerberos keytab,
* renewing the delegation token, and storing the token to a token file on HDFS. It sends a
* Helix message to the controller and all the participants upon writing the token to the token
* file, which rely on the {@link YarnContainerSecurityManager} to read the token in the file
* upon receiving the message.
* </p>
*
* <p>
* This class uses a scheduled task to do Kerberos re-login to renew the Kerberos ticket on a
* configurable schedule if login is from a keytab file. It also uses a second scheduled task
* to renew the delegation token after each login. Both the re-login interval and the token
* renewing interval are configurable.
* </p>
*
* @author Yinan Li
*/
public class YarnAppSecurityManagerWithKeytabs extends AbstractYarnAppSecurityManager {

  // The user logged in at construction time; used for the principal fallback in login().
  private UserGroupInformation loginUser;

  // NOTE(review): not referenced anywhere in this class's visible code — presumably the scheduled
  // renew task handle is managed by the parent class or left over from a refactoring; confirm
  // before removing.
  private Optional<ScheduledFuture<?>> scheduledTokenRenewTask = Optional.absent();

  // The inherited firstLogin flag is used to tell if this is the first login. If yes, no token
  // updated message will be sent to the controller and the participants as they may not be up
  // running yet. The first login happens after this class starts up so the token gets regularly
  // refreshed before the next login.
  public YarnAppSecurityManagerWithKeytabs(Config config, HelixManager helixManager, FileSystem fs, Path tokenFilePath)
      throws IOException {
    super(config, helixManager, fs, tokenFilePath);
    this.loginUser = UserGroupInformation.getLoginUser();
  }

  /**
   * Renew the existing delegation token.
   *
   * <p>Each token in the inherited {@code credentials} is renewed individually; a failure renewing
   * one token is logged but does not stop renewal of the others. The (possibly partially renewed)
   * credentials are then written back to the token file, and a token-file-updated message is sent
   * to the cluster unless this is the first login of the interval.</p>
   */
  protected synchronized void renewDelegationToken() throws IOException, InterruptedException {
    LOGGER.debug("renewing all tokens {}", credentials.getAllTokens());
    credentials.getAllTokens().forEach(
        existingToken -> {
          try {
            long expiryTime = existingToken.renew(this.fs.getConf());
            LOGGER.info("renewed token: {}, expiryTime: {}, Id; {}", existingToken, expiryTime, Arrays.toString(existingToken.getIdentifier()));
            // TODO: If token failed to get renewed in case its expired ( can be detected via the error text ),
            //  it should call the login() to reissue the new tokens
          } catch (IOException | InterruptedException e) {
            // Best-effort: log and continue renewing the remaining tokens.
            LOGGER.error("Error renewing token: " + existingToken + " ,error: " + e, e);
          }
        }
    );
    writeDelegationTokenToFile(credentials);
    if (!this.firstLogin) {
      LOGGER.info("This is not a first login, sending TokenFileUpdatedMessage.");
      sendTokenFileUpdatedMessage();
    } else {
      LOGGER.info("This is first login of the interval, so skipping sending TokenFileUpdatedMessage.");
    }
  }

  /**
   * Get a new delegation token for the current logged-in user.
   *
   * <p>Fetches fresh FileSystem tokens (including any configured "other namenodes") into a new
   * {@link Credentials} object and replaces the inherited {@code credentials} with it.</p>
   */
  @VisibleForTesting
  synchronized void getNewDelegationTokenForLoginUser() throws IOException, InterruptedException {
    final Configuration newConfig = new Configuration();
    final Credentials allCreds = new Credentials();
    // Text renewer = TokenUtils.getMRTokenRenewerInternal(new JobConf());
    // The login user's short name is used as the token renewer.
    String renewer = UserGroupInformation.getLoginUser().getShortUserName();
    LOGGER.info("creating new login tokens with renewer: {}", renewer);
    TokenUtils.getAllFSTokens(newConfig, allCreds, renewer, Optional.absent(), ConfigUtils.getStringList(this.config, TokenUtils.OTHER_NAMENODES));
    //TODO: Any other required tokens can be fetched here based on config or any other detection mechanism
    LOGGER.debug("All new tokens in credential: {}", allCreds.getAllTokens());
    this.credentials = allCreds;
  }

  /**
   * Login the user from a given keytab file.
   *
   * <p>Validates the configured keytab path, performs the Kerberos keytab login, fetches fresh
   * delegation tokens, persists them to the token file, and adds them to the current user.
   * A token-file-updated message is sent unless this is the first login of the interval.</p>
   *
   * @throws IOException if the keytab path is missing/invalid or the login/token fetch fails
   */
  protected synchronized void login() throws IOException, InterruptedException {
    String keyTabFilePath = this.config.getString(GobblinYarnConfigurationKeys.KEYTAB_FILE_PATH);
    if (Strings.isNullOrEmpty(keyTabFilePath)) {
      throw new IOException("Keytab file path is not defined for Kerberos login");
    }

    if (!new File(keyTabFilePath).exists()) {
      throw new IOException("Keytab file not found at: " + keyTabFilePath);
    }

    String principal = this.config.getString(GobblinYarnConfigurationKeys.KEYTAB_PRINCIPAL_NAME);
    if (Strings.isNullOrEmpty(principal)) {
      // Fall back to a localhost principal derived from the construction-time login user.
      principal = this.loginUser.getShortUserName() + "/localhost@LOCALHOST";
    }
    LOGGER.info("Login using kerberos principal : "+ principal);
    Configuration conf = new Configuration();
    // Force Kerberos authentication regardless of the default Hadoop configuration.
    conf.set(HADOOP_SECURITY_AUTHENTICATION, UserGroupInformation.AuthenticationMethod.KERBEROS.toString().toLowerCase());
    UserGroupInformation.setConfiguration(conf);
    UserGroupInformation.loginUserFromKeytab(principal, keyTabFilePath);
    LOGGER.info(String.format("Logged in from keytab file %s using principal %s for user: %s", keyTabFilePath, principal, this.loginUser));

    getNewDelegationTokenForLoginUser();
    writeDelegationTokenToFile(this.credentials);
    UserGroupInformation.getCurrentUser().addCredentials(this.credentials);

    if (!this.firstLogin) {
      LOGGER.info("This is not a first login, sending TokenFileUpdatedMessage from Login().");
      sendTokenFileUpdatedMessage();
    }else {
      LOGGER.info("This is first login of the interval, so skipping sending TokenFileUpdatedMessage from Login().");
    }
  }
}
| 1,905 |
0 | Create_ds/gobblin/gobblin-yarn/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-yarn/src/main/java/org/apache/gobblin/yarn/YarnAppMasterSecurityManager.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.yarn;
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import com.google.common.base.Throwables;
import com.google.common.eventbus.EventBus;
import com.typesafe.config.Config;
import org.apache.gobblin.util.logs.LogCopier;
import org.apache.gobblin.yarn.event.DelegationTokenUpdatedEvent;
/**
 * A {@link YarnContainerSecurityManager} for the application master process that, in addition to
 * refreshing the login user's credentials, pushes the updated tokens to the {@link YarnService}
 * via {@code updateToken()} after each delegation token rotation.
 */
public class YarnAppMasterSecurityManager extends YarnContainerSecurityManager {

  private final YarnService yarnService;

  public YarnAppMasterSecurityManager(Config config, FileSystem fs, EventBus eventBus, LogCopier logCopier,
      YarnService yarnService) {
    super(config, fs, eventBus, logCopier);
    this.yarnService = yarnService;
  }

  /**
   * Handles a token-file update by first applying the base-class refresh and then propagating the
   * new tokens to the {@link YarnService}.
   *
   * @throws RuntimeException wrapping the {@link IOException} if the token update fails
   */
  @Override
  public void handleTokenFileUpdatedEvent(DelegationTokenUpdatedEvent delegationTokenUpdatedEvent) {
    super.handleTokenFileUpdatedEvent(delegationTokenUpdatedEvent);
    try {
      yarnService.updateToken();
    } catch (IOException ioe) {
      // Equivalent to the deprecated Throwables.propagate(ioe): wrap the checked exception.
      throw new RuntimeException(ioe);
    }
  }
}
| 1,906 |
0 | Create_ds/gobblin/gobblin-yarn/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-yarn/src/main/java/org/apache/gobblin/yarn/YarnContainerRequestBundle.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.yarn;
import com.google.common.base.Preconditions;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.hadoop.yarn.api.records.Resource;
/**
* The class that represents current Yarn container request that will be used by {link @YarnService}.
* Yarn container allocation should associate with helix tag, as workflows can have specific helix tag setup
* and specific resource requirement.
*/
@Slf4j
@Getter
public class YarnContainerRequestBundle {
int totalContainers;
private final Map<String, Integer> helixTagContainerCountMap;
private final Map<String, Resource> helixTagResourceMap;
private final Map<String, Set<String>> resourceHelixTagMap;
public YarnContainerRequestBundle() {
this.totalContainers = 0;
this.helixTagContainerCountMap = new HashMap<>();
this.helixTagResourceMap = new HashMap<>();
this.resourceHelixTagMap = new HashMap<>();
}
public void add(String helixTag, int containerCount, Resource resource) {
helixTagContainerCountMap.put(helixTag, helixTagContainerCountMap.getOrDefault(helixTag, 0) + containerCount);
if(helixTagResourceMap.containsKey(helixTag)) {
Resource existedResource = helixTagResourceMap.get(helixTag);
Preconditions.checkArgument(resource.getMemory() == existedResource.getMemory() &&
resource.getVirtualCores() == existedResource.getVirtualCores(),
"Helix tag need to have consistent resource requirement. Tag " + helixTag
+ " has existed resource require " + existedResource.toString() + " and different require " + resource.toString());
} else {
helixTagResourceMap.put(helixTag, resource);
Set<String> tagSet = resourceHelixTagMap.getOrDefault(resource.toString(), new HashSet<>());
tagSet.add(helixTag);
resourceHelixTagMap.put(resource.toString(), tagSet);
}
totalContainers += containerCount;
}
// This method assumes the resource requirement for the helix tag is already stored in the map
public void add(String helixTag, int containerCount) {
if (!helixTagContainerCountMap.containsKey(helixTag) && !helixTagResourceMap.containsKey(helixTag)) {
log.error("Helix tag {} is not present in the request bundle yet, can't process the request to add {} "
+ "container for it without specifying the resource requirement", helixTag, containerCount);
return;
}
helixTagContainerCountMap.put(helixTag, helixTagContainerCountMap.get(helixTag) + containerCount);
this.totalContainers += containerCount;
}
} | 1,907 |
0 | Create_ds/gobblin/gobblin-yarn/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-yarn/src/main/java/org/apache/gobblin/yarn/GobblinApplicationMaster.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.yarn;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.helix.HelixAdmin;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.HelixManager;
import org.apache.helix.NotificationContext;
import org.apache.helix.messaging.handling.HelixTaskResult;
import org.apache.helix.messaging.handling.MessageHandler;
import org.apache.helix.messaging.handling.MultiTypeMessageHandlerFactory;
import org.apache.helix.model.Message;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Optional;
import com.google.common.util.concurrent.Service;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;
import lombok.Getter;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.cluster.GobblinClusterConfigurationKeys;
import org.apache.gobblin.cluster.GobblinClusterManager;
import org.apache.gobblin.cluster.GobblinClusterUtils;
import org.apache.gobblin.cluster.GobblinHelixMultiManager;
import org.apache.gobblin.cluster.HelixUtils;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.JvmUtils;
import org.apache.gobblin.util.PathUtils;
import org.apache.gobblin.util.logs.Log4jConfigurationHelper;
import org.apache.gobblin.util.logs.LogCopier;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
import org.apache.gobblin.yarn.event.DelegationTokenUpdatedEvent;
/**
* The Yarn ApplicationMaster class for Gobblin.
*
* <p>
* This class runs the {@link YarnService} for all Yarn-related stuffs like ApplicationMaster registration
* and un-registration and Yarn container provisioning.
* </p>
*
* @author Yinan Li
*/
@Alpha
public class GobblinApplicationMaster extends GobblinClusterManager {
private static final Logger LOGGER = LoggerFactory.getLogger(GobblinApplicationMaster.class);
@Getter
private final YarnService yarnService;
private LogCopier logCopier;
public GobblinApplicationMaster(String applicationName, String applicationId, ContainerId containerId, Config config,
YarnConfiguration yarnConfiguration) throws Exception {
super(applicationName, applicationId, config.withValue(GobblinYarnConfigurationKeys.CONTAINER_NUM_KEY,
ConfigValueFactory.fromAnyRef(YarnHelixUtils.getContainerNum(containerId.toString()))),
Optional.<Path>absent());
String containerLogDir = config.getString(GobblinYarnConfigurationKeys.LOGS_SINK_ROOT_DIR_KEY);
GobblinYarnLogSource gobblinYarnLogSource = new GobblinYarnLogSource();
if (gobblinYarnLogSource.isLogSourcePresent()) {
Path appWorkDir = PathUtils.combinePaths(containerLogDir, GobblinClusterUtils.getAppWorkDirPath(this.clusterName, this.applicationId), "AppMaster");
logCopier = gobblinYarnLogSource.buildLogCopier(this.config, containerId.toString(), this.fs, appWorkDir);
this.applicationLauncher
.addService(logCopier);
}
YarnHelixUtils.setYarnClassPath(config, yarnConfiguration);
YarnHelixUtils.setAdditionalYarnClassPath(config, yarnConfiguration);
this.yarnService = buildYarnService(this.config, applicationName, this.applicationId, yarnConfiguration, this.fs);
this.applicationLauncher.addService(this.yarnService);
if (UserGroupInformation.isSecurityEnabled()) {
LOGGER.info("Adding YarnContainerSecurityManager since security is enabled");
this.applicationLauncher.addService(buildYarnContainerSecurityManager(this.config, this.fs));
}
// Add additional services
List<String> serviceClassNames = ConfigUtils.getStringList(this.config,
GobblinYarnConfigurationKeys.APP_MASTER_SERVICE_CLASSES);
for (String serviceClassName : serviceClassNames) {
Class<?> serviceClass = Class.forName(serviceClassName);
this.applicationLauncher.addService((Service) GobblinConstructorUtils.invokeLongestConstructor(serviceClass, this));
}
}
/**
* Build the {@link YarnService} for the Application Master.
*/
protected YarnService buildYarnService(Config config, String applicationName, String applicationId,
YarnConfiguration yarnConfiguration, FileSystem fs)
throws Exception {
return new YarnService(config, applicationName, applicationId, yarnConfiguration, fs, this.eventBus,
this.multiManager.getJobClusterHelixManager(), this.multiManager.getJobClusterHelixAdmin());
}
/**
* Build the {@link YarnAppMasterSecurityManager} for the Application Master.
*/
private YarnContainerSecurityManager buildYarnContainerSecurityManager(Config config, FileSystem fs) {
return new YarnAppMasterSecurityManager(config, fs, this.eventBus, this.logCopier, this.yarnService);
}
@Override
protected MultiTypeMessageHandlerFactory getUserDefinedMessageHandlerFactory() {
return new ControllerUserDefinedMessageHandlerFactory();
}
@Override
public synchronized void setupHelix() {
super.setupHelix();
this.disableTaskRunnersFromPreviousExecutions(this.multiManager);
}
/**
* A method to disable pre-existing live instances in a Helix cluster. This can happen when a previous Yarn application
* leaves behind orphaned Yarn worker processes. Since Helix does not provide an API to drop a live instance, we use
* the disable instance API to fence off these orphaned instances and prevent them from becoming participants in the
* new cluster.
*
* NOTE: this is a workaround for an existing YARN bug. Once YARN has a fix to guarantee container kills on application
* completion, this method should be removed.
*/
public static void disableTaskRunnersFromPreviousExecutions(GobblinHelixMultiManager multiManager) {
HelixManager helixManager = multiManager.getJobClusterHelixManager();
HelixDataAccessor helixDataAccessor = helixManager.getHelixDataAccessor();
String clusterName = helixManager.getClusterName();
HelixAdmin helixAdmin = helixManager.getClusterManagmentTool();
Set<String> taskRunners = HelixUtils.getParticipants(helixDataAccessor,
GobblinYarnTaskRunner.HELIX_YARN_INSTANCE_NAME_PREFIX);
LOGGER.warn("Found {} task runners in the cluster.", taskRunners.size());
for (String taskRunner : taskRunners) {
LOGGER.warn("Disabling instance: {}", taskRunner);
helixAdmin.enableInstance(clusterName, taskRunner, false);
}
}
/**
* A custom {@link MultiTypeMessageHandlerFactory} for {@link ControllerUserDefinedMessageHandler}s that
* handle messages of type {@link org.apache.helix.model.Message.MessageType#USER_DEFINE_MSG}.
*/
private class ControllerUserDefinedMessageHandlerFactory implements MultiTypeMessageHandlerFactory {
@Override
public MessageHandler createHandler(Message message, NotificationContext context) {
return new ControllerUserDefinedMessageHandler(message, context);
}
@Override
public String getMessageType() {
return Message.MessageType.USER_DEFINE_MSG.toString();
}
public List<String> getMessageTypes() {
return Collections.singletonList(getMessageType());
}
@Override
public void reset() {
}
/**
* A custom {@link MessageHandler} for handling user-defined messages to the controller.
*
* <p>
* Currently it handles the following sub types of messages:
*
* <ul>
* <li>{@link HelixMessageSubTypes#TOKEN_FILE_UPDATED}</li>
* </ul>
* </p>
*/
private class ControllerUserDefinedMessageHandler extends MessageHandler {
public ControllerUserDefinedMessageHandler(Message message, NotificationContext context) {
super(message, context);
}
@Override
public HelixTaskResult handleMessage() {
String messageSubType = this._message.getMsgSubType();
if (messageSubType.equalsIgnoreCase(HelixMessageSubTypes.TOKEN_FILE_UPDATED.toString())) {
LOGGER.info("Handling message " + HelixMessageSubTypes.TOKEN_FILE_UPDATED.toString());
eventBus.post(new DelegationTokenUpdatedEvent());
HelixTaskResult helixTaskResult = new HelixTaskResult();
helixTaskResult.setSuccess(true);
return helixTaskResult;
}
throw new IllegalArgumentException(String.format("Unknown %s message subtype: %s",
Message.MessageType.USER_DEFINE_MSG.toString(), messageSubType));
}
@Override
public void onError(Exception e, ErrorCode code, ErrorType type) {
LOGGER.error(
String.format("Failed to handle message with exception %s, error code %s, error type %s", e, code, type));
}
}
}
private static Options buildOptions() {
Options options = new Options();
options.addOption("a", GobblinClusterConfigurationKeys.APPLICATION_NAME_OPTION_NAME, true, "Yarn application name");
options.addOption("d", GobblinClusterConfigurationKeys.APPLICATION_ID_OPTION_NAME, true, "Yarn application id");
return options;
}
private static void printUsage(Options options) {
HelpFormatter formatter = new HelpFormatter();
formatter.printHelp(GobblinApplicationMaster.class.getSimpleName(), options);
}
public static void main(String[] args) throws Exception {
Options options = buildOptions();
try {
CommandLine cmd = new DefaultParser().parse(options, args);
if (!cmd.hasOption(GobblinClusterConfigurationKeys.APPLICATION_NAME_OPTION_NAME) ||
(!cmd.hasOption(GobblinClusterConfigurationKeys.APPLICATION_ID_OPTION_NAME))) {
printUsage(options);
System.exit(1);
}
//Because AM is restarted with the original AppSubmissionContext, it may have outdated delegation tokens.
//So the refreshed tokens should be added into the container's UGI before any HDFS/Hive/RM access is performed.
YarnHelixUtils.updateToken(GobblinYarnConfigurationKeys.TOKEN_FILE_NAME);
Log4jConfigurationHelper.updateLog4jConfiguration(GobblinApplicationMaster.class,
GobblinYarnConfigurationKeys.GOBBLIN_YARN_LOG4J_CONFIGURATION_FILE,
GobblinYarnConfigurationKeys.GOBBLIN_YARN_LOG4J_CONFIGURATION_FILE);
LOGGER.info(JvmUtils.getJvmInputArguments());
ContainerId containerId =
ConverterUtils.toContainerId(System.getenv().get(ApplicationConstants.Environment.CONTAINER_ID.key()));
try (GobblinApplicationMaster applicationMaster = new GobblinApplicationMaster(
cmd.getOptionValue(GobblinClusterConfigurationKeys.APPLICATION_NAME_OPTION_NAME),
cmd.getOptionValue(GobblinClusterConfigurationKeys.APPLICATION_ID_OPTION_NAME), containerId,
ConfigFactory.load(), new YarnConfiguration())) {
applicationMaster.start();
}
} catch (ParseException pe) {
printUsage(options);
System.exit(1);
}
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.yarn;
import com.google.common.base.Splitter;
import com.google.common.collect.ImmutableSet;
import com.google.common.io.Files;
import com.typesafe.config.Config;
import java.io.IOException;
import java.net.URI;
import java.util.List;
import java.util.stream.Collectors;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.util.PathUtils;
import org.apache.gobblin.util.filesystem.FileSystemSupplier;
import org.apache.gobblin.util.logs.LogCopier;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.records.ContainerId;
/**
* A base class for container processes that are sources of Gobblin Yarn application logs.
*
* @author Yinan Li
*/
public class GobblinYarnLogSource {
  private static final Splitter COMMA_SPLITTER = Splitter.on(',').omitEmptyStrings().trimResults();

  // Configuration for FileSystem instances whose closing is owned by the caller rather than
  // the JVM shutdown hook (see buildFileSystem()).
  private static final Configuration AUTO_CLOSE_CONFIG = new Configuration();

  static {
    AUTO_CLOSE_CONFIG.setBoolean("fs.automatic.close", false);
  }

  /**
   * Return if the log source is present or not.
   *
   * @return {@code true} if the log source is present and {@code false} otherwise
   */
  public boolean isLogSourcePresent() {
    return System.getenv().containsKey(ApplicationConstants.Environment.LOG_DIRS.toString());
  }

  /**
   * Build a {@link LogCopier} instance used to copy the logs out from this {@link GobblinYarnLogSource}.
   * TODO: This is duplicated to the org.apache.gobblin.yarn.GobblinYarnAppLauncher#buildLogCopier(com.typesafe.config.Config, org.apache.hadoop.fs.Path, org.apache.hadoop.fs.Path)
   *
   * @param config the {@link Config} use to create the {@link LogCopier}
   * @param containerId the {@link ContainerId} of the container the {@link LogCopier} runs in
   * @param destFs the destination {@link FileSystem}
   * @param appWorkDir the Gobblin Yarn application working directory on HDFS
   * @return a {@link LogCopier} instance
   * @throws IOException if it fails on any IO operation
   */
  public LogCopier buildLogCopier(Config config, String containerId, FileSystem destFs, Path appWorkDir)
      throws IOException {
    LogCopier.Builder builder = LogCopier.newBuilder()
        .useDestFsSupplier(new FileSystemSupplier() {
          @Override
          public FileSystem getFileSystem() throws IOException {
            return buildFileSystem(config, false);
          }
        })
        .useSrcFsSupplier(new FileSystemSupplier() {
          @Override
          public FileSystem getFileSystem() throws IOException {
            return buildFileSystem(config, true);
          }
        })
        .readFrom(getLocalLogDirs())
        .writeTo(getHdfsLogDir(containerId, destFs, appWorkDir))
        .useCurrentLogFileName(Files.getNameWithoutExtension(
            System.getProperty(GobblinYarnConfigurationKeys.GOBBLIN_YARN_CONTAINER_LOG_FILE_NAME)));
    // FIX: use COMMA_SPLITTER (trims whitespace, drops empty entries) for consistency with
    // getLocalLogDirs(); a bare Splitter.on(",") turned "log, txt" into {"log", " txt"} and the
    // space-prefixed extension could never match a real file extension.
    builder.acceptsLogFileExtensions(
        config.hasPath(GobblinYarnConfigurationKeys.LOG_FILE_EXTENSIONS) ? ImmutableSet.copyOf(
            COMMA_SPLITTER.splitToList(config.getString(GobblinYarnConfigurationKeys.LOG_FILE_EXTENSIONS)))
            : ImmutableSet.of());
    return builder.build();
  }

  /**
   * Return a new (non-cached) {@link FileSystem} instance. The {@link FileSystem} instance
   * returned by the method has automatic closing disabled. The user of the instance needs to handle closing of the
   * instance, typically as part of its shutdown sequence.
   */
  public static FileSystem buildFileSystem(Config config, boolean isLocal) throws IOException {
    return isLocal ? FileSystem.newInstanceLocal(AUTO_CLOSE_CONFIG)
        : config.hasPath(ConfigurationKeys.FS_URI_KEY) ? FileSystem.newInstance(
            URI.create(config.getString(ConfigurationKeys.FS_URI_KEY)), AUTO_CLOSE_CONFIG)
            : FileSystem.newInstance(AUTO_CLOSE_CONFIG);
  }

  /**
   * Multiple directories may be specified in the LOG_DIRS string. Split them up and return a list of {@link Path}s.
   * @return list of {@link Path}s to the log directories
   * @throws IOException
   */
  private List<Path> getLocalLogDirs() throws IOException {
    String logDirs = System.getenv(ApplicationConstants.Environment.LOG_DIRS.toString());
    return COMMA_SPLITTER.splitToList(logDirs).stream().map(e -> new Path(e)).collect(Collectors.toList());
  }

  /**
   * Compute (and create if missing) the per-container log directory under the application work dir.
   */
  private Path getHdfsLogDir(String containerId, FileSystem destFs, Path appWorkDir) throws IOException {
    Path logRootDir =
        PathUtils.combinePaths(appWorkDir.toString(), GobblinYarnConfigurationKeys.APP_LOGS_DIR_NAME, containerId);
    if (!destFs.exists(logRootDir)) {
      destFs.mkdirs(logRootDir);
    }
    return logRootDir;
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.yarn;
import java.io.File;
import java.io.IOException;
import java.net.URL;
import java.util.Collection;
import java.util.Iterator;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.LocalResourceType;
import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.util.Apps;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.util.Records;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Splitter;
import com.google.common.collect.Maps;
import com.typesafe.config.Config;
import org.apache.gobblin.util.ConfigUtils;
/**
* A utility class for Gobblin on Yarn.
*
* @author Yinan Li
*/
public class YarnHelixUtils {
  private static final Logger LOGGER = LoggerFactory.getLogger(YarnHelixUtils.class);

  private static final Splitter SPLITTER = Splitter.on(',').omitEmptyStrings().trimResults();
  private static final Splitter ZIP_SPLITTER = Splitter.on('#').omitEmptyStrings().trimResults();

  /**
   * Write a {@link Token} to a given file.
   *
   * @param tokenFilePath the token file path
   * @param credentials all tokens of this credentials to be written to given file; a {@code null}
   *                    value is tolerated and replaced with an empty {@link Credentials}
   * @param configuration a {@link Configuration} object carrying Hadoop configuration properties
   * @throws IOException
   */
  public static void writeTokenToFile(Path tokenFilePath, Credentials credentials, Configuration configuration) throws IOException {
    if (credentials == null) {
      LOGGER.warn("got empty credentials, creating default one as new.");
      credentials = new Credentials();
    }
    LOGGER.debug(String.format("Writing all tokens %s to file %s", credentials.getAllTokens(), tokenFilePath));
    credentials.writeTokenStorageFile(tokenFilePath, configuration);
  }

  /**
   * Update {@link Token} with token file localized by NM.
   * The file is looked up on the classpath (NM localizes it into the container working dir);
   * if absent, this is a no-op.
   *
   * @param tokenFileName name of the token file
   * @throws IOException
   */
  public static void updateToken(String tokenFileName) throws IOException{
    LOGGER.info("reading token from file: "+ tokenFileName);
    URL tokenFileUrl = YarnHelixUtils.class.getClassLoader().getResource(tokenFileName);
    if (tokenFileUrl != null) {
      File tokenFile = new File(tokenFileUrl.getFile());
      if (tokenFile.exists()) {
        Credentials credentials = Credentials.readTokenStorageFile(tokenFile, new Configuration());
        for (Token<? extends TokenIdentifier> token : credentials.getAllTokens()) {
          LOGGER.info("updating " + token.getKind() + " " + token.getService());
        }
        UserGroupInformation.getCurrentUser().addCredentials(credentials);
      }
    }
  }

  /**
   * Read a collection {@link Token}s from a given file.
   *
   * @param tokenFilePath the token file path
   * @param configuration a {@link Configuration} object carrying Hadoop configuration properties
   * @return a collection of {@link Token}s
   * @throws IOException
   */
  public static Collection<Token<? extends TokenIdentifier>> readTokensFromFile(Path tokenFilePath,
      Configuration configuration) throws IOException {
    return Credentials.readTokenStorageFile(tokenFilePath, configuration).getAllTokens();
  }

  /**
   * Add a file as a Yarn {@link org.apache.hadoop.yarn.api.records.LocalResource}.
   *
   * @param fs a {@link FileSystem} instance
   * @param destFilePath the destination file path
   * @param resourceType the {@link org.apache.hadoop.yarn.api.records.LocalResourceType} of the file
   * @param resourceMap a {@link Map} of file names to their corresponding
   *                    {@link org.apache.hadoop.yarn.api.records.LocalResource}s
   * @throws IOException if there's something wrong adding the file as a
   *                     {@link org.apache.hadoop.yarn.api.records.LocalResource}
   */
  public static void addFileAsLocalResource(FileSystem fs, Path destFilePath, LocalResourceType resourceType,
      Map<String, LocalResource> resourceMap) throws IOException {
    addFileAsLocalResource(fs, destFilePath, resourceType, resourceMap, destFilePath.getName());
  }

  /**
   * Same as {@link #addFileAsLocalResource(FileSystem, Path, LocalResourceType, Map)} but registers
   * the resource under an explicit {@code resourceName} instead of the file's own name.
   */
  public static void addFileAsLocalResource(FileSystem fs, Path destFilePath, LocalResourceType resourceType,
      Map<String, LocalResource> resourceMap, String resourceName) throws IOException {
    LocalResource fileResource = Records.newRecord(LocalResource.class);
    FileStatus fileStatus = fs.getFileStatus(destFilePath);
    fileResource.setResource(ConverterUtils.getYarnUrlFromPath(destFilePath));
    fileResource.setSize(fileStatus.getLen());
    fileResource.setTimestamp(fileStatus.getModificationTime());
    fileResource.setType(resourceType);
    fileResource.setVisibility(LocalResourceVisibility.APPLICATION);
    resourceMap.put(resourceName, fileResource);
  }

  /**
   * Get environment variables in a {@link java.util.Map} used when launching a Yarn container.
   *
   * @param yarnConfiguration a Hadoop {@link Configuration} object carrying Hadoop/Yarn configuration properties
   * @return a {@link java.util.Map} storing environment variables used when launching a Yarn container
   */
  @SuppressWarnings("deprecation")
  public static Map<String, String> getEnvironmentVariables(Configuration yarnConfiguration) {
    Map<String, String> environmentVariableMap = Maps.newHashMap();

    if (System.getenv().containsKey(ApplicationConstants.Environment.JAVA_HOME.key())) {
      Apps.addToEnvironment(environmentVariableMap, ApplicationConstants.Environment.JAVA_HOME.key(),
          System.getenv(ApplicationConstants.Environment.JAVA_HOME.key()));
    }

    // Add jars/files in the working directory of the ApplicationMaster to the CLASSPATH
    Apps.addToEnvironment(environmentVariableMap, ApplicationConstants.Environment.CLASSPATH.key(),
        ApplicationConstants.Environment.PWD.$());
    Apps.addToEnvironment(environmentVariableMap, ApplicationConstants.Environment.CLASSPATH.key(),
        ApplicationConstants.Environment.PWD.$() + File.separator + "*");

    String[] classpaths = yarnConfiguration.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
        YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH);
    if (classpaths != null) {
      for (String classpath : classpaths) {
        Apps.addToEnvironment(
            environmentVariableMap, ApplicationConstants.Environment.CLASSPATH.key(), classpath.trim());
      }
    }

    String[] additionalClassPath = yarnConfiguration.getStrings(GobblinYarnConfigurationKeys.GOBBLIN_YARN_ADDITIONAL_CLASSPATHS);
    if (additionalClassPath != null) {
      for (String classpath : additionalClassPath) {
        Apps.addToEnvironment(
            environmentVariableMap, ApplicationConstants.Environment.CLASSPATH.key(), classpath.trim());
      }
    }

    return environmentVariableMap;
  }

  /**
   * Copy the additional-classpath setting from the application {@link Config} into the Yarn
   * {@link Configuration}, if present and non-empty.
   */
  public static void setAdditionalYarnClassPath(Config config, Configuration yarnConfiguration) {
    if (!ConfigUtils.emptyIfNotPresent(config, GobblinYarnConfigurationKeys.GOBBLIN_YARN_ADDITIONAL_CLASSPATHS).equals(
        StringUtils.EMPTY)){
      yarnConfiguration.setStrings(GobblinYarnConfigurationKeys.GOBBLIN_YARN_ADDITIONAL_CLASSPATHS, config.getString(GobblinYarnConfigurationKeys.GOBBLIN_YARN_ADDITIONAL_CLASSPATHS));
    }
  }

  /**
   * Override {@link YarnConfiguration#YARN_APPLICATION_CLASSPATH} from the application
   * {@link Config}, if present and non-empty.
   */
  public static void setYarnClassPath(Config config, Configuration yarnConfiguration) {
    if (!ConfigUtils.emptyIfNotPresent(config, GobblinYarnConfigurationKeys.GOBBLIN_YARN_CLASSPATHS).equals(
        StringUtils.EMPTY)){
      yarnConfiguration.setStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH, config.getString(GobblinYarnConfigurationKeys.GOBBLIN_YARN_CLASSPATHS));
    }
  }

  /**
   * Register every comma-separated HDFS path in {@code hdfsFileList} as a FILE local resource.
   */
  public static void addRemoteFilesToLocalResources(String hdfsFileList, Map<String, LocalResource> resourceMap, Configuration yarnConfiguration) throws IOException {
    for (String hdfsFilePath : SPLITTER.split(hdfsFileList)) {
      Path srcFilePath = new Path(hdfsFilePath);
      YarnHelixUtils.addFileAsLocalResource(
          srcFilePath.getFileSystem(yarnConfiguration), srcFilePath, LocalResourceType.FILE, resourceMap);
    }
  }

  /**
   * Register every comma-separated {@code {zipPath}#{targetUnzippedName}} entry in
   * {@code hdfsFileList} as an ARCHIVE local resource, unpacked under the given target name.
   *
   * @throws IOException if an entry does not follow the {@code {zipPath}#{targetUnzippedName}} pattern
   */
  public static void addRemoteZipsToLocalResources(String hdfsFileList, Map<String, LocalResource> resourceMap, Configuration yarnConfiguration)
      throws IOException {
    for (String zipFileWithName : SPLITTER.split(hdfsFileList)) {
      Iterator<String> zipFileAndName = ZIP_SPLITTER.split(zipFileWithName).iterator();
      Path srcFilePath = new Path(zipFileAndName.next());
      try {
        YarnHelixUtils.addFileAsLocalResource(srcFilePath.getFileSystem(yarnConfiguration), srcFilePath, LocalResourceType.ARCHIVE,
            resourceMap, zipFileAndName.next());
      } catch (Exception e) {
        // FIX: report the original entry string. The previous code formatted the Iterator itself,
        // which renders as an object identity string and carries no diagnostic value.
        throw new IOException(String.format("Fail to extract %s as local resources, maybe a wrong pattern, "
            + "correct pattern should be {zipPath}#{targetUnzippedName}", zipFileWithName), e);
      }
    }
  }

  /**
   * Return the identifier of the containerId. The identifier is the substring in the containerId representing
   * the sequential number of the container.
   * @param containerId e.g. "container_e94_1567552810874_2132400_01_000001"
   * @return sequence number of the containerId e.g. "container-000001"
   */
  public static String getContainerNum(String containerId) {
    return "container-" + containerId.substring(containerId.lastIndexOf("_") + 1);
  }

  /**
   * Find the helix tag for the newly allocated container. The tag should align with {@link YarnContainerRequestBundle},
   * so that the correct resource can be allocated to helix workflow that has specific resource requirement.
   * @param container newly allocated container
   * @param helixTagAllocatedContainerCount current container count for each helix tag
   * @param requestedYarnContainer yarn container request specify the desired state
   * @return helix tag that this container should be assigned with, if null means need to use the default
   */
  public static String findHelixTagForContainer(Container container,
      Map<String, AtomicInteger> helixTagAllocatedContainerCount, YarnContainerRequestBundle requestedYarnContainer) {
    String foundTag = null;
    if(requestedYarnContainer != null && requestedYarnContainer.getResourceHelixTagMap().containsKey(container.getResource().toString())) {
      for (String tag : requestedYarnContainer.getResourceHelixTagMap().get(container.getResource().toString())) {
        int desiredCount = requestedYarnContainer.getHelixTagContainerCountMap().get(tag);
        helixTagAllocatedContainerCount.putIfAbsent(tag, new AtomicInteger(0));
        int allocatedCount = helixTagAllocatedContainerCount.get(tag).get();
        // Remember the last candidate tag; if every tag is already at its desired count,
        // that last tag is returned as a fallback (matching original behavior).
        foundTag = tag;
        if(allocatedCount < desiredCount) {
          return foundTag;
        }
      }
    }
    return foundTag;
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.yarn;
import java.io.IOException;
import java.util.UUID;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.Credentials;
import org.apache.helix.Criteria;
import org.apache.helix.HelixManager;
import org.apache.helix.InstanceType;
import org.apache.helix.model.Message;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Strings;
import com.google.common.base.Throwables;
import com.google.common.util.concurrent.AbstractIdleService;
import com.typesafe.config.Config;
import org.apache.gobblin.cluster.GobblinClusterConfigurationKeys;
import org.apache.gobblin.cluster.GobblinClusterManager;
import org.apache.gobblin.cluster.GobblinHelixMessagingService;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.ExecutorsUtils;
import static org.apache.hadoop.security.UserGroupInformation.HADOOP_TOKEN_FILE_LOCATION;
/**
* <p>
* The super class for key management
* This class uses a scheduled task to do re-login to re-fetch token on a
* configurable schedule. It also uses a second scheduled task
* to renew the delegation token after each login. Both the re-login interval and the token
* renewing interval are configurable.
* </p>
* @author Zihan Li
*/
public abstract class AbstractYarnAppSecurityManager extends AbstractIdleService {
protected Logger LOGGER = LoggerFactory.getLogger(this.getClass().getName());
protected Config config;
protected final HelixManager helixManager;
protected final FileSystem fs;
protected final Path tokenFilePath;
protected Credentials credentials = new Credentials();
private final long loginIntervalInMinutes;
private final long tokenRenewIntervalInMinutes;
private final boolean isHelixClusterManaged;
private final String helixInstanceName;
private final ScheduledExecutorService loginExecutor;
private final ScheduledExecutorService tokenRenewExecutor;
private Optional<ScheduledFuture<?>> scheduledTokenRenewTask = Optional.absent();
// This flag is used to tell if this is the first login. If yes, no token updated message will be
// sent to the controller and the participants as they may not be up running yet. The first login
// happens after this class starts up so the token gets regularly refreshed before the next login.
protected volatile boolean firstLogin = true;
/**
 * @param config the application configuration
 * @param helixManager the {@link HelixManager} used to message the controller and participants
 * @param fs the {@link FileSystem} the token file lives on
 * @param tokenFilePath the token file path; qualified against {@code fs} before being stored
 */
public AbstractYarnAppSecurityManager(Config config, HelixManager helixManager, FileSystem fs, Path tokenFilePath) {
  this.config = config;
  this.helixManager = helixManager;
  this.fs = fs;
  // FIX: FileSystem.makeQualified() returns a new qualified Path and does not mutate its
  // argument; the previous code discarded the return value, so the stored path stayed unqualified.
  this.tokenFilePath = this.fs.makeQualified(tokenFilePath);
  this.loginIntervalInMinutes = ConfigUtils.getLong(config, GobblinYarnConfigurationKeys.LOGIN_INTERVAL_IN_MINUTES,
      GobblinYarnConfigurationKeys.DEFAULT_LOGIN_INTERVAL_IN_MINUTES);
  this.tokenRenewIntervalInMinutes = ConfigUtils.getLong(config, GobblinYarnConfigurationKeys.TOKEN_RENEW_INTERVAL_IN_MINUTES,
      GobblinYarnConfigurationKeys.DEFAULT_TOKEN_RENEW_INTERVAL_IN_MINUTES);

  this.loginExecutor = Executors.newSingleThreadScheduledExecutor(
      ExecutorsUtils.newThreadFactory(Optional.of(LOGGER), Optional.of("KeytabReLoginExecutor")));
  this.tokenRenewExecutor = Executors.newSingleThreadScheduledExecutor(
      ExecutorsUtils.newThreadFactory(Optional.of(LOGGER), Optional.of("TokenRenewExecutor")));
  this.isHelixClusterManaged = ConfigUtils.getBoolean(config, GobblinClusterConfigurationKeys.IS_HELIX_CLUSTER_MANAGED,
      GobblinClusterConfigurationKeys.DEFAULT_IS_HELIX_CLUSTER_MANAGED);
  this.helixInstanceName = ConfigUtils.getString(config, GobblinClusterConfigurationKeys.HELIX_INSTANCE_NAME_KEY,
      GobblinClusterManager.class.getSimpleName());
}
@Override
protected void startUp() throws Exception {
  LOGGER.info("Starting the " + this.getClass().getSimpleName());

  LOGGER.info(
      String.format("Scheduling the login task with an interval of %d minute(s)", this.loginIntervalInMinutes));

  // Schedule the Kerberos re-login task. Failures are logged and swallowed deliberately so the
  // periodic task survives and retries on the next tick.
  this.loginExecutor.scheduleAtFixedRate(new Runnable() {
    @Override
    public void run() {
      try {
        loginAndScheduleTokenRenewal();
      } catch (Exception e) {
        // FIX: include the exception so the root cause of the login failure is visible in the
        // logs; the previous code dropped it entirely.
        LOGGER.error("Error during login, will continue the thread and try next time.", e);
      }
    }
  }, this.loginIntervalInMinutes, this.loginIntervalInMinutes, TimeUnit.MINUTES);
}
@Override
protected void shutDown() throws Exception {
  LOGGER.info("Stopping the " + this.getClass().getSimpleName());

  // Cancel any outstanding token renew task first (interrupting it if running), then shut down
  // both executors; ExecutorsUtils handles the awaited-termination details.
  if (this.scheduledTokenRenewTask.isPresent()) {
    this.scheduledTokenRenewTask.get().cancel(true);
  }

  ExecutorsUtils.shutdownExecutorService(this.loginExecutor, Optional.of(LOGGER));
  ExecutorsUtils.shutdownExecutorService(this.tokenRenewExecutor, Optional.of(LOGGER));
}
/**
 * Schedule the periodic delegation-token renew task and remember its future so a later
 * re-login can cancel it. IOExceptions from renewal are logged and rethrown (terminating the
 * periodic task); interruption re-sets the thread's interrupt flag.
 */
protected void scheduleTokenRenewTask() {
  LOGGER.info(String.format("Scheduling the token renew task with an interval of %d minute(s)",
      this.tokenRenewIntervalInMinutes));

  Runnable renewTask = () -> {
    try {
      renewDelegationToken();
    } catch (IOException ioe) {
      LOGGER.error("Failed to renew delegation token", ioe);
      throw Throwables.propagate(ioe);
    } catch (InterruptedException ie) {
      LOGGER.error("Token renew task has been interrupted");
      Thread.currentThread().interrupt();
    }
  };

  ScheduledFuture<?> renewFuture = this.tokenRenewExecutor.scheduleAtFixedRate(renewTask,
      this.tokenRenewIntervalInMinutes, this.tokenRenewIntervalInMinutes, TimeUnit.MINUTES);
  this.scheduledTokenRenewTask = Optional.<ScheduledFuture<?>>of(renewFuture);
}
/**
 * Performs one complete re-login cycle: cancels the currently scheduled token renew task,
 * re-logs in from the keytab, and re-schedules the renew task.
 *
 * @throws RuntimeException wrapping any {@link IOException} or {@link InterruptedException}
 *         raised by {@link #login()}
 */
public void loginAndScheduleTokenRenewal() {
  try {
    // Cancel the currently scheduled token renew task
    if (scheduledTokenRenewTask.isPresent() && scheduledTokenRenewTask.get().cancel(true)) {
      LOGGER.info("Cancelled the token renew task");
    }

    login();
    if (firstLogin) {
      firstLogin = false;
    }

    // Re-schedule the token renew task after re-login
    scheduleTokenRenewTask();
  } catch (InterruptedException ie) {
    // Re-assert the interrupt status (previously swallowed) so callers up the stack can
    // still observe the interruption even though we rethrow a wrapped exception.
    Thread.currentThread().interrupt();
    LOGGER.error("Failed to login from keytab", ie);
    throw Throwables.propagate(ie);
  } catch (IOException ioe) {
    LOGGER.error("Failed to login from keytab", ioe);
    throw Throwables.propagate(ioe);
  }
}
/**
 * Broadcasts a token-file-updated notification to the relevant Helix instances so they
 * re-read the refreshed token file.
 */
protected void sendTokenFileUpdatedMessage() {
  // When the Helix cluster is not externally managed, the controller runs within this
  // application and must also be notified. (NOTE(review): an older comment here mentioned a
  // first-login check, but the code does not consult firstLogin.)
  if (!this.isHelixClusterManaged) {
    sendTokenFileUpdatedMessage(InstanceType.CONTROLLER);
  }
  // Participants (the worker containers) always receive the notification.
  sendTokenFileUpdatedMessage(InstanceType.PARTICIPANT);
}
/**
 * Sends a TokenFileUpdatedMessage, which will be handled by {@link YarnContainerSecurityManager},
 * to all instances of the given type.
 *
 * @param instanceType the Helix instance type (controller or participant) to notify
 */
@VisibleForTesting
protected void sendTokenFileUpdatedMessage(InstanceType instanceType) {
  // An empty instance name targets every instance of the given type (expanded to "%" below).
  sendTokenFileUpdatedMessage(instanceType, "");
}
/**
 * Sends a TokenFileUpdatedMessage to the given Helix instance (or, when {@code instanceName}
 * is empty, to all instances) of the given type, using the Gobblin-specific messaging service.
 *
 * @param instanceType the Helix instance type (controller or participant) to notify
 * @param instanceName the specific instance to target, or empty/null to target all instances
 */
@VisibleForTesting
protected void sendTokenFileUpdatedMessage(InstanceType instanceType, String instanceName) {
  // "%" is the Helix Criteria wildcard: match every instance/resource/partition/state.
  Criteria criteria = new Criteria();
  criteria.setInstanceName(Strings.isNullOrEmpty(instanceName) ? "%" : instanceName);
  criteria.setResource("%");
  criteria.setPartition("%");
  criteria.setPartitionState("%");
  criteria.setRecipientInstanceType(instanceType);
  /**
   * #HELIX-0.6.7-WORKAROUND
   * Add back when LIVESTANCES messaging is ported to 0.6 branch
  if (instanceType == InstanceType.PARTICIPANT) {
    criteria.setDataSource(Criteria.DataSource.LIVEINSTANCES);
  }
  **/
  criteria.setSessionSpecific(true);

  // Message name carries a UUID suffix so each broadcast is unique.
  Message tokenFileUpdatedMessage = new Message(Message.MessageType.USER_DEFINE_MSG,
      HelixMessageSubTypes.TOKEN_FILE_UPDATED.toString().toLowerCase() + UUID.randomUUID().toString());
  tokenFileUpdatedMessage.setMsgSubType(HelixMessageSubTypes.TOKEN_FILE_UPDATED.toString());
  tokenFileUpdatedMessage.setMsgState(Message.MessageState.NEW);
  if (instanceType == InstanceType.CONTROLLER) {
    // The controller has no fixed session; "*" addresses whatever its current session is.
    tokenFileUpdatedMessage.setTgtSessionId("*");
  }

  // #HELIX-0.6.7-WORKAROUND
  // Temporarily bypass the default messaging service to allow upgrade to 0.6.7 which is missing support
  // for messaging to instances
  //int messagesSent = this.helixManager.getMessagingService().send(criteria, tokenFileUpdatedMessage);
  GobblinHelixMessagingService messagingService = new GobblinHelixMessagingService(helixManager);

  int messagesSent = messagingService.send(criteria, tokenFileUpdatedMessage);
  LOGGER.info(String.format("Sent %d token file updated message(s) to the %s", messagesSent, instanceType));
}
/**
 * Writes the given credentials to the token file, replacing any existing file.
 *
 * <p>The file is created with owner-only read/write (600) permission, and the
 * {@code HADOOP_TOKEN_FILE_LOCATION} system property is pointed at it so that subsequently
 * created UGI/FileSystem instances pick up the fresh tokens.</p>
 *
 * @param cred the credentials to persist
 * @throws IOException if the existing file cannot be deleted or the new one cannot be written
 */
protected synchronized void writeDelegationTokenToFile(Credentials cred) throws IOException {
  if (this.fs.exists(this.tokenFilePath)) {
    LOGGER.info("Deleting existing token file " + this.tokenFilePath);
    this.fs.delete(this.tokenFilePath, false);
  }

  // The permission applied below is 600 (owner read/write only); the previous log message
  // incorrectly said 644.
  LOGGER.debug("creating new token file {} with 600 permission.", this.tokenFilePath);
  YarnHelixUtils.writeTokenToFile(this.tokenFilePath, cred, this.fs.getConf());
  // Only grant access to the token file to the login user
  this.fs.setPermission(this.tokenFilePath, new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE));

  System.setProperty(HADOOP_TOKEN_FILE_LOCATION, tokenFilePath.toUri().getPath());
  LOGGER.info("set HADOOP_TOKEN_FILE_LOCATION = {}", this.tokenFilePath);
}
/**
 * Writes the currently-held delegation token ({@code this.credentials}) to the token file.
 * Should be used only for testing.
 *
 * @throws IOException if the token file cannot be written
 */
@VisibleForTesting
protected synchronized void writeDelegationTokenToFile() throws IOException {
  writeDelegationTokenToFile(this.credentials);
}
/**
 * Renews the existing delegation token.
 *
 * @throws IOException if the renewal fails
 * @throws InterruptedException if the renewing thread is interrupted
 */
protected abstract void renewDelegationToken() throws IOException, InterruptedException;

/**
 * Logs the user in from a given keytab file.
 *
 * @throws IOException if the login fails
 * @throws InterruptedException if the logging-in thread is interrupted
 */
protected abstract void login() throws IOException, InterruptedException;
}
| 1,911 |
0 | Create_ds/gobblin/gobblin-yarn/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-yarn/src/main/java/org/apache/gobblin/yarn/GobblinYarnTaskRunner.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.yarn;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.helix.NotificationContext;
import org.apache.helix.messaging.handling.HelixTaskResult;
import org.apache.helix.messaging.handling.MessageHandler;
import org.apache.helix.messaging.handling.MultiTypeMessageHandlerFactory;
import org.apache.helix.model.Message;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Optional;
import com.google.common.base.Strings;
import com.google.common.util.concurrent.Service;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;
import org.apache.gobblin.cluster.GobblinClusterConfigurationKeys;
import org.apache.gobblin.cluster.GobblinClusterUtils;
import org.apache.gobblin.cluster.GobblinTaskRunner;
import org.apache.gobblin.util.JvmUtils;
import org.apache.gobblin.util.logs.Log4jConfigurationHelper;
import org.apache.gobblin.util.logs.LogCopier;
import org.apache.gobblin.yarn.event.DelegationTokenUpdatedEvent;
public class GobblinYarnTaskRunner extends GobblinTaskRunner {
private static final Logger LOGGER = LoggerFactory.getLogger(GobblinTaskRunner.class);
public static final String HELIX_YARN_INSTANCE_NAME_PREFIX = GobblinYarnTaskRunner.class.getSimpleName();
public GobblinYarnTaskRunner(String applicationName, String applicationId, String helixInstanceName, ContainerId containerId, Config config,
Optional<Path> appWorkDirOptional) throws Exception {
super(applicationName, helixInstanceName, applicationId, getTaskRunnerId(containerId), config
.withValue(GobblinYarnConfigurationKeys.CONTAINER_NUM_KEY,
ConfigValueFactory.fromAnyRef(YarnHelixUtils.getContainerNum(containerId.toString()))), appWorkDirOptional);
}
@Override
public List<Service> getServices() {
List<Service> services = new ArrayList<>();
services.addAll(super.getServices());
LogCopier logCopier = null;
if (clusterConfig.hasPath(GobblinYarnConfigurationKeys.LOGS_SINK_ROOT_DIR_KEY)) {
GobblinYarnLogSource gobblinYarnLogSource = new GobblinYarnLogSource();
String containerLogDir = clusterConfig.getString(GobblinYarnConfigurationKeys.LOGS_SINK_ROOT_DIR_KEY);
if (gobblinYarnLogSource.isLogSourcePresent()) {
try {
logCopier = gobblinYarnLogSource.buildLogCopier(this.clusterConfig, this.taskRunnerId, this.fs,
new Path(containerLogDir, GobblinClusterUtils.getAppWorkDirPath(this.applicationName, this.applicationId)));
services.add(logCopier);
} catch (Exception e) {
LOGGER.warn("Cannot add LogCopier service to the service manager due to", e);
}
}
}
if (UserGroupInformation.isSecurityEnabled()) {
LOGGER.info("Adding YarnContainerSecurityManager since security is enabled");
services.add(new YarnContainerSecurityManager(this.clusterConfig, this.fs, this.eventBus, logCopier));
}
return services;
}
@Override
public MultiTypeMessageHandlerFactory getUserDefinedMessageHandlerFactory() {
return new ParticipantUserDefinedMessageHandlerFactory();
}
/**
* A custom {@link MultiTypeMessageHandlerFactory} for {@link ParticipantUserDefinedMessageHandler}s that
* handle messages of type {@link org.apache.helix.model.Message.MessageType#USER_DEFINE_MSG}.
*/
private class ParticipantUserDefinedMessageHandlerFactory implements MultiTypeMessageHandlerFactory {
@Override
public MessageHandler createHandler(Message message, NotificationContext context) {
return new ParticipantUserDefinedMessageHandler(message, context);
}
@Override
public String getMessageType() {
return Message.MessageType.USER_DEFINE_MSG.toString();
}
public List<String> getMessageTypes() {
return Collections.singletonList(getMessageType());
}
@Override
public void reset() {
}
/**
* A custom {@link MessageHandler} for handling user-defined messages to the participants.
*
* <p>
* Currently it handles the following sub types of messages:
*
* <ul>
* <li>{@link org.apache.gobblin.cluster.HelixMessageSubTypes#TOKEN_FILE_UPDATED}</li>
* </ul>
* </p>
*/
private class ParticipantUserDefinedMessageHandler extends MessageHandler {
public ParticipantUserDefinedMessageHandler(Message message, NotificationContext context) {
super(message, context);
}
@Override
public HelixTaskResult handleMessage() {
String messageSubType = this._message.getMsgSubType();
if (messageSubType.equalsIgnoreCase(org.apache.gobblin.cluster.HelixMessageSubTypes.TOKEN_FILE_UPDATED.toString())) {
LOGGER.info("Handling message " + org.apache.gobblin.cluster.HelixMessageSubTypes.TOKEN_FILE_UPDATED.toString());
eventBus.post(new DelegationTokenUpdatedEvent());
HelixTaskResult helixTaskResult = new HelixTaskResult();
helixTaskResult.setSuccess(true);
return helixTaskResult;
}
throw new IllegalArgumentException(String
.format("Unknown %s message subtype: %s", Message.MessageType.USER_DEFINE_MSG.toString(), messageSubType));
}
@Override
public void onError(Exception e, ErrorCode code, ErrorType type) {
LOGGER.error(
String.format("Failed to handle message with exception %s, error code %s, error type %s", e, code, type));
}
}
}
private static String getApplicationId(ContainerId containerId) {
return containerId.getApplicationAttemptId().getApplicationId().toString();
}
private static String getTaskRunnerId(ContainerId containerId) {
return containerId.toString();
}
public static void main(String[] args) {
Options options = buildOptions();
try {
CommandLine cmd = new DefaultParser().parse(options, args);
if (!cmd.hasOption(GobblinClusterConfigurationKeys.APPLICATION_NAME_OPTION_NAME) || !cmd
.hasOption(GobblinClusterConfigurationKeys.HELIX_INSTANCE_NAME_OPTION_NAME) || !cmd
.hasOption(GobblinClusterConfigurationKeys.APPLICATION_ID_OPTION_NAME)) {
printUsage(options);
System.exit(1);
}
Log4jConfigurationHelper.updateLog4jConfiguration(GobblinTaskRunner.class,
GobblinYarnConfigurationKeys.GOBBLIN_YARN_LOG4J_CONFIGURATION_FILE,
GobblinYarnConfigurationKeys.GOBBLIN_YARN_LOG4J_CONFIGURATION_FILE);
LOGGER.info(JvmUtils.getJvmInputArguments());
ContainerId containerId =
ConverterUtils.toContainerId(System.getenv().get(ApplicationConstants.Environment.CONTAINER_ID.key()));
String applicationName = cmd.getOptionValue(GobblinClusterConfigurationKeys.APPLICATION_NAME_OPTION_NAME);
String applicationId = cmd.getOptionValue(GobblinClusterConfigurationKeys.APPLICATION_ID_OPTION_NAME);
String helixInstanceName = cmd.getOptionValue(GobblinClusterConfigurationKeys.HELIX_INSTANCE_NAME_OPTION_NAME);
String helixInstanceTags = cmd.getOptionValue(GobblinClusterConfigurationKeys.HELIX_INSTANCE_TAGS_OPTION_NAME);
Config config = ConfigFactory.load();
if (!Strings.isNullOrEmpty(helixInstanceTags)) {
config = config.withValue(GobblinClusterConfigurationKeys.HELIX_INSTANCE_TAGS_KEY, ConfigValueFactory.fromAnyRef(helixInstanceTags));
}
GobblinTaskRunner gobblinTaskRunner =
new GobblinYarnTaskRunner(applicationName, applicationId, helixInstanceName, containerId, config,
Optional.<Path>absent());
gobblinTaskRunner.start();
} catch (ParseException pe) {
printUsage(options);
System.exit(1);
} catch (Throwable t) {
// Ideally, we should not be catching non-recoverable exceptions and errors. However,
// simply propagating the exception may prevent the container exit due to the presence of non-daemon threads present
// in the application. Hence, we catch this exception to invoke System.exit() which in turn ensures that all non-daemon threads are killed.
LOGGER.error("Exception encountered: {}", t);
System.exit(1);
}
}
} | 1,912 |
0 | Create_ds/gobblin/gobblin-yarn/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-yarn/src/main/java/org/apache/gobblin/yarn/GobblinYarnConfigurationKeys.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.yarn;
import java.time.Duration;
/**
* A central place for configuration related constants of Gobblin on Yarn.
*
* @author Yinan Li
*/
// NOTE(review): pure constants holder; consider adding a private constructor to prevent
// instantiation (utility-class convention) in a follow-up, since that is a code change.
public class GobblinYarnConfigurationKeys {

  // Common prefix for every Gobblin-on-Yarn configuration key.
  public static final String GOBBLIN_YARN_PREFIX = "gobblin.yarn.";

  // General Gobblin Yarn application configuration properties.
  public static final String APP_MASTER_CLASS = GOBBLIN_YARN_PREFIX + "app.master.class";
  public static final String DEFAULT_APP_MASTER_CLASS = GobblinApplicationMaster.class.getName();
  public static final String APP_MASTER_LOG_FILE_NAME = GOBBLIN_YARN_PREFIX + "app.master.log.file.name";
  public static final String APPLICATION_NAME_KEY = GOBBLIN_YARN_PREFIX + "app.name";
  public static final String APP_QUEUE_KEY = GOBBLIN_YARN_PREFIX + "app.queue";
  public static final String APP_REPORT_INTERVAL_MINUTES_KEY = GOBBLIN_YARN_PREFIX + "app.report.interval.minutes";
  public static final String MAX_GET_APP_REPORT_FAILURES_KEY = GOBBLIN_YARN_PREFIX + "max.get.app.report.failures";
  public static final String EMAIL_NOTIFICATION_ON_SHUTDOWN_KEY =
      GOBBLIN_YARN_PREFIX + "email.notification.on.shutdown";
  public static final String RELEASED_CONTAINERS_CACHE_EXPIRY_SECS = GOBBLIN_YARN_PREFIX + "releasedContainersCacheExpirySecs";
  public static final int DEFAULT_RELEASED_CONTAINERS_CACHE_EXPIRY_SECS = 300;
  public static final String APP_VIEW_ACL = GOBBLIN_YARN_PREFIX + "appViewAcl";
  public static final String DEFAULT_APP_VIEW_ACL = "*";
  public static final String YARN_RESOURCE_MANAGER_PREFIX = "yarn.resourcemanager.";
  public static final String YARN_RESOURCE_MANAGER_ADDRESS = YARN_RESOURCE_MANAGER_PREFIX + "address";
  public static final String YARN_RESOURCE_MANAGER_IDS = YARN_RESOURCE_MANAGER_PREFIX + "ids";
  public static final String OTHER_YARN_RESOURCE_MANAGER_ADDRESSES= "other.yarn.resourcemanager.addresses";

  // Gobblin Yarn ApplicationMaster configuration properties.
  public static final String APP_MASTER_MEMORY_MBS_KEY = GOBBLIN_YARN_PREFIX + "app.master.memory.mbs";
  public static final String APP_MASTER_CORES_KEY = GOBBLIN_YARN_PREFIX + "app.master.cores";
  public static final String APP_MASTER_JARS_KEY = GOBBLIN_YARN_PREFIX + "app.master.jars";
  public static final String APP_MASTER_FILES_LOCAL_KEY = GOBBLIN_YARN_PREFIX + "app.master.files.local";
  public static final String APP_MASTER_FILES_REMOTE_KEY = GOBBLIN_YARN_PREFIX + "app.master.files.remote";
  public static final String APP_MASTER_ZIPS_REMOTE_KEY = GOBBLIN_YARN_PREFIX + "app.master.zips.remote";
  public static final String APP_MASTER_WORK_DIR_NAME = "appmaster";
  public static final String APP_MASTER_JVM_ARGS_KEY = GOBBLIN_YARN_PREFIX + "app.master.jvm.args";
  public static final String APP_MASTER_SERVICE_CLASSES = GOBBLIN_YARN_PREFIX + "app.master.serviceClasses";
  public static final String APP_MASTER_MAX_ATTEMPTS_KEY = GOBBLIN_YARN_PREFIX + "app.master.max.attempts";
  // NOTE(review): this is a default *value*, not a key; the "_KEY" suffix is a misnomer kept for
  // backward compatibility.
  public static final int DEFAULT_APP_MASTER_MAX_ATTEMPTS_KEY = 10;
  // Amount of overhead to subtract when computing the Xmx value. This is to account for non-heap memory, like metaspace
  // and stack memory
  public static final String APP_MASTER_JVM_MEMORY_OVERHEAD_MBS_KEY = GOBBLIN_YARN_PREFIX + "app.master.jvmMemoryOverheadMbs";
  public static final int DEFAULT_APP_MASTER_JVM_MEMORY_OVERHEAD_MBS = 0;
  // The ratio of the amount of Xmx to carve out of the container memory before adjusting for jvm memory overhead
  public static final String APP_MASTER_JVM_MEMORY_XMX_RATIO_KEY = GOBBLIN_YARN_PREFIX + "app.master.jvmMemoryXmxRatio";
  public static final double DEFAULT_APP_MASTER_JVM_MEMORY_XMX_RATIO = 1.0;

  // Gobblin Yarn container configuration properties.
  public static final String INITIAL_CONTAINERS_KEY = GOBBLIN_YARN_PREFIX + "initial.containers";
  public static final String CONTAINER_MEMORY_MBS_KEY = GOBBLIN_YARN_PREFIX + "container.memory.mbs";
  public static final String CONTAINER_CORES_KEY = GOBBLIN_YARN_PREFIX + "container.cores";
  public static final String CONTAINER_JARS_KEY = GOBBLIN_YARN_PREFIX + "container.jars";
  public static final String CONTAINER_FILES_LOCAL_KEY = GOBBLIN_YARN_PREFIX + "container.files.local";
  public static final String CONTAINER_FILES_REMOTE_KEY = GOBBLIN_YARN_PREFIX + "container.files.remote";
  public static final String CONTAINER_ZIPS_REMOTE_KEY = GOBBLIN_YARN_PREFIX + "container.zips.remote";
  public static final String CONTAINER_WORK_DIR_NAME = "container";
  public static final String CONTAINER_JVM_ARGS_KEY = GOBBLIN_YARN_PREFIX + "container.jvm.args";
  public static final String CONTAINER_HOST_AFFINITY_ENABLED = GOBBLIN_YARN_PREFIX + "container.affinity.enabled";
  // Amount of overhead to subtract when computing the Xmx value. This is to account for non-heap memory, like metaspace
  // and stack memory
  public static final String CONTAINER_JVM_MEMORY_OVERHEAD_MBS_KEY = GOBBLIN_YARN_PREFIX + "container.jvmMemoryOverheadMbs";
  public static final int DEFAULT_CONTAINER_JVM_MEMORY_OVERHEAD_MBS = 0;
  // The ratio of the amount of Xmx to carve out of the container memory before adjusting for jvm memory overhead
  public static final String CONTAINER_JVM_MEMORY_XMX_RATIO_KEY = GOBBLIN_YARN_PREFIX + "container.jvmMemoryXmxRatio";
  public static final double DEFAULT_CONTAINER_JVM_MEMORY_XMX_RATIO = 1.0;
  public static final String MAX_CONTAINER_LAUNCH_THREADS_KEY = GOBBLIN_YARN_PREFIX + "maxContainerLaunchThreads";
  public static final Integer DEFAULT_MAX_CONTAINER_LAUNCH_THREADS = 1024;

  // Helix configuration properties.
  public static final String HELIX_INSTANCE_MAX_RETRIES = GOBBLIN_YARN_PREFIX + "helix.instance.max.retries";
  public static final String HELIX_PURGE_PREFIX = GOBBLIN_YARN_PREFIX + "helix.purgeOfflineHelixInstances.";
  public static final String HELIX_PURGE_OFFLINE_INSTANCES_ENABLED = HELIX_PURGE_PREFIX + "enabled";
  public static final boolean DEFAULT_HELIX_PURGE_OFFLINE_INSTANCES_ENABLED = true;
  public static final String HELIX_PURGE_LAGGING_THRESHOLD_MILLIS = HELIX_PURGE_PREFIX + "laggingThresholdMs";
  public static final long DEFAULT_HELIX_PURGE_LAGGING_THRESHOLD_MILLIS = Duration.ofMinutes(1).toMillis();
  public static final String HELIX_PURGE_POLLING_RATE_MILLIS = HELIX_PURGE_PREFIX + "pollingRateMs";
  public static final long DEFAULT_HELIX_PURGE_POLLING_RATE_MILLIS = Duration.ofSeconds(5).toMillis();

  // Security and authentication configuration properties.
  public static final String SECURITY_MANAGER_CLASS = GOBBLIN_YARN_PREFIX + "security.manager.class";
  public static final String DEFAULT_SECURITY_MANAGER_CLASS = "org.apache.gobblin.yarn.YarnAppSecurityManagerWithKeytabs";
  public static final String ENABLE_KEY_MANAGEMENT = GOBBLIN_YARN_PREFIX + "enable.key.management";
  public static final String KEYTAB_FILE_PATH = GOBBLIN_YARN_PREFIX + "keytab.file.path";
  public static final String KEYTAB_PRINCIPAL_NAME = GOBBLIN_YARN_PREFIX + "keytab.principal.name";
  public static final String TOKEN_FILE_NAME = ".token";
  public static final String LOGIN_INTERVAL_IN_MINUTES = GOBBLIN_YARN_PREFIX + "login.interval.minutes";
  // Long.MAX_VALUE effectively disables periodic keytab re-login unless explicitly configured.
  public static final Long DEFAULT_LOGIN_INTERVAL_IN_MINUTES = Long.MAX_VALUE;
  public static final String TOKEN_RENEW_INTERVAL_IN_MINUTES = GOBBLIN_YARN_PREFIX + "token.renew.interval.minutes";
  // Long.MAX_VALUE effectively disables periodic token renewal unless explicitly configured.
  public static final Long DEFAULT_TOKEN_RENEW_INTERVAL_IN_MINUTES = Long.MAX_VALUE;

  // Resource/dependencies configuration properties.
  // Missing this configuration should throw fatal exceptions to avoid harder-to-debug situation from Yarn container side.
  public static final String LIB_JARS_DIR_KEY = GOBBLIN_YARN_PREFIX + "lib.jars.dir";
  public static final String LIB_JARS_DIR_NAME = "_libjars";
  public static final String APP_JARS_DIR_NAME = "_appjars";
  public static final String APP_FILES_DIR_NAME = "_appfiles";
  public static final String APP_LOGS_DIR_NAME = "_applogs";

  //Container Log location properties
  public static final String GOBBLIN_YARN_CONTAINER_LOG_DIR_NAME = GobblinYarnConfigurationKeys.GOBBLIN_YARN_PREFIX + "app.container.log.dir";
  public static final String GOBBLIN_YARN_CONTAINER_LOG_FILE_NAME = GobblinYarnConfigurationKeys.GOBBLIN_YARN_PREFIX + "app.container.log.file";

  // Other misc configuration properties.
  public static final String LOGS_SINK_ROOT_DIR_KEY = GOBBLIN_YARN_PREFIX + "logs.sink.root.dir";
  public static final String LOG_FILE_EXTENSIONS = GOBBLIN_YARN_PREFIX + "log.file.extensions" ;
  public static final String LOG_COPIER_DISABLE_DRIVER_COPY = GOBBLIN_YARN_PREFIX + "log.copier.disable.driver.copy";
  public static final String GOBBLIN_YARN_CONTAINER_TIMEZONE = GOBBLIN_YARN_PREFIX + "container.timezone" ;
  public static final String DEFAULT_GOBBLIN_YARN_CONTAINER_TIMEZONE = "America/Los_Angeles" ;

  //Constant definitions
  public static final String GOBBLIN_YARN_LOG4J_CONFIGURATION_FILE = "log4j-yarn.properties";
  public static final String JVM_USER_TIMEZONE_CONFIG = "user.timezone";

  //Configuration properties relating to container mode of execution e.g. Gobblin cluster runs on Yarn
  public static final String CONTAINER_NUM_KEY = "container.num";

  //Configuration to allow GobblinYarnAppLauncher to exit without killing the Gobblin-on-Yarn application
  public static final String GOBBLIN_YARN_DETACH_ON_EXIT_ENABLED = GOBBLIN_YARN_PREFIX + "detach.on.exit.enabled";
  public static final boolean DEFAULT_GOBBLIN_YARN_DETACH_ON_EXIT = false;

  //Configuration to set log levels for classes in Azkaban mode
  public static final String GOBBLIN_YARN_AZKABAN_CLASS_LOG_LEVELS = GOBBLIN_YARN_PREFIX + "azkaban.class.logLevels";

  //Container classpaths properties
  public static final String GOBBLIN_YARN_ADDITIONAL_CLASSPATHS = GOBBLIN_YARN_PREFIX + "additional.classpaths";
  public static final String GOBBLIN_YARN_CLASSPATHS = GOBBLIN_YARN_PREFIX + "classpaths";

  //Config to control Heartbeat interval for Yarn AMRM client.
  public static final String AMRM_HEARTBEAT_INTERVAL_SECS = GOBBLIN_YARN_PREFIX + "amRmHeartbeatIntervalSecs";
  public static final Integer DEFAULT_AMRM_HEARTBEAT_INTERVAL_SECS = 15;
}
| 1,913 |
0 | Create_ds/gobblin/gobblin-yarn/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-yarn/src/main/java/org/apache/gobblin/yarn/YarnService.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.yarn;
import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.IntStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.LocalResourceType;
import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.client.api.AMRMClient;
import org.apache.hadoop.yarn.client.api.async.AMRMClientAsync;
import org.apache.hadoop.yarn.client.api.async.NMClientAsync;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
import org.apache.hadoop.yarn.util.Records;
import org.apache.helix.HelixAdmin;
import org.apache.helix.HelixManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
import com.google.common.base.Throwables;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
import com.google.common.eventbus.EventBus;
import com.google.common.eventbus.Subscribe;
import com.google.common.io.Closer;
import com.google.common.util.concurrent.AbstractIdleService;
import com.typesafe.config.Config;
import lombok.AccessLevel;
import lombok.Getter;
import org.apache.gobblin.cluster.GobblinClusterConfigurationKeys;
import org.apache.gobblin.cluster.GobblinClusterMetricTagNames;
import org.apache.gobblin.cluster.GobblinClusterUtils;
import org.apache.gobblin.cluster.HelixUtils;
import org.apache.gobblin.cluster.event.ClusterManagerShutdownRequest;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.metrics.GobblinMetrics;
import org.apache.gobblin.metrics.MetricReporterException;
import org.apache.gobblin.metrics.MultiReporterException;
import org.apache.gobblin.metrics.Tag;
import org.apache.gobblin.metrics.event.EventSubmitter;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.ExecutorsUtils;
import org.apache.gobblin.util.JvmUtils;
import org.apache.gobblin.util.executors.ScalingThreadPoolExecutor;
import org.apache.gobblin.yarn.event.ContainerReleaseRequest;
import org.apache.gobblin.yarn.event.ContainerShutdownRequest;
import org.apache.gobblin.yarn.event.NewContainerRequest;
import static org.apache.gobblin.yarn.GobblinYarnTaskRunner.HELIX_YARN_INSTANCE_NAME_PREFIX;
/**
* This class is responsible for all Yarn-related stuffs including ApplicationMaster registration,
* ApplicationMaster un-registration, Yarn container management, etc.
*
* @author Yinan Li
*/
public class YarnService extends AbstractIdleService {
private static final Logger LOGGER = LoggerFactory.getLogger(YarnService.class);
private static final String UNKNOWN_HELIX_INSTANCE = "UNKNOWN";
private final String applicationName;
private final String applicationId;
private final String appViewAcl;
//Default helix instance tag derived from cluster level config
private final String helixInstanceTags;
private final Config config;
private final EventBus eventBus;
private final Configuration yarnConfiguration;
// File system used to stage container local resources (jars, config files).
private final FileSystem fs;
// Metrics/event plumbing; present only when metrics are enabled in config.
private final Optional<GobblinMetrics> gobblinMetrics;
private final Optional<EventSubmitter> eventSubmitter;
// Async YARN clients: amrmClientAsync talks to the ResourceManager (container
// requests/releases), nmClientAsync talks to NodeManagers (start/stop containers).
@VisibleForTesting
@Getter(AccessLevel.PROTECTED)
private final AMRMClientAsync<AMRMClient.ContainerRequest> amrmClientAsync;
private final NMClientAsync nmClientAsync;
// Executor used to launch containers off the callback threads.
private final ExecutorService containerLaunchExecutor;
// Default container sizing and initial fleet size, read from config at construction.
private final int initialContainers;
private final int requestedContainerMemoryMbs;
private final int requestedContainerCores;
private final int jvmMemoryOverheadMbs;
private final double jvmMemoryXmxRatio;
private final boolean containerHostAffinityEnabled;
// Max times a Helix instance is restarted in a replacement container before giving up.
private final int helixInstanceMaxRetries;
private final Optional<String> containerJvmArgs;
private final String containerTimezone;
private final HelixManager helixManager;
private final HelixAdmin helixAdmin;
// Maximum resource capability reported by the RM at AM registration; absent until startUp().
@Getter(AccessLevel.PROTECTED)
private volatile Optional<Resource> maxResourceCapacity = Optional.absent();
// Security tokens for accessing HDFS
private ByteBuffer tokens;
private final Closer closer = Closer.create();
// Monitor notified (elsewhere in this class) once every container has stopped; used by shutDown().
private final Object allContainersStopped = new Object();
// A map from container IDs to Container instances, Helix participant IDs of the containers and Helix Tag
@VisibleForTesting
@Getter(AccessLevel.PROTECTED)
private final ConcurrentMap<ContainerId, ContainerInfo> containerMap = Maps.newConcurrentMap();
// A cache of the containers with an outstanding container release request.
// This is a cache instead of a set to get the automatic cleanup in case a container completes before the requested
// release.
@VisibleForTesting
@Getter(AccessLevel.PROTECTED)
private final Cache<ContainerId, String> releasedContainerCache;
// A generator for an integer ID of a Helix instance (participant)
private final AtomicInteger helixInstanceIdGenerator = new AtomicInteger(0);
// A map from Helix instance names to the number times the instances are retried to be started
private final ConcurrentMap<String, AtomicInteger> helixInstanceRetryCount = Maps.newConcurrentMap();
// A concurrent HashSet of unused Helix instance names. An unused Helix instance name gets put
// into the set if the container running the instance completes. Unused Helix
// instance names get picked up when replacement containers get allocated.
private final Set<String> unusedHelixInstanceNames = ConcurrentHashMap.newKeySet();
// The map from helix tag to allocated container count
private final ConcurrentMap<String, AtomicInteger> allocatedContainerCountMap = Maps.newConcurrentMap();
private final boolean isPurgingOfflineHelixInstancesEnabled;
private final long helixPurgeLaggingThresholdMs;
private final long helixPurgeStatusPollingRateMs;
// Tracks when each container's Helix participant was last seen live, to release idle containers.
private final ConcurrentMap<ContainerId, Long> containerIdleSince = Maps.newConcurrentMap();
// Container IDs whose completion callback arrived before their allocation callback; reconciled
// lazily in requestTargetNumberOfContainers().
private final ConcurrentMap<ContainerId, String> removedContainerID = Maps.newConcurrentMap();
private volatile YarnContainerRequestBundle yarnContainerRequest;
// Distinct priorities per resource shape; required because of YARN-314 (see requestContainer).
private final AtomicInteger priorityNumGenerator = new AtomicInteger(0);
private final Map<String, Integer> resourcePriorityMap = new HashMap<>();
// Lifecycle flags: suppress replacement containers during shutdown, and container requests before startup completes.
private volatile boolean shutdownInProgress = false;
private volatile boolean startupInProgress = true;
/**
 * Builds the YarnService: wires up the async AM-RM and NM clients, reads all container sizing
 * and Helix tuning knobs from {@code config}, and fetches the initial security tokens.
 * Clients are only init()-ed here; they are start()-ed in {@link #startUp()}.
 */
public YarnService(Config config, String applicationName, String applicationId, YarnConfiguration yarnConfiguration,
    FileSystem fs, EventBus eventBus, HelixManager helixManager, HelixAdmin helixAdmin) throws Exception {
  this.applicationName = applicationName;
  this.applicationId = applicationId;
  this.config = config;
  this.eventBus = eventBus;
  this.helixManager = helixManager;
  this.helixAdmin = helixAdmin;
  // Metrics and the event submitter share the same enablement flag.
  this.gobblinMetrics = config.getBoolean(ConfigurationKeys.METRICS_ENABLED_KEY) ?
      Optional.of(buildGobblinMetrics()) : Optional.<GobblinMetrics>absent();
  this.eventSubmitter = config.getBoolean(ConfigurationKeys.METRICS_ENABLED_KEY) ?
      Optional.of(buildEventSubmitter()) : Optional.<EventSubmitter>absent();
  this.yarnConfiguration = yarnConfiguration;
  this.fs = fs;
  // Heartbeat interval is configured in seconds but the YARN API takes milliseconds.
  int amRmHeartbeatIntervalMillis = Long.valueOf(TimeUnit.SECONDS.toMillis(
      ConfigUtils.getInt(config, GobblinYarnConfigurationKeys.AMRM_HEARTBEAT_INTERVAL_SECS,
          GobblinYarnConfigurationKeys.DEFAULT_AMRM_HEARTBEAT_INTERVAL_SECS))).intValue();
  this.amrmClientAsync = closer.register(
      AMRMClientAsync.createAMRMClientAsync(amRmHeartbeatIntervalMillis, new AMRMClientCallbackHandler()));
  this.amrmClientAsync.init(this.yarnConfiguration);
  this.nmClientAsync = closer.register(NMClientAsync.createNMClientAsync(getNMClientCallbackHandler()));
  this.nmClientAsync.init(this.yarnConfiguration);
  this.initialContainers = config.getInt(GobblinYarnConfigurationKeys.INITIAL_CONTAINERS_KEY);
  this.requestedContainerMemoryMbs = config.getInt(GobblinYarnConfigurationKeys.CONTAINER_MEMORY_MBS_KEY);
  this.requestedContainerCores = config.getInt(GobblinYarnConfigurationKeys.CONTAINER_CORES_KEY);
  this.containerHostAffinityEnabled = config.getBoolean(GobblinYarnConfigurationKeys.CONTAINER_HOST_AFFINITY_ENABLED);
  this.helixInstanceMaxRetries = config.getInt(GobblinYarnConfigurationKeys.HELIX_INSTANCE_MAX_RETRIES);
  this.helixInstanceTags = ConfigUtils.getString(config,
      GobblinClusterConfigurationKeys.HELIX_INSTANCE_TAGS_KEY, GobblinClusterConfigurationKeys.HELIX_DEFAULT_TAG);
  this.isPurgingOfflineHelixInstancesEnabled = ConfigUtils.getBoolean(config,
      GobblinYarnConfigurationKeys.HELIX_PURGE_OFFLINE_INSTANCES_ENABLED,
      GobblinYarnConfigurationKeys.DEFAULT_HELIX_PURGE_OFFLINE_INSTANCES_ENABLED);
  this.helixPurgeLaggingThresholdMs = ConfigUtils.getLong(config,
      GobblinYarnConfigurationKeys.HELIX_PURGE_LAGGING_THRESHOLD_MILLIS,
      GobblinYarnConfigurationKeys.DEFAULT_HELIX_PURGE_LAGGING_THRESHOLD_MILLIS);
  this.helixPurgeStatusPollingRateMs = ConfigUtils.getLong(config,
      GobblinYarnConfigurationKeys.HELIX_PURGE_POLLING_RATE_MILLIS,
      GobblinYarnConfigurationKeys.DEFAULT_HELIX_PURGE_POLLING_RATE_MILLIS);
  this.containerJvmArgs = config.hasPath(GobblinYarnConfigurationKeys.CONTAINER_JVM_ARGS_KEY) ?
      Optional.of(config.getString(GobblinYarnConfigurationKeys.CONTAINER_JVM_ARGS_KEY)) :
      Optional.<String>absent();
  int numContainerLaunchThreads =
      ConfigUtils.getInt(config, GobblinYarnConfigurationKeys.MAX_CONTAINER_LAUNCH_THREADS_KEY,
          GobblinYarnConfigurationKeys.DEFAULT_MAX_CONTAINER_LAUNCH_THREADS);
  this.containerLaunchExecutor = ScalingThreadPoolExecutor.newScalingThreadPool(5, numContainerLaunchThreads, 0L,
      ExecutorsUtils.newThreadFactory(Optional.of(LOGGER), Optional.of("ContainerLaunchExecutor")));
  this.tokens = getSecurityTokens();
  // Entries auto-expire so a container that never completes does not pin cache memory forever.
  this.releasedContainerCache = CacheBuilder.newBuilder().expireAfterAccess(ConfigUtils.getInt(config,
      GobblinYarnConfigurationKeys.RELEASED_CONTAINERS_CACHE_EXPIRY_SECS,
      GobblinYarnConfigurationKeys.DEFAULT_RELEASED_CONTAINERS_CACHE_EXPIRY_SECS), TimeUnit.SECONDS).build();
  this.jvmMemoryXmxRatio = ConfigUtils.getDouble(this.config,
      GobblinYarnConfigurationKeys.CONTAINER_JVM_MEMORY_XMX_RATIO_KEY,
      GobblinYarnConfigurationKeys.DEFAULT_CONTAINER_JVM_MEMORY_XMX_RATIO);
  Preconditions.checkArgument(this.jvmMemoryXmxRatio >= 0 && this.jvmMemoryXmxRatio <= 1,
      GobblinYarnConfigurationKeys.CONTAINER_JVM_MEMORY_XMX_RATIO_KEY + " must be between 0 and 1 inclusive");
  this.jvmMemoryOverheadMbs = ConfigUtils.getInt(this.config,
      GobblinYarnConfigurationKeys.CONTAINER_JVM_MEMORY_OVERHEAD_MBS_KEY,
      GobblinYarnConfigurationKeys.DEFAULT_CONTAINER_JVM_MEMORY_OVERHEAD_MBS);
  // Guarantees the -Xmx computed in buildContainerCommand() stays positive.
  Preconditions.checkArgument(this.jvmMemoryOverheadMbs < this.requestedContainerMemoryMbs * this.jvmMemoryXmxRatio,
      GobblinYarnConfigurationKeys.CONTAINER_JVM_MEMORY_OVERHEAD_MBS_KEY + " cannot be more than "
          + GobblinYarnConfigurationKeys.CONTAINER_MEMORY_MBS_KEY + " * "
          + GobblinYarnConfigurationKeys.CONTAINER_JVM_MEMORY_XMX_RATIO_KEY);
  this.appViewAcl = ConfigUtils.getString(this.config, GobblinYarnConfigurationKeys.APP_VIEW_ACL,
      GobblinYarnConfigurationKeys.DEFAULT_APP_VIEW_ACL);
  this.containerTimezone = ConfigUtils.getString(this.config, GobblinYarnConfigurationKeys.GOBBLIN_YARN_CONTAINER_TIMEZONE,
      GobblinYarnConfigurationKeys.DEFAULT_GOBBLIN_YARN_CONTAINER_TIMEZONE);
}
/**
 * EventBus handler for {@link NewContainerRequest}s. Forwards the request to YARN, preferring
 * the host of the container being replaced (if any). Dropped with an error if the cluster's
 * maximum resource capacity has not yet been learned from the ResourceManager.
 */
@SuppressWarnings("unused")
@Subscribe
public void handleNewContainerRequest(NewContainerRequest newContainerRequest) {
  if (this.maxResourceCapacity.isPresent()) {
    // Prefer landing the replacement on the same host as the container it replaces.
    Optional<String> preferredHost =
        newContainerRequest.getReplacedContainer().transform(container -> container.getNodeId().getHost());
    requestContainer(preferredHost, newContainerRequest.getResource());
    return;
  }
  LOGGER.error(String.format(
      "Unable to handle new container request as maximum resource capacity is not available: "
          + "[memory (MBs) requested = %d, vcores requested = %d]", this.requestedContainerMemoryMbs,
      this.requestedContainerCores));
}
/**
 * Factory for the NodeManager callback handler; protected so tests/subclasses can substitute
 * their own handler implementation.
 */
protected NMClientCallbackHandler getNMClientCallbackHandler() {
  return new NMClientCallbackHandler();
}
/**
 * EventBus handler for {@link ContainerShutdownRequest}s: asks the NodeManager, asynchronously,
 * to stop every container named in the request.
 */
@SuppressWarnings("unused")
@Subscribe
public void handleContainerShutdownRequest(ContainerShutdownRequest containerShutdownRequest) {
  for (Container target : containerShutdownRequest.getContainers()) {
    LOGGER.info(String.format("Stopping container %s running on %s", target.getId(), target.getNodeId()));
    this.nmClientAsync.stopContainerAsync(target.getId(), target.getNodeId());
  }
}
/**
 * Request the Resource Manager to release the container
 * @param containerReleaseRequest containers to release
 */
@Subscribe
public void handleContainerReleaseRequest(ContainerReleaseRequest containerReleaseRequest) {
  for (Container toRelease : containerReleaseRequest.getContainers()) {
    LOGGER.info(String.format("Releasing container %s running on %s", toRelease.getId(), toRelease.getNodeId()));
    // Record that this container was explicitly released so that a new one is not spawned to replace it
    // Put the container id in the releasedContainerCache before releasing it so that handleContainerCompletion()
    // can check for the container id and skip spawning a replacement container.
    // Note that this is the best effort since these are asynchronous operations and a container may abort concurrently
    // with the release call. So in some cases a replacement container may have already been spawned before
    // the container is put into the black list.
    this.releasedContainerCache.put(toRelease.getId(), "");
    this.amrmClientAsync.releaseAssignedContainer(toRelease.getId());
  }
}
/**
 * Starts the YarnService: subscribes to the EventBus, starts the AM-RM and NM clients,
 * registers this process as the ApplicationMaster (which yields the cluster's maximum
 * resource capability), optionally purges offline Helix instances, and requests the
 * initial set of containers. Ordering matters: registration must precede any container
 * request, and startupInProgress is cleared last so requestTargetNumberOfContainers()
 * refuses work until registration has completed.
 */
@Override
protected synchronized void startUp() throws Exception {
  LOGGER.info("Starting the YarnService");
  // Register itself with the EventBus for container-related requests
  this.eventBus.register(this);
  this.amrmClientAsync.start();
  this.nmClientAsync.start();
  // The ApplicationMaster registration response is used to determine the maximum resource capacity of the cluster
  RegisterApplicationMasterResponse response = this.amrmClientAsync.registerApplicationMaster(
      GobblinClusterUtils.getHostname(), -1, "");
  LOGGER.info("ApplicationMaster registration response: " + response);
  this.maxResourceCapacity = Optional.of(response.getMaximumResourceCapability());
  if (this.isPurgingOfflineHelixInstancesEnabled) {
    purgeHelixOfflineInstances(this.helixPurgeLaggingThresholdMs);
  }
  LOGGER.info("Requesting initial containers");
  requestInitialContainers(this.initialContainers);
  startupInProgress = false;
}
/**
 * Purges Helix instances that are offline before containers are allocated, so stale
 * participants do not linger in the cluster. Emits GTEs (when an event submitter is
 * configured) tagged with the cluster name and metadata-store connection string.
 *
 * @param laggingThresholdMs how long the purge may lag before it is reported as lagging
 */
private void purgeHelixOfflineInstances(long laggingThresholdMs) {
  String clusterName = this.helixManager.getClusterName();
  String connectionString = this.helixManager.getMetadataStoreConnectionString();
  LOGGER.info("Purging offline helix instances before allocating containers for helixClusterName={}, connectionString={}, helixPurgeStatusPollingRateMs={}",
      clusterName, connectionString, this.helixPurgeStatusPollingRateMs);
  Map<String, String> gteMetadata = ImmutableMap.of(
      "connectionString", connectionString,
      "clusterName", clusterName
  );
  HelixInstancePurgerWithMetrics purger =
      new HelixInstancePurgerWithMetrics(this.eventSubmitter.orNull(), this.helixPurgeStatusPollingRateMs);
  purger.purgeAllOfflineInstances(this.helixAdmin, clusterName, laggingThresholdMs, gteMetadata);
}
/**
 * Stops the YarnService: shuts down the container-launch executor, asks the NodeManagers to
 * stop all known containers, waits (bounded) for them to stop, and unregisters the
 * ApplicationMaster. Always closes registered resources and stops metrics reporting.
 *
 * Fix: the previous implementation called {@code wait()} once outside a loop, so a spurious
 * wakeup or a partial notify ended the wait early, and "All of the containers have been
 * stopped" was logged unconditionally even when the timeout elapsed with containers still
 * running. The wait now loops on the actual condition (containerMap drained) with a deadline.
 */
@Override
protected void shutDown() throws IOException {
  LOGGER.info("Stopping the YarnService");
  this.shutdownInProgress = true;
  try {
    ExecutorsUtils.shutdownExecutorService(this.containerLaunchExecutor, Optional.of(LOGGER));
    // Stop the running containers
    for (ContainerInfo containerInfo : this.containerMap.values()) {
      LOGGER.info("Stopping container {} running participant {}", containerInfo.getContainer().getId(),
          containerInfo.getHelixParticipantId());
      this.nmClientAsync.stopContainerAsync(containerInfo.getContainer().getId(), containerInfo.getContainer().getNodeId());
    }
    if (!this.containerMap.isEmpty()) {
      synchronized (this.allContainersStopped) {
        try {
          // Wait up to 5 minutes for the containers to stop. Object.wait() may return
          // spuriously, so re-check the condition in a loop until the deadline passes.
          Duration waitTimeout = Duration.ofMinutes(5);
          long deadlineMs = System.currentTimeMillis() + waitTimeout.toMillis();
          long remainingMs;
          while (!this.containerMap.isEmpty() && (remainingMs = deadlineMs - System.currentTimeMillis()) > 0) {
            this.allContainersStopped.wait(remainingMs);
          }
          if (this.containerMap.isEmpty()) {
            LOGGER.info("All of the containers have been stopped");
          } else {
            LOGGER.warn("Timed out waiting for containers to stop; {} container(s) may still be running",
                this.containerMap.size());
          }
        } catch (InterruptedException ie) {
          // Preserve the interrupt for callers; proceed to unregister/cleanup regardless.
          Thread.currentThread().interrupt();
        }
      }
    }
    this.amrmClientAsync.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED, null, null);
  } catch (IOException | YarnException e) {
    LOGGER.error("Failed to unregister the ApplicationMaster", e);
  } finally {
    try {
      this.closer.close();
    } finally {
      if (this.gobblinMetrics.isPresent()) {
        this.gobblinMetrics.get().stopMetricsReporting();
      }
    }
  }
}
/**
 * Refreshes the cached security tokens from the current user's credentials; used when
 * delegation tokens are renewed so newly launched containers receive up-to-date tokens.
 */
public void updateToken() throws IOException{
  this.tokens = getSecurityTokens();
}
/**
 * Builds the {@link GobblinMetrics} instance for this application, tagged with the
 * application id and name, and starts metric reporting. A reporter that fails to start is
 * logged and skipped so one bad sink does not prevent the service from running.
 */
private GobblinMetrics buildGobblinMetrics() {
  // Tag metrics with the application's identity so reporters can partition by app.
  ImmutableList.Builder<Tag<?>> tagBuilder = ImmutableList.builder();
  tagBuilder.add(new Tag<>(GobblinClusterMetricTagNames.APPLICATION_ID, this.applicationId));
  tagBuilder.add(new Tag<>(GobblinClusterMetricTagNames.APPLICATION_NAME, this.applicationName));
  GobblinMetrics metrics = GobblinMetrics.get(this.applicationId, null, tagBuilder.build());
  try {
    metrics.startMetricReporting(ConfigUtils.configToProperties(config));
  } catch (MultiReporterException ex) {
    for (MetricReporterException reporterFailure : ex.getExceptions()) {
      LOGGER.error("Failed to start {} {} reporter.", reporterFailure.getSinkType().name(),
          reporterFailure.getReporterType().name(), reporterFailure);
    }
  }
  return metrics;
}
/**
 * Builds the {@link EventSubmitter} used to emit GobblinTrackingEvents under the Yarn event
 * namespace. Only called when metrics are enabled, so {@code gobblinMetrics} is present here.
 */
private EventSubmitter buildEventSubmitter() {
  return new EventSubmitter.Builder(this.gobblinMetrics.get().getMetricContext(),
      GobblinYarnEventConstants.EVENT_NAMESPACE)
      .build();
}
/**
 * Request an allocation of containers. If numTargetContainers is larger than the max of current and expected number
 * of containers then additional containers are requested.
 * <p>
 * If numTargetContainers is less than the current number of allocated containers then release free containers.
 * Shrinking is relative to the number of currently allocated containers since it takes time for containers
 * to be allocated and assigned work and we want to avoid releasing a container prematurely before it is assigned
 * work. This means that a container may not be released even though numTargetContainers is less than the requested
 * number of containers. The intended usage is for the caller of this method to make periodic calls to attempt to
 * adjust the cluster towards the desired number of containers.
 *
 * @param yarnContainerRequestBundle the desired containers information, including numbers, resource and helix tag
 * @param inUseInstances a set of in use instances
 * @return whether successfully requested the target number of containers
 */
public synchronized boolean requestTargetNumberOfContainers(YarnContainerRequestBundle yarnContainerRequestBundle, Set<String> inUseInstances) {
  LOGGER.info("Trying to set numTargetContainers={}, in-use helix instances count is {}, container map size is {}",
      yarnContainerRequestBundle.getTotalContainers(), inUseInstances.size(), this.containerMap.size());
  // Refuse work until startUp() has registered the AM; container requests before that would fail.
  if (startupInProgress) {
    LOGGER.warn("YarnService is still starting up. Unable to request containers from yarn until YarnService is finished starting up.");
    return false;
  }
  //Correct the containerMap first as there is cases that handleContainerCompletion() is called before onContainersAllocated()
  for (ContainerId removedId : this.removedContainerID.keySet()) {
    ContainerInfo containerInfo = this.containerMap.remove(removedId);
    if (containerInfo != null) {
      String helixTag = containerInfo.getHelixTag();
      // putIfAbsent guards against the tag never having been counted before decrementing.
      allocatedContainerCountMap.putIfAbsent(helixTag, new AtomicInteger(0));
      this.allocatedContainerCountMap.get(helixTag).decrementAndGet();
      this.removedContainerID.remove(removedId);
    }
  }
  int numTargetContainers = yarnContainerRequestBundle.getTotalContainers();
  // YARN can allocate more than the requested number of containers, compute additional allocations and deallocations
  // based on the max of the requested and actual allocated counts
  // Represents the number of containers allocated for across all helix tags
  int totalAllocatedContainers = this.containerMap.size();
  int totalContainersInContainerCountMap = 0;
  for (AtomicInteger count: allocatedContainerCountMap.values()) {
    totalContainersInContainerCountMap += count.get();
  }
  // The two bookkeeping structures are updated from asynchronous callbacks, so they can
  // transiently disagree; log (but do not fail on) the mismatch.
  if (totalContainersInContainerCountMap != totalAllocatedContainers) {
    LOGGER.warn(String.format("Container number mismatch in containerMap and allocatedContainerCountMap, "
        + "we have %s containers in containerMap while %s in allocatedContainerCountMap", totalAllocatedContainers, totalContainersInContainerCountMap));
  }
  // Request additional containers if the desired count is higher than the max of the current allocation or previously
  // requested amount. Note that there may be in-flight or additional allocations after numContainers has been computed
  // so overshooting can occur, but periodic calls to this method will make adjustments towards the target.
  for (Map.Entry<String, Integer> entry : yarnContainerRequestBundle.getHelixTagContainerCountMap().entrySet()) {
    String currentHelixTag = entry.getKey();
    int desiredContainerCount = entry.getValue();
    Resource resourceForHelixTag = yarnContainerRequestBundle.getHelixTagResourceMap().get(currentHelixTag);
    // Calculate requested container count based on adding allocated count and outstanding ContainerRequests in Yarn
    allocatedContainerCountMap.putIfAbsent(currentHelixTag, new AtomicInteger(0));
    int allocatedContainersForHelixTag = allocatedContainerCountMap.get(currentHelixTag).get();
    int outstandingContainerRequests = getMatchingRequestsCount(resourceForHelixTag);
    int requestedContainerCount = allocatedContainersForHelixTag + outstandingContainerRequests;
    int numContainersNeeded = desiredContainerCount - requestedContainerCount;
    LOGGER.info("Container counts for helixTag={} (allocatedContainers={}, outstandingContainerRequests={}, desiredContainerCount={}, numContainersNeeded={})",
        currentHelixTag, allocatedContainersForHelixTag, outstandingContainerRequests, desiredContainerCount, numContainersNeeded);
    if (numContainersNeeded > 0) {
      requestContainers(numContainersNeeded, resourceForHelixTag);
    }
  }
  //Iterate through all containers allocated and check whether the corresponding helix instance is still LIVE within the helix cluster.
  // A container that has a bad connection to zookeeper will be dropped from the Helix cluster if the disconnection is greater than the specified timeout.
  // In these cases, we want to release the container to get a new container because these containers won't be assigned tasks by Helix
  List<Container> containersToRelease = new ArrayList<>();
  HashSet<ContainerId> idleContainerIdsToRelease = new HashSet<>();
  for (Map.Entry<ContainerId, ContainerInfo> entry : this.containerMap.entrySet()) {
    ContainerInfo containerInfo = entry.getValue();
    if (!HelixUtils.isInstanceLive(helixManager, containerInfo.getHelixParticipantId())) {
      // Record when the instance first went non-live; only release after the idle grace period.
      containerIdleSince.putIfAbsent(entry.getKey(), System.currentTimeMillis());
      if (System.currentTimeMillis() - containerIdleSince.get(entry.getKey())
          >= TimeUnit.MINUTES.toMillis(YarnAutoScalingManager.DEFAULT_MAX_CONTAINER_IDLE_TIME_BEFORE_SCALING_DOWN_MINUTES)) {
        LOGGER.info("Releasing Container {} because the assigned participant {} has been in-active for more than {} minutes",
            entry.getKey(), containerInfo.getHelixParticipantId(), YarnAutoScalingManager.DEFAULT_MAX_CONTAINER_IDLE_TIME_BEFORE_SCALING_DOWN_MINUTES);
        containersToRelease.add(containerInfo.getContainer());
        idleContainerIdsToRelease.add(entry.getKey());
      }
    } else {
      // Instance is live again; reset its idle clock.
      containerIdleSince.remove(entry.getKey());
    }
  }
  // If the total desired is lower than the currently allocated amount then release free containers.
  // This is based on the currently allocated amount since containers may still be in the process of being allocated
  // and assigned work. Resizing based on numRequestedContainers at this point may release a container right before
  // or soon after it is assigned work.
  if (numTargetContainers < totalAllocatedContainers - idleContainerIdsToRelease.size()) {
    int numToShutdown = totalAllocatedContainers - numTargetContainers;
    LOGGER.info("Shrinking number of containers by {} because numTargetContainers < totalAllocatedContainers - idleContainersToRelease ({} < {} - {})",
        totalAllocatedContainers - numTargetContainers - idleContainerIdsToRelease.size(), numTargetContainers, totalAllocatedContainers, idleContainerIdsToRelease.size());
    // Look for eligible containers to release. If a container is in use then it is not released.
    // NOTE(review): containersToRelease already holds the idle releases, so the size check below
    // counts them toward numToShutdown — verify this over-counting is intended.
    for (Map.Entry<ContainerId, ContainerInfo> entry : this.containerMap.entrySet()) {
      ContainerInfo containerInfo = entry.getValue();
      if (!inUseInstances.contains(containerInfo.getHelixParticipantId()) && !idleContainerIdsToRelease.contains(entry.getKey())) {
        containersToRelease.add(containerInfo.getContainer());
      }
      if (containersToRelease.size() >= numToShutdown) {
        break;
      }
    }
    LOGGER.info("Shutting down {} containers. containersToRelease={}", containersToRelease.size(), containersToRelease);
  }
  // Releases (idle + shrink) go through the EventBus so handleContainerReleaseRequest()
  // can blacklist them before the RM is asked to release.
  if (!containersToRelease.isEmpty()) {
    this.eventBus.post(new ContainerReleaseRequest(containersToRelease));
  }
  this.yarnContainerRequest = yarnContainerRequestBundle;
  LOGGER.info("Current tag-container desired count:{}, tag-container allocated: {}",
      yarnContainerRequestBundle.getHelixTagContainerCountMap(), this.allocatedContainerCountMap);
  return true;
}
/**
 * Requests the initial fleet of containers using the default container resource
 * (memory/cores from config) and the default Helix instance tag.
 *
 * Fix: replaced raw-typed {@code Collections.EMPTY_SET} with the generic
 * {@code Collections.<String>emptySet()} to avoid an unchecked conversion.
 *
 * @param containersRequested number of containers to request at startup
 */
private void requestInitialContainers(int containersRequested) {
  YarnContainerRequestBundle initialYarnContainerRequest = new YarnContainerRequestBundle();
  Resource capability = Resource.newInstance(this.requestedContainerMemoryMbs, this.requestedContainerCores);
  initialYarnContainerRequest.add(this.helixInstanceTags, containersRequested, capability);
  // No Helix instances are in use yet at startup.
  requestTargetNumberOfContainers(initialYarnContainerRequest, Collections.<String>emptySet());
}
/**
 * Requests a single container, falling back to the default container sizing
 * (configured memory MBs and vcores) when no explicit resource is supplied.
 *
 * @param preferredNode host to prefer for placement, if any
 * @param resourceOptional resource to request; absent means use the defaults
 */
private void requestContainer(Optional<String> preferredNode, Optional<Resource> resourceOptional) {
  Resource defaultResource = Resource.newInstance(
      this.requestedContainerMemoryMbs, this.requestedContainerCores);
  requestContainer(preferredNode, resourceOptional.or(defaultResource));
}
/**
 * Request {@param numContainers} from yarn with the specified resource. Resources will be allocated without a preferred
 * node
 * @param numContainers
 * @param resource
 */
private void requestContainers(int numContainers, Resource resource) {
  LOGGER.info("Requesting {} containers with resource={}", numContainers, resource);
  for (int i = 0; i < numContainers; i++) {
    requestContainer(Optional.absent(), resource);
  }
}
/**
 * Requests one container from the ResourceManager with the given resource requirement,
 * optionally preferring a specific node.
 *
 * Fix: the containsKey/put/get triple on {@code resourcePriorityMap} is collapsed into a
 * single {@code computeIfAbsent}, which is both idiomatic and atomic on the map access.
 *
 * @param preferredNode host to prefer for placement, if any
 * @param resource memory/vcores required for the container; must not exceed the cluster max
 */
private void requestContainer(Optional<String> preferredNode, Resource resource) {
  // Fail if Yarn cannot meet container resource requirements
  Preconditions.checkArgument(resource.getMemory() <= this.maxResourceCapacity.get().getMemory() &&
      resource.getVirtualCores() <= this.maxResourceCapacity.get().getVirtualCores(),
      "Resource requirement must less than the max resource capacity. Requested resource" + resource.toString()
          + " exceed the max resource limit " + this.maxResourceCapacity.get().toString());
  // Due to YARN-314, different resource capacity needs different priority, otherwise Yarn will not allocate container
  Priority priority = Records.newRecord(Priority.class);
  int priorityNum = resourcePriorityMap.computeIfAbsent(
      resource.toString(), k -> priorityNumGenerator.getAndIncrement());
  priority.setPriority(priorityNum);
  String[] preferredNodes = preferredNode.isPresent() ? new String[] {preferredNode.get()} : null;
  this.amrmClientAsync.addContainerRequest(
      new AMRMClient.ContainerRequest(resource, preferredNodes, null, priority));
}
/**
 * Builds the {@link ContainerLaunchContext} for a container: local resources (lib jars,
 * app jars, app files, plus any configured remote files/zips), environment variables,
 * the startup command, view ACLs, and — when security is enabled — a duplicate of the
 * cached delegation tokens.
 *
 * @param containerInfo the container to build the launch context for
 * @throws IOException if staging paths cannot be read from the file system
 */
protected ContainerLaunchContext newContainerLaunchContext(ContainerInfo containerInfo)
    throws IOException {
  Path appWorkDir = GobblinClusterUtils.getAppWorkDirPathFromConfig(this.config, this.fs, this.applicationName, this.applicationId);
  Path containerWorkDir = new Path(appWorkDir, GobblinYarnConfigurationKeys.CONTAINER_WORK_DIR_NAME);
  Map<String, LocalResource> resourceMap = Maps.newHashMap();
  // Lib jars live under the app work dir; app jars/files under the container work dir.
  addContainerLocalResources(new Path(appWorkDir, GobblinYarnConfigurationKeys.LIB_JARS_DIR_NAME), resourceMap);
  addContainerLocalResources(new Path(containerWorkDir, GobblinYarnConfigurationKeys.APP_JARS_DIR_NAME), resourceMap);
  addContainerLocalResources(
      new Path(containerWorkDir, GobblinYarnConfigurationKeys.APP_FILES_DIR_NAME), resourceMap);
  if (this.config.hasPath(GobblinYarnConfigurationKeys.CONTAINER_FILES_REMOTE_KEY)) {
    YarnHelixUtils.addRemoteFilesToLocalResources(this.config.getString(GobblinYarnConfigurationKeys.CONTAINER_FILES_REMOTE_KEY),
        resourceMap, yarnConfiguration);
  }
  if (this.config.hasPath(GobblinYarnConfigurationKeys.CONTAINER_ZIPS_REMOTE_KEY)) {
    YarnHelixUtils.addRemoteZipsToLocalResources(this.config.getString(GobblinYarnConfigurationKeys.CONTAINER_ZIPS_REMOTE_KEY),
        resourceMap, yarnConfiguration);
  }
  ContainerLaunchContext containerLaunchContext = Records.newRecord(ContainerLaunchContext.class);
  containerLaunchContext.setLocalResources(resourceMap);
  containerLaunchContext.setEnvironment(YarnHelixUtils.getEnvironmentVariables(this.yarnConfiguration));
  containerLaunchContext.setCommands(Arrays.asList(containerInfo.getStartupCommand()));
  Map<ApplicationAccessType, String> acls = new HashMap<>(1);
  acls.put(ApplicationAccessType.VIEW_APP, this.appViewAcl);
  containerLaunchContext.setApplicationACLs(acls);
  if (UserGroupInformation.isSecurityEnabled()) {
    // duplicate() so each container gets an independent read position over the shared token buffer.
    containerLaunchContext.setTokens(this.tokens.duplicate());
  }
  return containerLaunchContext;
}
/**
 * Registers every file directly under {@code destDir} as a container {@link LocalResource}
 * of type FILE. A missing directory is logged and skipped rather than treated as an error.
 *
 * @param destDir directory on the staging file system to scan
 * @param resourceMap map to add the discovered resources into
 * @throws IOException if the file system cannot be queried
 */
private void addContainerLocalResources(Path destDir, Map<String, LocalResource> resourceMap) throws IOException {
  if (!this.fs.exists(destDir)) {
    LOGGER.warn(String.format("Path %s does not exist so no container LocalResource to add", destDir));
    return;
  }
  FileStatus[] entries = this.fs.listStatus(destDir);
  if (entries == null) {
    return;
  }
  for (FileStatus entry : entries) {
    YarnHelixUtils.addFileAsLocalResource(this.fs, entry.getPath(), LocalResourceType.FILE, resourceMap);
  }
}
/**
 * Serializes the current user's security tokens into a ByteBuffer suitable for passing to
 * containers, with the AM-&gt;RM token stripped out so containers cannot talk to the
 * ResourceManager as the ApplicationMaster.
 *
 * Fix: the AM-&gt;RM token was previously removed from {@code credentials} only AFTER
 * {@code writeTokenStorageToStream()}, so the removal never affected the serialized bytes
 * and the token was still shipped to containers. The token is now removed before
 * serialization. ({@code UserGroupInformation.getCredentials()} returns a copy, so the
 * removal does not mutate the user's own credentials.)
 *
 * @return a ByteBuffer containing the serialized, filtered token storage
 * @throws IOException if the token storage cannot be written
 */
protected ByteBuffer getSecurityTokens() throws IOException {
  Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
  Closer closer = Closer.create();
  try {
    // Remove the AM->RM token BEFORE serializing so that containers cannot access it
    Iterator<Token<?>> tokenIterator = credentials.getAllTokens().iterator();
    while (tokenIterator.hasNext()) {
      Token<?> token = tokenIterator.next();
      if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
        tokenIterator.remove();
      }
    }
    DataOutputBuffer dataOutputBuffer = closer.register(new DataOutputBuffer());
    credentials.writeTokenStorageToStream(dataOutputBuffer);
    return ByteBuffer.wrap(dataOutputBuffer.getData(), 0, dataOutputBuffer.getLength());
  } catch (Throwable t) {
    throw closer.rethrow(t);
  } finally {
    closer.close();
  }
}
/**
 * Builds the shell command that launches {@link GobblinYarnTaskRunner} inside a container:
 * JVM with -Xmx derived from the container's memory allocation (xmx ratio minus overhead),
 * timezone and log-location system properties, any configured extra JVM args, the app
 * name/id/Helix-instance CLI options, and stdout/stderr redirected into the YARN log dir.
 *
 * @param container the allocated container whose resource sizes the JVM heap
 * @param helixParticipantId Helix instance name the runner should register as
 * @param helixInstanceTag Helix tag to pass through; omitted from the command when blank
 */
@VisibleForTesting
protected String buildContainerCommand(Container container, String helixParticipantId, String helixInstanceTag) {
  String containerProcessName = GobblinYarnTaskRunner.class.getSimpleName();
  StringBuilder containerCommand = new StringBuilder()
      .append(ApplicationConstants.Environment.JAVA_HOME.$()).append("/bin/java")
      // Heap = containerMemory * xmxRatio - overhead; the constructor guarantees this is positive
      // for the default resource, but a smaller allocated container could still drive it low.
      .append(" -Xmx").append((int) (container.getResource().getMemory() * this.jvmMemoryXmxRatio) -
          this.jvmMemoryOverheadMbs).append("M")
      .append(" -D").append(GobblinYarnConfigurationKeys.JVM_USER_TIMEZONE_CONFIG).append("=").append(this.containerTimezone)
      .append(" -D").append(GobblinYarnConfigurationKeys.GOBBLIN_YARN_CONTAINER_LOG_DIR_NAME).append("=").append(ApplicationConstants.LOG_DIR_EXPANSION_VAR)
      .append(" -D").append(GobblinYarnConfigurationKeys.GOBBLIN_YARN_CONTAINER_LOG_FILE_NAME).append("=").append(containerProcessName).append(".").append(ApplicationConstants.STDOUT)
      .append(" ").append(JvmUtils.formatJvmArguments(this.containerJvmArgs))
      .append(" ").append(GobblinYarnTaskRunner.class.getName())
      .append(" --").append(GobblinClusterConfigurationKeys.APPLICATION_NAME_OPTION_NAME)
      .append(" ").append(this.applicationName)
      .append(" --").append(GobblinClusterConfigurationKeys.APPLICATION_ID_OPTION_NAME)
      .append(" ").append(this.applicationId)
      .append(" --").append(GobblinClusterConfigurationKeys.HELIX_INSTANCE_NAME_OPTION_NAME)
      .append(" ").append(helixParticipantId);
  if (!Strings.isNullOrEmpty(helixInstanceTag)) {
    containerCommand.append(" --").append(GobblinClusterConfigurationKeys.HELIX_INSTANCE_TAGS_OPTION_NAME)
        .append(" ").append(helixInstanceTag);
  }
  // Redirect stdout/stderr into the YARN-managed log directory for this container.
  return containerCommand.append(" 1>").append(ApplicationConstants.LOG_DIR_EXPANSION_VAR).append(File.separator).append(
      containerProcessName).append(".").append(ApplicationConstants.STDOUT)
      .append(" 2>").append(ApplicationConstants.LOG_DIR_EXPANSION_VAR).append(File.separator).append(
          containerProcessName).append(".").append(ApplicationConstants.STDERR).toString();
}
/**
 * Check the exit status of a completed container and see if the replacement container
 * should try to be started on the same node. Some exit status indicates a disk or
 * node failure and in such cases the replacement container should try to be started on
 * a different node.
 */
private boolean shouldStickToTheSameNode(int containerExitStatus) {
  // DISKS_FAILED signals disk trouble on the node. ABORTED is most likely due to node
  // failures because the application itself will not release containers. Either way,
  // the replacement should be free to land elsewhere.
  if (containerExitStatus == ContainerExitStatus.DISKS_FAILED
      || containerExitStatus == ContainerExitStatus.ABORTED) {
    return false;
  }
  // For all other exit statuses, stick to the same node only when host affinity is enabled.
  return this.containerHostAffinityEnabled;
}
/**
 * Handle the completion of a container. A new container will be requested to replace the one
 * that just exited. Depending on the exit status and if container host affinity is enabled,
 * the new container may or may not try to be started on the same node.
 * <p>
 * A container completes in either of the following conditions: 1) some error happens in the
 * container and caused the container to exit, 2) the container gets killed due to some reason,
 * for example, if it runs over the allowed amount of virtual or physical memory, 3) the gets
 * preempted by the ResourceManager, or 4) the container gets stopped by the ApplicationMaster.
 * A replacement container is needed in all but the last case.
 */
protected void handleContainerCompletion(ContainerStatus containerStatus) {
  ContainerInfo completedContainerInfo = this.containerMap.remove(containerStatus.getContainerId());
  //Get the Helix instance name for the completed container. Because callbacks are processed asynchronously, we might
  //encounter situations where handleContainerCompletion() is called before onContainersAllocated(), resulting in the
  //containerId missing from the containersMap.
  // We use removedContainerID to remember these containers and remove them from containerMap later when we call requestTargetNumberOfContainers method
  if (completedContainerInfo == null) {
    removedContainerID.putIfAbsent(containerStatus.getContainerId(), "");
  }
  String completedInstanceName = completedContainerInfo == null? UNKNOWN_HELIX_INSTANCE : completedContainerInfo.getHelixParticipantId();
  String helixTag = completedContainerInfo == null ? helixInstanceTags : completedContainerInfo.getHelixTag();
  if (completedContainerInfo != null) {
    // NOTE(review): assumes the tag was counted when the container was allocated; a missing
    // entry here would NPE — verify against the allocation path.
    allocatedContainerCountMap.get(helixTag).decrementAndGet();
  }
  LOGGER.info(String.format("Container %s running Helix instance %s with tag %s has completed with exit status %d",
      containerStatus.getContainerId(), completedInstanceName, helixTag, containerStatus.getExitStatus()));
  if (!Strings.isNullOrEmpty(containerStatus.getDiagnostics())) {
    LOGGER.info(String.format("Received the following diagnostics information for container %s: %s",
        containerStatus.getContainerId(), containerStatus.getDiagnostics()));
  }
  switch(containerStatus.getExitStatus()) {
    case(ContainerExitStatus.ABORTED):
      // handleAbortedContainer() returns true when the abort was an intentional release
      // (no replacement needed), in which case we are done.
      if (handleAbortedContainer(containerStatus, completedContainerInfo, completedInstanceName)) {
        return;
      }
      break;
    case(1): // Same as linux exit status 1 Often occurs when launch_container.sh failed
      LOGGER.info("Exit status 1. CompletedContainerInfo={}", completedContainerInfo);
      break;
    default:
      break;
  }
  // During shutdown every container completes; do not spawn replacements.
  if (this.shutdownInProgress) {
    return;
  }
  if(completedContainerInfo != null) {
    this.helixInstanceRetryCount.putIfAbsent(completedInstanceName, new AtomicInteger(0));
    int retryCount = this.helixInstanceRetryCount.get(completedInstanceName).incrementAndGet();
    // Populate event metadata
    Optional<ImmutableMap.Builder<String, String>> eventMetadataBuilder = Optional.absent();
    if (this.eventSubmitter.isPresent()) {
      eventMetadataBuilder = Optional.of(buildContainerStatusEventMetadata(containerStatus));
      eventMetadataBuilder.get().put(GobblinYarnEventConstants.EventMetadata.HELIX_INSTANCE_ID, completedInstanceName);
      eventMetadataBuilder.get().put(GobblinYarnEventConstants.EventMetadata.CONTAINER_STATUS_RETRY_ATTEMPT, retryCount + "");
    }
    // Give up on this Helix instance once it has exceeded the configured retry budget.
    if (this.helixInstanceMaxRetries > 0 && retryCount > this.helixInstanceMaxRetries) {
      if (this.eventSubmitter.isPresent()) {
        this.eventSubmitter.get()
            .submit(GobblinYarnEventConstants.EventNames.HELIX_INSTANCE_COMPLETION, eventMetadataBuilder.get().build());
      }
      LOGGER.warn("Maximum number of retries has been achieved for Helix instance " + completedInstanceName);
      return;
    }
    // Add the Helix instance name of the completed container to the set of unused
    // instance names so they can be reused by a replacement container.
    LOGGER.info("Adding instance {} to the pool of unused instances", completedInstanceName);
    this.unusedHelixInstanceNames.add(completedInstanceName);
    if (this.eventSubmitter.isPresent()) {
      this.eventSubmitter.get()
          .submit(GobblinYarnEventConstants.EventNames.HELIX_INSTANCE_COMPLETION, eventMetadataBuilder.get().build());
    }
  }
  // Ask for a replacement sized like the completed container (or the default when unknown),
  // preferring the same node only when the exit status and host affinity allow it.
  Optional<Resource> newContainerResource = completedContainerInfo != null ?
      Optional.of(completedContainerInfo.getContainer().getResource()) : Optional.absent();
  LOGGER.info("Requesting a new container to replace {} to run Helix instance {} with helix tag {} and resource {}",
      containerStatus.getContainerId(), completedInstanceName, helixTag, newContainerResource.orNull());
  this.eventBus.post(new NewContainerRequest(
      shouldStickToTheSameNode(containerStatus.getExitStatus()) && completedContainerInfo != null ?
          Optional.of(completedContainerInfo.getContainer()) : Optional.absent(), newContainerResource));
}
/**
 * Handles a container that completed with the {@link ContainerExitStatus#ABORTED} exit status.
 * Two cases are distinguished:
 * <ol>
 *   <li>
 *   Case 1: the Gobblin AM intentionally requested the container to be released (often because the
 *   number of helix tasks has decreased due to decreased traffic).
 *   </li>
 *   <li>
 *   Case 2: an unexpected hardware fault caused the node to be lost. Specific Helix logic is needed
 *   to ensure 2 helix tasks are not being run by multiple containers.
 *   </li>
 * </ol>
 * @param containerStatus the completion status reported by YARN
 * @param completedContainerInfo bookkeeping info for the completed container; may be null
 * @param completedInstanceName Helix instance name the container was running
 * @return true if the release was intentionally requested (Case 1)
 */
private boolean handleAbortedContainer(ContainerStatus containerStatus, ContainerInfo completedContainerInfo,
    String completedInstanceName) {
  ContainerId abortedContainerId = containerStatus.getContainerId();

  // Case 1: this AM asked YARN to release the container, so no replacement should be spawned.
  if (this.releasedContainerCache.getIfPresent(abortedContainerId) != null) {
    LOGGER.info("Container release requested, so not spawning a replacement for containerId {}", abortedContainerId);
    if (completedContainerInfo != null) {
      LOGGER.info("Adding instance {} to the pool of unused instances", completedInstanceName);
      this.unusedHelixInstanceNames.add(completedInstanceName);
    }
    return true;
  }

  // Case 2: the release was not requested. Most likely the container ran on a node whose NM died.
  // The RM then assumes the container is "lost", but its process may still be alive on that node,
  // so the Helix instance it hosts must be fenced off from the cluster to avoid double publishing
  // and state being committed by orphaned instances.
  LOGGER.info("Container {} aborted due to lost NM", abortedContainerId);
  if (!UNKNOWN_HELIX_INSTANCE.equals(completedInstanceName)
      && HelixUtils.isInstanceLive(helixManager, completedInstanceName)) {
    // Disable the orphaned instance.
    LOGGER.info("Disabling the Helix instance {}", completedInstanceName);
    this.helixManager.getClusterManagmentTool()
        .enableInstance(this.helixManager.getClusterName(), completedInstanceName, false);
  }
  return false;
}
/**
 * Builds the event metadata describing a {@link ContainerStatus}: container id, container state,
 * plus the exit status (when valid) and diagnostics (when present).
 *
 * @param containerStatus the status to describe
 * @return a builder pre-populated with the container status metadata
 */
private ImmutableMap.Builder<String, String> buildContainerStatusEventMetadata(ContainerStatus containerStatus) {
  ImmutableMap.Builder<String, String> metadataBuilder = ImmutableMap.builder();
  metadataBuilder.put(GobblinYarnMetricTagNames.CONTAINER_ID, containerStatus.getContainerId().toString());
  metadataBuilder.put(GobblinYarnEventConstants.EventMetadata.CONTAINER_STATUS_CONTAINER_STATE,
      containerStatus.getState().toString());

  // Only report an exit status when YARN has assigned a real one.
  int exitStatus = containerStatus.getExitStatus();
  if (exitStatus != ContainerExitStatus.INVALID) {
    metadataBuilder.put(GobblinYarnEventConstants.EventMetadata.CONTAINER_STATUS_EXIT_STATUS,
        String.valueOf(exitStatus));
  }

  String diagnostics = containerStatus.getDiagnostics();
  if (!Strings.isNullOrEmpty(diagnostics)) {
    metadataBuilder.put(GobblinYarnEventConstants.EventMetadata.CONTAINER_STATUS_EXIT_DIAGNOSTICS, diagnostics);
  }
  return metadataBuilder;
}
/**
 * Get the number of matching container requests for the specified resource memory and cores.
 *
 * <p>Due to YARN-1902 and YARN-660, this API is not 100% accurate.
 * {@link AMRMClientCallbackHandler#onContainersAllocated(List)} contains logic for best effort
 * clean up of requests, and the resources tend to match the allocated containers, so in practice
 * the count is pretty accurate.</p>
 *
 * <p>When there is no request with the exact same resource, this call counts requests whose
 * resource is larger than the given one: the RM can return larger containers (because of
 * normalization etc.). A container may be larger by memory or cpu (e.g. container (1000M, 3cpu)
 * can fit request (1000M, 1cpu) or request (500M, 3cpu)).</p>
 *
 * <p>Thankfully, since each helix tag / resource has a different priority, matching requests for
 * one helix tag / resource have complete isolation from another helix tag / resource.</p>
 *
 * @param resource the resource whose outstanding requests should be counted
 * @return the best-effort count of outstanding container requests for {@code resource}
 */
private int getMatchingRequestsCount(Resource resource) {
  Integer priorityNum = resourcePriorityMap.get(resource.toString());
  if (priorityNum == null) {
    // A request has never been made with this resource, so nothing can be outstanding.
    return 0;
  }
  Priority priority = Priority.newInstance(priorityNum);

  // Each inner collection groups requests with identical resource requirements; groups can have
  // differing resources because of RM normalization.
  List<? extends Collection<AMRMClient.ContainerRequest>> outstandingRequests =
      getAmrmClientAsync().getMatchingRequests(priority, ResourceRequest.ANY, resource);
  if (outstandingRequests == null) {
    return 0;
  }
  int requestCount = 0;
  for (Collection<AMRMClient.ContainerRequest> requestGroup : outstandingRequests) {
    if (requestGroup != null) {
      requestCount += requestGroup.size();
    }
  }
  return requestCount;
}
/**
 * A custom implementation of {@link AMRMClientAsync.CallbackHandler}.
 *
 * <p>Receives asynchronous notifications from the YARN ResourceManager: container completions and
 * allocations, node updates, shutdown requests, and fatal errors.</p>
 */
private class AMRMClientCallbackHandler implements AMRMClientAsync.CallbackHandler {

  // Flipped to true on an RM shutdown request or a fatal error; reported back to YARN
  // through getProgress().
  private volatile boolean done = false;

  @Override
  public void onContainersCompleted(List<ContainerStatus> statuses) {
    // Delegate each completion to the shared handler, which drives retry/replacement logic.
    for (ContainerStatus containerStatus : statuses) {
      handleContainerCompletion(containerStatus);
    }
  }

  @Override
  public void onContainersAllocated(List<Container> containers) {
    for (final Container container : containers) {
      String containerId = container.getId().toString();
      // Determine which helix tag's request this container satisfies; fall back to the
      // service-wide instance tags when no specific tag can be found.
      String containerHelixTag = YarnHelixUtils.findHelixTagForContainer(container, allocatedContainerCountMap, yarnContainerRequest);
      if (Strings.isNullOrEmpty(containerHelixTag)) {
        containerHelixTag = helixInstanceTags;
      }
      if (eventSubmitter.isPresent()) {
        eventSubmitter.get().submit(GobblinYarnEventConstants.EventNames.CONTAINER_ALLOCATION,
            GobblinYarnMetricTagNames.CONTAINER_ID, containerId);
      }
      LOGGER.info("Container {} has been allocated with resource {} for helix tag {}",
          container.getId(), container.getResource(), containerHelixTag);

      //Iterate over the (thread-safe) set of unused instances to find the first instance that is not currently live.
      //Once we find a candidate instance, it is removed from the set.
      String instanceName = null;

      //Ensure that updates to unusedHelixInstanceNames are visible to other threads that might concurrently
      //invoke the callback on container allocation.
      synchronized (this) {
        Iterator<String> iterator = unusedHelixInstanceNames.iterator();
        while (iterator.hasNext()) {
          instanceName = iterator.next();
          if (!HelixUtils.isInstanceLive(helixManager, instanceName)) {
            iterator.remove();
            LOGGER.info("Found an unused instance {}", instanceName);
            break;
          } else {
            //Reset the instance name to null, since this instance name is live.
            instanceName = null;
          }
        }
      }
      if (Strings.isNullOrEmpty(instanceName)) {
        // No unused instance name, so generating a new one.
        instanceName = HelixUtils
            .getHelixInstanceName(HELIX_YARN_INSTANCE_NAME_PREFIX, helixInstanceIdGenerator.incrementAndGet());
      }

      // Record the allocation before launching, so completion handling can look the container up.
      ContainerInfo containerInfo = new ContainerInfo(container, instanceName, containerHelixTag);
      containerMap.put(container.getId(), containerInfo);
      allocatedContainerCountMap.putIfAbsent(containerHelixTag, new AtomicInteger(0));
      allocatedContainerCountMap.get(containerHelixTag).incrementAndGet();

      // Find matching requests and remove the request (YARN-660). We the scheduler are responsible
      // for cleaning up requests after allocation based on the design in the described ticket.
      // YARN does not have a delta request API and the requests are not cleaned up automatically.
      // Try finding a match first with the host as the resource name then fall back to any resource match.
      // Also see YARN-1902. Container count will explode without this logic for removing container requests.
      List<? extends Collection<AMRMClient.ContainerRequest>> matchingRequests = amrmClientAsync
          .getMatchingRequests(container.getPriority(), container.getNodeHttpAddress(), container.getResource());

      if (matchingRequests.isEmpty()) {
        LOGGER.debug("Matching request by host {} not found", container.getNodeHttpAddress());
        matchingRequests = amrmClientAsync
            .getMatchingRequests(container.getPriority(), ResourceRequest.ANY, container.getResource());
      }

      if (!matchingRequests.isEmpty()) {
        AMRMClient.ContainerRequest firstMatchingContainerRequest = matchingRequests.get(0).iterator().next();
        LOGGER.debug("Found matching requests {}, removing first matching request {}",
            matchingRequests, firstMatchingContainerRequest);
        amrmClientAsync.removeContainerRequest(firstMatchingContainerRequest);
      }

      // Launch asynchronously so this callback thread is not blocked on NM communication.
      containerLaunchExecutor.submit(new Runnable() {
        @Override
        public void run() {
          try {
            LOGGER.info("Starting container " + containerId);
            nmClientAsync.startContainerAsync(container, newContainerLaunchContext(containerInfo));
          } catch (IOException ioe) {
            LOGGER.error("Failed to start container " + containerId, ioe);
          }
        }
      });
    }
  }

  @Override
  public void onShutdownRequest() {
    if (eventSubmitter.isPresent()) {
      eventSubmitter.get().submit(GobblinYarnEventConstants.EventNames.SHUTDOWN_REQUEST);
    }
    LOGGER.info("Received shutdown request from the ResourceManager");
    this.done = true;
    // Ask the cluster manager to shut down via the shared event bus.
    eventBus.post(new ClusterManagerShutdownRequest());
  }

  @Override
  public void onNodesUpdated(List<NodeReport> updatedNodes) {
    for (NodeReport nodeReport : updatedNodes) {
      LOGGER.info("Received node update report: " + nodeReport);
    }
  }

  @Override
  public float getProgress() {
    // YARN treats 1.0 as "application finished"; report that only after shutdown/error.
    return this.done ? 1.0f : 0.0f;
  }

  @Override
  public void onError(Throwable t) {
    if (eventSubmitter.isPresent()) {
      eventSubmitter.get().submit(GobblinYarnEventConstants.EventNames.ERROR,
          GobblinYarnEventConstants.EventMetadata.ERROR_EXCEPTION, Throwables.getStackTraceAsString(t));
    }
    LOGGER.error("Received error: " + t, t);
    this.done = true;
    eventBus.post(new ClusterManagerShutdownRequest());
  }
}
/**
* A custom implementation of {@link NMClientAsync.CallbackHandler}.
*/
class NMClientCallbackHandler implements NMClientAsync.CallbackHandler {
@Override
public void onContainerStarted(ContainerId containerId, Map<String, ByteBuffer> allServiceResponse) {
if (eventSubmitter.isPresent()) {
eventSubmitter.get().submit(GobblinYarnEventConstants.EventNames.CONTAINER_STARTED,
GobblinYarnMetricTagNames.CONTAINER_ID, containerId.toString());
}
LOGGER.info(String.format("Container %s has been started", containerId));
}
@Override
public void onContainerStatusReceived(ContainerId containerId, ContainerStatus containerStatus) {
if (eventSubmitter.isPresent()) {
eventSubmitter.get().submit(GobblinYarnEventConstants.EventNames.CONTAINER_STATUS_RECEIVED,
buildContainerStatusEventMetadata(containerStatus).build());
}
LOGGER.info(String.format("Received container status for container %s: %s", containerId, containerStatus));
}
@Override
public void onContainerStopped(ContainerId containerId) {
if (eventSubmitter.isPresent()) {
eventSubmitter.get().submit(GobblinYarnEventConstants.EventNames.CONTAINER_STOPPED,
GobblinYarnMetricTagNames.CONTAINER_ID, containerId.toString());
}
LOGGER.info(String.format("Container %s has been stopped", containerId));
if (containerMap.isEmpty()) {
synchronized (allContainersStopped) {
allContainersStopped.notify();
}
}
}
@Override
public void onStartContainerError(ContainerId containerId, Throwable t) {
if (eventSubmitter.isPresent()) {
eventSubmitter.get().submit(GobblinYarnEventConstants.EventNames.CONTAINER_START_ERROR,
GobblinYarnMetricTagNames.CONTAINER_ID, containerId.toString(),
GobblinYarnEventConstants.EventMetadata.ERROR_EXCEPTION, Throwables.getStackTraceAsString(t));
}
LOGGER.error(String.format("Failed to start container %s due to error %s", containerId, t));
}
@Override
public void onGetContainerStatusError(ContainerId containerId, Throwable t) {
if (eventSubmitter.isPresent()) {
eventSubmitter.get().submit(GobblinYarnEventConstants.EventNames.CONTAINER_GET_STATUS_ERROR,
GobblinYarnMetricTagNames.CONTAINER_ID, containerId.toString(),
GobblinYarnEventConstants.EventMetadata.ERROR_EXCEPTION, Throwables.getStackTraceAsString(t));
}
LOGGER.error(String.format("Failed to get status for container %s due to error %s", containerId, t));
}
@Override
public void onStopContainerError(ContainerId containerId, Throwable t) {
if (eventSubmitter.isPresent()) {
eventSubmitter.get().submit(GobblinYarnEventConstants.EventNames.CONTAINER_STOP_ERROR,
GobblinYarnMetricTagNames.CONTAINER_ID, containerId.toString(),
GobblinYarnEventConstants.EventMetadata.ERROR_EXCEPTION, Throwables.getStackTraceAsString(t));
}
LOGGER.error(String.format("Failed to stop container %s due to error %s", containerId, t));
}
}
// Class encapsulates Container instances, Helix participant IDs of the containers, Helix Tag, and
// initial startup command. Getters are generated by Lombok's @Getter.
@Getter
class ContainerInfo {
  private final Container container;
  // Helix instance name the container runs as (either reused from the unused pool or newly generated).
  private final String helixParticipantId;
  private final String helixTag;
  // Command used to launch the container process; computed eagerly so it can be logged via toString().
  private final String startupCommand;

  public ContainerInfo(Container container, String helixParticipantId, String helixTag) {
    this.container = container;
    this.helixParticipantId = helixParticipantId;
    this.helixTag = helixTag;
    this.startupCommand = YarnService.this.buildContainerCommand(container, helixParticipantId, helixTag);
  }

  @Override
  public String toString() {
    return String.format("ContainerInfo{ container=%s, helixParticipantId=%s, helixTag=%s, startupCommand=%s }",
        container.getId(), helixParticipantId, helixTag, startupCommand);
  }
}
}
| 1,914 |
0 | Create_ds/gobblin/gobblin-yarn/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-yarn/src/main/java/org/apache/gobblin/yarn/HelixMessageSubTypes.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.yarn;
/**
 * An enumeration of Helix message sub types.
 *
 * @author Yinan Li
 */
public enum HelixMessageSubTypes {

  /**
   * This type is for messages sent when the {@link GobblinApplicationMaster} is to be shutdown.
   */
  APPLICATION_MASTER_SHUTDOWN,

  /**
   * This type is for messages sent when the file storing the delegation token has been updated.
   */
  TOKEN_FILE_UPDATED
}
| 1,915 |
0 | Create_ds/gobblin/gobblin-yarn/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-yarn/src/main/java/org/apache/gobblin/yarn/HelixInstancePurgerWithMetrics.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.yarn;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Stopwatch;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import lombok.AllArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.metrics.event.EventSubmitter;
import org.apache.gobblin.metrics.event.GobblinEventBuilder;
import org.apache.helix.HelixAdmin;
@Slf4j
@AllArgsConstructor
public class HelixInstancePurgerWithMetrics {
  // Sink for GTEs; may be null, in which case events are only logged.
  private final EventSubmitter eventSubmitter;
  // How often, in milliseconds, to poll the purge task for completion.
  private final long pollingRateMs;

  private static final String PREFIX = "HelixOfflineInstancePurge.";
  public static final String PURGE_FAILURE_EVENT = PREFIX + "Failure";
  public static final String PURGE_LAGGING_EVENT = PREFIX + "Lagging";
  public static final String PURGE_COMPLETED_EVENT = PREFIX + "Completed";

  /**
   * Blocking call for purging all offline helix instances. Provides boilerplate code for providing periodic updates
   * and sending a GTE if it's an unexpected amount of time.
   * <p>
   * All previous helix instances should be purged on startup. Gobblin task runners are stateless from helix
   * perspective because all important state is persisted separately in Workunit State Store or Watermark store.
   *
   * @param admin the Helix admin used to perform the purge
   * @param clusterName the Helix cluster to purge
   * @param laggingThresholdMs elapsed time beyond which a "lagging" GTE is emitted
   * @param gteMetadata metadata attached to every GTE emitted by this call
   */
  public void purgeAllOfflineInstances(HelixAdmin admin, String clusterName, long laggingThresholdMs, Map<String, String> gteMetadata) {
    // NOTE(review): the blocking purge call runs on ForkJoinPool.commonPool(); acceptable for a
    // one-off startup task, but confirm if this is ever invoked repeatedly or concurrently.
    CompletableFuture<Void> purgeTask = CompletableFuture.runAsync(() -> {
      long offlineDuration = 0; // 0 means all offline instances should be purged.
      admin.purgeOfflineInstances(clusterName, offlineDuration);
    });

    long timeToPurgeMs = waitForPurgeCompletion(purgeTask, laggingThresholdMs, Stopwatch.createUnstarted(), gteMetadata);
    log.info("Finished purging offline helix instances. It took timeToPurgeMs={}", timeToPurgeMs);
  }

  /**
   * Polls {@code purgeTask} until it completes, logging progress at {@code pollingRateMs} intervals
   * and emitting at most one "lagging" GTE once {@code laggingThresholdMs} is exceeded. Emits a
   * "completed" GTE on success and a "failure" GTE if the task throws or the wait is interrupted.
   *
   * @return the elapsed time in milliseconds until the task finished (or failed)
   */
  @VisibleForTesting
  long waitForPurgeCompletion(CompletableFuture<Void> purgeTask, long laggingThresholdMs, Stopwatch watch,
      Map<String, String> gteMetadata) {
    watch.start();
    try {
      boolean haveSubmittedLaggingEvent = false;
      while (!purgeTask.isDone()) {
        long elapsedTimeMs = watch.elapsed(TimeUnit.MILLISECONDS);
        log.info("Waiting for helix to purge offline instances. Cannot proceed with execution because purging is a "
            + "non-thread safe call. To disable purging offline instances during startup, change the flag {} "
            + "elapsedTimeMs={}, laggingThresholdMs={}",
            GobblinYarnConfigurationKeys.HELIX_PURGE_OFFLINE_INSTANCES_ENABLED, elapsedTimeMs, laggingThresholdMs);

        if (!haveSubmittedLaggingEvent && elapsedTimeMs > laggingThresholdMs) {
          submitLaggingEvent(elapsedTimeMs, laggingThresholdMs, gteMetadata);
          haveSubmittedLaggingEvent = true;
        }
        Thread.sleep(this.pollingRateMs);
      }

      long timeToPurgeMs = watch.elapsed(TimeUnit.MILLISECONDS);
      // The task may have finished between polls; still report lagging if the threshold was exceeded.
      if (!haveSubmittedLaggingEvent && timeToPurgeMs > laggingThresholdMs) {
        submitLaggingEvent(timeToPurgeMs, laggingThresholdMs, gteMetadata);
      }

      purgeTask.get(); // Propagates any exception thrown by the purge task.
      submitCompletedEvent(timeToPurgeMs, gteMetadata);
      return timeToPurgeMs;
    } catch (InterruptedException e) {
      // Restore the interrupt flag so callers can observe the interruption.
      Thread.currentThread().interrupt();
      return handlePurgeFailure(watch, gteMetadata, e);
    } catch (ExecutionException e) {
      return handlePurgeFailure(watch, gteMetadata, e);
    }
  }

  /** Logs the purge failure, emits the failure GTE, and returns the elapsed time. */
  private long handlePurgeFailure(Stopwatch watch, Map<String, String> gteMetadata, Exception e) {
    log.warn("The call to purge offline helix instances failed. This is not a fatal error because it is not mandatory to "
        + "clean up old helix instances. But repeated failure to purge offline helix instances will cause an accumulation "
        + "of offline helix instances which may cause large delays in future helix calls.", e);
    long timeToPurgeMs = watch.elapsed(TimeUnit.MILLISECONDS);
    submitFailureEvent(timeToPurgeMs, gteMetadata);
    return timeToPurgeMs;
  }

  /** Creates an event builder for {@code eventName} pre-populated with the shared GTE metadata. */
  private GobblinEventBuilder newEventBuilder(String eventName, Map<String, String> additionalMetadata) {
    GobblinEventBuilder eventBuilder = new GobblinEventBuilder(eventName);
    eventBuilder.addAdditionalMetadata(additionalMetadata);
    return eventBuilder;
  }

  private void submitFailureEvent(long elapsedTimeMs, Map<String, String> additionalMetadata) {
    if (eventSubmitter == null) {
      log.warn("Cannot submit {} GTE because eventSubmitter is null", PURGE_FAILURE_EVENT);
      return;
    }
    GobblinEventBuilder eventBuilder = newEventBuilder(PURGE_FAILURE_EVENT, additionalMetadata);
    eventBuilder.addMetadata("elapsedTimeMs", String.valueOf(elapsedTimeMs));
    log.warn("Submitting GTE because purging offline instances has failed to complete. event={}", eventBuilder);
    eventSubmitter.submit(eventBuilder);
  }

  private void submitCompletedEvent(long timeToPurgeMs, Map<String, String> additionalMetadata) {
    if (eventSubmitter == null) {
      log.warn("Cannot submit {} GTE because eventSubmitter is null", PURGE_COMPLETED_EVENT);
      return;
    }
    GobblinEventBuilder eventBuilder = newEventBuilder(PURGE_COMPLETED_EVENT, additionalMetadata);
    eventBuilder.addMetadata("timeToPurgeMs", String.valueOf(timeToPurgeMs));
    log.info("Submitting GTE because purging offline instances has completed successfully. event={}", eventBuilder);
    eventSubmitter.submit(eventBuilder);
  }

  private void submitLaggingEvent(long elapsedTimeMs, long laggingThresholdMs,
      Map<String, String> additionalMetadata) {
    if (eventSubmitter == null) {
      log.warn("Cannot submit {} GTE because eventSubmitter is null", PURGE_LAGGING_EVENT);
      return;
    }
    GobblinEventBuilder eventBuilder = newEventBuilder(PURGE_LAGGING_EVENT, additionalMetadata);
    eventBuilder.addMetadata("elapsedTimeMs", String.valueOf(elapsedTimeMs));
    eventBuilder.addMetadata("laggingThresholdMs", String.valueOf(laggingThresholdMs));
    log.info("Submitting GTE because purging offline instances is lagging and has exceeded lagging threshold. event={}",
        eventBuilder);
    eventSubmitter.submit(eventBuilder);
  }
}
| 1,916 |
0 | Create_ds/gobblin/gobblin-yarn/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-yarn/src/main/java/org/apache/gobblin/yarn/GobblinYarnEventConstants.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.yarn;
/**
 * YARN specific event constants to be used with an {@link org.apache.gobblin.metrics.event.EventSubmitter}.
 */
public class GobblinYarnEventConstants {

  public static final String EVENT_NAMESPACE = "gobblin.yarn";
  public static final String EVENT_CONTEXT_NAME = "GobblinYarn";

  // Constants holder; not meant to be instantiated.
  private GobblinYarnEventConstants() {
  }

  /** Keys for metadata attached to the events named in {@link EventNames}. */
  public static class EventMetadata {
    public static final String CONTAINER_STATUS_EXIT_STATUS = "containerStatus.exitStatus";
    public static final String CONTAINER_STATUS_EXIT_DIAGNOSTICS = "containerStatus.diagnostics";
    public static final String CONTAINER_STATUS_RETRY_ATTEMPT = "containerStatus.retryAttempt";
    public static final String CONTAINER_STATUS_CONTAINER_STATE = "containerStatus.state";
    public static final String ERROR_EXCEPTION = "errorException";
    public static final String HELIX_INSTANCE_ID = "helixInstanceId";

    private EventMetadata() {
    }
  }

  /** Names of the events emitted by the Gobblin YARN application. */
  public static class EventNames {
    public static final String CONTAINER_ALLOCATION = "ContainerAllocation";
    public static final String CONTAINER_STARTED = "ContainerStarted";
    public static final String CONTAINER_STATUS_RECEIVED = "ContainerStatusReceived";
    public static final String CONTAINER_STOPPED = "ContainerStopped";
    public static final String CONTAINER_START_ERROR = "ContainerStartError";
    public static final String CONTAINER_GET_STATUS_ERROR = "ContainerGetStatusError";
    public static final String CONTAINER_STOP_ERROR = "ContainerStopError";
    public static final String ERROR = "Error";
    public static final String HELIX_INSTANCE_COMPLETION = "HelixInstanceCompletion";
    public static final String SHUTDOWN_REQUEST = "ShutdownRequest";

    private EventNames() {
    }
  }
}
| 1,917 |
0 | Create_ds/gobblin/gobblin-yarn/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-yarn/src/main/java/org/apache/gobblin/yarn/GobblinYarnAppLauncher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.yarn;
import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.nio.ByteBuffer;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.avro.Schema;
import org.apache.commons.io.FileUtils;
import org.apache.commons.mail.EmailException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RawLocalFileSystem;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse;
import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.LocalResourceType;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.client.api.YarnClientApplication;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.util.Records;
import org.apache.helix.Criteria;
import org.apache.helix.HelixManager;
import org.apache.helix.HelixManagerFactory;
import org.apache.helix.InstanceType;
import org.apache.helix.model.Message;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Charsets;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Splitter;
import com.google.common.base.Strings;
import com.google.common.base.Throwables;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.eventbus.EventBus;
import com.google.common.eventbus.Subscribe;
import com.google.common.io.Closer;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.Service;
import com.google.common.util.concurrent.ServiceManager;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigRenderOptions;
import com.typesafe.config.ConfigValueFactory;
import org.apache.gobblin.cluster.GobblinClusterConfigurationKeys;
import org.apache.gobblin.cluster.GobblinClusterManager;
import org.apache.gobblin.cluster.GobblinClusterUtils;
import org.apache.gobblin.cluster.GobblinHelixConstants;
import org.apache.gobblin.cluster.GobblinHelixMessagingService;
import org.apache.gobblin.cluster.HelixUtils;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.metrics.kafka.KafkaAvroSchemaRegistry;
import org.apache.gobblin.metrics.kafka.SchemaRegistryException;
import org.apache.gobblin.metrics.reporter.util.KafkaReporterUtils;
import org.apache.gobblin.rest.JobExecutionInfoServer;
import org.apache.gobblin.runtime.app.ServiceBasedAppLauncher;
import org.apache.gobblin.util.ClassAliasResolver;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.EmailUtils;
import org.apache.gobblin.util.ExecutorsUtils;
import org.apache.gobblin.util.JvmUtils;
import org.apache.gobblin.util.hadoop.TokenUtils;
import org.apache.gobblin.util.io.StreamUtils;
import org.apache.gobblin.util.logs.LogCopier;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
import org.apache.gobblin.yarn.event.ApplicationReportArrivalEvent;
import org.apache.gobblin.yarn.event.GetApplicationReportFailureEvent;
import static org.apache.hadoop.security.UserGroupInformation.HADOOP_TOKEN_FILE_LOCATION;
/**
* A client driver to launch Gobblin as a Yarn application.
*
* <p>
* This class, upon starting, will check if there's a Yarn application that it has previously submitted and
* it is able to reconnect to. More specifically, it checks if an application with the same application name
* exists and can be reconnected to, i.e., if the application has not completed yet. If so, it simply starts
* monitoring that application.
* </p>
*
* <p>
* On the other hand, if there's no such a reconnectable Yarn application, This class will launch a new Yarn
* application and start the {@link GobblinApplicationMaster}. It also persists the new application ID so it
* is able to reconnect to the Yarn application if it is restarted for some reason. Once the application is
* launched, this class starts to monitor the application by periodically polling the status of the application
* through a {@link ListeningExecutorService}.
* </p>
*
* <p>
* If a shutdown signal is received, it sends a Helix
* {@link org.apache.helix.model.Message.MessageType#SCHEDULER_MSG} to the {@link GobblinApplicationMaster}
* asking it to shutdown and release all the allocated containers. It also sends an email notification for
* the shutdown if {@link GobblinYarnConfigurationKeys#EMAIL_NOTIFICATION_ON_SHUTDOWN_KEY} is {@code true}.
* </p>
*
* <p>
* This class has a scheduled task to get the {@link ApplicationReport} of the Yarn application periodically.
* Since it may fail to get the {@link ApplicationReport} due to reason such as the Yarn cluster is down for
* maintenance, it keeps track of the count of consecutive failures to get the {@link ApplicationReport}. If
* this count exceeds the maximum number allowed, it will initiate a shutdown.
* </p>
*
* <p>
 * Users of {@link GobblinYarnAppLauncher} need to call {@link #initializeYarnClients}, which a child class can override.
* </p>
*
* @author Yinan Li
*/
public class GobblinYarnAppLauncher {
  // Config key naming the file path where the effective (merged/dynamic) config is written out.
  public static final String GOBBLIN_YARN_CONFIG_OUTPUT_PATH = "gobblin.yarn.configOutputPath";
  //Configuration key to signal the GobblinYarnAppLauncher mode
  public static final String GOBBLIN_YARN_APP_LAUNCHER_MODE = "gobblin.yarn.appLauncherMode";
  public static final String DEFAULT_GOBBLIN_YARN_APP_LAUNCHER_MODE = "";
  // Special launcher mode used when running under Azkaban; see sanitizeApplicationId(String).
  public static final String AZKABAN_APP_LAUNCHER_MODE_KEY = "azkaban";
  private static final Logger LOGGER = LoggerFactory.getLogger(GobblinYarnAppLauncher.class);
  // Splits comma-separated path/address lists from the config.
  private static final Splitter SPLITTER = Splitter.on(',').omitEmptyStrings().trimResults();
  private static final String GOBBLIN_YARN_APPLICATION_TYPE = "GOBBLIN_YARN";
  // The set of Yarn application types this class is interested in. This is used to
  // lookup the application this class has launched previously upon restarting.
  private static final Set<String> APPLICATION_TYPES = ImmutableSet.of(GOBBLIN_YARN_APPLICATION_TYPE);
  // The set of Yarn application states under which the driver can reconnect to the Yarn application after restart
  private static final EnumSet<YarnApplicationState> RECONNECTABLE_APPLICATION_STATES = EnumSet.of(
      YarnApplicationState.NEW,
      YarnApplicationState.NEW_SAVING,
      YarnApplicationState.SUBMITTED,
      YarnApplicationState.ACCEPTED,
      YarnApplicationState.RUNNING
  );
  private final String applicationName;
  private final String appQueueName;
  // YARN view ACL applied to the submitted application (who may view it in the RM UI).
  private final String appViewAcl;
  private final Config config;
  // Connected as a Helix SPECTATOR; used for messaging (e.g. shutdown requests) to the AM.
  private final HelixManager helixManager;
  protected final Configuration yarnConfiguration;
  private final FileSystem fs;
  private final EventBus eventBus = new EventBus(GobblinYarnAppLauncher.class.getSimpleName());
  // Single-threaded scheduler that periodically polls the ApplicationReport.
  private final ScheduledExecutorService applicationStatusMonitor;
  private final long appReportIntervalMinutes;
  private final Optional<String> appMasterJvmArgs;
  // Local root directory into which container logs are copied by the LogCopier service.
  private final Path sinkLogRootDir;
  private final Closer closer = Closer.create();
  private final String helixInstanceName;
  private final GobblinHelixMessagingService messagingService;
  // Yarn application ID
  private volatile Optional<ApplicationId> applicationId = Optional.absent();
  private volatile Optional<ServiceManager> serviceManager = Optional.absent();
  // Maximum number of consecutive failures allowed to get the ApplicationReport
  private final int maxGetApplicationReportFailures;
  // A count on the number of consecutive failures on getting the ApplicationReport
  private final AtomicInteger getApplicationReportFailureCount = new AtomicInteger();
  // This flag tells if the Yarn application has already completed. This is used to
  // tell if it is necessary to send a shutdown message to the ApplicationMaster.
  private volatile boolean applicationCompleted = false;
  private volatile boolean stopped = false;
  private final boolean emailNotificationOnShutdown;
  private final boolean isHelixClusterManaged;
  // When true, stop() leaves the YARN app running and skips cleanup (detach semantics).
  private final boolean detachOnExitEnabled;
  private final int appMasterMemoryMbs;
  private final int jvmMemoryOverheadMbs;
  private final double jvmMemoryXmxRatio;
  private Optional<AbstractYarnAppSecurityManager> securityManager = Optional.absent();
  private final String containerTimezone;
  private final String appLauncherMode;
  // The RM address configured at construction time; also the submission target for new apps.
  protected final String originalYarnRMAddress;
  // One YarnClient per candidate RM address; used to find a reconnectable application.
  protected final Map<String, YarnClient> potentialYarnClients = new HashMap<>();
  // The client actually used for the launched/reconnected application.
  private YarnClient yarnClient;
  /**
   * Constructs a launcher from the supplied application {@link Config} and {@link YarnConfiguration}.
   *
   * <p>Reads all required settings eagerly (so misconfiguration fails fast), builds the Helix
   * spectator manager, the filesystem, and the status-monitor executor, and finally writes the
   * dynamically-augmented config out to a file for the ApplicationMaster to pick up.</p>
   *
   * @param config the application configuration
   * @param yarnConfiguration the Hadoop/YARN configuration
   * @throws IOException if the filesystem cannot be built or the dynamic config cannot be written
   */
  public GobblinYarnAppLauncher(Config config, YarnConfiguration yarnConfiguration) throws IOException {
    this.config = config;
    this.applicationName = config.getString(GobblinYarnConfigurationKeys.APPLICATION_NAME_KEY);
    this.appQueueName = config.getString(GobblinYarnConfigurationKeys.APP_QUEUE_KEY);
    String zkConnectionString = config.getString(GobblinClusterConfigurationKeys.ZK_CONNECTION_STRING_KEY);
    LOGGER.info("Using ZooKeeper connection string: " + zkConnectionString);
    // Spectator role: this driver only observes/messages the Helix cluster, it is not a participant.
    this.helixManager = HelixManagerFactory.getZKHelixManager(
        config.getString(GobblinClusterConfigurationKeys.HELIX_CLUSTER_NAME_KEY), GobblinClusterUtils.getHostname(),
        InstanceType.SPECTATOR, zkConnectionString);
    this.yarnConfiguration = yarnConfiguration;
    YarnHelixUtils.setYarnClassPath(config, this.yarnConfiguration);
    YarnHelixUtils.setAdditionalYarnClassPath(config, this.yarnConfiguration);
    // Disable automatic FS close so the shared FileSystem cache instance is not shut down underneath us;
    // the filesystem is closed explicitly via this.closer.
    this.yarnConfiguration.set("fs.automatic.close", "false");
    this.originalYarnRMAddress = this.yarnConfiguration.get(GobblinYarnConfigurationKeys.YARN_RESOURCE_MANAGER_ADDRESS);
    this.fs = GobblinClusterUtils.buildFileSystem(config, this.yarnConfiguration);
    this.closer.register(this.fs);
    this.applicationStatusMonitor = Executors.newSingleThreadScheduledExecutor(
        ExecutorsUtils.newThreadFactory(Optional.of(LOGGER), Optional.of("GobblinYarnAppStatusMonitor")));
    this.appReportIntervalMinutes = config.getLong(GobblinYarnConfigurationKeys.APP_REPORT_INTERVAL_MINUTES_KEY);
    this.appMasterJvmArgs = config.hasPath(GobblinYarnConfigurationKeys.APP_MASTER_JVM_ARGS_KEY) ?
        Optional.of(config.getString(GobblinYarnConfigurationKeys.APP_MASTER_JVM_ARGS_KEY)) :
        Optional.<String>absent();
    this.sinkLogRootDir = new Path(config.getString(GobblinYarnConfigurationKeys.LOGS_SINK_ROOT_DIR_KEY));
    this.maxGetApplicationReportFailures = config.getInt(GobblinYarnConfigurationKeys.MAX_GET_APP_REPORT_FAILURES_KEY);
    this.emailNotificationOnShutdown =
        config.getBoolean(GobblinYarnConfigurationKeys.EMAIL_NOTIFICATION_ON_SHUTDOWN_KEY);
    this.appMasterMemoryMbs = this.config.getInt(GobblinYarnConfigurationKeys.APP_MASTER_MEMORY_MBS_KEY);
    // Xmx is computed as memoryMbs * ratio - overhead; validate the ratio and overhead up front.
    this.jvmMemoryXmxRatio = ConfigUtils.getDouble(this.config,
        GobblinYarnConfigurationKeys.APP_MASTER_JVM_MEMORY_XMX_RATIO_KEY,
        GobblinYarnConfigurationKeys.DEFAULT_APP_MASTER_JVM_MEMORY_XMX_RATIO);
    Preconditions.checkArgument(this.jvmMemoryXmxRatio >= 0 && this.jvmMemoryXmxRatio <= 1,
        GobblinYarnConfigurationKeys.APP_MASTER_JVM_MEMORY_XMX_RATIO_KEY + " must be between 0 and 1 inclusive");
    this.jvmMemoryOverheadMbs = ConfigUtils.getInt(this.config,
        GobblinYarnConfigurationKeys.APP_MASTER_JVM_MEMORY_OVERHEAD_MBS_KEY,
        GobblinYarnConfigurationKeys.DEFAULT_APP_MASTER_JVM_MEMORY_OVERHEAD_MBS);
    // NOTE(review): this message references the CONTAINER_* keys while the check is on the
    // APP_MASTER_* values -- looks like a copy-paste in the message text; confirm before changing.
    Preconditions.checkArgument(this.jvmMemoryOverheadMbs < this.appMasterMemoryMbs * this.jvmMemoryXmxRatio,
        GobblinYarnConfigurationKeys.CONTAINER_JVM_MEMORY_OVERHEAD_MBS_KEY + " cannot be more than "
            + GobblinYarnConfigurationKeys.CONTAINER_MEMORY_MBS_KEY + " * "
            + GobblinYarnConfigurationKeys.CONTAINER_JVM_MEMORY_XMX_RATIO_KEY);
    this.appViewAcl = ConfigUtils.getString(this.config, GobblinYarnConfigurationKeys.APP_VIEW_ACL,
        GobblinYarnConfigurationKeys.DEFAULT_APP_VIEW_ACL);
    this.containerTimezone = ConfigUtils.getString(this.config, GobblinYarnConfigurationKeys.GOBBLIN_YARN_CONTAINER_TIMEZONE,
        GobblinYarnConfigurationKeys.DEFAULT_GOBBLIN_YARN_CONTAINER_TIMEZONE);
    this.isHelixClusterManaged = ConfigUtils.getBoolean(this.config, GobblinClusterConfigurationKeys.IS_HELIX_CLUSTER_MANAGED,
        GobblinClusterConfigurationKeys.DEFAULT_IS_HELIX_CLUSTER_MANAGED);
    this.helixInstanceName = ConfigUtils.getString(config, GobblinClusterConfigurationKeys.HELIX_INSTANCE_NAME_KEY,
        GobblinClusterManager.class.getSimpleName());
    this.detachOnExitEnabled = ConfigUtils
        .getBoolean(config, GobblinYarnConfigurationKeys.GOBBLIN_YARN_DETACH_ON_EXIT_ENABLED,
            GobblinYarnConfigurationKeys.DEFAULT_GOBBLIN_YARN_DETACH_ON_EXIT);
    this.appLauncherMode = ConfigUtils.getString(config, GOBBLIN_YARN_APP_LAUNCHER_MODE, DEFAULT_GOBBLIN_YARN_APP_LAUNCHER_MODE);
    this.messagingService = new GobblinHelixMessagingService(this.helixManager);
    try {
      // Persist the augmented config so the ApplicationMaster sees the same effective settings.
      config = addDynamicConfig(config);
      outputConfigToFile(config);
    } catch (SchemaRegistryException e) {
      throw new IOException(e);
    }
  }
public void initializeYarnClients(Config config) {
Set<String> potentialRMAddresses = new HashSet<>(ConfigUtils.getStringList(config, GobblinYarnConfigurationKeys.OTHER_YARN_RESOURCE_MANAGER_ADDRESSES));
potentialRMAddresses.add(originalYarnRMAddress);
for (String rmAddress : potentialRMAddresses) {
YarnClient tmpYarnClient = YarnClient.createYarnClient();
this.yarnConfiguration.set(GobblinYarnConfigurationKeys.YARN_RESOURCE_MANAGER_ADDRESS, rmAddress);
tmpYarnClient.init(new YarnConfiguration(this.yarnConfiguration));
this.potentialYarnClients.put(rmAddress, tmpYarnClient);
}
}
/**
* Launch a new Gobblin instance on Yarn.
*
* @throws IOException if there's something wrong launching the application
* @throws YarnException if there's something wrong launching the application
*/
  /**
   * Launch a new Gobblin instance on Yarn.
   *
   * <p>Order of operations: register for event-bus callbacks, create the Helix cluster (unless
   * externally managed), connect the Helix spectator, log in via the security manager (if key
   * management is enabled), start all candidate YARN clients, reconnect to an existing application
   * if one is found or otherwise submit a new one, then start the periodic ApplicationReport poll
   * and the auxiliary services.</p>
   *
   * @throws IOException if there's something wrong launching the application
   * @throws YarnException if there's something wrong launching the application
   */
  public void launch() throws IOException, YarnException, InterruptedException {
    this.eventBus.register(this);
    if (this.isHelixClusterManaged) {
      LOGGER.info("Helix cluster is managed; skipping creation of Helix cluster");
    } else {
      String clusterName = this.config.getString(GobblinClusterConfigurationKeys.HELIX_CLUSTER_NAME_KEY);
      boolean overwriteExistingCluster = ConfigUtils.getBoolean(this.config, GobblinClusterConfigurationKeys.HELIX_CLUSTER_OVERWRITE_KEY,
          GobblinClusterConfigurationKeys.DEFAULT_HELIX_CLUSTER_OVERWRITE);
      LOGGER.info("Creating Helix cluster {} with overwrite: {}", clusterName, overwriteExistingCluster);
      HelixUtils.createGobblinHelixCluster(this.config.getString(GobblinClusterConfigurationKeys.ZK_CONNECTION_STRING_KEY),
          clusterName, overwriteExistingCluster);
      LOGGER.info("Created Helix cluster " + clusterName);
    }
    connectHelixManager();
    //Before connect with yarn client, we need to login to get the token
    if(ConfigUtils.getBoolean(config, GobblinYarnConfigurationKeys.ENABLE_KEY_MANAGEMENT, false)) {
      this.securityManager = Optional.of(buildSecurityManager());
      this.securityManager.get().loginAndScheduleTokenRenewal();
    }
    startYarnClient();
    // Prefer reconnecting to a still-live application from a previous run over submitting a new one.
    this.applicationId = getReconnectableApplicationId();
    if (!this.applicationId.isPresent()) {
      LOGGER.info("No reconnectable application found so submitting a new application");
      // New submissions always go to the originally-configured RM.
      this.yarnClient = potentialYarnClients.get(this.originalYarnRMAddress);
      this.applicationId = Optional.of(setupAndSubmitApplication());
    }
    // Poll the application report on a fixed schedule; successes and failures are both delivered
    // through the event bus (see the @Subscribe handlers below).
    this.applicationStatusMonitor.scheduleAtFixedRate(new Runnable() {
      @Override
      public void run() {
        try {
          eventBus.post(new ApplicationReportArrivalEvent(yarnClient.getApplicationReport(applicationId.get())));
        } catch (YarnException | IOException e) {
          LOGGER.error("Failed to get application report for Gobblin Yarn application " + applicationId.get(), e);
          eventBus.post(new GetApplicationReportFailureEvent(e));
        }
      }
    }, 0, this.appReportIntervalMinutes, TimeUnit.MINUTES);
    addServices();
  }
private void addServices() throws IOException{
List<Service> services = Lists.newArrayList();
if (this.securityManager.isPresent()) {
LOGGER.info("Adding KeyManagerService since key management is enabled");
services.add(this.securityManager.get());
}
if (!this.config.hasPath(GobblinYarnConfigurationKeys.LOG_COPIER_DISABLE_DRIVER_COPY) ||
!this.config.getBoolean(GobblinYarnConfigurationKeys.LOG_COPIER_DISABLE_DRIVER_COPY)) {
services.add(buildLogCopier(this.config,
new Path(this.sinkLogRootDir, this.applicationName + Path.SEPARATOR + this.applicationId.get().toString()),
GobblinClusterUtils.getAppWorkDirPathFromConfig(this.config, this.fs, this.applicationName, this.applicationId.get().toString())));
}
if (config.getBoolean(ConfigurationKeys.JOB_EXECINFO_SERVER_ENABLED_KEY)) {
LOGGER.info("Starting the job execution info server since it is enabled");
Properties properties = ConfigUtils.configToProperties(config);
JobExecutionInfoServer executionInfoServer = new JobExecutionInfoServer(properties);
services.add(executionInfoServer);
if (config.getBoolean(ConfigurationKeys.ADMIN_SERVER_ENABLED_KEY)) {
LOGGER.info("Starting the admin UI server since it is enabled");
services.add(ServiceBasedAppLauncher.createAdminServer(properties,
executionInfoServer.getAdvertisedServerUri()));
}
} else if (config.getBoolean(ConfigurationKeys.ADMIN_SERVER_ENABLED_KEY)) {
LOGGER.warn("NOT starting the admin UI because the job execution info server is NOT enabled");
}
if (services.size() > 0 ) {
this.serviceManager = Optional.of(new ServiceManager(services));
this.serviceManager.get().startAsync();
} else {
serviceManager = Optional.absent();
}
}
/**
* Stop this {@link GobblinYarnAppLauncher} instance.
*
* @throws IOException if this {@link GobblinYarnAppLauncher} instance fails to clean up its working directory.
*/
  /**
   * Stop this {@link GobblinYarnAppLauncher} instance.
   *
   * <p>Idempotent: subsequent calls after the first are no-ops. Unless detach-on-exit is enabled,
   * this sends a shutdown message to the ApplicationMaster and cleans up the application work
   * directory; with detach enabled the YARN application is left running.</p>
   *
   * @throws IOException if this {@link GobblinYarnAppLauncher} instance fails to clean up its working directory.
   */
  public synchronized void stop() throws IOException, TimeoutException {
    if (this.stopped) {
      return;
    }
    LOGGER.info("Stopping the " + GobblinYarnAppLauncher.class.getSimpleName());
    try {
      if (this.applicationId.isPresent() && !this.applicationCompleted && !this.detachOnExitEnabled) {
        // Only send the shutdown message if the application has been successfully submitted and is still running
        sendShutdownRequest();
      }
      if (this.serviceManager.isPresent()) {
        this.serviceManager.get().stopAsync().awaitStopped(5, TimeUnit.MINUTES);
      }
      ExecutorsUtils.shutdownExecutorService(this.applicationStatusMonitor, Optional.of(LOGGER), 5, TimeUnit.MINUTES);
      stopYarnClient();
      if (!this.detachOnExitEnabled) {
        LOGGER.info("Disabling all live Helix instances..");
      }
      disconnectHelixManager();
    } finally {
      // Nested finally so the closer runs even if work-dir cleanup throws.
      try {
        if (this.applicationId.isPresent() && !this.detachOnExitEnabled) {
          cleanUpAppWorkDirectory(this.applicationId.get());
        }
      } finally {
        this.closer.close();
      }
    }
    // Only marked stopped after a fully successful shutdown; a failed attempt may be retried.
    this.stopped = true;
  }
@Subscribe
public void handleApplicationReportArrivalEvent(ApplicationReportArrivalEvent applicationReportArrivalEvent) {
ApplicationReport applicationReport = applicationReportArrivalEvent.getApplicationReport();
YarnApplicationState appState = applicationReport.getYarnApplicationState();
LOGGER.info("Gobblin Yarn application state: " + appState.toString());
// Reset the count on failures to get the ApplicationReport when there's one success
this.getApplicationReportFailureCount.set(0);
if (appState == YarnApplicationState.FINISHED ||
appState == YarnApplicationState.FAILED ||
appState == YarnApplicationState.KILLED) {
applicationCompleted = true;
LOGGER.info("Gobblin Yarn application finished with final status: " +
applicationReport.getFinalApplicationStatus().toString());
if (applicationReport.getFinalApplicationStatus() == FinalApplicationStatus.FAILED) {
LOGGER.error("Gobblin Yarn application failed for the following reason: " + applicationReport.getDiagnostics());
}
try {
GobblinYarnAppLauncher.this.stop();
} catch (IOException ioe) {
LOGGER.error("Failed to close the " + GobblinYarnAppLauncher.class.getSimpleName(), ioe);
} catch (TimeoutException te) {
LOGGER.error("Timeout in stopping the service manager", te);
} finally {
if (this.emailNotificationOnShutdown) {
sendEmailOnShutdown(Optional.of(applicationReport));
}
}
}
}
@Subscribe
public void handleGetApplicationReportFailureEvent(
GetApplicationReportFailureEvent getApplicationReportFailureEvent) {
int numConsecutiveFailures = this.getApplicationReportFailureCount.incrementAndGet();
if (numConsecutiveFailures > this.maxGetApplicationReportFailures) {
LOGGER.warn(String
.format("Number of consecutive failures to get the ApplicationReport %d exceeds the threshold %d",
numConsecutiveFailures, this.maxGetApplicationReportFailures));
try {
stop();
} catch (IOException ioe) {
LOGGER.error("Failed to close the " + GobblinYarnAppLauncher.class.getSimpleName(), ioe);
} catch (TimeoutException te) {
LOGGER.error("Timeout in stopping the service manager", te);
} finally {
if (this.emailNotificationOnShutdown) {
sendEmailOnShutdown(Optional.<ApplicationReport>absent());
}
}
}
}
  /**
   * Connects the Helix spectator manager, rethrowing any connection failure unchecked.
   */
  @VisibleForTesting
  void connectHelixManager() {
    try {
      this.helixManager.connect();
    } catch (Exception e) {
      LOGGER.error("HelixManager failed to connect", e);
      // NOTE(review): Throwables.propagate is deprecated in Guava 20+; consider
      // throwIfUnchecked(e) + new RuntimeException(e) once the Guava version allows it.
      throw Throwables.propagate(e);
    }
  }
@VisibleForTesting
void disconnectHelixManager() {
if (this.helixManager.isConnected()) {
this.helixManager.disconnect();
}
}
@VisibleForTesting
void startYarnClient() {
for (YarnClient yarnClient : potentialYarnClients.values()) {
yarnClient.start();
}
}
@VisibleForTesting
void stopYarnClient() {
for (YarnClient yarnClient : potentialYarnClients.values()) {
yarnClient.stop();
}
}
/**
* A utility method that removes the "application_" prefix from the Yarn application id when the {@link GobblinYarnAppLauncher}
* is launched via Azkaban. This is because when an Azkaban application is killed, Azkaban finds the Yarn application id
* from the logs by searching for the pattern "application_". This is a hacky workaround to prevent Azkaban to detect the
* Yarn application id from the logs.
* @param applicationId
* @return a sanitized application Id in the Azkaban mode.
*/
private String sanitizeApplicationId(String applicationId) {
if (this.detachOnExitEnabled && this.appLauncherMode.equalsIgnoreCase(AZKABAN_APP_LAUNCHER_MODE_KEY)) {
applicationId = applicationId.replaceAll("application_", "");
}
return applicationId;
}
@VisibleForTesting
Optional<ApplicationId> getReconnectableApplicationId() throws YarnException, IOException {
for (YarnClient yarnClient: potentialYarnClients.values()) {
List<ApplicationReport> applicationReports = yarnClient.getApplications(APPLICATION_TYPES, RECONNECTABLE_APPLICATION_STATES);
if (applicationReports == null || applicationReports.isEmpty()) {
continue;
}
// Try to find an application with a matching application name
for (ApplicationReport applicationReport : applicationReports) {
if (this.applicationName.equals(applicationReport.getName())) {
String applicationId = sanitizeApplicationId(applicationReport.getApplicationId().toString());
LOGGER.info("Found reconnectable application with application ID: " + applicationId);
LOGGER.info("Application Tracking URL: " + applicationReport.getTrackingUrl());
this.yarnClient = yarnClient;
return Optional.of(applicationReport.getApplicationId());
}
}
}
return Optional.absent();
}
  /**
   * Setup and submit the Gobblin Yarn application.
   *
   * <p>Creates the application via the YARN client, prepares the ApplicationMaster container
   * (resources, local resources, environment, launch command, ACLs, and — under Kerberos —
   * delegation tokens), stages the worker containers' local resources, and finally submits
   * the application.</p>
   *
   * @return the id of the newly submitted application
   * @throws IOException if there's anything wrong setting up and submitting the Yarn application
   * @throws YarnException if there's anything wrong setting up and submitting the Yarn application
   */
  @VisibleForTesting
  ApplicationId setupAndSubmitApplication() throws IOException, YarnException, InterruptedException {
    LOGGER.info("creating new yarn application");
    YarnClientApplication gobblinYarnApp = this.yarnClient.createApplication();
    ApplicationSubmissionContext appSubmissionContext = gobblinYarnApp.getApplicationSubmissionContext();
    appSubmissionContext.setApplicationType(GOBBLIN_YARN_APPLICATION_TYPE);
    appSubmissionContext.setMaxAppAttempts(ConfigUtils.getInt(config, GobblinYarnConfigurationKeys.APP_MASTER_MAX_ATTEMPTS_KEY, GobblinYarnConfigurationKeys.DEFAULT_APP_MASTER_MAX_ATTEMPTS_KEY));
    ApplicationId applicationId = appSubmissionContext.getApplicationId();
    LOGGER.info("created new yarn application: "+ applicationId.getId());
    GetNewApplicationResponse newApplicationResponse = gobblinYarnApp.getNewApplicationResponse();
    // Set up resource type requirements for ApplicationMaster
    Resource resource = prepareContainerResource(newApplicationResponse);
    // Add lib jars, and jars and files that the ApplicationMaster need as LocalResources
    Map<String, LocalResource> appMasterLocalResources = addAppMasterLocalResources(applicationId);
    ContainerLaunchContext amContainerLaunchContext = Records.newRecord(ContainerLaunchContext.class);
    amContainerLaunchContext.setLocalResources(appMasterLocalResources);
    amContainerLaunchContext.setEnvironment(YarnHelixUtils.getEnvironmentVariables(this.yarnConfiguration));
    amContainerLaunchContext.setCommands(Lists.newArrayList(buildApplicationMasterCommand(applicationId.toString(), resource.getMemory())));
    // Restrict who can view the application in the RM UI/API.
    Map<ApplicationAccessType, String> acls = new HashMap<>(1);
    acls.put(ApplicationAccessType.VIEW_APP, this.appViewAcl);
    amContainerLaunchContext.setApplicationACLs(acls);
    if (UserGroupInformation.isSecurityEnabled()) {
      setupSecurityTokens(amContainerLaunchContext);
    }
    // Setup the application submission context
    appSubmissionContext.setApplicationName(this.applicationName);
    appSubmissionContext.setResource(resource);
    appSubmissionContext.setQueue(this.appQueueName);
    appSubmissionContext.setPriority(Priority.newInstance(0));
    appSubmissionContext.setAMContainerSpec(amContainerLaunchContext);
    // Also setup container local resources by copying local jars and files the container need to HDFS
    addContainerLocalResources(applicationId);
    // Submit the application
    // The id is sanitized in the log line so Azkaban's log scanner does not pick it up in detach mode.
    LOGGER.info("Submitting application " + sanitizeApplicationId(applicationId.toString()));
    this.yarnClient.submitApplication(appSubmissionContext);
    LOGGER.info("Application successfully submitted and accepted");
    ApplicationReport applicationReport = this.yarnClient.getApplicationReport(applicationId);
    LOGGER.info("Application Name: " + applicationReport.getName());
    LOGGER.info("Application Tracking URL: " + applicationReport.getTrackingUrl());
    LOGGER.info("Application User: " + applicationReport.getUser() + " Queue: " + applicationReport.getQueue());
    return applicationId;
  }
private Resource prepareContainerResource(GetNewApplicationResponse newApplicationResponse) {
int memoryMbs = this.appMasterMemoryMbs;
int maximumMemoryCapacity = newApplicationResponse.getMaximumResourceCapability().getMemory();
if (memoryMbs > maximumMemoryCapacity) {
LOGGER.info(String.format("Specified AM memory [%d] is above the maximum memory capacity [%d] of the "
+ "cluster, using the maximum memory capacity instead.", memoryMbs, maximumMemoryCapacity));
memoryMbs = maximumMemoryCapacity;
}
int vCores = this.config.getInt(GobblinYarnConfigurationKeys.APP_MASTER_CORES_KEY);
int maximumVirtualCoreCapacity = newApplicationResponse.getMaximumResourceCapability().getVirtualCores();
if (vCores > maximumVirtualCoreCapacity) {
LOGGER.info(String.format("Specified AM vcores [%d] is above the maximum vcore capacity [%d] of the "
+ "cluster, using the maximum vcore capacity instead.", memoryMbs, maximumMemoryCapacity));
vCores = maximumVirtualCoreCapacity;
}
// Set up resource type requirements for ApplicationMaster
return Resource.newInstance(memoryMbs, vCores);
}
  /**
   * Stages the ApplicationMaster's dependencies (lib jars, app jars, local files, remote files,
   * remote zips, and the job config package) into the application work directory on the cluster
   * filesystem and builds the corresponding {@link LocalResource} map. Each group is staged only
   * if its config key is present.
   *
   * @param applicationId the id used to derive the per-application work directory
   * @return the LocalResource map to attach to the AM container launch context
   * @throws IOException if copying any resource to the cluster filesystem fails
   */
  private Map<String, LocalResource> addAppMasterLocalResources(ApplicationId applicationId) throws IOException {
    Path appWorkDir = GobblinClusterUtils.getAppWorkDirPathFromConfig(this.config, this.fs, this.applicationName, applicationId.toString());
    Path appMasterWorkDir = new Path(appWorkDir, GobblinYarnConfigurationKeys.APP_MASTER_WORK_DIR_NAME);
    LOGGER.info("Configured GobblinApplicationMaster work directory to: {}", appMasterWorkDir.toString());
    Map<String, LocalResource> appMasterResources = Maps.newHashMap();
    FileSystem localFs = FileSystem.getLocal(new Configuration());
    if (this.config.hasPath(GobblinYarnConfigurationKeys.LIB_JARS_DIR_KEY)) {
      // Lib jars go under the shared app work dir (also used by worker containers).
      Path libJarsDestDir = new Path(appWorkDir, GobblinYarnConfigurationKeys.LIB_JARS_DIR_NAME);
      addLibJars(new Path(this.config.getString(GobblinYarnConfigurationKeys.LIB_JARS_DIR_KEY)),
          Optional.of(appMasterResources), libJarsDestDir, localFs);
    }
    if (this.config.hasPath(GobblinYarnConfigurationKeys.APP_MASTER_JARS_KEY)) {
      Path appJarsDestDir = new Path(appMasterWorkDir, GobblinYarnConfigurationKeys.APP_JARS_DIR_NAME);
      addAppJars(this.config.getString(GobblinYarnConfigurationKeys.APP_MASTER_JARS_KEY),
          Optional.of(appMasterResources), appJarsDestDir, localFs);
    }
    if (this.config.hasPath(GobblinYarnConfigurationKeys.APP_MASTER_FILES_LOCAL_KEY)) {
      Path appFilesDestDir = new Path(appMasterWorkDir, GobblinYarnConfigurationKeys.APP_FILES_DIR_NAME);
      addAppLocalFiles(this.config.getString(GobblinYarnConfigurationKeys.APP_MASTER_FILES_LOCAL_KEY),
          Optional.of(appMasterResources), appFilesDestDir, localFs);
    }
    if (this.config.hasPath(GobblinYarnConfigurationKeys.APP_MASTER_FILES_REMOTE_KEY)) {
      // Remote files/zips are referenced in place; nothing is copied.
      YarnHelixUtils.addRemoteFilesToLocalResources(this.config.getString(GobblinYarnConfigurationKeys.APP_MASTER_FILES_REMOTE_KEY),
          appMasterResources, yarnConfiguration);
    }
    if (this.config.hasPath(GobblinYarnConfigurationKeys.APP_MASTER_ZIPS_REMOTE_KEY)) {
      YarnHelixUtils.addRemoteZipsToLocalResources(this.config.getString(GobblinYarnConfigurationKeys.APP_MASTER_ZIPS_REMOTE_KEY),
          appMasterResources, yarnConfiguration);
    }
    if (this.config.hasPath(GobblinClusterConfigurationKeys.JOB_CONF_PATH_KEY)) {
      Path appFilesDestDir = new Path(appMasterWorkDir, GobblinYarnConfigurationKeys.APP_FILES_DIR_NAME);
      addJobConfPackage(this.config.getString(GobblinClusterConfigurationKeys.JOB_CONF_PATH_KEY), appFilesDestDir,
          appMasterResources);
    }
    return appMasterResources;
  }
private void addContainerLocalResources(ApplicationId applicationId) throws IOException {
Path appWorkDir = GobblinClusterUtils.getAppWorkDirPathFromConfig(this.config, this.fs, this.applicationName, applicationId.toString());
Path containerWorkDir = new Path(appWorkDir, GobblinYarnConfigurationKeys.CONTAINER_WORK_DIR_NAME);
LOGGER.info("Configured Container work directory to: {}", containerWorkDir.toString());
FileSystem localFs = FileSystem.getLocal(new Configuration());
if (this.config.hasPath(GobblinYarnConfigurationKeys.CONTAINER_JARS_KEY)) {
Path appJarsDestDir = new Path(containerWorkDir, GobblinYarnConfigurationKeys.APP_JARS_DIR_NAME);
addAppJars(this.config.getString(GobblinYarnConfigurationKeys.CONTAINER_JARS_KEY),
Optional.<Map<String, LocalResource>>absent(), appJarsDestDir, localFs);
}
if (this.config.hasPath(GobblinYarnConfigurationKeys.CONTAINER_FILES_LOCAL_KEY)) {
Path appFilesDestDir = new Path(containerWorkDir, GobblinYarnConfigurationKeys.APP_FILES_DIR_NAME);
addAppLocalFiles(this.config.getString(GobblinYarnConfigurationKeys.CONTAINER_FILES_LOCAL_KEY),
Optional.<Map<String, LocalResource>>absent(), appFilesDestDir, localFs);
}
}
private void addLibJars(Path srcLibJarDir, Optional<Map<String, LocalResource>> resourceMap, Path destDir,
FileSystem localFs) throws IOException {
// Missing classpath-jars will be a fatal error.
if (!localFs.exists(srcLibJarDir)) {
throw new IllegalStateException(String.format("The library directory[%s] are not being found, abort the application", srcLibJarDir));
}
FileStatus[] libJarFiles = localFs.listStatus(srcLibJarDir);
if (libJarFiles == null || libJarFiles.length == 0) {
return;
}
for (FileStatus libJarFile : libJarFiles) {
Path destFilePath = new Path(destDir, libJarFile.getPath().getName());
this.fs.copyFromLocalFile(libJarFile.getPath(), destFilePath);
if (resourceMap.isPresent()) {
YarnHelixUtils.addFileAsLocalResource(this.fs, destFilePath, LocalResourceType.FILE, resourceMap.get());
}
}
}
private void addAppJars(String jarFilePathList, Optional<Map<String, LocalResource>> resourceMap,
Path destDir, FileSystem localFs) throws IOException {
for (String jarFilePath : SPLITTER.split(jarFilePathList)) {
Path srcFilePath = new Path(jarFilePath);
Path destFilePath = new Path(destDir, srcFilePath.getName());
if (localFs.exists(srcFilePath)) {
this.fs.copyFromLocalFile(srcFilePath, destFilePath);
} else {
LOGGER.warn("The src destination " + srcFilePath + " doesn't exists");
}
if (resourceMap.isPresent()) {
YarnHelixUtils.addFileAsLocalResource(this.fs, destFilePath, LocalResourceType.FILE, resourceMap.get());
}
}
}
private void addAppLocalFiles(String localFilePathList, Optional<Map<String, LocalResource>> resourceMap,
Path destDir, FileSystem localFs) throws IOException {
for (String localFilePath : SPLITTER.split(localFilePathList)) {
Path srcFilePath = new Path(localFilePath);
Path destFilePath = new Path(destDir, srcFilePath.getName());
if (localFs.exists(srcFilePath)) {
this.fs.copyFromLocalFile(srcFilePath, destFilePath);
if (resourceMap.isPresent()) {
YarnHelixUtils.addFileAsLocalResource(this.fs, destFilePath, LocalResourceType.FILE, resourceMap.get());
}
} else {
LOGGER.warn(String.format("The request file %s doesn't exist", srcFilePath));
}
}
}
private void addJobConfPackage(String jobConfPackagePath, Path destDir, Map<String, LocalResource> resourceMap)
throws IOException {
Path srcFilePath = new Path(jobConfPackagePath);
Path destFilePath = new Path(destDir, srcFilePath.getName() + GobblinClusterConfigurationKeys.TAR_GZ_FILE_SUFFIX);
StreamUtils.tar(FileSystem.getLocal(this.yarnConfiguration), this.fs, srcFilePath, destFilePath);
YarnHelixUtils.addFileAsLocalResource(this.fs, destFilePath, LocalResourceType.ARCHIVE, resourceMap);
}
  /**
   * Builds the shell command that launches the ApplicationMaster JVM inside the AM container.
   *
   * <p>The -Xmx value is {@code memoryMbs * jvmMemoryXmxRatio - jvmMemoryOverheadMbs}; stdout and
   * stderr are redirected into the container log directory under the configured log file name.</p>
   *
   * @param applicationId the application id passed to the AM via --app_id
   * @param memoryMbs the AM container memory in MBs, used to size the JVM heap
   * @return the full launch command string
   */
  @VisibleForTesting
  protected String buildApplicationMasterCommand(String applicationId, int memoryMbs) {
    Class appMasterClass;
    try {
      String appMasterClassName = ConfigUtils.getString(
          config, GobblinYarnConfigurationKeys.APP_MASTER_CLASS, GobblinYarnConfigurationKeys.DEFAULT_APP_MASTER_CLASS);
      appMasterClass = Class.forName(appMasterClassName);
    } catch (ClassNotFoundException e) {
      throw new RuntimeException(e);
    }
    String logFileName = ConfigUtils.getString(config,
        GobblinYarnConfigurationKeys.APP_MASTER_LOG_FILE_NAME, appMasterClass.getSimpleName());
    return new StringBuilder()
        .append(ApplicationConstants.Environment.JAVA_HOME.$()).append("/bin/java")
        .append(" -Xmx").append((int) (memoryMbs * this.jvmMemoryXmxRatio) - this.jvmMemoryOverheadMbs).append("M")
        .append(" -D").append(GobblinYarnConfigurationKeys.JVM_USER_TIMEZONE_CONFIG).append("=").append(this.containerTimezone)
        .append(" -D").append(GobblinYarnConfigurationKeys.GOBBLIN_YARN_CONTAINER_LOG_DIR_NAME).append("=").append(ApplicationConstants.LOG_DIR_EXPANSION_VAR)
        .append(" -D").append(GobblinYarnConfigurationKeys.GOBBLIN_YARN_CONTAINER_LOG_FILE_NAME).append("=").append(logFileName).append(".").append(ApplicationConstants.STDOUT)
        .append(" ").append(JvmUtils.formatJvmArguments(this.appMasterJvmArgs))
        .append(" ").append(appMasterClass.getName())
        .append(" --").append(GobblinClusterConfigurationKeys.APPLICATION_NAME_OPTION_NAME)
        .append(" ").append(this.applicationName)
        .append(" --").append(GobblinClusterConfigurationKeys.APPLICATION_ID_OPTION_NAME)
        .append(" ").append(applicationId)
        .append(" 1>").append(ApplicationConstants.LOG_DIR_EXPANSION_VAR).append(File.separator).append(
            logFileName).append(".").append(ApplicationConstants.STDOUT)
        .append(" 2>").append(ApplicationConstants.LOG_DIR_EXPANSION_VAR).append(File.separator).append(
            logFileName).append(".").append(ApplicationConstants.STDERR)
        .toString();
  }
  /**
   * Collects the delegation tokens the AM container needs and attaches them to the launch context.
   *
   * <p>Tokens come from the current UGI, the hadoop token file (if HADOOP_TOKEN_FILE_LOCATION is
   * set; its values take precedence), and freshly-fetched filesystem tokens. Only tokens are
   * forwarded (no secrets), and RM delegation tokens for any RM other than the original one are
   * dropped so the current RM can renew its own token.</p>
   *
   * @param containerLaunchContext the AM container launch context to attach the tokens to
   * @throws IOException if reading or fetching tokens fails
   * @throws InterruptedException if token fetching is interrupted
   */
  private void setupSecurityTokens(ContainerLaunchContext containerLaunchContext) throws IOException, InterruptedException {
    LOGGER.info("setting up SecurityTokens for containerLaunchContext.");
    Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
    String renewerName = this.yarnConfiguration.get(YarnConfiguration.RM_PRINCIPAL);
    // Pass on the credentials from the hadoop token file if present.
    // The value in the token file takes precedence.
    if (System.getenv(HADOOP_TOKEN_FILE_LOCATION) != null) {
      LOGGER.info("HADOOP_TOKEN_FILE_LOCATION is set to {} reading tokens from it for containerLaunchContext.", System.getenv(HADOOP_TOKEN_FILE_LOCATION));
      Credentials tokenFileCredentials = Credentials.readTokenStorageFile(new File(System.getenv(HADOOP_TOKEN_FILE_LOCATION)),
          new Configuration());
      credentials.addAll(tokenFileCredentials);
      LOGGER.debug("All containerLaunchContext tokens: {} present in file {} ", credentials.getAllTokens(), System.getenv(HADOOP_TOKEN_FILE_LOCATION));
    }
    TokenUtils.getAllFSTokens(new Configuration(), credentials, renewerName,
        Optional.absent(), ConfigUtils.getStringList(this.config, TokenUtils.OTHER_NAMENODES));
    // Only pass token here and no secrets. (since there is no simple way to remove single token/ get secrets)
    // For RM token, only pass the RM token for the current RM, or the RM will fail to update the token
    Credentials finalCredentials = new Credentials();
    for (Token<? extends TokenIdentifier> token: credentials.getAllTokens()) {
      if (token.getKind().equals(new Text("RM_DELEGATION_TOKEN")) && !token.getService().equals(new Text(this.originalYarnRMAddress))) {
        continue;
      }
      finalCredentials.addToken(token.getService(), token);
    }
    // Serialize the filtered credentials into the launch context's token buffer.
    Closer closer = Closer.create();
    try {
      DataOutputBuffer dataOutputBuffer = closer.register(new DataOutputBuffer());
      finalCredentials.writeTokenStorageToStream(dataOutputBuffer);
      ByteBuffer fsTokens = ByteBuffer.wrap(dataOutputBuffer.getData(), 0, dataOutputBuffer.getLength());
      containerLaunchContext.setTokens(fsTokens);
      LOGGER.info("Setting containerLaunchContext with All credential tokens: " + finalCredentials.getAllTokens());
    } catch (Throwable t) {
      throw closer.rethrow(t);
    } finally {
      closer.close();
    }
  }
private LogCopier buildLogCopier(Config config, Path sinkLogDir, Path appWorkDir) throws IOException {
FileSystem rawLocalFs = this.closer.register(new RawLocalFileSystem());
rawLocalFs.initialize(URI.create(ConfigurationKeys.LOCAL_FS_URI), new Configuration());
LogCopier.Builder builder = LogCopier.newBuilder()
.useSrcFileSystem(this.fs)
.useDestFileSystem(rawLocalFs)
.readFrom(getHdfsLogDir(appWorkDir))
.writeTo(sinkLogDir)
.acceptsLogFileExtensions(ImmutableSet.of(ApplicationConstants.STDOUT, ApplicationConstants.STDERR));
return builder.build();
}
private Path getHdfsLogDir(Path appWorkDir) throws IOException {
Path logRootDir = new Path(appWorkDir, GobblinYarnConfigurationKeys.APP_LOGS_DIR_NAME);
if (!this.fs.exists(logRootDir)) {
this.fs.mkdirs(logRootDir);
}
return logRootDir;
}
private AbstractYarnAppSecurityManager buildSecurityManager() throws IOException {
Path tokenFilePath = new Path(this.fs.getHomeDirectory(), this.applicationName + Path.SEPARATOR +
GobblinYarnConfigurationKeys.TOKEN_FILE_NAME);
ClassAliasResolver<AbstractYarnAppSecurityManager> aliasResolver = new ClassAliasResolver<>(
AbstractYarnAppSecurityManager.class);
try {
return (AbstractYarnAppSecurityManager) GobblinConstructorUtils.invokeLongestConstructor(Class.forName(aliasResolver.resolve(
ConfigUtils.getString(config, GobblinYarnConfigurationKeys.SECURITY_MANAGER_CLASS, GobblinYarnConfigurationKeys.DEFAULT_SECURITY_MANAGER_CLASS))), this.config, this.helixManager, this.fs,
tokenFilePath);
} catch (ReflectiveOperationException e) {
throw new IOException(e);
}
}
  /**
   * Sends a Helix SHUTDOWN message asking the application master to shut down.
   * In managed mode the message is addressed to this application master's own participant instance;
   * otherwise it is addressed to the Helix controller. Logs an error when no recipient got the message.
   */
  @VisibleForTesting
  void sendShutdownRequest() {
    // Wildcard criteria fields; the recipient is narrowed below by instance type (and name, in managed mode).
    Criteria criteria = new Criteria();
    criteria.setInstanceName("%");
    criteria.setPartition("%");
    criteria.setPartitionState("%");
    criteria.setResource("%");
    if (this.isHelixClusterManaged) {
      //In the managed mode, the Gobblin Yarn Application Master connects to the Helix cluster in the Participant role.
      criteria.setRecipientInstanceType(InstanceType.PARTICIPANT);
      criteria.setInstanceName(this.helixInstanceName);
    } else {
      criteria.setRecipientInstanceType(InstanceType.CONTROLLER);
    }
    // Restrict delivery to the recipient's active Helix session.
    criteria.setSessionSpecific(true);
    // Random UUID suffix makes each shutdown message id unique.
    Message shutdownRequest = new Message(GobblinHelixConstants.SHUTDOWN_MESSAGE_TYPE,
        HelixMessageSubTypes.APPLICATION_MASTER_SHUTDOWN.toString().toLowerCase() + UUID.randomUUID().toString());
    shutdownRequest.setMsgSubType(HelixMessageSubTypes.APPLICATION_MASTER_SHUTDOWN.toString());
    shutdownRequest.setMsgState(Message.MessageState.NEW);
    shutdownRequest.setTgtSessionId("*");
    int messagesSent = this.messagingService.send(criteria, shutdownRequest);
    if (messagesSent == 0) {
      LOGGER.error(String.format("Failed to send the %s message to the controller", shutdownRequest.getMsgSubType()));
    }
  }
@VisibleForTesting
void cleanUpAppWorkDirectory(ApplicationId applicationId) throws IOException {
Path appWorkDir = GobblinClusterUtils.getAppWorkDirPathFromConfig(this.config, this.fs, this.applicationName, applicationId.toString());
if (this.fs.exists(appWorkDir)) {
LOGGER.info("Deleting application working directory " + appWorkDir);
this.fs.delete(appWorkDir, true);
}
}
private void sendEmailOnShutdown(Optional<ApplicationReport> applicationReport) {
String subject = String.format("Gobblin Yarn application %s completed", this.applicationName);
StringBuilder messageBuilder = new StringBuilder("Gobblin Yarn ApplicationReport:");
if (applicationReport.isPresent()) {
messageBuilder.append("\n");
messageBuilder.append("\tApplication ID: ").append(applicationReport.get().getApplicationId()).append("\n");
messageBuilder.append("\tApplication attempt ID: ")
.append(applicationReport.get().getCurrentApplicationAttemptId()).append("\n");
messageBuilder.append("\tFinal application status: ").append(applicationReport.get().getFinalApplicationStatus())
.append("\n");
messageBuilder.append("\tStart time: ").append(applicationReport.get().getStartTime()).append("\n");
messageBuilder.append("\tFinish time: ").append(applicationReport.get().getFinishTime()).append("\n");
if (!Strings.isNullOrEmpty(applicationReport.get().getDiagnostics())) {
messageBuilder.append("\tDiagnostics: ").append(applicationReport.get().getDiagnostics()).append("\n");
}
ApplicationResourceUsageReport resourceUsageReport = applicationReport.get().getApplicationResourceUsageReport();
if (resourceUsageReport != null) {
messageBuilder.append("\tUsed containers: ").append(resourceUsageReport.getNumUsedContainers()).append("\n");
Resource usedResource = resourceUsageReport.getUsedResources();
if (usedResource != null) {
messageBuilder.append("\tUsed memory (MBs): ").append(usedResource.getMemory()).append("\n");
messageBuilder.append("\tUsed vcores: ").append(usedResource.getVirtualCores()).append("\n");
}
}
} else {
messageBuilder.append(' ').append("Not available");
}
try {
EmailUtils.sendEmail(ConfigUtils.configToState(this.config), subject, messageBuilder.toString());
} catch (EmailException ee) {
LOGGER.error("Failed to send email notification on shutdown", ee);
}
}
private static Config addDynamicConfig(Config config) throws IOException {
Properties properties = ConfigUtils.configToProperties(config);
if (KafkaReporterUtils.isKafkaReportingEnabled(properties) && KafkaReporterUtils.isKafkaAvroSchemaRegistryEnabled(properties)) {
KafkaAvroSchemaRegistry registry = new KafkaAvroSchemaRegistry(properties);
return addMetricReportingDynamicConfig(config, registry);
} else {
return config;
}
}
/**
* Write the config to the file specified with the config key {@value GOBBLIN_YARN_CONFIG_OUTPUT_PATH} if it
* is configured.
* @param config the config to output
* @throws IOException
*/
@VisibleForTesting
static void outputConfigToFile(Config config)
throws IOException {
// If a file path is specified then write the Azkaban config to that path in HOCON format.
// This can be used to generate an application.conf file to pass to the yarn app master and containers.
if (config.hasPath(GOBBLIN_YARN_CONFIG_OUTPUT_PATH)) {
File configFile = new File(config.getString(GOBBLIN_YARN_CONFIG_OUTPUT_PATH));
File parentDir = configFile.getParentFile();
if (parentDir != null && !parentDir.exists()) {
if (!parentDir.mkdirs()) {
throw new IOException("Error creating directories for " + parentDir);
}
}
ConfigRenderOptions configRenderOptions = ConfigRenderOptions.defaults();
configRenderOptions = configRenderOptions.setComments(false);
configRenderOptions = configRenderOptions.setOriginComments(false);
configRenderOptions = configRenderOptions.setFormatted(true);
configRenderOptions = configRenderOptions.setJson(false);
String renderedConfig = config.root().render(configRenderOptions);
FileUtils.writeStringToFile(configFile, renderedConfig, Charsets.UTF_8);
}
}
  /**
   * A method that adds dynamic config related to Kafka-based metric reporting. In particular, if Kafka based metric
   * reporting is enabled and {@link KafkaAvroSchemaRegistry} is configured, this method registers the metric reporting
   * related schemas and adds the returned schema ids to the config to be used by metric reporters in {@link org.apache.gobblin.yarn.GobblinApplicationMaster}
   * and the {@link org.apache.gobblin.cluster.GobblinTaskRunner}s. The advantage of doing this is that the TaskRunners
   * do not have to initiate a connection with the schema registry server and reduces the chances of metric reporter
   * instantiation failures in the {@link org.apache.gobblin.cluster.GobblinTaskRunner}s.
   * @param config the base config to augment
   * @param registry the schema registry used to register the event/metric report schemas
   * @return the config with registered schema ids added, or the original config when neither events nor metrics
   *         reporting is enabled
   * @throws IOException if loading or registering the schemas fails
   */
  @VisibleForTesting
  static Config addMetricReportingDynamicConfig(Config config, KafkaAvroSchemaRegistry registry) throws IOException {
    Properties properties = ConfigUtils.configToProperties(config);
    if (KafkaReporterUtils.isEventsEnabled(properties)) {
      Schema schema = KafkaReporterUtils.getGobblinTrackingEventSchema();
      // NOTE(review): .get() assumes the events topic is always configured when events reporting is enabled;
      // confirm KafkaReporterUtils guarantees this, otherwise this throws on an absent Optional.
      String schemaId = registry.register(schema, KafkaReporterUtils.getEventsTopic(properties).get());
      LOGGER.info("Adding schemaId {} for GobblinTrackingEvent to the config", schemaId);
      config = config.withValue(ConfigurationKeys.METRICS_REPORTING_EVENTS_KAFKA_AVRO_SCHEMA_ID,
          ConfigValueFactory.fromAnyRef(schemaId));
    }
    if (KafkaReporterUtils.isMetricsEnabled(properties)) {
      Schema schema = KafkaReporterUtils.getMetricReportSchema();
      // Same assumption as above for the metrics topic.
      String schemaId = registry.register(schema, KafkaReporterUtils.getMetricsTopic(properties).get());
      LOGGER.info("Adding schemaId {} for MetricReport to the config", schemaId);
      config = config.withValue(ConfigurationKeys.METRICS_REPORTING_METRICS_KAFKA_AVRO_SCHEMA_ID,
          ConfigValueFactory.fromAnyRef(schemaId));
    }
    return config;
  }
public static void main(String[] args) throws Exception {
final GobblinYarnAppLauncher gobblinYarnAppLauncher =
new GobblinYarnAppLauncher(ConfigFactory.load(), new YarnConfiguration());
gobblinYarnAppLauncher.initializeYarnClients(ConfigFactory.load());
Runtime.getRuntime().addShutdownHook(new Thread() {
@Override
public void run() {
try {
gobblinYarnAppLauncher.stop();
} catch (IOException ioe) {
LOGGER.error("Failed to shutdown the " + GobblinYarnAppLauncher.class.getSimpleName(), ioe);
} catch (TimeoutException te) {
LOGGER.error("Timeout in stopping the service manager", te);
} finally {
if (gobblinYarnAppLauncher.emailNotificationOnShutdown) {
gobblinYarnAppLauncher.sendEmailOnShutdown(Optional.<ApplicationReport>absent());
}
}
}
});
gobblinYarnAppLauncher.launch();
}
}
| 1,918 |
0 | Create_ds/gobblin/gobblin-yarn/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-yarn/src/main/java/org/apache/gobblin/yarn/YarnAutoScalingManager.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.yarn;
import java.util.ArrayDeque;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Objects;
import java.util.PriorityQueue;
import java.util.Set;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import org.apache.commons.compress.utils.Sets;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.HelixManager;
import org.apache.helix.task.JobConfig;
import org.apache.helix.task.JobContext;
import org.apache.helix.task.JobDag;
import org.apache.helix.task.TargetState;
import org.apache.helix.task.TaskDriver;
import org.apache.helix.task.TaskPartitionState;
import org.apache.helix.task.TaskState;
import org.apache.helix.task.WorkflowConfig;
import org.apache.helix.task.WorkflowContext;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
import com.google.common.util.concurrent.AbstractIdleService;
import com.typesafe.config.Config;
import lombok.AllArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.cluster.GobblinClusterConfigurationKeys;
import org.apache.gobblin.cluster.HelixUtils;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.ExecutorsUtils;
import static org.apache.gobblin.yarn.GobblinYarnTaskRunner.HELIX_YARN_INSTANCE_NAME_PREFIX;
/**
 * The autoscaling manager is responsible for figuring out how many containers are required for the workload and
 * requesting the {@link YarnService} to request that many containers.
 *
 * It periodically inspects the Helix workflows, computes a per-tag container request bundle from the number of
 * task partitions, smooths the result through a fixed-size sliding window, and passes the windowed maximum to the
 * {@link YarnService}.
 */
@Slf4j
public class YarnAutoScalingManager extends AbstractIdleService {
  // Prefix under which all auto-scaling config keys live.
  private final String AUTO_SCALING_PREFIX = GobblinYarnConfigurationKeys.GOBBLIN_YARN_PREFIX + "autoScaling.";
  // Interval (seconds) between scaling inspections.
  private final String AUTO_SCALING_POLLING_INTERVAL_SECS =
      AUTO_SCALING_PREFIX + "pollingIntervalSeconds";
  // Warn when a Helix task has been attempted more than this many times.
  private static final int THRESHOLD_NUMBER_OF_ATTEMPTS_FOR_LOGGING = 20;
  private final int DEFAULT_AUTO_SCALING_POLLING_INTERVAL_SECS = 60;
  // Only one container will be requested for each N partitions of work
  private final String AUTO_SCALING_PARTITIONS_PER_CONTAINER = AUTO_SCALING_PREFIX + "partitionsPerContainer";
  private final int DEFAULT_AUTO_SCALING_PARTITIONS_PER_CONTAINER = 1;
  // Multiplier applied to the computed container count for over-provisioning.
  private final String AUTO_SCALING_CONTAINER_OVERPROVISION_FACTOR = AUTO_SCALING_PREFIX + "overProvisionFactor";
  private final double DEFAULT_AUTO_SCALING_CONTAINER_OVERPROVISION_FACTOR = 1.0;
  // The cluster level default tags for Helix instances
  private final String defaultHelixInstanceTags;
  private final int defaultContainerMemoryMbs;
  private final int defaultContainerCores;
  // Delay (seconds) before the first scaling inspection after startup.
  private final String AUTO_SCALING_INITIAL_DELAY = AUTO_SCALING_PREFIX + "initialDelay";
  private final int DEFAULT_AUTO_SCALING_INITIAL_DELAY_SECS = 60;
  // Size of the sliding window used to smooth the requested container counts.
  private final String AUTO_SCALING_WINDOW_SIZE = AUTO_SCALING_PREFIX + "windowSize";
  public final static int DEFAULT_MAX_CONTAINER_IDLE_TIME_BEFORE_SCALING_DOWN_MINUTES = 10;
  private final Config config;
  private final HelixManager helixManager;
  private final ScheduledExecutorService autoScalingExecutor;
  private final YarnService yarnService;
  private final int partitionsPerContainer;
  private final double overProvisionFactor;
  private final SlidingWindowReservoir slidingFixedSizeWindow;
  private static int maxIdleTimeInMinutesBeforeScalingDown = DEFAULT_MAX_CONTAINER_IDLE_TIME_BEFORE_SCALING_DOWN_MINUTES;
  // Partition states in which a task is NOT considered to be occupying its assigned participant.
  private static final HashSet<TaskPartitionState>
      UNUSUAL_HELIX_TASK_STATES = Sets.newHashSet(TaskPartitionState.ERROR, TaskPartitionState.DROPPED, TaskPartitionState.COMPLETED, TaskPartitionState.TIMED_OUT);

  /**
   * Builds the manager from the application master's config, its Helix job-cluster manager, and its
   * {@link YarnService}.
   *
   * @param appMaster the running {@link GobblinApplicationMaster}
   */
  public YarnAutoScalingManager(GobblinApplicationMaster appMaster) {
    this.config = appMaster.getConfig();
    this.helixManager = appMaster.getMultiManager().getJobClusterHelixManager();
    this.yarnService = appMaster.getYarnService();
    this.partitionsPerContainer = ConfigUtils.getInt(this.config, AUTO_SCALING_PARTITIONS_PER_CONTAINER,
        DEFAULT_AUTO_SCALING_PARTITIONS_PER_CONTAINER);
    Preconditions.checkArgument(this.partitionsPerContainer > 0,
        AUTO_SCALING_PARTITIONS_PER_CONTAINER + " needs to be greater than 0");
    this.overProvisionFactor = ConfigUtils.getDouble(this.config, AUTO_SCALING_CONTAINER_OVERPROVISION_FACTOR,
        DEFAULT_AUTO_SCALING_CONTAINER_OVERPROVISION_FACTOR);
    // With an explicit window size, use it; otherwise fall back to the reservoir's default window size.
    // In both cases there is effectively no upper bound on the request size (Integer.MAX_VALUE).
    this.slidingFixedSizeWindow = config.hasPath(AUTO_SCALING_WINDOW_SIZE)
        ? new SlidingWindowReservoir(config.getInt(AUTO_SCALING_WINDOW_SIZE), Integer.MAX_VALUE)
        : new SlidingWindowReservoir(Integer.MAX_VALUE);
    this.autoScalingExecutor = Executors.newSingleThreadScheduledExecutor(
        ExecutorsUtils.newThreadFactory(Optional.of(log), Optional.of("AutoScalingExecutor")));
    this.defaultHelixInstanceTags = ConfigUtils.getString(config,
        GobblinClusterConfigurationKeys.HELIX_INSTANCE_TAGS_KEY, GobblinClusterConfigurationKeys.HELIX_DEFAULT_TAG);
    this.defaultContainerMemoryMbs = config.getInt(GobblinYarnConfigurationKeys.CONTAINER_MEMORY_MBS_KEY);
    this.defaultContainerCores = config.getInt(GobblinYarnConfigurationKeys.CONTAINER_CORES_KEY);
  }

  /** Schedules the periodic {@link YarnAutoScalingRunnable} on the single-threaded executor. */
  @Override
  protected void startUp() {
    int scheduleInterval = ConfigUtils.getInt(this.config, AUTO_SCALING_POLLING_INTERVAL_SECS,
        DEFAULT_AUTO_SCALING_POLLING_INTERVAL_SECS);
    int initialDelay = ConfigUtils.getInt(this.config, AUTO_SCALING_INITIAL_DELAY,
        DEFAULT_AUTO_SCALING_INITIAL_DELAY_SECS);
    log.info("Starting the " + YarnAutoScalingManager.class.getSimpleName());
    log.info("Scheduling the auto scaling task with an interval of {} seconds", scheduleInterval);
    this.autoScalingExecutor.scheduleAtFixedRate(new YarnAutoScalingRunnable(new TaskDriver(this.helixManager),
        this.yarnService, this.partitionsPerContainer, this.overProvisionFactor,
        this.slidingFixedSizeWindow, this.helixManager.getHelixDataAccessor(), this.defaultHelixInstanceTags,
        this.defaultContainerMemoryMbs, this.defaultContainerCores),
        initialDelay, scheduleInterval, TimeUnit.SECONDS);
  }

  /** Stops the scaling executor. */
  @Override
  protected void shutDown() {
    log.info("Stopping the " + YarnAutoScalingManager.class.getSimpleName());
    ExecutorsUtils.shutdownExecutorService(this.autoScalingExecutor, Optional.of(log));
  }

  /**
   * A {@link Runnable} that figures out the number of containers required for the workload
   * and requests those containers.
   */
  @VisibleForTesting
  @AllArgsConstructor
  static class YarnAutoScalingRunnable implements Runnable {
    private final TaskDriver taskDriver;
    private final YarnService yarnService;
    private final int partitionsPerContainer;
    private final double overProvisionFactor;
    private final SlidingWindowReservoir slidingWindowReservoir;
    private final HelixDataAccessor helixDataAccessor;
    private final String defaultHelixInstanceTags;
    private final int defaultContainerMemoryMbs;
    private final int defaultContainerCores;

    /**
     * A static map that keep track of an idle instance and its latest beginning idle time.
     * If an instance is no longer idle when inspected, it will be dropped from this map.
     */
    private static final Map<String, Long> instanceIdleSince = new HashMap<>();

    @Override
    public void run() {
      // Suppress errors to avoid interrupting any scheduled executions of this Runnable
      try {
        runInternal();
      } catch (Throwable t) {
        log.warn("Suppressing error from YarnAutoScalingRunnable.run()", t);
      }
    }

    /**
     * Returns the participant currently assigned to the given partition, or {@code null} when the partition is
     * in one of the {@code UNUSUAL_HELIX_TASK_STATES} and is therefore not treated as occupying a participant.
     * Also warns when the partition's attempt count exceeds the logging threshold.
     */
    private String getInuseParticipantForHelixPartition(JobContext jobContext, int partition) {
      if (jobContext.getPartitionNumAttempts(partition) > THRESHOLD_NUMBER_OF_ATTEMPTS_FOR_LOGGING) {
        log.warn("Helix task {} has been retried for {} times, please check the config to see how we can handle this task better",
            jobContext.getTaskIdForPartition(partition), jobContext.getPartitionNumAttempts(partition));
      }
      if (!UNUSUAL_HELIX_TASK_STATES.contains(jobContext.getPartitionState(partition))) {
        return jobContext.getAssignedParticipant(partition);
      }
      // adding log here now for debugging
      //todo: if this happens frequently, we should reset to status to retriable or at least report the error earlier
      log.info("Helix task {} is in {} state which is unexpected, please watch out to see if this get recovered",
          jobContext.getTaskIdForPartition(partition), jobContext.getPartitionState(partition));
      return null;
    }

    /**
     * Iterate through the workflows configured in Helix to figure out the number of required partitions
     * and request the {@link YarnService} to scale to the desired number of containers.
     */
    @VisibleForTesting
    void runInternal() {
      Set<String> inUseInstances = new HashSet<>();
      YarnContainerRequestBundle yarnContainerRequestBundle = new YarnContainerRequestBundle();
      for (Map.Entry<String, WorkflowConfig> workFlowEntry : taskDriver.getWorkflows().entrySet()) {
        WorkflowContext workflowContext = taskDriver.getWorkflowContext(workFlowEntry.getKey());
        WorkflowConfig workflowConfig = workFlowEntry.getValue();
        // Only allocate for active workflows. Those marked for deletion are ignored but the existing containers won't
        // be released until maxIdleTimeInMinutesBeforeScalingDown
        if (workflowContext == null ||
            TargetState.DELETE.equals(workflowConfig.getTargetState()) ||
            !workflowContext.getWorkflowState().equals(TaskState.IN_PROGRESS)) {
          continue;
        }
        log.debug("Workflow name {} config {} context {}", workFlowEntry.getKey(), workFlowEntry.getValue(),
            workflowContext);
        JobDag jobDag = workflowConfig.getJobDag();
        Set<String> jobs = jobDag.getAllNodes();
        // sum up the number of partitions
        for (String jobName : jobs) {
          JobContext jobContext = taskDriver.getJobContext(jobName);
          JobConfig jobConfig = taskDriver.getJobConfig(jobName);
          // Start from the cluster-level default container resource; job-level settings may override below.
          Resource resource = Resource.newInstance(this.defaultContainerMemoryMbs, this.defaultContainerCores);
          int numPartitions = 0;
          String jobTag = defaultHelixInstanceTags;
          if (jobContext != null) {
            log.debug("JobContext {} num partitions {}", jobContext, jobContext.getPartitionSet().size());
            // Record every participant that still has a partition assigned, so it is not scaled down.
            inUseInstances.addAll(jobContext.getPartitionSet().stream()
                .map(i -> getInuseParticipantForHelixPartition(jobContext, i))
                .filter(Objects::nonNull).collect(Collectors.toSet()));
            numPartitions = jobContext.getPartitionSet().size();
            // Job level config for helix instance tags takes precedence over other tag configurations
            if (jobConfig != null) {
              if (!Strings.isNullOrEmpty(jobConfig.getInstanceGroupTag())) {
                jobTag = jobConfig.getInstanceGroupTag();
              }
              Map<String, String> jobCommandConfigMap = jobConfig.getJobCommandConfigMap();
              if(jobCommandConfigMap.containsKey(GobblinClusterConfigurationKeys.HELIX_JOB_CONTAINER_MEMORY_MBS)){
                resource.setMemory(Integer.parseInt(jobCommandConfigMap.get(GobblinClusterConfigurationKeys.HELIX_JOB_CONTAINER_MEMORY_MBS)));
              }
              if(jobCommandConfigMap.containsKey(GobblinClusterConfigurationKeys.HELIX_JOB_CONTAINER_CORES)){
                resource.setVirtualCores(Integer.parseInt(jobCommandConfigMap.get(GobblinClusterConfigurationKeys.HELIX_JOB_CONTAINER_CORES)));
              }
            }
          }
          // compute the container count as a ceiling of number of partitions divided by the number of containers
          // per partition. Scale the result by a constant overprovision factor.
          int containerCount = (int) Math.ceil(((double)numPartitions / this.partitionsPerContainer) * this.overProvisionFactor);
          yarnContainerRequestBundle.add(jobTag, containerCount, resource);
          log.info("jobName={}, jobTag={}, numPartitions={}, targetNumContainers={}",
              jobName, jobTag, numPartitions, containerCount);
        }
      }
      // Find all participants appearing in this cluster. Note that Helix instances can contain cluster-manager
      // and potentially replanner-instance.
      Set<String> allParticipants = HelixUtils.getParticipants(helixDataAccessor, HELIX_YARN_INSTANCE_NAME_PREFIX);
      // Find all joined participants not in-use for this round of inspection.
      // If idle time is beyond tolerance, mark the instance as unused by assigning timestamp as -1.
      for (String participant : allParticipants) {
        if (!inUseInstances.contains(participant)) {
          // Record when the instance was first seen idle; keep it "in use" until the idle tolerance expires.
          instanceIdleSince.putIfAbsent(participant, System.currentTimeMillis());
          if (!isInstanceUnused(participant)) {
            inUseInstances.add(participant);
          }
        } else {
          // A previously idle instance is now detected to be in use.
          // Remove this instance if existed in the tracking map.
          instanceIdleSince.remove(participant);
        }
      }
      slidingWindowReservoir.add(yarnContainerRequestBundle);
      log.debug("There are {} containers being requested in total, tag-count map {}, tag-resource map {}",
          yarnContainerRequestBundle.getTotalContainers(), yarnContainerRequestBundle.getHelixTagContainerCountMap(),
          yarnContainerRequestBundle.getHelixTagResourceMap());
      // Request the windowed maximum to avoid reacting to short-lived dips in the partition count.
      this.yarnService.requestTargetNumberOfContainers(slidingWindowReservoir.getMax(), inUseInstances);
    }

    @VisibleForTesting
    /**
     * Return true is the condition for tagging an instance as "unused" holds.
     * The condition, by default is that if an instance went back to
     * active (having partition running on it) within {@link #maxIdleTimeInMinutesBeforeScalingDown} minutes, we will
     * not tag that instance as "unused" and have that as the candidate for scaling down.
     */
    boolean isInstanceUnused(String participant){
      return System.currentTimeMillis() - instanceIdleSince.get(participant) >
          TimeUnit.MINUTES.toMillis(maxIdleTimeInMinutesBeforeScalingDown);
    }
  }

  /**
   * A FIFO queue with fixed size and returns maxValue among all elements within the queue in constant time.
   * This data structure prevents temporary fluctuation in the number of active helix partitions as the size of queue
   * grows and will be less sensitive when scaling down is actually required.
   *
   * The interface for this is implemented in a minimal-necessity manner to serve only as a sliding-sized-window
   * which captures max value. It is NOT built for general purpose.
   */
  static class SlidingWindowReservoir {
    // FIFO queue preserving insertion order; mirrored by the priority queue for O(1)-peek max lookup.
    private ArrayDeque<YarnContainerRequestBundle> fifoQueue;
    private PriorityQueue<YarnContainerRequestBundle> priorityQueue;
    // Queue Size
    private int maxSize;
    private static final int DEFAULT_MAX_SIZE = 10;
    // Upper-bound of value within the queue.
    private int upperBound;

    public SlidingWindowReservoir(int maxSize, int upperBound) {
      Preconditions.checkArgument(maxSize > 0, "maxSize has to be a value larger than 0");
      this.maxSize = maxSize;
      this.upperBound = upperBound;
      this.fifoQueue = new ArrayDeque<>(maxSize);
      // Max-heap by total container count (comparator reverses natural order).
      this.priorityQueue = new PriorityQueue<>(maxSize, new Comparator<YarnContainerRequestBundle>() {
        @Override
        public int compare(YarnContainerRequestBundle o1, YarnContainerRequestBundle o2) {
          Integer i2 = o2.getTotalContainers();
          return i2.compareTo(o1.getTotalContainers());
        }
      });
    }

    public SlidingWindowReservoir(int upperBound) {
      this(DEFAULT_MAX_SIZE, upperBound);
    }

    /**
     * Add element into data structure.
     * When a new element is larger than upperbound, reject the value since we may request too many Yarn containers.
     * When queue is full, evict head of FIFO-queue (In FIFO queue, elements are inserted from tail).
     */
    public void add(YarnContainerRequestBundle e) {
      if (e.getTotalContainers() > upperBound) {
        log.error(String.format("Request of getting %s containers seems to be excessive, rejected", e));
        return;
      }
      if (fifoQueue.size() == maxSize) {
        YarnContainerRequestBundle removedElement = fifoQueue.remove();
        priorityQueue.remove(removedElement);
      }
      // Both internal queues must stay in lock-step; a mismatch indicates a bug.
      if (fifoQueue.size() == priorityQueue.size()) {
        fifoQueue.add(e);
        priorityQueue.add(e);
      } else {
        throw new IllegalStateException("Queue has its internal data structure being inconsistent.");
      }
    }

    /**
     * Returns the bundle with the maximum total container count currently in the window.
     * If queue is empty, throw {@link IllegalStateException}.
     */
    public YarnContainerRequestBundle getMax() {
      if (priorityQueue.size() > 0) {
        return this.priorityQueue.peek();
      } else {
        throw new IllegalStateException("Queried before elements added into the queue.");
      }
    }
  }
}
| 1,919 |
0 | Create_ds/gobblin/gobblin-yarn/src/main/java/org/apache/gobblin/yarn | Create_ds/gobblin/gobblin-yarn/src/main/java/org/apache/gobblin/yarn/event/ContainerReleaseRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.yarn.event;
import java.util.Collection;
import org.apache.hadoop.yarn.api.records.Container;
/**
 * A type of event for container release requests to be used with a {@link com.google.common.eventbus.EventBus}.
 * This event is different than {@link ContainerShutdownRequest} because it releases the container through
 * the Resource Manager, while {@link ContainerShutdownRequest} shuts down a container through the
 * Node Manager
 */
public class ContainerReleaseRequest {
  // The containers to be released via the Resource Manager.
  private final Collection<Container> containers;

  public ContainerReleaseRequest(Collection<Container> containers) {
    this.containers = containers;
  }

  /**
   * Get the containers to release.
   *
   * @return the containers to release
   */
  public Collection<Container> getContainers() {
    return this.containers;
  }
}
| 1,920 |
0 | Create_ds/gobblin/gobblin-yarn/src/main/java/org/apache/gobblin/yarn | Create_ds/gobblin/gobblin-yarn/src/main/java/org/apache/gobblin/yarn/event/ApplicationReportArrivalEvent.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.yarn.event;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import com.google.common.base.Preconditions;
/**
 * An event signaling the arrival of a Yarn {@link ApplicationReport}, for use with a
 * {@link com.google.common.eventbus.EventBus}.
 *
 * @author Yinan Li
 */
public class ApplicationReportArrivalEvent {
  private final ApplicationReport applicationReport;

  public ApplicationReportArrivalEvent(ApplicationReport applicationReport) {
    // checkNotNull returns its (non-null) argument, so validation and assignment happen in one step.
    this.applicationReport = Preconditions.checkNotNull(applicationReport);
  }

  /**
   * Get the {@link ApplicationReport} carried by this event.
   *
   * @return the {@link ApplicationReport} carried by this event
   */
  public ApplicationReport getApplicationReport() {
    return this.applicationReport;
  }
}
| 1,921 |
0 | Create_ds/gobblin/gobblin-yarn/src/main/java/org/apache/gobblin/yarn | Create_ds/gobblin/gobblin-yarn/src/main/java/org/apache/gobblin/yarn/event/NewContainerRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.yarn.event;
import lombok.Getter;
import org.apache.hadoop.yarn.api.records.Container;
import com.google.common.base.Optional;
import org.apache.hadoop.yarn.api.records.Resource;
/**
* A type of events for new container requests to be used with a {@link com.google.common.eventbus.EventBus}.
*
* @author Yinan Li
*/
public class NewContainerRequest {
private final Optional<Container> replacedContainer;
@Getter
private final Optional<Resource> resource;
public NewContainerRequest(Optional<Container> replacedContainer) {
this.replacedContainer = replacedContainer;
this.resource = Optional.absent();
}
public NewContainerRequest(Optional<Container> replacedContainer, Optional<Resource> resource) {
this.replacedContainer = replacedContainer;
this.resource = resource;
}
/**
* Get (optionally) the {@link Container} that the requested new container is meant to replace.
*
* @return an {@link Optional} of the {@link Container} to be replaced
*/
public Optional<Container> getReplacedContainer() {
return replacedContainer;
}
} | 1,922 |
0 | Create_ds/gobblin/gobblin-yarn/src/main/java/org/apache/gobblin/yarn | Create_ds/gobblin/gobblin-yarn/src/main/java/org/apache/gobblin/yarn/event/ContainerShutdownRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.yarn.event;
import java.util.Collection;
import org.apache.hadoop.yarn.api.records.Container;
/**
* A type of events for container shutdown requests to be used with a {@link com.google.common.eventbus.EventBus}.
*
* @author Yinan Li
*/
public class ContainerShutdownRequest {
// Containers that the receiver of this event is asked to shut down.
private final Collection<Container> containers;
public ContainerShutdownRequest(Collection<Container> containers) {
this.containers = containers;
}
/**
* Get the {@link Container}s to be shut down.
*
* @return the collection of {@link Container}s to be shut down
*/
public Collection<Container> getContainers() {
return this.containers;
}
}
| 1,923 |
0 | Create_ds/gobblin/gobblin-yarn/src/main/java/org/apache/gobblin/yarn | Create_ds/gobblin/gobblin-yarn/src/main/java/org/apache/gobblin/yarn/event/DelegationTokenUpdatedEvent.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.yarn.event;
/**
* A dummy class representing a type of events fired when the delegation token has been updated by the controller.
*
* @author Yinan Li
*/
public class DelegationTokenUpdatedEvent {
// Intentionally empty: this is a marker event whose posting alone signals the token update.
}
| 1,924 |
0 | Create_ds/gobblin/gobblin-yarn/src/main/java/org/apache/gobblin/yarn | Create_ds/gobblin/gobblin-yarn/src/main/java/org/apache/gobblin/yarn/event/GetApplicationReportFailureEvent.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.yarn.event;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
/**
* A type of events posted when the {@link org.apache.gobblin.yarn.GobblinYarnAppLauncher} fails to get
* the {@link ApplicationReport} of the Gobblin Yarn application.
*
* @author Yinan Li
*/
public class GetApplicationReportFailureEvent {
// Root cause of the failed attempt to fetch the application report.
private final Throwable failureCause;
public GetApplicationReportFailureEvent(Throwable cause) {
this.failureCause = cause;
}
/**
* Get the {@link Throwable} that's the cause of the failure.
*
* @return the {@link Throwable} that's the cause of the failure
*/
public Throwable getCause() {
return failureCause;
}
}
| 1,925 |
0 | Create_ds/gobblin/gobblin-api/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/test/java/org/apache/gobblin/fork/ForkerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.fork;
import java.io.IOException;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import java.util.Queue;
import org.reactivestreams.Subscriber;
import org.reactivestreams.Subscription;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.collect.Lists;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.metadata.GlobalMetadata;
import org.apache.gobblin.records.RecordStreamWithMetadata;
import org.apache.gobblin.runtime.BasicTestControlMessage;
import org.apache.gobblin.stream.RecordEnvelope;
import org.apache.gobblin.stream.StreamEntity;
import io.reactivex.Flowable;
import lombok.Getter;
/**
* Unit tests for {@link Forker}: verifies that data records are routed only to the branches
* selected by the {@link ForkOperator} and that control messages fan out to all branches.
*/
public class ForkerTest {
@Test
public void test() throws Exception {
Forker forker = new Forker();
MyFlowable<StreamEntity<byte[]>> flowable = new MyFlowable<>();
RecordStreamWithMetadata<byte[], String> stream =
new RecordStreamWithMetadata<>(flowable, GlobalMetadata.<String>builder().schema("schema").build());
WorkUnitState workUnitState = new WorkUnitState();
workUnitState.setProp(ConfigurationKeys.FORK_BRANCHES_KEY, "3");
Forker.ForkedStream<byte[], String> forkedStream = forker.forkStream(stream, new MyForkOperator(), workUnitState);
Assert.assertEquals(forkedStream.getForkedStreams().size(), 3);
// Drain each forked branch into its own queue so per-branch routing can be asserted.
Queue<StreamEntity<byte[]>> output0 = new LinkedList<>();
forkedStream.getForkedStreams().get(0).getRecordStream().subscribe(output0::add);
Queue<StreamEntity<byte[]>> output1 = new LinkedList<>();
forkedStream.getForkedStreams().get(1).getRecordStream().subscribe(output1::add);
Queue<StreamEntity<byte[]>> output2 = new LinkedList<>();
forkedStream.getForkedStreams().get(2).getRecordStream().subscribe(output2::add);
// MyForkOperator routes a record to branch i iff byte i of the record is > 0.
// {1, 1, 1}: all three branches receive the record.
flowable._subscriber.onNext(new RecordEnvelope<>(new byte[]{1, 1, 1}));
Assert.assertTrue(output0.poll() instanceof RecordEnvelope);
Assert.assertTrue(output1.poll() instanceof RecordEnvelope);
Assert.assertTrue(output2.poll() instanceof RecordEnvelope);
// {1, 0, 0}: only branch 0 receives the record.
flowable._subscriber.onNext(new RecordEnvelope<>(new byte[]{1, 0, 0}));
Assert.assertTrue(output0.poll() instanceof RecordEnvelope);
Assert.assertNull(output1.poll());
Assert.assertNull(output2.poll());
// {0, 1, 1}: branches 1 and 2 receive the record, branch 0 does not.
flowable._subscriber.onNext(new RecordEnvelope<>(new byte[]{0, 1, 1}));
Assert.assertNull(output0.poll());
Assert.assertTrue(output1.poll() instanceof RecordEnvelope);
Assert.assertTrue(output2.poll() instanceof RecordEnvelope);
// Control messages are not routed: every branch must observe them.
flowable._subscriber.onNext(new BasicTestControlMessage<byte[]>("control"));
Assert.assertTrue(output0.poll() instanceof BasicTestControlMessage);
Assert.assertTrue(output1.poll() instanceof BasicTestControlMessage);
Assert.assertTrue(output2.poll() instanceof BasicTestControlMessage);
flowable._subscriber.onComplete();
}
/** Test operator: forks a byte[] record to branch i iff its i-th byte is positive. */
public static class MyForkOperator implements ForkOperator<String, byte[]> {
@Override
public void init(WorkUnitState workUnitState) throws Exception {
}
@Override
public int getBranches(WorkUnitState workUnitState) {
return workUnitState.getPropAsInt(ConfigurationKeys.FORK_BRANCHES_KEY);
}
@Override
public List<Boolean> forkSchema(WorkUnitState workUnitState, String input) {
// The schema goes to every branch.
return Collections.nCopies(getBranches(workUnitState), true);
}
@Override
public List<Boolean> forkDataRecord(WorkUnitState workUnitState, byte[] input) {
List<Boolean> output = Lists.newArrayList();
for (byte b : input) {
output.add(b > 0);
}
return output;
}
@Override
public void close() throws IOException {
}
}
/**
* Test {@link Flowable} that captures its subscriber so the test can push
* entities into the stream on demand via {@code _subscriber.onNext(...)}.
*/
public static class MyFlowable<T> extends Flowable<T> {
@Getter
Subscriber<? super T> _subscriber;
@Override
protected void subscribeActual(Subscriber<? super T> s) {
s.onSubscribe(new Subscription() {
@Override
public void request(long n) {
// do nothing
}
@Override
public void cancel() {
// do nothing
}
});
_subscriber = s;
}
}
}
| 1,926 |
0 | Create_ds/gobblin/gobblin-api/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/test/java/org/apache/gobblin/fork/CopyHelperTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.fork;
import java.io.ByteArrayInputStream;
import java.util.Random;
import org.testng.Assert;
import org.testng.annotations.Test;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
* Unit tests for {@link CopyHelper}: covers {@link Copyable} delegation, byte-array deep
* copies, pass-through of immutable values, and rejection of unsupported types.
*/
@Test
public class CopyHelperTest {
private static final Random RANDOM = new Random();
@Test
public void testCopyable()
throws CopyNotSupportedException {
// A Copyable must be recognized and its copy() delegated to on every call.
Copyable c = mock(Copyable.class);
Assert.assertTrue(CopyHelper.isCopyable(c));
Object copy = new Object();
when(c.copy()).thenReturn(copy);
Assert.assertEquals(CopyHelper.copy(c), copy);
Assert.assertEquals(CopyHelper.copy(c), copy);
}
@Test
public void testByteArray()
throws CopyNotSupportedException {
int length = RANDOM.nextInt(200);
byte[] bytes = new byte[length];
RANDOM.nextBytes(bytes);
Assert.assertTrue(CopyHelper.isCopyable(bytes));
// byte[] is mutable, so a copy must be a distinct array with equal contents.
byte[] copiedBytes = (byte[]) CopyHelper.copy(bytes);
Assert.assertTrue(copiedBytes != bytes, "Copied bytes reference should be different for every copy after that");
Assert.assertEquals(copiedBytes, bytes, "Copied bytes value should be the same");
}
@Test
public void testImmutables()
throws CopyNotSupportedException {
// null and immutable values (Integer, String, Long) may be returned as-is.
Object nullObject = null;
Integer integer = RANDOM.nextInt(200);
byte[] bytes = new byte[integer];
RANDOM.nextBytes(bytes);
String string = new String(bytes);
Long longNum = RANDOM.nextLong();
Object[] immutables = new Object[]{nullObject, integer, string, longNum};
for (Object immutable : immutables) {
Assert.assertTrue(CopyHelper.isCopyable(immutable));
// Copy twice to verify repeated copies are stable.
for (int i=0; i < 2; ++i) {
Object copiedObject = CopyHelper.copy(immutable);
Assert.assertEquals(copiedObject, immutable);
}
}
}
@Test
public void testUnsupportedTypes()
throws CopyNotSupportedException {
Object foobar = mock(ByteArrayInputStream.class);
try {
CopyHelper.copy(foobar);
Assert.fail("Should throw exception");
} catch (CopyNotSupportedException cnse) {
// Expected: unsupported types must be rejected with this exact exception type.
} catch (Exception e) {
Assert.fail("Should not throw any exception other than CopyNotSupportedException. ", e);
}
}
}
| 1,927 |
0 | Create_ds/gobblin/gobblin-api/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/test/java/org/apache/gobblin/configuration/StateTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.configuration;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import org.apache.commons.lang.exception.ExceptionUtils;
import org.testng.Assert;
import org.testng.annotations.Test;
public class StateTest {
private LinkedBlockingQueue<Throwable> exceptions = new LinkedBlockingQueue<>();
/**
* This test checks that state object is thread safe. We run 2 threads, one of them continuously adds and removes key/values
* to the state and other thread calls getProperties.
*/
@Test
public void testGetPropertiesThreadSafety() {
try {
final State state = new State();
// Seed the state with 1000 entries before starting the writer/reader race.
for (int i = 0; i < 1000; i++) {
state.setProp(Integer.toString(i), Integer.toString(i));
}
ExecutorService executorService = Executors.newFixedThreadPool(2);
// Writer thread: 1000 passes of remove-then-re-add over every key.
executorService.submit(new Runnable() {
@Override
public void run() {
for (int j = 0; j < 1000; j++) {
for (int i = 0; i < 1000; i++) {
try {
state.removeProp(Integer.toString(i));
state.setProp(Integer.toString(i), Integer.toString(i));
} catch (Throwable t) {
// Capture rather than throw: exceptions in pool threads would otherwise be lost.
exceptions.add(t);
}
}
}
}
});
// Reader thread: concurrently snapshots the properties while the writer mutates.
executorService.submit(new Runnable() {
@Override
public void run() {
for (int i = 0; i < 1000; i++) {
try {
state.getProperties().get(Integer.toString(i));
} catch (Throwable t) {
exceptions.add(t);
}
}
}
});
executorService.shutdown();
if (!executorService.awaitTermination(100, TimeUnit.SECONDS)) {
throw new RuntimeException("Executor service still running");
}
} catch (Throwable t) {
Assert.fail("Concurrency test failed", t);
}
// Any exception recorded by either thread (e.g. ConcurrentModificationException) fails the test.
if (!this.exceptions.isEmpty()) {
Assert.fail("Concurrency test failed with first exception: " + ExceptionUtils.getFullStackTrace(this.exceptions.poll()));
}
}
/**
* Verifies that {@link State#removePropsWithPrefix(String)} removes every property whose
* name starts with the given prefix.
*/
@Test
public void testRemovePropsWithPrefix() {
final State state = new State();
final String prefix = "prefix";
for (int i = 0; i < 10; i++) {
// Use the declared prefix variable instead of repeating the literal.
state.setProp(prefix + "." + i, i);
}
// assertEquals reports actual vs. expected on failure, unlike assertTrue(x == y).
Assert.assertEquals(state.getPropertyNames().size(), 10);
state.removePropsWithPrefix(prefix);
Assert.assertEquals(state.getPropertyNames().size(), 0);
}
} | 1,928 |
0 | Create_ds/gobblin/gobblin-api/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/test/java/org/apache/gobblin/password/PasswordManagerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.password;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import java.io.File;
import java.io.IOException;
import java.nio.charset.Charset;
import java.util.UUID;
import org.jasypt.exceptions.EncryptionOperationNotPossibleException;
import org.jasypt.util.text.BasicTextEncryptor;
import org.jasypt.util.text.StrongTextEncryptor;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.io.Files;
@Test(enabled=false, groups = {"disabledOnCI"} )
public class PasswordManagerTest {
@Test (enabled=false)
public void testReadNormalPassword() throws IOException {
String password = UUID.randomUUID().toString();
String masterPassword = UUID.randomUUID().toString();
File masterPwdFile = getMasterPwdFile(masterPassword);
State state = new State();
state.setProp(ConfigurationKeys.ENCRYPT_KEY_LOC, masterPwdFile.toString());
// A value not wrapped in "ENC(...)" is treated as plain text and returned unchanged.
Assert.assertEquals(PasswordManager.getInstance(state).readPassword(password), password);
masterPwdFile.delete();
}
@Test (enabled=false)
public void testMasterPasswordNotExist() throws IOException {
String password = "ENC(" + UUID.randomUUID().toString() + ")";
State state = new State();
// Point the key location at a random (nonexistent) path.
state.setProp(ConfigurationKeys.ENCRYPT_KEY_LOC, UUID.randomUUID());
// With no readable master password the "ENC(...)" value cannot be decrypted and is returned as-is.
Assert.assertEquals(PasswordManager.getInstance(state).readPassword(password), password);
}
@Test (enabled=false)
public void testBasicEncryptionAndDecryption() throws IOException {
String password = UUID.randomUUID().toString();
String masterPassword = UUID.randomUUID().toString();
File masterPwdFile = getMasterPwdFile(masterPassword);
State state = new State();
state.setProp(ConfigurationKeys.ENCRYPT_KEY_LOC, masterPwdFile.toString());
// Encrypt with the same master password PasswordManager will read from the file.
BasicTextEncryptor encryptor = new BasicTextEncryptor();
encryptor.setPassword(masterPassword);
String encrypted = encryptor.encrypt(password);
encrypted = "ENC(" + encrypted + ")";
// Round trip: PasswordManager must decrypt the "ENC(...)"-wrapped ciphertext.
String decrypted = PasswordManager.getInstance(state).readPassword(encrypted);
Assert.assertEquals(decrypted, password);
}
@Test (enabled=false)
public void testStrongEncryptionAndDecryption() throws IOException {
String password = UUID.randomUUID().toString();
String masterPassword = UUID.randomUUID().toString();
File masterPwdFile = getMasterPwdFile(masterPassword);
State state = new State();
state.setProp(ConfigurationKeys.ENCRYPT_KEY_LOC, masterPwdFile.toString());
// Switch PasswordManager to the strong (JCE unlimited-strength) encryptor.
state.setProp(ConfigurationKeys.ENCRYPT_USE_STRONG_ENCRYPTOR, true);
try{
StrongTextEncryptor encryptor = new StrongTextEncryptor();
encryptor.setPassword(masterPassword);
String encrypted = encryptor.encrypt(password);
encrypted = "ENC(" + encrypted + ")";
String decrypted = PasswordManager.getInstance(state).readPassword(encrypted);
Assert.assertEquals(decrypted, password);
}
catch (EncryptionOperationNotPossibleException e) {
//no strong encryption is supported
// (deliberate best-effort: JREs without the JCE unlimited-strength policy skip this check)
}
}
/**
* Verifies master-password rotation: PasswordManager should fall back through the last
* NUMBER_OF_ENCRYPT_KEYS master-password files (base, .1, .2, ...) and reject anything older.
*/
@Test (enabled=false)
public void testMultipleMasterPasswords() throws IOException {
String password = UUID.randomUUID().toString();
String masterPassword = UUID.randomUUID().toString();
String masterPassword1 = UUID.randomUUID().toString();
String masterPassword2 = UUID.randomUUID().toString();
String masterPassword3 = UUID.randomUUID().toString();
File masterPasswordFile = File.createTempFile("masterPassword", null);
// Older master passwords live in suffixed sibling files: <file>.1, <file>.2, <file>.3.
Files.write(masterPassword, masterPasswordFile, Charset.defaultCharset());
Files.write(masterPassword1, new File(masterPasswordFile.toString()+".1"), Charset.defaultCharset());
Files.write(masterPassword2, new File(masterPasswordFile.toString()+".2"), Charset.defaultCharset());
Files.write(masterPassword3, new File(masterPasswordFile.toString()+".3"), Charset.defaultCharset());
State state = new State();
BasicTextEncryptor encryptor = new BasicTextEncryptor();
state.setProp(ConfigurationKeys.ENCRYPT_KEY_LOC, masterPasswordFile.toString());
state.setProp(ConfigurationKeys.NUMBER_OF_ENCRYPT_KEYS, 3);
PasswordManager passwordManager = PasswordManager.getInstance(state);
// Test current master password
encryptor.setPassword(masterPassword);
String encrypted = "ENC(" + encryptor.encrypt(password) + ")";
String decrypted = passwordManager.readPassword(encrypted);
Assert.assertEquals(decrypted, password);
// Test last master password using same passwordManager
encryptor = new BasicTextEncryptor();
encryptor.setPassword(masterPassword1);
encrypted = "ENC(" + encryptor.encrypt(password) + ")";
decrypted = passwordManager.readPassword(encrypted);
Assert.assertEquals(decrypted, password);
// Test second last master password using same passwordManager
encryptor = new BasicTextEncryptor();
encryptor.setPassword(masterPassword2);
encrypted = "ENC(" + encryptor.encrypt(password) + ")";
decrypted = passwordManager.readPassword(encrypted);
Assert.assertEquals(decrypted, password);
// Test third last master password using same passwordManager
// This one is not accepted because ConfigurationKeys.NUMBER_OF_ENCRYPT_KEYS = 3
encryptor = new BasicTextEncryptor();
encryptor.setPassword(masterPassword3);
encrypted = "ENC(" + encryptor.encrypt(password) + ")";
try {
passwordManager.readPassword(encrypted);
} catch (RuntimeException e) {
Assert.assertTrue(e.getMessage().startsWith( "Failed to decrypt password"));
return;
}
Assert.fail("Password Manager decrypted too old password.");
}
/**
* Verifies that decryption fails when a value was encrypted with an older master password
* whose rotation file (<file>.1) was never written.
*/
@Test (enabled=false)
public void testMultipleMasterPasswordsWithoutPasswordFiles() throws IOException {
String password = UUID.randomUUID().toString();
String masterPassword = UUID.randomUUID().toString();
String masterPassword1 = UUID.randomUUID().toString();
File masterPasswordFile = File.createTempFile("masterPassword", null);
// Only the current master password file is written; no ".1" fallback file exists.
Files.write(masterPassword, masterPasswordFile, Charset.defaultCharset());
State state = new State();
BasicTextEncryptor encryptor = new BasicTextEncryptor();
state.setProp(ConfigurationKeys.ENCRYPT_KEY_LOC, masterPasswordFile.toString());
PasswordManager passwordManager = PasswordManager.getInstance(state);
// Test current master password
encryptor.setPassword(masterPassword);
String encrypted = "ENC(" + encryptor.encrypt(password) + ")";
String decrypted = passwordManager.readPassword(encrypted);
Assert.assertEquals(decrypted, password);
// Test last master password using same passwordManager
// This should throw FileNotFoundException as file for masterPassword1 is not created.
encryptor = new BasicTextEncryptor();
encryptor.setPassword(masterPassword1);
encrypted = "ENC(" + encryptor.encrypt(password) + ")";
try {
passwordManager.readPassword(encrypted);
} catch (RuntimeException e) {
Assert.assertTrue(e.getMessage().startsWith("Failed to decrypt password"));
return;
}
Assert.fail("Password Manager decrypted password without correct master password.");
}
/**
* Write the given master password to a fresh temp file.
*
* @param masterPwd the master password to persist
* @return the temp file containing {@code masterPwd}
* @throws IOException if the temp file cannot be created or written
*/
public static File getMasterPwdFile(String masterPwd) throws IOException {
File masterPwdFile = File.createTempFile("masterPassword", null);
// Several callers never delete the returned file; clean it up on JVM exit.
masterPwdFile.deleteOnExit();
Files.write(masterPwd, masterPwdFile, Charset.defaultCharset());
return masterPwdFile;
}
} | 1,929 |
0 | Create_ds/gobblin/gobblin-api/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/test/java/org/apache/gobblin/password/EncryptedPasswordAuthenticatorTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.password;
import java.io.File;
import java.net.Authenticator;
import java.util.Properties;
import java.util.UUID;
import org.jasypt.util.text.BasicTextEncryptor;
import org.testng.Assert;
import org.testng.annotations.Test;
import org.apache.gobblin.configuration.ConfigurationKeys;
/**
* Unit tests for {@link EncryptedPasswordAuthenticator}: verifies that credentials are read
* from the supplied {@link Properties}, with "ENC(...)"-wrapped passwords decrypted via
* {@link PasswordManager} and plain passwords used as-is.
*
* <p>NOTE(review): this test class extends {@link Authenticator}, which appears unnecessary
* for the assertions below — confirm before removing.</p>
*/
public class EncryptedPasswordAuthenticatorTest extends Authenticator {
@Test (enabled=false)
public void testEncryptedPassword() throws Exception {
String password = UUID.randomUUID().toString();
String masterPassword = UUID.randomUUID().toString();
File masterPwdFile = PasswordManagerTest.getMasterPwdFile(masterPassword);
// Encrypt the password with the same master password the authenticator will load.
BasicTextEncryptor encryptor = new BasicTextEncryptor();
encryptor.setPassword(masterPassword);
Properties props = new Properties();
props.put(EncryptedPasswordAuthenticator.AUTHENTICATOR_USERNAME, "testuser");
props.put(EncryptedPasswordAuthenticator.AUTHENTICATOR_PASSWORD, "ENC(" + encryptor.encrypt(password) + ")");
props.put(ConfigurationKeys.ENCRYPT_KEY_LOC, masterPwdFile.toString());
EncryptedPasswordAuthenticator authenticator = new EncryptedPasswordAuthenticator(props);
Assert.assertEquals(authenticator.getPasswordAuthentication().getUserName(), "testuser");
Assert.assertEquals(authenticator.getPasswordAuthentication().getPassword(), password.toCharArray());
masterPwdFile.delete();
}
@Test
public void testUnencryptedPassword() {
// A password without the "ENC(...)" wrapper must be passed through untouched.
String password = UUID.randomUUID().toString();
Properties props = new Properties();
props.put(EncryptedPasswordAuthenticator.AUTHENTICATOR_USERNAME, "testuser");
props.put(EncryptedPasswordAuthenticator.AUTHENTICATOR_PASSWORD, password);
EncryptedPasswordAuthenticator authenticator = new EncryptedPasswordAuthenticator(props);
Assert.assertEquals(authenticator.getPasswordAuthentication().getUserName(), "testuser");
Assert.assertEquals(authenticator.getPasswordAuthentication().getPassword(), password.toCharArray());
}
}
| 1,930 |
0 | Create_ds/gobblin/gobblin-api/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/test/java/org/apache/gobblin/compat/TextSerializerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compat;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.io.Text;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.base.Strings;
import org.apache.gobblin.compat.hadoop.TextSerializer;
/**
* Unit tests for {@link TextSerializer}: checks wire compatibility with Hadoop's
* {@link Text} serialization in both directions.
*/
public class TextSerializerTest {
// Includes a very long string so the length prefix needs more than one vint byte.
private static final String[] textsToSerialize = new String[]{"abracadabra", Strings.repeat("longString", 128000)};
@Test
public void testSerialize()
throws IOException {
// Use our serializer, verify Hadoop deserializer can read it back
for (String textToSerialize : textsToSerialize) {
ByteArrayOutputStream bOs = new ByteArrayOutputStream();
DataOutputStream dataOutputStream = new DataOutputStream(bOs);
TextSerializer.writeStringAsText(dataOutputStream, textToSerialize);
dataOutputStream.close();
ByteArrayInputStream bIn = new ByteArrayInputStream(bOs.toByteArray());
DataInputStream dataInputStream = new DataInputStream(bIn);
Text hadoopText = new Text();
hadoopText.readFields(dataInputStream);
Assert.assertEquals(hadoopText.toString(), textToSerialize);
}
}
@Test
public void testDeserialize() throws IOException {
// Use Hadoop's serializer, verify our deserializer can read the string back
for (String textToSerialize : textsToSerialize) {
ByteArrayOutputStream bOs = new ByteArrayOutputStream();
DataOutputStream dataOutputStream = new DataOutputStream(bOs);
Text hadoopText = new Text();
hadoopText.set(textToSerialize);
hadoopText.write(dataOutputStream);
dataOutputStream.close();
ByteArrayInputStream bIn = new ByteArrayInputStream(bOs.toByteArray());
DataInputStream dataInputStream = new DataInputStream(bIn);
String deserializedString = TextSerializer.readTextAsString(dataInputStream);
Assert.assertEquals(deserializedString, textToSerialize);
}
}
}
| 1,931 |
0 | Create_ds/gobblin/gobblin-api/src/test/java/org/apache/gobblin/broker | Create_ds/gobblin/gobblin-api/src/test/java/org/apache/gobblin/broker/gobblin_scopes/GobblinScopesTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.broker.gobblin_scopes;
import org.testng.Assert;
import org.testng.annotations.Test;
public class GobblinScopesTest {
@Test
public void test() {
// Generic scope instances are fine for scope types without a dedicated subclass.
GobblinScopeInstance containerScope = new GobblinScopeInstance(GobblinScopeTypes.CONTAINER, "myContainer");
Assert.assertEquals(containerScope.getScopeId(), "myContainer");
// JOB and TASK scopes carry extra identity and have dedicated subclasses.
JobScopeInstance jobScope = new JobScopeInstance("myJob", "job123");
Assert.assertEquals(jobScope.getJobId(), "job123");
Assert.assertEquals(jobScope.getJobName(), "myJob");
TaskScopeInstance taskScope = new TaskScopeInstance("myTask");
Assert.assertEquals(taskScope.getTaskId(), "myTask");
// Constructing a generic instance for JOB/TASK must be rejected.
try {
new GobblinScopeInstance(GobblinScopeTypes.JOB, "myJob");
Assert.fail();
} catch (IllegalArgumentException iae) {
// expected because should use JobScopeInstance
}
try {
new GobblinScopeInstance(GobblinScopeTypes.TASK, "myJob");
Assert.fail();
} catch (IllegalArgumentException iae) {
// expected because should use TaskScopeInstance
}
}
} | 1,932 |
0 | Create_ds/gobblin/gobblin-api/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/test/java/org/apache/gobblin/converter/ConverterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter;
import java.util.List;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.collect.Lists;
import org.apache.gobblin.ack.BasicAckableForTesting;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.metadata.GlobalMetadata;
import org.apache.gobblin.records.RecordStreamWithMetadata;
import org.apache.gobblin.stream.ControlMessage;
import org.apache.gobblin.stream.RecordEnvelope;
import org.apache.gobblin.stream.StreamEntity;
import io.reactivex.Flowable;
/**
 * Unit tests for {@link Converter} record-stream processing.
 *
 * <p>Covers ack propagation for filtered (0-output), single-output, and multi-output
 * conversions, pass-through of {@link ControlMessage}s, and attaching per-record
 * metadata by overriding {@link Converter#convertRecordEnvelope}.
 */
public class ConverterTest {

  @Test
  public void testEmptyOutputIterable() throws Exception {
    MyConverter converter = new MyConverter();
    BasicAckableForTesting ackable = new BasicAckableForTesting();
    // MyConverter emits `inputRecord` copies of 0, so an input of 0 is filtered out.
    RecordStreamWithMetadata<Integer, String> stream =
        new RecordStreamWithMetadata<>(Flowable.just(new RecordEnvelope<>(0)),
            GlobalMetadata.<String>builder().schema("schema").build()).mapRecords(r -> {
          r.addCallBack(ackable);
          return r;
        });
    List<StreamEntity<Integer>> outputRecords = Lists.newArrayList();
    converter.processStream(stream, new WorkUnitState()).getRecordStream().subscribe(outputRecords::add);
    Assert.assertEquals(outputRecords.size(), 0);
    Assert.assertEquals(ackable.acked, 1); // record got filtered, acked immediately
  }

  @Test
  public void testSingleOutputIterable() throws Exception {
    MyConverter converter = new MyConverter();
    BasicAckableForTesting ackable = new BasicAckableForTesting();
    RecordStreamWithMetadata<Integer, String> stream =
        new RecordStreamWithMetadata<>(Flowable.just(new RecordEnvelope<>(1)),
            GlobalMetadata.<String>builder().schema("schema").build()).mapRecords(r -> {
          r.addCallBack(ackable);
          return r;
        });
    List<StreamEntity<Integer>> outputRecords = Lists.newArrayList();
    converter.processStream(stream, new WorkUnitState()).getRecordStream().subscribe(outputRecords::add);
    Assert.assertEquals(outputRecords.size(), 1);
    Assert.assertEquals(ackable.acked, 0); // output record has not been acked
    outputRecords.get(0).ack();
    Assert.assertEquals(ackable.acked, 1); // output record acked
  }

  @Test
  public void testMultiOutputIterable() throws Exception {
    MyConverter converter = new MyConverter();
    BasicAckableForTesting ackable = new BasicAckableForTesting();
    // An input of 2 fans out into two derived records; the input must only be acked
    // once both of them are.
    RecordStreamWithMetadata<Integer, String> stream =
        new RecordStreamWithMetadata<>(Flowable.just(new RecordEnvelope<>(2)),
            GlobalMetadata.<String>builder().schema("schema").build()).mapRecords(r -> {
          r.addCallBack(ackable);
          return r;
        });
    List<StreamEntity<Integer>> outputRecords = Lists.newArrayList();
    converter.processStream(stream, new WorkUnitState()).getRecordStream().subscribe(outputRecords::add);
    Assert.assertEquals(outputRecords.size(), 2);
    Assert.assertEquals(ackable.acked, 0); // output record has not been acked
    outputRecords.get(0).ack();
    Assert.assertEquals(ackable.acked, 0); // only one output record acked, still need to ack another derived record
    outputRecords.get(1).ack();
    Assert.assertEquals(ackable.acked, 1); // all output records acked
  }

  @Test
  public void testMixedStream() throws Exception {
    MyConverter converter = new MyConverter();
    BasicAckableForTesting ackable = new BasicAckableForTesting();
    RecordStreamWithMetadata<Integer, String> stream =
        new RecordStreamWithMetadata<>(Flowable.just(new RecordEnvelope<>(1), new MyControlMessage<>()),
            GlobalMetadata.<String>builder().schema("schema").build()).mapRecords(r -> {
          r.addCallBack(ackable);
          return r;
        });
    List<StreamEntity<Integer>> outputRecords = Lists.newArrayList();
    converter.processStream(stream, new WorkUnitState()).getRecordStream().subscribe(outputRecords::add);
    Assert.assertEquals(outputRecords.size(), 2);
    // Integer.valueOf instead of the deprecated `new Integer(...)` constructor.
    Assert.assertEquals(((RecordEnvelope<Integer>) outputRecords.get(0)).getRecord(), Integer.valueOf(0));
    // Control messages must flow through the converter unchanged.
    Assert.assertTrue(outputRecords.get(1) instanceof MyControlMessage);
  }

  @Test
  public void testAddRecordMetadata() throws Exception {
    MyConverter2 converter = new MyConverter2();
    BasicAckableForTesting ackable = new BasicAckableForTesting();
    RecordStreamWithMetadata<Integer, String> stream =
        new RecordStreamWithMetadata<>(Flowable.just(new RecordEnvelope<>(1), new RecordEnvelope<>(2)),
            GlobalMetadata.<String>builder().schema("schema").build()).mapRecords(r -> {
          r.addCallBack(ackable);
          return r;
        });
    List<StreamEntity<Integer>> outputRecords = Lists.newArrayList();
    converter.processStream(stream, new WorkUnitState()).getRecordStream().subscribe(outputRecords::add);
    Assert.assertEquals(outputRecords.size(), 2);
    // Each output is input + 1 and carries the original value as record metadata.
    RecordEnvelope<Integer> envelope = (RecordEnvelope<Integer>) outputRecords.get(0);
    Assert.assertEquals(envelope.getRecord().intValue(), 2);
    Assert.assertEquals(((Integer) envelope.getRecordMetadata("original_value")).intValue(), 1);
    envelope = (RecordEnvelope<Integer>) outputRecords.get(1);
    Assert.assertEquals(envelope.getRecord().intValue(), 3);
    Assert.assertEquals(((Integer) envelope.getRecordMetadata("original_value")).intValue(), 2);
  }

  /** Converts an input integer {@code n} into {@code n} output records, each with value 0. */
  public static class MyConverter extends Converter<String, String, Integer, Integer> {
    @Override
    public String convertSchema(String inputSchema, WorkUnitState workUnit) throws SchemaConversionException {
      return inputSchema;
    }

    @Override
    public Iterable<Integer> convertRecord(String outputSchema, Integer inputRecord, WorkUnitState workUnit)
        throws DataConversionException {
      List<Integer> output = Lists.newArrayList();
      for (int i = 0; i < inputRecord; i++) {
        output.add(0);
      }
      return output;
    }
  }

  /**
   * Overrides {@link Converter#convertRecordEnvelope} (rather than convertRecord) to emit
   * input + 1 while attaching the original value as record metadata.
   */
  public static class MyConverter2 extends Converter<String, String, Integer, Integer> {
    @Override
    public String convertSchema(String inputSchema, WorkUnitState workUnit) throws SchemaConversionException {
      return inputSchema;
    }

    @Override
    public Iterable<Integer> convertRecord(String outputSchema, Integer inputRecord, WorkUnitState workUnit)
        throws DataConversionException {
      // Never invoked: convertRecordEnvelope below bypasses record-level conversion.
      throw new UnsupportedOperationException("not supported");
    }

    @Override
    public Flowable<RecordEnvelope<Integer>> convertRecordEnvelope(String outputSchema,
        RecordEnvelope<Integer> inputRecordEnvelope, WorkUnitState workUnitState)
        throws DataConversionException {
      // Autoboxing suffices; explicit Integer.valueOf(...) wrapping is redundant.
      RecordEnvelope<Integer> outputRecord = inputRecordEnvelope.withRecord(inputRecordEnvelope.getRecord() + 1);
      outputRecord.setRecordMetadata("original_value", inputRecordEnvelope.getRecord());
      return Flowable.just(outputRecord);
    }
  }

  /** Minimal control message used to verify pass-through behavior. */
  public static class MyControlMessage<D> extends ControlMessage<D> {
    @Override
    protected StreamEntity<D> buildClone() {
      return new MyControlMessage<>();
    }
  }
}
| 1,933 |
0 | Create_ds/gobblin/gobblin-api/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/test/java/org/apache/gobblin/util/ClassAliasResolverTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.linkedin.gobblin.TestAlias.DummyAliasCom;
import com.linkedin.gobblin.TestAlias.IDummyAliasCom;
import org.apache.gobblin.annotation.Alias;
/**
 * Tests for {@link ClassAliasResolver} alias-to-class resolution.
 */
@Test(groups = { "gobblin.api.util"})
public class ClassAliasResolverTest {

  @Test
  public void testResolve() {
    ClassAliasResolver<IDummyAliasTest> resolver = new ClassAliasResolver<>(IDummyAliasTest.class);
    // A registered alias resolves to the fully-qualified class name.
    Assert.assertEquals(resolver.resolve("abc"), DummyAliasTest.class.getName());
    // Resolve returns the passed string unchanged when no alias mapping exists.
    Assert.assertEquals(resolver.resolve("abcd"), "abcd");
  }

  @Test
  public void testResolveComGobblinPackage() {
    ClassAliasResolver<IDummyAliasCom> resolver = new ClassAliasResolver<>(IDummyAliasCom.class);
    // Aliases declared under the legacy com.linkedin.gobblin package are also discovered.
    Assert.assertEquals(resolver.resolve("com.alias"), DummyAliasCom.class.getName());
    // Resolve returns the passed string unchanged when no alias mapping exists.
    Assert.assertEquals(resolver.resolve("abcd"), "abcd");
  }

  @Test
  public void testResolveClass() throws Exception {
    ClassAliasResolver<IDummyAliasTest> resolver = new ClassAliasResolver<>(IDummyAliasTest.class);
    Assert.assertEquals(resolver.resolveClass("abc"), DummyAliasTest.class);
    Assert.assertEquals(resolver.resolveClass(DummyAliasTest.class.getName()), DummyAliasTest.class);
    // "def" is aliased only by a class that does not implement IDummyAliasTest.
    assertResolveClassFails(resolver, "def");
    // AnotherAliasClass does not implement IDummyAliasTest either.
    assertResolveClassFails(resolver, AnotherAliasClass.class.getName());
  }

  /** Asserts that resolving {@code alias} throws {@link ClassNotFoundException}. */
  private void assertResolveClassFails(ClassAliasResolver<IDummyAliasTest> resolver, String alias) {
    try {
      resolver.resolveClass(alias);
      Assert.fail();
    } catch (ClassNotFoundException expected) {
      // expected: alias does not map to a compatible, resolvable class
    }
  }

  @Alias(value="abc")
  public static class DummyAliasTest implements IDummyAliasTest{}

  public static interface IDummyAliasTest {}

  @Alias(value="abc")
  public static class AnotherAliasClass {}

  @Alias(value="def")
  public static class YetAnotherAliasClass {}
}
| 1,934 |
0 | Create_ds/gobblin/gobblin-api/src/test/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-api/src/test/java/org/apache/gobblin/util/test/TestClass.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.test;
import lombok.EqualsAndHashCode;
import java.util.List;
import java.util.Map;
import java.util.Random;
import com.google.common.base.Optional;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
/**
 * Serialization test fixture for {@link org.apache.gobblin.util.io.GsonInterfaceAdapterTest}:
 * exercises primitives, collections, {@link Optional}s, and polymorphic fields.
 */
@EqualsAndHashCode(callSuper = true)
public class TestClass extends BaseClass {

  private static final Random random = new Random();

  private final int intValue = random.nextInt();
  private final long longValue = random.nextLong();
  // Fixed: use nextDouble() so the field actually holds a fractional double value
  // (was nextLong(), which only ever produced whole-number doubles).
  private final double doubleValue = random.nextDouble();
  private final Map<String, Integer> map = createRandomMap();
  private final List<String> list = createRandomList();
  private final Optional<String> present = Optional.of(Integer.toString(random.nextInt()));
  // Initialized present; tests overwrite this with Optional.absent() to exercise the absent case.
  public Optional<String> absent = Optional.of("a");
  private final Optional<BaseClass> optionalObject = Optional.of(new BaseClass());
  private final BaseClass polymorphic = new ExtendedClass();
  private final Optional<? extends BaseClass> polymorphicOptional = Optional.of(new ExtendedClass());

  /** Builds a map with a random number (0-4) of random entries. */
  private static Map<String, Integer> createRandomMap() {
    Map<String, Integer> map = Maps.newHashMap();
    int size = random.nextInt(5);
    for (int i = 0; i < size; i++) {
      map.put("value" + random.nextInt(), random.nextInt());
    }
    return map;
  }

  /** Builds a list with a random number (0-4) of random entries. */
  private static List<String> createRandomList() {
    List<String> list = Lists.newArrayList();
    int size = random.nextInt(5);
    for (int i = 0; i < size; i++) {
      list.add("value" + random.nextInt());
    }
    return list;
  }
}
| 1,935 |
0 | Create_ds/gobblin/gobblin-api/src/test/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-api/src/test/java/org/apache/gobblin/util/test/ExtendedClass.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.test;
import lombok.EqualsAndHashCode;
import java.util.Random;
/**
 * Polymorphic subclass fixture for {@link org.apache.gobblin.util.io.GsonInterfaceAdapterTest}.
 */
@EqualsAndHashCode(callSuper = true)
public class ExtendedClass extends BaseClass {

  private final int otherField;

  public ExtendedClass() {
    // Random payload so two instances are (almost certainly) unequal.
    this.otherField = new Random().nextInt();
  }
}
| 1,936 |
0 | Create_ds/gobblin/gobblin-api/src/test/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-api/src/test/java/org/apache/gobblin/util/test/BaseClass.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.test;
import lombok.EqualsAndHashCode;
import java.util.Random;
/**
 * Base serialization fixture for {@link org.apache.gobblin.util.io.GsonInterfaceAdapterTest}.
 */
@EqualsAndHashCode
public class BaseClass {

  // Random payload initialized at construction; implicit default constructor suffices.
  private String field = Integer.toString(new Random().nextInt());
}
| 1,937 |
0 | Create_ds/gobblin/gobblin-api/src/test/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-api/src/test/java/org/apache/gobblin/util/io/GsonInterfaceAdapterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.io;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.base.Optional;
import com.google.gson.Gson;
import org.apache.gobblin.util.test.BaseClass;
import org.apache.gobblin.util.test.TestClass;
/**
 * Round-trip serialization test for {@link GsonInterfaceAdapter}.
 */
public class GsonInterfaceAdapterTest {

  @Test(groups = {"gobblin.util.io"})
  public void test() {
    Gson gson = GsonInterfaceAdapter.getGson(Object.class);

    // Force the absent-Optional branch; a fresh instance differs because fields are random.
    TestClass original = new TestClass();
    original.absent = Optional.absent();
    Assert.assertNotEquals(original, new TestClass());

    // Round-tripping through JSON via the base type must reproduce the full subclass state.
    String json = gson.toJson(original);
    BaseClass roundTripped = gson.fromJson(json, BaseClass.class);
    Assert.assertEquals(original, roundTripped);
  }
}
| 1,938 |
0 | Create_ds/gobblin/gobblin-api/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/test/java/org/apache/gobblin/dataset/DatasetResolverTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.dataset;
import org.testng.Assert;
import org.testng.annotations.Test;
import org.apache.gobblin.configuration.State;
/**
 * Tests that a {@link DatasetResolver} can act as a generic {@link DescriptorResolver},
 * resolving dataset and partition descriptors and returning null for unsupported types.
 */
public class DatasetResolverTest {

  @Test
  public void testAsDescriptorResolver() {
    DescriptorResolver resolver = new TestDatasetResolver();
    State state = new State();

    // Test dataset descriptor: resolution renames the dataset.
    DatasetDescriptor dataset = new DatasetDescriptor("hdfs", "/data/tracking/PageViewEvent");
    Descriptor descriptor = resolver.resolve(dataset, state);
    Assert.assertTrue(descriptor.getClass().isAssignableFrom(DatasetDescriptor.class));
    Assert.assertEquals(descriptor.getName(), TestDatasetResolver.DATASET_NAME);

    // Test partition descriptor: resolution applies to the partition's dataset.
    String partitionName = "hourly/2018/08/14/18";
    PartitionDescriptor partition = new PartitionDescriptor(partitionName, dataset);
    descriptor = resolver.resolve(partition, state);
    Assert.assertTrue(descriptor.getClass().isAssignableFrom(DatasetDescriptor.class));
    Assert.assertEquals(descriptor.getName(), TestDatasetResolver.DATASET_NAME);

    // Unsupported descriptor types resolve to null (assertNull is clearer than assertEquals(..., null)).
    Assert.assertNull(resolver.resolve(new MockDescriptor("test"), state));
  }

  /** Resolver that renames every dataset to {@link #DATASET_NAME} while preserving its metadata. */
  private static class TestDatasetResolver implements DatasetResolver {
    static final String DATASET_NAME = "TEST";

    @Override
    public DatasetDescriptor resolve(DatasetDescriptor raw, State state) {
      DatasetDescriptor descriptor = new DatasetDescriptor(raw.getPlatform(), DATASET_NAME);
      raw.getMetadata().forEach(descriptor::addMetadata);
      return descriptor;
    }
  }

  /** Descriptor subtype the resolver does not know how to handle. */
  private static class MockDescriptor extends Descriptor {
    public MockDescriptor(String name) {
      super(name);
    }
  }
}
| 1,939 |
0 | Create_ds/gobblin/gobblin-api/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/test/java/org/apache/gobblin/dataset/DescriptorTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.dataset;
import java.net.URI;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
 * Tests copy and data-map round-trip semantics of {@link DatasetDescriptor}
 * and {@link PartitionDescriptor}.
 */
public class DescriptorTest {

  @Test
  public void testDatasetDescriptor() {
    DatasetDescriptor original = new DatasetDescriptor("hdfs", "/data/tracking/PageViewEvent");
    original.addMetadata("fsUri", "hdfs://test.com:2018");
    // This constructor overload supplies no storage URL.
    Assert.assertNull(original.getStorageUrl());

    // A copy must agree on every field, equality, hash code, and data-map round trip.
    DatasetDescriptor duplicate = original.copy();
    Assert.assertEquals(duplicate.getName(), original.getName());
    Assert.assertEquals(duplicate.getPlatform(), original.getPlatform());
    Assert.assertEquals(duplicate.getMetadata(), original.getMetadata());
    Assert.assertEquals(original, duplicate);
    Assert.assertEquals(original.hashCode(), duplicate.hashCode());
    //noinspection deprecation
    Assert.assertEquals(original, DatasetDescriptor.fromDataMap(duplicate.toDataMap()));
  }

  @Test
  public void testDatasetDescriptorWithCluster() {
    DatasetDescriptor original =
        new DatasetDescriptor("hdfs", URI.create("hdfs://hadoop.test"), "/data/tracking/PageViewEvent");
    original.addMetadata("fsUri", "hdfs://test.com:2018");
    // The storage URL supplied at construction is preserved.
    Assert.assertEquals(original.getStorageUrl().toString(), "hdfs://hadoop.test");

    DatasetDescriptor duplicate = original.copy();
    Assert.assertEquals(duplicate.getName(), original.getName());
    Assert.assertEquals(duplicate.getPlatform(), original.getPlatform());
    Assert.assertEquals(duplicate.getMetadata(), original.getMetadata());
    Assert.assertEquals(duplicate.getStorageUrl(), original.getStorageUrl());
    Assert.assertEquals(original, duplicate);
    Assert.assertEquals(original.hashCode(), duplicate.hashCode());
    //noinspection deprecation
    Assert.assertEquals(original, DatasetDescriptor.fromDataMap(duplicate.toDataMap()));
  }

  @Test
  public void testPartitionDescriptor() {
    DatasetDescriptor sourceDataset = new DatasetDescriptor("hdfs", "/data/tracking/PageViewEvent");
    String partitionName = "hourly/2018/08/14/18";
    PartitionDescriptor partition = new PartitionDescriptor(partitionName, sourceDataset);

    // Copying with a new dataset keeps the partition name but swaps the parent dataset.
    DatasetDescriptor hiveDataset = new DatasetDescriptor("hive", "/data/tracking/PageViewEvent");
    Descriptor rebased = partition.copyWithNewDataset(hiveDataset);
    Assert.assertEquals(rebased.getName(), partition.getName());
    Assert.assertEquals(((PartitionDescriptor) rebased).getDataset(), hiveDataset);

    // A plain copy preserves both the name and the original dataset.
    PartitionDescriptor duplicate = partition.copy();
    Assert.assertEquals(duplicate.getDataset(), sourceDataset);
    Assert.assertEquals(duplicate.getName(), partitionName);
  }
}
| 1,940 |
0 | Create_ds/gobblin/gobblin-api/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/test/java/org/apache/gobblin/stream/StreamEntityTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.stream;
import java.util.Random;
import org.testng.Assert;
import org.testng.annotations.Test;
import org.apache.gobblin.ack.BasicAckableForTesting;
/**
 * Tests for {@link StreamEntity} ack/nack callback propagation and clone lifecycle
 * semantics (single clones via getSingleClone, multi-clones via forkCloner).
 */
public class StreamEntityTest {

  @Test
  public void testAcking() {
    MyStreamEntity streamEntity = new MyStreamEntity();
    // Register two independent callbacks; a single ack must reach both exactly once.
    BasicAckableForTesting ackable1 = new BasicAckableForTesting();
    streamEntity.addCallBack(ackable1);
    BasicAckableForTesting ackable2 = new BasicAckableForTesting();
    streamEntity.addCallBack(ackable2);
    streamEntity.ack();
    Assert.assertEquals(ackable1.acked, 1);
    Assert.assertEquals(ackable2.acked, 1);
  }

  @Test
  public void testSingleCloning() {
    MyStreamEntity streamEntity = new MyStreamEntity();
    BasicAckableForTesting ackable = new BasicAckableForTesting();
    streamEntity.addCallBack(ackable);
    // The single clone carries the original's payload (id).
    MyStreamEntity clone = (MyStreamEntity) streamEntity.getSingleClone();
    Assert.assertEquals(clone.id, streamEntity.id);
    try {
      streamEntity.getSingleClone();
      Assert.fail();
    } catch (IllegalStateException ise) {
      // expected, cannot clone twice using getSingleClone
    }
    try {
      streamEntity.forkCloner();
      Assert.fail();
    } catch (IllegalStateException ise) {
      // expected, cannot clone twice
    }
    // Acking the clone propagates to the original's callback.
    clone.ack();
    Assert.assertEquals(ackable.acked, 1);
  }

  @Test
  public void testMultipleClones() {
    MyStreamEntity streamEntity = new MyStreamEntity();
    BasicAckableForTesting ackable = new BasicAckableForTesting();
    streamEntity.addCallBack(ackable);
    StreamEntity.ForkCloner cloner = streamEntity.forkCloner();
    MyStreamEntity clone1 = (MyStreamEntity) cloner.getClone();
    Assert.assertEquals(clone1.id, streamEntity.id);
    clone1.ack();
    // cloner has not been closed, so ack does not spread
    Assert.assertEquals(ackable.acked, 0);
    MyStreamEntity clone2 = (MyStreamEntity) cloner.getClone();
    Assert.assertEquals(clone2.id, streamEntity.id);
    // close cloner to spread acks
    cloner.close();
    // ack second clone, should ack original (clone1 was already acked above)
    clone2.ack();
    Assert.assertEquals(ackable.acked, 1);
    try {
      cloner.getClone();
      Assert.fail();
    } catch (IllegalStateException ise) {
      // cloner has been closed, cannot create new clones
    }
    try {
      streamEntity.getSingleClone();
      Assert.fail();
    } catch (IllegalStateException ise) {
      // expected, cannot clone twice
    }
    try {
      streamEntity.forkCloner();
      Assert.fail();
    } catch (IllegalStateException ise) {
      // expected, cannot clone twice
    }
  }

  @Test
  public void testNack() {
    MyStreamEntity streamEntity = new MyStreamEntity();
    BasicAckableForTesting ackable = new BasicAckableForTesting();
    streamEntity.addCallBack(ackable);
    // A nack must propagate both the count and the throwable to the callback.
    streamEntity.nack(new RuntimeException());
    Assert.assertEquals(ackable.nacked, 1);
    Assert.assertTrue(ackable.throwable instanceof RuntimeException);
  }

  /** Test entity whose payload is a random int id, copied verbatim by buildClone. */
  public static class MyStreamEntity extends StreamEntity<String> {
    private final int id;

    public MyStreamEntity() {
      this.id = new Random().nextInt();
    }

    public MyStreamEntity(int id) {
      this.id = id;
    }

    @Override
    protected StreamEntity<String> buildClone() {
      return new MyStreamEntity(this.id);
    }
  }
}
| 1,941 |
0 | Create_ds/gobblin/gobblin-api/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/test/java/org/apache/gobblin/stream/RecordEnvelopeTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.stream;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.gson.JsonElement;
import org.apache.gobblin.ack.BasicAckableForTesting;
import org.apache.gobblin.fork.CopyNotSupportedException;
import org.apache.gobblin.fork.Copyable;
import org.apache.gobblin.source.extractor.CheckpointableWatermark;
import org.apache.gobblin.source.extractor.ComparableWatermark;
import org.apache.gobblin.source.extractor.Watermark;
import lombok.AllArgsConstructor;
import lombok.Getter;
public class RecordEnvelopeTest {
@Test
public void testDerivedRecordCreation() {
  BasicAckableForTesting ackable = new BasicAckableForTesting();
  RecordEnvelope<String> parent = new RecordEnvelope<>("test", new MyWatermark(101));
  parent.addCallBack(ackable);

  // A derived record inherits the parent's watermark; acking it acks the parent.
  RecordEnvelope<String> derived = parent.withRecord("testDerived");
  derived.ack();
  Assert.assertEquals(ackable.acked, 1);
  Assert.assertEquals(((MyWatermark) derived.getWatermark()).getId(), 101);
}
@Test
public void testMultipleDerivedRecords() {
  BasicAckableForTesting ackable = new BasicAckableForTesting();
  RecordEnvelope<String> parent = new RecordEnvelope<>("test", new MyWatermark(105));
  parent.addCallBack(ackable);

  RecordEnvelope<String>.ForkRecordBuilder<String> builder = parent.forkRecordBuilder();

  RecordEnvelope<String> child1 = builder.childRecord("testDerived");
  child1.ack();
  // The parent is not acked while the builder remains open.
  Assert.assertEquals(ackable.acked, 0);
  Assert.assertEquals(((MyWatermark) child1.getWatermark()).getId(), 105);

  RecordEnvelope<String> child2 = builder.childRecord("testDerived2");
  child2.ack();
  // Closing the builder releases the parent ack once all children are acked.
  builder.close();
  Assert.assertEquals(ackable.acked, 1);
  Assert.assertEquals(((MyWatermark) child2.getWatermark()).getId(), 105);
}
@Test
public void testClone() {
  BasicAckableForTesting ackable = new BasicAckableForTesting();
  RecordEnvelope<CopyableRecord> original = new RecordEnvelope<>(new CopyableRecord(), new MyWatermark(110));
  original.addCallBack(ackable);

  // A single clone copies both the payload and the watermark.
  RecordEnvelope<CopyableRecord> clone = (RecordEnvelope<CopyableRecord>) original.getSingleClone();
  Assert.assertEquals(original.getRecord().id, clone.getRecord().id);
  Assert.assertEquals(((MyWatermark) clone.getWatermark()).getId(), 110);

  clone.ack();
  Assert.assertEquals(ackable.acked, 1);

  try {
    original.getSingleClone();
    Assert.fail();
  } catch (IllegalStateException expected) {
    // expected, cannot clone more than once using getSingleClone
  }
}
@Test
public void testMultipleClones() {
  BasicAckableForTesting ackable = new BasicAckableForTesting();
  RecordEnvelope<CopyableRecord> original = new RecordEnvelope<>(new CopyableRecord(), new MyWatermark(110));
  original.addCallBack(ackable);

  // Fork two clones; both must carry the original payload and watermark.
  StreamEntity.ForkCloner cloner = original.forkCloner();
  RecordEnvelope<CopyableRecord> first = (RecordEnvelope<CopyableRecord>) cloner.getClone();
  RecordEnvelope<CopyableRecord> second = (RecordEnvelope<CopyableRecord>) cloner.getClone();
  cloner.close();

  Assert.assertEquals(original.getRecord().id, first.getRecord().id);
  Assert.assertEquals(((MyWatermark) first.getWatermark()).getId(), 110);
  Assert.assertEquals(original.getRecord().id, second.getRecord().id);
  Assert.assertEquals(((MyWatermark) second.getWatermark()).getId(), 110);

  // The original is acked only after every clone has been acked.
  first.ack();
  Assert.assertEquals(ackable.acked, 0);
  second.ack();
  Assert.assertEquals(ackable.acked, 1);
}
@Test
public void testRecordMetadata() {
  // Metadata set on an envelope is readable back under the same key.
  RecordEnvelope<String> envelope = new RecordEnvelope<>("test", new MyWatermark(110));
  envelope.setRecordMetadata("meta1", "value1");
  Assert.assertEquals(envelope.getRecordMetadata("meta1"), "value1");
}
@Test
public void testRecordMetadataWithDerivedRecords() {
  RecordEnvelope<String> record = new RecordEnvelope<>("test", new MyWatermark(110));
  record.setRecordMetadata("meta1", "value1");
  List list = new ArrayList();
  list.add("item1");
  record.setRecordMetadata("list", list);
  // Derive three records: two via the fork builder and one via withRecord on a child.
  RecordEnvelope<String>.ForkRecordBuilder<String> forkRecordBuilder = record.forkRecordBuilder();
  RecordEnvelope<String> derived1 = forkRecordBuilder.childRecord("testDerived1");
  RecordEnvelope<String> derived2 = forkRecordBuilder.childRecord("testDerived2");
  RecordEnvelope<String> derived3 = derived2.withRecord("testDerived3");
  forkRecordBuilder.close();
  // Metadata written after derivation must stay local to the envelope it was set on.
  record.setRecordMetadata("meta2", "value2");
  derived1.setRecordMetadata("meta3", "value3");
  derived2.setRecordMetadata("meta4", "value4");
  derived3.setRecordMetadata("meta5", "value5");
  // clones should inherit the metadata at the time of the copy
  Assert.assertEquals(record.getRecordMetadata("meta1"), "value1");
  Assert.assertEquals(derived1.getRecordMetadata("meta1"), "value1");
  Assert.assertEquals(derived2.getRecordMetadata("meta1"), "value1");
  Assert.assertEquals(derived3.getRecordMetadata("meta1"), "value1");
  // new entries should not affect any copies
  Assert.assertEquals(record.getRecordMetadata("meta2"), "value2");
  Assert.assertNull(derived1.getRecordMetadata("meta2"));
  Assert.assertNull(derived2.getRecordMetadata("meta2"));
  Assert.assertNull(derived3.getRecordMetadata("meta2"));
  Assert.assertEquals(derived1.getRecordMetadata("meta3"), "value3");
  Assert.assertNull(record.getRecordMetadata("meta3"));
  Assert.assertNull(derived2.getRecordMetadata("meta3"));
  Assert.assertNull(derived3.getRecordMetadata("meta3"));
  Assert.assertEquals(derived2.getRecordMetadata("meta4"), "value4");
  Assert.assertNull(derived1.getRecordMetadata("meta4"));
  Assert.assertNull(derived3.getRecordMetadata("meta4"));
  Assert.assertNull(record.getRecordMetadata("meta4"));
  Assert.assertEquals(derived3.getRecordMetadata("meta5"), "value5");
  Assert.assertNull(derived1.getRecordMetadata("meta5"));
  Assert.assertNull(derived2.getRecordMetadata("meta5"));
  Assert.assertNull(record.getRecordMetadata("meta5"));
  // no deep copy for values: the copied metadata maps share the same value objects,
  // so mutating the list is visible from every envelope
  ((List)record.getRecordMetadata("list")).add("item2");
  Assert.assertEquals(record.getRecordMetadata("list"), list);
  Assert.assertEquals(derived1.getRecordMetadata("list"), list);
  Assert.assertEquals(derived2.getRecordMetadata("list"), list);
  Assert.assertEquals(derived3.getRecordMetadata("list"), list);
}
@Test
public void testRecordMetadataWithClones() {
RecordEnvelope<String> record = new RecordEnvelope<>("test", new MyWatermark(110));
record.setRecordMetadata("meta1", "value1");
List list = new ArrayList();
list.add("item1");
record.setRecordMetadata("list", list);
StreamEntity.ForkCloner cloner = record.forkCloner();
RecordEnvelope<String> copy1 = (RecordEnvelope<String>) cloner.getClone();
RecordEnvelope<String> copy2 = (RecordEnvelope<String>) cloner.getClone();
cloner.close();
RecordEnvelope<String> copy3 = (RecordEnvelope<String>)record.buildClone();
record.setRecordMetadata("meta2", "value2");
copy1.setRecordMetadata("meta3", "value3");
copy2.setRecordMetadata("meta4", "value4");
copy3.setRecordMetadata("meta5", "value5");
// clones should inherit the metadata at the time of the copy
Assert.assertEquals(record.getRecordMetadata("meta1"), "value1");
Assert.assertEquals(copy1.getRecordMetadata("meta1"), "value1");
Assert.assertEquals(copy2.getRecordMetadata("meta1"), "value1");
Assert.assertEquals(copy3.getRecordMetadata("meta1"), "value1");
// new entries should not affect any copies
Assert.assertEquals(record.getRecordMetadata("meta2"), "value2");
Assert.assertNull(copy1.getRecordMetadata("meta2"));
Assert.assertNull(copy2.getRecordMetadata("meta2"));
Assert.assertNull(copy3.getRecordMetadata("meta2"));
Assert.assertEquals(copy1.getRecordMetadata("meta3"), "value3");
Assert.assertNull(record.getRecordMetadata("meta3"));
Assert.assertNull(copy2.getRecordMetadata("meta3"));
Assert.assertNull(copy3.getRecordMetadata("meta3"));
Assert.assertEquals(copy2.getRecordMetadata("meta4"), "value4");
Assert.assertNull(copy1.getRecordMetadata("meta4"));
Assert.assertNull(copy3.getRecordMetadata("meta4"));
Assert.assertNull(record.getRecordMetadata("meta4"));
Assert.assertEquals(copy3.getRecordMetadata("meta5"), "value5");
Assert.assertNull(copy1.getRecordMetadata("meta5"));
Assert.assertNull(copy2.getRecordMetadata("meta5"));
Assert.assertNull(record.getRecordMetadata("meta5"));
// no deep copy for values
((List)record.getRecordMetadata("list")).add("item2");
Assert.assertEquals(record.getRecordMetadata("list"), list);
Assert.assertEquals(copy1.getRecordMetadata("list"), list);
Assert.assertEquals(copy2.getRecordMetadata("list"), list);
Assert.assertEquals(copy3.getRecordMetadata("list"), list);
}
  /**
   * Minimal {@link CheckpointableWatermark} stub for these tests: it only carries a numeric id
   * (read back via the Lombok-generated {@code getId()}); all other interface methods are inert.
   */
  @AllArgsConstructor
  public static class MyWatermark implements CheckpointableWatermark {
    @Getter
    private final long id;
    @Override
    public String getSource() {
      // Fixed source name; the tests never branch on it.
      return "wm";
    }
    @Override
    public ComparableWatermark getWatermark() {
      return null;
    }
    @Override
    public JsonElement toJson() {
      return null;
    }
    @Override
    public short calculatePercentCompletion(Watermark lowWatermark, Watermark highWatermark) {
      return 0;
    }
    @Override
    public int compareTo(CheckpointableWatermark o) {
      // All instances compare equal; ordering is irrelevant to these tests.
      return 0;
    }
  }
  /**
   * Simple {@link Copyable} payload with a long id; {@link #copy()} returns a new instance with
   * the same id so tests can verify that cloned envelopes carry an equal payload.
   */
  @AllArgsConstructor
  public static class CopyableRecord implements Copyable<CopyableRecord> {
    // Identifier compared across clones; randomly generated by the no-arg constructor.
    private final long id;
    public CopyableRecord() {
      this.id = new Random().nextLong();
    }
    @Override
    public CopyableRecord copy() throws CopyNotSupportedException {
      return new CopyableRecord(this.id);
    }
  }
}
| 1,942 |
0 | Create_ds/gobblin/gobblin-api/src/test/java/org/apache/gobblin/source | Create_ds/gobblin/gobblin-api/src/test/java/org/apache/gobblin/source/workunit/TimeZoneUtilsTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.workunit;
import java.time.ZoneId;
import java.util.TimeZone;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.SourceState;
import org.joda.time.DateTimeZone;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
 * Tests that {@link Extract} resolves its extract-id time zone from configuration, and falls
 * back to UTC when {@link ConfigurationKeys#EXTRACT_ID_TIME_ZONE} is not set.
 */
public class TimeZoneUtilsTest {
  @Test
  public void testConfigurableTimeZone()
      throws Exception {
    SourceState state = new SourceState();
    // With the property set, the configured zone must be used.
    state.setProp(ConfigurationKeys.EXTRACT_ID_TIME_ZONE, "America/Los_Angeles");
    Extract extract = new Extract(state, Extract.TableType.APPEND_ONLY, "random", "table");
    Assert.assertEquals(extract.getTimeZoneHelper(state).toTimeZone(),
        TimeZone.getTimeZone(ZoneId.of("America/Los_Angeles")));
    // Without the property, the helper defaults to UTC.
    state.removeProp(ConfigurationKeys.EXTRACT_ID_TIME_ZONE);
    extract = new Extract(state, Extract.TableType.APPEND_ONLY, "random", "table");
    Assert.assertEquals(extract.getTimeZoneHelper(state), DateTimeZone.UTC);
  }
}
| 1,943 |
0 | Create_ds/gobblin/gobblin-api/src/test/java/org/apache/gobblin/source | Create_ds/gobblin/gobblin-api/src/test/java/org/apache/gobblin/source/workunit/ExtractFactoryTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.workunit;
import java.util.Set;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.collect.Sets;
@Test(groups = { "gobblin.source.workunit" })
public class ExtractFactoryTest {
  /**
   * Verify that each {@link Extract} created by an {@link ExtractFactory} gets a unique ID.
   */
  @Test
  public void testGetUniqueExtract() {
    final int numOfExtracts = 100;
    ExtractFactory factory = new ExtractFactory("yyyyMMddHHmmss");
    // Collect the IDs in a set; duplicates would collapse and shrink its size.
    Set<String> extractIds = Sets.newHashSet();
    for (int count = 0; count < numOfExtracts; count++) {
      extractIds.add(
          factory.getUniqueExtract(Extract.TableType.APPEND_ONLY, "namespace", "table").getExtractId());
    }
    Assert.assertEquals(extractIds.size(), numOfExtracts);
  }
}
| 1,944 |
0 | Create_ds/gobblin/gobblin-api/src/test/java/org/apache/gobblin/source | Create_ds/gobblin/gobblin-api/src/test/java/org/apache/gobblin/source/extractor/TestWatermark.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor;
import com.google.gson.Gson;
import com.google.gson.JsonElement;
/**
* Implementation of {@link Watermark} used for testing purposes in {@link TestWatermark}.
*/
/**
 * Implementation of {@link Watermark} used for testing purposes.
 *
 * <p>Wraps a single long value. Percent completion is computed linearly between a low and a high
 * watermark, both round-tripped through JSON to exercise real (de)serialization.
 */
public class TestWatermark implements Watermark {
  private static final Gson GSON = new Gson();
  // Current watermark value; -1 denotes "unset".
  private long watermark = -1;
  @Override
  public JsonElement toJson() {
    return WatermarkSerializerHelper.convertWatermarkToJson(this);
  }
  @Override
  public short calculatePercentCompletion(Watermark lowWatermark, Watermark highWatermark) {
    TestWatermark testLowWatermark = GSON.fromJson(lowWatermark.toJson(), TestWatermark.class);
    TestWatermark testHighWatermark = GSON.fromJson(highWatermark.toJson(), TestWatermark.class);
    long range = testHighWatermark.getLongWatermark() - testLowWatermark.getLongWatermark();
    if (range == 0) {
      // Guard against division by zero when the low and high watermarks coincide.
      return 0;
    }
    return (short) (100 * (this.watermark - testLowWatermark.getLongWatermark()) / range);
  }
  /** Sets the wrapped watermark value. */
  public void setLongWatermark(long watermark) {
    this.watermark = watermark;
  }
  /** @return the wrapped watermark value */
  public long getLongWatermark() {
    return this.watermark;
  }
}
| 1,945 |
0 | Create_ds/gobblin/gobblin-api/src/test/java/org/apache/gobblin/source | Create_ds/gobblin/gobblin-api/src/test/java/org/apache/gobblin/source/extractor/WatermarkTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
 * Tests for {@link Watermark}, {@link WatermarkInterval}, and {@link WatermarkSerializerHelper}:
 * watermarks serialized into a {@link WorkUnit} / {@link WorkUnitState} must deserialize back to
 * their original values.
 */
@Test(groups = {"gobblin.source.extractor"})
public class WatermarkTest {
  @Test
  public void testWatermarkWorkUnitSerialization() {
    long lowWatermarkValue = 0;
    long expectedHighWatermarkValue = 100;
    TestWatermark lowWatermark = new TestWatermark();
    lowWatermark.setLongWatermark(lowWatermarkValue);
    TestWatermark expectedHighWatermark = new TestWatermark();
    expectedHighWatermark.setLongWatermark(expectedHighWatermarkValue);
    // Storing the interval in a WorkUnit serializes both watermarks to JSON.
    WatermarkInterval watermarkInterval = new WatermarkInterval(lowWatermark, expectedHighWatermark);
    WorkUnit workUnit = new WorkUnit(null, null, watermarkInterval);
    // Deserializing from the WorkUnit must recover the original values.
    TestWatermark deserializedLowWatermark =
        WatermarkSerializerHelper.convertJsonToWatermark(workUnit.getLowWatermark(),
            TestWatermark.class);
    TestWatermark deserializedExpectedHighWatermark =
        WatermarkSerializerHelper.convertJsonToWatermark(workUnit.getExpectedHighWatermark(),
            TestWatermark.class);
    Assert.assertEquals(deserializedLowWatermark.getLongWatermark(), lowWatermarkValue);
    Assert.assertEquals(deserializedExpectedHighWatermark.getLongWatermark(), expectedHighWatermarkValue);
  }
  @Test
  public void testWatermarkWorkUnitStateSerialization() {
    long actualHighWatermarkValue = 50;
    TestWatermark actualHighWatermark = new TestWatermark();
    actualHighWatermark.setLongWatermark(actualHighWatermarkValue);
    // Round-trip the actual high watermark through a WorkUnitState.
    WorkUnitState workUnitState = new WorkUnitState();
    workUnitState.setActualHighWatermark(actualHighWatermark);
    TestWatermark deserializedActualHighWatermark =
        WatermarkSerializerHelper.convertJsonToWatermark(workUnitState.getActualHighWatermark(),
            TestWatermark.class);
    Assert.assertEquals(deserializedActualHighWatermark.getLongWatermark(), actualHighWatermarkValue);
  }
}
| 1,946 |
0 | Create_ds/gobblin/gobblin-api/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/test/java/org/apache/gobblin/testing/AssertWithBackoffTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.testing;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.log4j.BasicConfigurator;
import org.apache.log4j.Level;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.google.common.base.Function;
import com.google.common.base.Predicate;
import com.google.common.base.Predicates;
/** Unit tests for {@link AssertWithBackoff}. */
public class AssertWithBackoffTest {
  @BeforeClass
  public void setUp() {
    // Quiet log4j so retry logging does not flood test output.
    BasicConfigurator.configure();
    org.apache.log4j.Logger.getRootLogger().setLevel(Level.ERROR);
  }
  @Test
  public void testComputeRetrySleep() {
    final long infiniteFutureMs = Long.MAX_VALUE;
    // Sleep grows geometrically from the previous sleep, capped by maxSleep and the deadline.
    Assert.assertEquals(AssertWithBackoff.computeRetrySleep(0, 2, 100, infiniteFutureMs), 1);
    Assert.assertEquals(AssertWithBackoff.computeRetrySleep(10, 2, 100, infiniteFutureMs), 20);
    Assert.assertEquals(AssertWithBackoff.computeRetrySleep(50, 1, 100, infiniteFutureMs), 51);
    Assert.assertEquals(AssertWithBackoff.computeRetrySleep(50, 3, 100, infiniteFutureMs), 100);
    Assert.assertEquals(AssertWithBackoff.computeRetrySleep(50, 3, 60, System.currentTimeMillis() + 100), 60);
    // With a near deadline, the sleep must not overshoot the remaining time.
    long sleepMs = AssertWithBackoff.computeRetrySleep(50, 3, 1000, System.currentTimeMillis() + 100);
    Assert.assertTrue(sleepMs <= 100);
  }
  @Test
  public void testAssertWithBackoff_conditionTrue() throws Exception {
    // An always-true predicate should succeed immediately without timing out.
    Logger log = LoggerFactory.getLogger("testAssertWithBackoff_conditionTrue");
    AssertWithBackoff.create().logger(log).timeoutMs(1000)
        .assertTrue(Predicates.<Void>alwaysTrue(), "should always succeed");
  }
  @Test
  public void testAssertWithBackoff_conditionEventuallyTrue() throws Exception {
    Logger log = LoggerFactory.getLogger("testAssertWithBackoff_conditionEventuallyTrue");
    setLogjLevelForLogger(log, Level.ERROR);
    // The counter reaches the expected value only after several retries.
    final AtomicInteger cnt = new AtomicInteger();
    AssertWithBackoff.create().logger(log).timeoutMs(100000).backoffFactor(2.0)
        .assertEquals(new Function<Void, Integer>() {
          @Override public Integer apply(Void input) { return cnt.incrementAndGet(); }
        }, 5, "should eventually succeed");
  }
  @Test
  public void testAssertWithBackoff_conditionFalse() throws Exception {
    Logger log = LoggerFactory.getLogger("testAssertWithBackoff_conditionFalse");
    setLogjLevelForLogger(log, Level.ERROR);
    long startTimeMs = System.currentTimeMillis();
    // An always-false predicate must throw TimeoutException after at least timeoutMs.
    try {
      AssertWithBackoff.create().logger(log).timeoutMs(50)
          .assertTrue(Predicates.<Void>alwaysFalse(), "should timeout");
      Assert.fail("TimeoutException expected");
    } catch (TimeoutException e) {
      //Expected
    }
    long durationMs = System.currentTimeMillis() - startTimeMs;
    log.debug("assert took " + durationMs + "ms");
    Assert.assertTrue(durationMs >= 50L, Long.toString(durationMs) + ">= 50ms");
  }
  @Test
  public void testAssertWithBackoff_RuntimeException() throws Exception {
    Logger log = LoggerFactory.getLogger("testAssertWithBackoff_RuntimeException");
    setLogjLevelForLogger(log, Level.ERROR);
    // A predicate that throws should propagate the RuntimeException to the caller.
    try {
      AssertWithBackoff.create().logger(log).timeoutMs(50)
          .assertTrue(new Predicate<Void>() {
            @Override public boolean apply(Void input) { throw new RuntimeException("BLAH"); }
          }, "should throw RuntimeException");
      Assert.fail("should throw RuntimeException");
    } catch (RuntimeException e) {
      // contains() instead of indexOf(...) > 0: the latter would spuriously fail if the
      // message started with "BLAH" (index 0).
      Assert.assertTrue(e.getMessage().contains("BLAH"), e.getMessage());
    }
  }
  /** Sets the log4j level for the log4j logger backing the given slf4j {@code log}. */
  public static void setLogjLevelForLogger(Logger log, Level logLevel) {
    org.apache.log4j.Logger log4jLogger = org.apache.log4j.Logger.getLogger(log.getName());
    log4jLogger.setLevel(logLevel);
  }
}
| 1,947 |
0 | Create_ds/gobblin/gobblin-api/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/test/java/org/apache/gobblin/writer/FsWriterMetricsTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import java.io.IOException;
import java.util.Set;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.collect.ImmutableSet;
/**
 * Tests JSON round-trip serialization of {@link FsWriterMetrics}.
 */
public class FsWriterMetricsTest {
  @Test
  public void testSerialization() throws IOException {
    final String WRITER_ID = "foobar123";
    final PartitionIdentifier PARTITION_KEY = new PartitionIdentifier("_partitionInfo", 3);
    final Set<FsWriterMetrics.FileInfo> FILE_INFOS = ImmutableSet.of(
        new FsWriterMetrics.FileInfo("file1", 1234),
        new FsWriterMetrics.FileInfo("file2", 4321)
    );
    // Serialize to JSON and parse back; every field must survive the round trip.
    String metricsJson = new FsWriterMetrics(WRITER_ID, PARTITION_KEY, FILE_INFOS).toJson();
    FsWriterMetrics parsedMetrics = FsWriterMetrics.fromJson(metricsJson);
    Assert.assertEquals(parsedMetrics.writerId, WRITER_ID);
    Assert.assertEquals(parsedMetrics.partitionInfo, PARTITION_KEY);
    Assert.assertEquals(parsedMetrics.fileInfos, FILE_INFOS);
  }
}
| 1,948 |
0 | Create_ds/gobblin/gobblin-api/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/test/java/org/apache/gobblin/ack/HierarchicalAckableTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.ack;
import org.testng.Assert;
import org.testng.TestException;
import org.testng.annotations.Test;
import com.google.common.collect.Lists;
/**
 * Tests for {@code HierarchicalAckable}: a parent ackable that is acked only after it is closed
 * AND every child it issued has been acked, and nacked if any child is nacked.
 */
public class HierarchicalAckableTest {
  @Test
  public void testCloseBeforeAck() throws Exception {
    BasicAckableForTesting ackable = new BasicAckableForTesting();
    HierarchicalAckable hierarchicalAckable = new HierarchicalAckable(Lists.newArrayList(ackable));
    Ackable child1 = hierarchicalAckable.newChildAckable();
    Ackable child2 = hierarchicalAckable.newChildAckable();
    // Closing alone does not ack the parent while children are outstanding.
    hierarchicalAckable.close();
    Assert.assertEquals(ackable.acked, 0);
    Assert.assertEquals(ackable.nacked, 0);
    child2.ack();
    Assert.assertEquals(ackable.acked, 0);
    Assert.assertEquals(ackable.nacked, 0);
    // acking same child twice does not ack parent
    child2.ack();
    Assert.assertEquals(ackable.acked, 0);
    Assert.assertEquals(ackable.nacked, 0);
    // The last outstanding child triggers the parent ack.
    child1.ack();
    Assert.assertEquals(ackable.acked, 1);
    Assert.assertEquals(ackable.nacked, 0);
    // Acking again changes nothing
    child1.ack();
    Assert.assertEquals(ackable.acked, 1);
    Assert.assertEquals(ackable.nacked, 0);
  }
  @Test
  public void testAckBeforeClose() throws Exception {
    BasicAckableForTesting ackable = new BasicAckableForTesting();
    HierarchicalAckable hierarchicalAckable = new HierarchicalAckable(Lists.newArrayList(ackable));
    Ackable child1 = hierarchicalAckable.newChildAckable();
    Ackable child2 = hierarchicalAckable.newChildAckable();
    // All children acked, but the parent must not be acked until close() is called.
    child2.ack();
    Assert.assertEquals(ackable.acked, 0);
    Assert.assertEquals(ackable.nacked, 0);
    child1.ack();
    Assert.assertEquals(ackable.acked, 0);
    Assert.assertEquals(ackable.nacked, 0);
    hierarchicalAckable.close();
    Assert.assertEquals(ackable.acked, 1);
    Assert.assertEquals(ackable.nacked, 0);
  }
  @Test
  public void testChildNacked() throws Exception {
    BasicAckableForTesting ackable = new BasicAckableForTesting();
    HierarchicalAckable hierarchicalAckable = new HierarchicalAckable(Lists.newArrayList(ackable));
    Ackable child1 = hierarchicalAckable.newChildAckable();
    Ackable child2 = hierarchicalAckable.newChildAckable();
    child2.ack();
    Assert.assertEquals(ackable.acked, 0);
    Assert.assertEquals(ackable.nacked, 0);
    hierarchicalAckable.close();
    Assert.assertEquals(ackable.acked, 0);
    Assert.assertEquals(ackable.nacked, 0);
    // A nacked child nacks the parent with a ChildrenFailedException wrapping the cause.
    child1.nack(new TestException("test"));
    Assert.assertEquals(ackable.acked, 0);
    Assert.assertEquals(ackable.nacked, 1);
    Assert.assertNotNull(ackable.throwable);
    Assert.assertTrue(ackable.throwable instanceof HierarchicalAckable.ChildrenFailedException);
    Assert.assertEquals(((HierarchicalAckable.ChildrenFailedException) ackable.throwable).getFailureCauses().size(), 1);
  }
  @Test
  public void testMultipleParents() throws Exception {
    // Every underlying ackable should receive the ack when the hierarchy completes.
    BasicAckableForTesting ackable1 = new BasicAckableForTesting();
    BasicAckableForTesting ackable2 = new BasicAckableForTesting();
    HierarchicalAckable hierarchicalAckable = new HierarchicalAckable(Lists.newArrayList(ackable1, ackable2));
    Ackable child1 = hierarchicalAckable.newChildAckable();
    hierarchicalAckable.close();
    child1.ack();
    Assert.assertEquals(ackable1.acked, 1);
    Assert.assertEquals(ackable2.acked, 1);
  }
}
| 1,949 |
0 | Create_ds/gobblin/gobblin-api/src/test/java/com/linkedin | Create_ds/gobblin/gobblin-api/src/test/java/com/linkedin/gobblin/TestAlias.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.linkedin.gobblin;
import org.apache.gobblin.annotation.Alias;
/**
 * Dummy test class used to verify {@link Alias} resolution for classes under the
 * {@code com.linkedin.gobblin} package.
 */
public class TestAlias {
  // Aliased implementation: resolvable through the short name "com.alias".
  @Alias(value = "com.alias")
  public static class DummyAliasCom implements IDummyAliasCom {
  }
  // Marker interface serving as the resolution target for the alias lookup.
  public static interface IDummyAliasCom {
  }
}
| 1,950 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/Constructs.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin;
import org.apache.gobblin.converter.Converter;
import org.apache.gobblin.fork.ForkOperator;
import org.apache.gobblin.publisher.DataPublisher;
import org.apache.gobblin.qualitychecker.row.RowLevelPolicy;
import org.apache.gobblin.qualitychecker.task.TaskLevelPolicy;
import org.apache.gobblin.source.Source;
import org.apache.gobblin.source.extractor.Extractor;
import org.apache.gobblin.writer.DataWriter;
/**
 * Enumeration of the pluggable Gobblin constructs that make up an ingestion pipeline,
 * each pairing a display name with the interface/base class implementing it.
 */
public enum Constructs {
  /** {@link org.apache.gobblin.source.Source} */
  SOURCE("Source", Source.class),
  /** {@link org.apache.gobblin.source.extractor.Extractor} */
  EXTRACTOR("Extractor", Extractor.class),
  /** {@link org.apache.gobblin.converter.Converter} */
  CONVERTER("Converter", Converter.class),
  /** {@link org.apache.gobblin.qualitychecker.row.RowLevelPolicy} */
  ROW_QUALITY_CHECKER("RowLevelPolicy", RowLevelPolicy.class),
  /** {@link org.apache.gobblin.qualitychecker.task.TaskLevelPolicy} */
  TASK_QUALITY_CHECKER("TaskLevelPolicy", TaskLevelPolicy.class),
  /** {@link org.apache.gobblin.fork.ForkOperator} */
  FORK_OPERATOR("ForkOperator", ForkOperator.class),
  /** {@link org.apache.gobblin.writer.DataWriter} */
  WRITER("DataWriter", DataWriter.class),
  /** {@link org.apache.gobblin.publisher.DataPublisher} */
  DATA_PUBLISHER("DataPublisher", DataPublisher.class);

  // Human-readable construct name, also returned by toString().
  private final String name;
  // The interface/base class backing this construct.
  private final Class<?> klazz;

  Constructs(String displayName, Class<?> constructClass) {
    this.name = displayName;
    this.klazz = constructClass;
  }

  @Override
  public String toString() {
    return this.name;
  }

  /** @return the class object backing this construct */
  public Class<?> constructClass() {
    return this.klazz;
  }
}
| 1,951 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/commit/CommitStep.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.commit;
import java.io.IOException;
import org.apache.gobblin.annotation.Alpha;
/**
 * A single step of a dataset commit that must execute atomically with the other steps under
 * exactly-once semantics. An example is publishing the data files of a dataset, which should be
 * executed atomically with persisting the dataset state in order to avoid pulling duplicate data.
 *
 * @author Ziyang Liu
 */
@Alpha
public interface CommitStep {

  /**
   * Determine whether this commit step has already been completed.
   */
  boolean isCompleted() throws IOException;

  /**
   * Execute this commit step.
   */
  void execute() throws IOException;
}
| 1,952 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/commit/CommitStepException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.commit;
import java.io.IOException;
/**
 * An {@link IOException} thrown when a commit step fails to execute, preserving the underlying
 * cause when one is available.
 */
public class CommitStepException extends IOException {

  // Explicit id so serialized instances remain compatible across versions of this class.
  private static final long serialVersionUID = 1L;

  /**
   * @param message description of the failed commit step
   * @param t the underlying cause of the failure
   */
  public CommitStepException(String message, Throwable t) {
    super(message, t);
  }

  /**
   * @param message description of the failed commit step
   */
  public CommitStepException(String message) {
    super(message);
  }
}
| 1,953 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/commit/CommitSequenceStore.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.commit;
import java.io.IOException;
import java.util.Collection;
import com.google.common.base.Optional;
import org.apache.gobblin.annotation.Alpha;
/**
 * A store for {@link CommitSequence}s, each identified by a job name and a dataset URN.
 *
 * @author Ziyang Liu
 */
@Alpha
public interface CommitSequenceStore {

  /**
   * Whether any {@link CommitSequence} exists in the store for the given job name.
   */
  boolean exists(String jobName) throws IOException;

  /**
   * Whether a {@link CommitSequence} exists for the given job name and dataset URN.
   */
  boolean exists(String jobName, String datasetUrn) throws IOException;

  /**
   * Delete the given job name from the store, along with every {@link CommitSequence}
   * associated with it.
   */
  void delete(String jobName) throws IOException;

  /**
   * Delete the {@link CommitSequence} for the given job name and dataset URN.
   */
  void delete(String jobName, String datasetUrn) throws IOException;

  /**
   * Store a {@link CommitSequence} under the given job name and dataset URN.
   *
   * @throws IOException if a {@link CommitSequence} for the given job name and dataset URN
   *         already exists in the store
   */
  void put(String jobName, String datasetUrn, CommitSequence commitSequence) throws IOException;

  /**
   * Get the dataset URNs that have a {@link CommitSequence} stored under the given job name.
   */
  Collection<String> get(String jobName) throws IOException;

  /**
   * Get the {@link CommitSequence} for the given job name and dataset URN, if present.
   */
  Optional<CommitSequence> get(String jobName, String datasetUrn) throws IOException;
}
| 1,954 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/commit/CommitSequence.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.commit;
import java.util.List;
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
import com.google.common.base.Throwables;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import org.apache.gobblin.annotation.Alpha;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
/**
* A sequence of {@link CommitStep}s that should be executed atomically.
*
* <p>
* Typical pattern for building a {@link CommitSequence} with two {@link CommitStep}s:
*
* <pre>
* {@code
* CommitSequence sequence = CommitSequence.newBuilder()
* .withJobName(jobName)
* .withDatasetUrn(datasetUrn)
*
* .beginStep(FsRenameCommitStep.Builder.class)
* .withProps(props)
* .from(srcPath)
* .to(dstPath)
* .endStep()
*
* .beginStep(DatasetStateCommitStep.Builder.class)
* .withProps(props)
* .withDatasetUrn(datasetUrn)
* .withDatasetState(datasetState)
* .endStep()
*
* .build();
* }
* </pre>
* </p>
*
* @author Ziyang Liu
*/
@Alpha
@Slf4j
public class CommitSequence {

  /** Name of the job this commit sequence belongs to. */
  @Getter
  private final String jobName;

  /** URN of the dataset this commit sequence commits. */
  @Getter
  private final String datasetUrn;

  /** The {@link CommitStep}s to execute, in order; immutable snapshot of the builder's list. */
  private final List<CommitStep> steps;

  private CommitSequence(Builder builder) {
    this.jobName = builder.jobName;
    this.datasetUrn = builder.datasetUrn;
    this.steps = ImmutableList.copyOf(builder.steps);
  }

  /**
   * Builder for {@link CommitSequence}. Steps are executed in the order they are added.
   */
  public static class Builder {

    private String jobName;
    private String datasetUrn;
    private final List<CommitStep> steps = Lists.newArrayList();

    /**
     * Set the job name for the commit sequence.
     */
    public Builder withJobName(String jobName) {
      this.jobName = jobName;
      return this;
    }

    /**
     * Set the dataset URN for the commit sequence.
     */
    public Builder withDatasetUrn(String datasetUrn) {
      this.datasetUrn = datasetUrn;
      return this;
    }

    /**
     * Build a {@link CommitStep}.
     *
     * @param builderClass The builder class for the {@link CommitStep}, which should extend
     * {@link CommitStepBase.Builder}.
     * @return An instance of the builder class for the {@link CommitStep}.
     */
    public <T extends CommitStepBase.Builder<?>> T beginStep(Class<T> builderClass) {
      try {
        // The step builder class must declare a constructor taking this builder's runtime class.
        // NOTE(review): this.getClass() rather than Builder.class means subclasses of Builder
        // require step builders with a matching constructor — confirm this is intended.
        return builderClass.getDeclaredConstructor(this.getClass()).newInstance(this);
      } catch (ReflectiveOperationException e) {
        throw new RuntimeException("Failed to instantiate " + builderClass, e);
      }
    }

    /**
     * Add a {@link CommitStep} to the commit sequence.
     */
    public Builder addStep(CommitStep step) {
      this.steps.add(step);
      return this;
    }

    /**
     * Build the {@link CommitSequence}.
     *
     * @throws IllegalStateException if the job name, dataset URN, or any commit step is missing
     */
    public CommitSequence build() {
      Preconditions.checkState(!Strings.isNullOrEmpty(this.jobName), "Job name not specified for commit sequence");
      Preconditions.checkState(!Strings.isNullOrEmpty(this.datasetUrn),
          "Dataset URN not specified for commit sequence");
      Preconditions.checkState(!this.steps.isEmpty(), "No commit steps specified for the commit sequence");
      return new CommitSequence(this);
    }
  }

  /**
   * Execute the {@link CommitStep}s in the order they are added to the commit sequence.
   * Steps that already report {@link CommitStep#isCompleted()} are skipped.
   *
   * @throws RuntimeException wrapping any checked throwable thrown by a step; unchecked
   *         throwables are rethrown as-is
   */
  public void execute() {
    try {
      for (CommitStep step : this.steps) {
        if (!step.isCompleted()) {
          step.execute();
        }
      }
    } catch (Throwable t) {
      log.error("Commit failed for dataset " + this.datasetUrn, t);
      // Equivalent to the deprecated Throwables.propagate(t): rethrow RuntimeExceptions and
      // Errors as-is, wrap checked throwables in a RuntimeException.
      Throwables.throwIfUnchecked(t);
      throw new RuntimeException(t);
    }
  }

  public static Builder newBuilder() {
    return new Builder();
  }
}
| 1,955 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/commit/DeliverySemantics.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.commit;
import java.util.Locale;

import com.google.common.base.Enums;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;

import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
/**
* The semantics for data delivery.
*
* @author Ziyang Liu
*/
public enum DeliverySemantics {

  /**
   * Each data record from the source is guaranteed to be delivered at least once.
   */
  AT_LEAST_ONCE,

  /**
   * Each data record from the source is guaranteed to be delivered exactly once.
   */
  EXACTLY_ONCE;

  /**
   * Get the delivery semantics type from {@link ConfigurationKeys#GOBBLIN_RUNTIME_DELIVERY_SEMANTICS}.
   * The default value is {@link #AT_LEAST_ONCE}.
   *
   * @param state the {@link State} carrying the configuration
   * @return the configured {@link DeliverySemantics}
   * @throws IllegalStateException if the configured value is not a valid delivery semantics
   */
  public static DeliverySemantics parse(State state) {
    // Locale.ROOT makes the case conversion independent of the JVM default locale
    // (avoids e.g. the Turkish dotted/dotless-i problem for config values).
    String value = state.getProp(ConfigurationKeys.GOBBLIN_RUNTIME_DELIVERY_SEMANTICS, AT_LEAST_ONCE.toString())
        .toUpperCase(Locale.ROOT);
    Optional<DeliverySemantics> semantics = Enums.getIfPresent(DeliverySemantics.class, value);
    Preconditions.checkState(semantics.isPresent(), value + " is not a valid delivery semantics");
    return semantics.get();
  }
}
| 1,956 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/commit/CommitStepBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.commit;
import java.io.IOException;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.configuration.State;
/**
* A base implementation of {@link CommitStep}.
*
* @author Ziyang Liu
*/
@Alpha
public abstract class CommitStepBase implements CommitStep {

  /** Configuration properties supplied through the builder. */
  protected final State props;

  protected CommitStepBase(Builder<? extends Builder<?>> builder) {
    Preconditions.checkNotNull(builder.props);
    this.props = builder.props;
  }

  /**
   * Base builder for {@link CommitStepBase} subclasses. A builder may optionally be attached to a
   * {@link CommitSequence.Builder}; in that case {@link #endStep()} registers the built step with
   * the sequence and returns control to the sequence builder.
   */
  public abstract static class Builder<T extends Builder<?>> {

    private final Optional<CommitSequence.Builder> commitSequenceBuilder;
    protected State props;

    protected Builder() {
      // Stand-alone builder: not attached to any commit sequence.
      this.commitSequenceBuilder = Optional.absent();
    }

    protected Builder(CommitSequence.Builder commitSequenceBuilder) {
      Preconditions.checkNotNull(commitSequenceBuilder);
      this.commitSequenceBuilder = Optional.of(commitSequenceBuilder);
    }

    /**
     * Supply the configuration properties for the step being built.
     */
    @SuppressWarnings("unchecked")
    public T withProps(State props) {
      // Snapshot the properties so later mutations by the caller do not leak into the step.
      this.props = new State(props.getProperties());
      return (T) this;
    }

    /**
     * Finish this step: build it, add it to the enclosing commit sequence, and return the
     * sequence builder. Only valid when this builder was created from a sequence builder.
     */
    public CommitSequence.Builder endStep() throws IOException {
      Preconditions.checkState(this.commitSequenceBuilder.isPresent());
      CommitSequence.Builder sequenceBuilder = this.commitSequenceBuilder.get();
      sequenceBuilder.addStep(build());
      return sequenceBuilder;
    }

    public abstract CommitStep build() throws IOException;
  }
}
| 1,957 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/fork/ForkOperator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.fork;
import java.io.Closeable;
import java.util.List;
import org.apache.gobblin.configuration.WorkUnitState;
/**
* An interface for fork operators that convert one input data record into multiple
* records. So essentially this operator forks one input data stream into multiple
* data streams. This interface allows user to plugin their fork logic.
*
* @author Yinan Li
*
* @param <S> schema data type
* @param <D> data record data type
*/
public interface ForkOperator<S, D> extends Closeable {

  /**
   * Initialize this {@link ForkOperator}.
   *
   * @param workUnitState {@link WorkUnitState} carrying the configuration
   * @throws Exception if initialization fails
   */
  public void init(WorkUnitState workUnitState)
      throws Exception;

  /**
   * Get the number of branches after the fork.
   *
   * @param workUnitState {@link WorkUnitState} carrying the configuration
   * @return number of branches after the fork
   */
  public int getBranches(WorkUnitState workUnitState);

  /**
   * Get a list of {@link java.lang.Boolean}s indicating if the schema should go to each branch.
   *
   * <p>The returned list is expected to have exactly one entry per branch
   * (i.e. its size equals {@link #getBranches(WorkUnitState)}).</p>
   *
   * @param workUnitState {@link WorkUnitState} carrying the configuration
   * @param input input schema
   * @return list of {@link java.lang.Boolean}s, one per branch
   */
  public List<Boolean> forkSchema(WorkUnitState workUnitState, S input);

  /**
   * Get a list of {@link java.lang.Boolean}s indicating if the record should go to each branch.
   *
   * @param workUnitState {@link WorkUnitState} carrying the configuration
   * @param input input data record
   * @return list of {@link java.lang.Boolean}s, one per branch
   */
  public List<Boolean> forkDataRecord(WorkUnitState workUnitState, D input);
}
| 1,958 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/fork/Forker.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.fork;
import java.util.List;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.metadata.GlobalMetadata;
import org.apache.gobblin.records.RecordStreamWithMetadata;
import org.apache.gobblin.stream.ControlMessage;
import org.apache.gobblin.stream.RecordEnvelope;
import org.apache.gobblin.stream.StreamEntity;
import io.reactivex.Flowable;
import io.reactivex.flowables.ConnectableFlowable;
import io.reactivex.functions.Predicate;
import lombok.Data;
/**
* Forks a {@link RecordStreamWithMetadata} into multiple branches specified by a {@link ForkOperator}.
*
* Each forked stream is a mirror of the original stream.
*/
public class Forker {

  /**
   * Obtain the {@link ForkedStream} for the input {@link RecordStreamWithMetadata} and {@link ForkOperator}.
   * @param inputStream input {@link Flowable} of records.
   * @param forkOperator {@link ForkOperator} specifying the fork behavior.
   * @param workUnitState work unit configuration.
   * @return a {@link ForkedStream} with the forked streams.
   * @throws Exception if the {@link ForkOperator} throws any exceptions.
   */
  public <D, S> ForkedStream<D, S>
  forkStream(RecordStreamWithMetadata<D, S> inputStream, ForkOperator<S, D> forkOperator, WorkUnitState workUnitState)
      throws Exception {
    int branches = forkOperator.getBranches(workUnitState);
    // Set fork.branches explicitly here so the rest task flow can pick it up
    workUnitState.setProp(ConfigurationKeys.FORK_BRANCHES_KEY, branches);
    forkOperator.init(workUnitState);
    // One boolean per branch: whether the schema (and hence the branch) is active.
    List<Boolean> forkedSchemas = forkOperator.forkSchema(workUnitState, inputStream.getGlobalMetadata().getSchema());
    int activeForks = (int) forkedSchemas.stream().filter(b -> b).count();
    Preconditions.checkState(forkedSchemas.size() == branches, String
        .format("Number of forked schemas [%d] is not equal to number of branches [%d]", forkedSchemas.size(),
            branches));

    // Wrap each stream entity with its per-branch routing map so the per-branch filters below
    // can decide whether to deliver it.
    Flowable<RecordWithForkMap<D>> forkedStream = inputStream.getRecordStream().map(r -> {
      if (r instanceof RecordEnvelope) {
        RecordEnvelope<D> recordEnvelope = (RecordEnvelope<D>) r;
        return new RecordWithForkMap<>(recordEnvelope, forkOperator.forkDataRecord(workUnitState, recordEnvelope.getRecord()));
      } else if (r instanceof ControlMessage) {
        // Control messages are delivered to every branch (see RecordWithForkMap.sendToBranch).
        // NOTE(review): `branches` (the total count) is passed as the number of active branches,
        // but only `activeForks` branches will ever pull a copy. If any branch is disabled,
        // copiesLeft never reaches 0 and the cloner is not closed — verify intended.
        return new RecordWithForkMap<D>((ControlMessage<D>) r, branches);
      } else {
        throw new IllegalStateException("Expected RecordEnvelope or ControlMessage.");
      }
    });

    if (activeForks > 1) {
      // Multiple branches will subscribe to the same upstream, so multicast it.
      forkedStream = forkedStream.share();
    }

    List<RecordStreamWithMetadata<D, S>> forkStreams = Lists.newArrayList();
    boolean mustCopy = mustCopy(forkedSchemas);
    for(int i = 0; i < forkedSchemas.size(); i++) {
      if (forkedSchemas.get(i)) {
        final int idx = i;
        // Keep only the entities routed to branch idx, copying them when more than one
        // branch is active.
        Flowable<StreamEntity<D>> thisStream =
            forkedStream.filter(new ForkFilter<>(idx)).map(RecordWithForkMap::getRecordCopyIfNecessary);
        forkStreams.add(inputStream.withRecordStream(thisStream,
            mustCopy ? (GlobalMetadata<S>) CopyHelper.copy(inputStream.getGlobalMetadata()) :
                inputStream.getGlobalMetadata()));
      } else {
        // Disabled branch: placeholder keeps branch indices aligned with positions in the list.
        forkStreams.add(null);
      }
    }

    return new ForkedStream<>(forkStreams);
  }

  /** Whether entities must be defensively copied: true iff at least two branches are active. */
  private static boolean mustCopy(List<Boolean> forkMap) {
    return forkMap.stream().filter(b -> b).count() >= 2;
  }

  /**
   * An object containing the forked streams and a {@link ConnectableFlowable} used to connect the stream when all
   * streams have been subscribed to.
   */
  @Data
  public static class ForkedStream<D, S> {
    /** A list of forked streams. Note some of the forks may be null if the {@link ForkOperator} marks them as disabled. */
    private final List<RecordStreamWithMetadata<D, S>> forkedStreams;
  }

  /**
   * Filter records so that only records corresponding to flow {@link #forkIdx} pass.
   */
  @Data
  private static class ForkFilter<D> implements Predicate<RecordWithForkMap<D>> {
    private final int forkIdx;

    @Override
    public boolean test(RecordWithForkMap<D> dRecordWithForkMap) {
      return dRecordWithForkMap.sendToBranch(this.forkIdx);
    }
  }

  /**
   * Used to hold a record as well as the map specifying which forks it should go to.
   */
  private static class RecordWithForkMap<D> {
    private final StreamEntity<D> record;
    // Per-branch delivery flags for data records; null for control messages (broadcast to all).
    private final List<Boolean> forkMap;
    private final boolean mustCopy;
    // Cloner is only created when copies are actually needed; null otherwise.
    private final StreamEntity.ForkCloner cloner;
    // Number of branches still expected to pull a copy of this entity.
    private long copiesLeft;

    public RecordWithForkMap(RecordEnvelope<D> record, List<Boolean> forkMap) {
      this.record = record;
      this.forkMap = Lists.newArrayList(forkMap);
      this.mustCopy = mustCopy(forkMap);
      this.copiesLeft = this.forkMap.stream().filter(x -> x).count();
      this.cloner = buildForkCloner();
    }

    public RecordWithForkMap(ControlMessage<D> record, int activeBranchesForRecord) {
      this.record = record;
      this.forkMap = null;
      this.copiesLeft = activeBranchesForRecord;
      this.mustCopy = this.copiesLeft > 1;
      this.cloner = buildForkCloner();
    }

    private StreamEntity.ForkCloner buildForkCloner() {
      if (this.mustCopy) {
        return this.record.forkCloner();
      } else {
        return null;
      }
    }

    // synchronized: once the stream is shared, multiple branches may request copies concurrently.
    private synchronized StreamEntity<D> getRecordCopyIfNecessary() throws CopyNotSupportedException {
      if(this.mustCopy) {
        StreamEntity<D> clone = this.cloner.getClone();
        this.copiesLeft--;
        if (this.copiesLeft <= 0) {
          // All expected copies handed out; release the cloner's resources.
          this.cloner.close();
        }
        return clone;
      } else {
        return this.record;
      }
    }

    public boolean sendToBranch(int idx) {
      if (record instanceof RecordEnvelope) {
        return this.forkMap.get(idx);
      } else {
        // Control messages go to every branch.
        return true;
      }
    }
  }
}
| 1,959 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/fork/Copyable.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.fork;
/**
* An interface for classes that supports making copies of their instances.
*
* @author Yinan Li
*/
public interface Copyable<T> {

  /**
   * Make a new copy of this instance.
   *
   * @return new copy of this instance
   * @throws CopyNotSupportedException if this instance cannot be copied
   */
  public T copy()
      throws CopyNotSupportedException;
}
| 1,960 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/fork/CopyHelper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.fork;
/**
* A helper class to copy things that may or may not be {@link Copyable}.
* Supports implementations for common primitive types.
*/
public class CopyHelper {

  private CopyHelper() {
    // Utility class: static helper methods only.
  }

  /**
   * Check if an object is copyable using the {@link #copy(Object)} method.
   *
   * @param thing the object that needs to be copied
   * @return true if {@link CopyHelper} can copy this thing, false otherwise
   */
  public static boolean isCopyable(Object thing) {
    return (thing instanceof Copyable)
        || (thing instanceof byte[])
        || isImmutableType(thing);
  }

  /**
   * Contains a collection of supported immutable types for copying.
   * Only keep the types that are worth supporting as record types.
   *
   * @param thing an Object being checked; null is treated as immutable
   * @return true if supported immutable type, false otherwise
   */
  private static boolean isImmutableType(Object thing) {
    return (thing == null)
        || (thing instanceof String)
        || (thing instanceof Integer)
        || (thing instanceof Long);
  }

  /**
   * Copy this object if needed.
   *
   * @param thing the object that needs to be copied
   * @return a possibly copied instance
   * @throws CopyNotSupportedException if thing needs to be copied but cannot be
   */
  public static Object copy(Object thing) throws CopyNotSupportedException {
    if (!isCopyable(thing)) {
      // thing is non-null here: null is classified as an immutable (copyable) type above.
      throw new CopyNotSupportedException(thing.getClass().getName() + " cannot be copied. See Copyable");
    }
    if (thing instanceof Copyable) {
      return ((Copyable) thing).copy();
    }
    // Support for a few primitive types out of the box.
    if (thing instanceof byte[]) {
      // clone() on a byte[] yields an independent full copy.
      return ((byte[]) thing).clone();
    }
    // Assume that every other type reaching this point is immutable and safe to share.
    return thing;
  }
}
| 1,961 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/fork/CopyNotSupportedException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.fork;
/**
* A type of {@link java.lang.Exception}s thrown when copying is not supported.
*
* @author Yinan Li
*/
public class CopyNotSupportedException extends Exception {

  // Fixed serial version; this exception carries no state beyond the message.
  private static final long serialVersionUID = 1L;

  /**
   * Creates a {@link CopyNotSupportedException} with the given detail message.
   *
   * @param message description of why copying is not supported
   */
  public CopyNotSupportedException(String message) {
    super(message);
  }
}
| 1,962 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/crypto/CredentialStore.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.crypto;
import java.util.Map;
/**
* Interface for a simple CredentialStore that simply has a set of byte-encoded keys. Format
* of the underlying keys is left to the implementor.
*/
public interface CredentialStore {

  /**
   * Get the binary encoded key with the given key.
   *
   * @param id Key to lookup
   * @return null if the key does not exist; encoded-key if it does
   */
  byte[] getEncodedKey(String id);

  /**
   * List all binary encoded keys in the credential store.
   *
   * @return a map from key id to binary encoded key for every entry in the store
   */
  Map<String, byte[]> getAllEncodedKeys();
}
| 1,963 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/crypto/CredentialStoreProvider.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.crypto;
import java.util.Map;
/**
* Represents a factory object that can build CredentialStores based on a set of config.
*/
public interface CredentialStoreProvider {

  /**
   * Build a credential store based on the passed in configuration parameters —
   * EncryptionConfigParser can be used to help parse known keys out of the config bag.
   *
   * <p>If this provider does not know how to build the requested credential store it should
   * return null.</p>
   *
   * @param parameters configuration bag describing the requested credential store
   * @return a {@link CredentialStore}, or null if this provider cannot satisfy the request
   */
  CredentialStore buildCredentialStore(Map<String, Object> parameters);
}
| 1,964 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/crypto/EncryptionProvider.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.crypto;
import java.util.Map;
import org.apache.gobblin.codec.StreamCodec;
/**
* Represents a builder that can construct encryption providers given a type and various
* configuration parameters.
*/
public interface EncryptionProvider {

  /**
   * Build a StreamCodec with the given algorithm and configuration parameters.
   *
   * <p>If the provider cannot satisfy the request it should return null.</p>
   *
   * @param algorithm name of the encryption algorithm requested
   * @param parameters configuration bag for the codec
   * @return a {@link StreamCodec}, or null if this provider cannot satisfy the request
   */
  StreamCodec buildStreamCryptoProvider(String algorithm, Map<String, Object> parameters);
}
| 1,965 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/initializer/Initializer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.initializer;
import java.io.Closeable;
public interface Initializer extends Closeable {

  /**
   * Initialize for the writer.
   *
   * <p>Takes no arguments: any state or work units the initializer needs must be supplied when
   * the implementing instance is constructed.</p>
   */
  public void initialize();

  /**
   * Removed checked exception.
   * {@inheritDoc}
   * @see java.io.Closeable#close()
   */
  @Override
  public void close();
}
| 1,966 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/initializer/NoopInitializer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.initializer;
import lombok.ToString;
@ToString
public class NoopInitializer implements Initializer {

  // Shared singleton; the class is stateless, so one instance suffices.
  public static final NoopInitializer INSTANCE = new NoopInitializer();

  // Private constructor enforces use of INSTANCE.
  private NoopInitializer() {}

  /** No-op. */
  @Override
  public void initialize() {}

  /** No-op. */
  @Override
  public void close() {}
}
| 1,967 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/configuration/StateUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.configuration;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import com.google.common.collect.Lists;
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
/**
* Utility class for dealing with {@link State} objects.
*/
public class StateUtils {

  private StateUtils() {
    // Utility class: static methods only.
  }

  /**
   * Converts a {@link JsonObject} to a {@link State} object. It does not add any keys specified
   * in the excludeKeys array.
   *
   * @param jsonObject the source object; values are read via {@code getAsString()}, so entries
   *        are assumed to be JSON primitives — TODO confirm callers never pass nested
   *        objects/arrays or JSON nulls
   * @param excludeKeys keys to omit from the resulting {@link State}; may be null or empty
   * @return a new {@link State} containing the remaining entries
   */
  public static State jsonObjectToState(JsonObject jsonObject, String... excludeKeys) {
    State state = new State();
    List<String> excludeKeysList = excludeKeys == null ? Lists.<String>newArrayList() : Arrays.asList(excludeKeys);
    for (Map.Entry<String, JsonElement> jsonObjectEntry : jsonObject.entrySet()) {
      if (!excludeKeysList.contains(jsonObjectEntry.getKey())) {
        state.setProp(jsonObjectEntry.getKey(), jsonObjectEntry.getValue().getAsString());
      }
    }
    return state;
  }
}
| 1,968 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/configuration/ConfigurationException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.configuration;
import java.io.IOException;
/**
 * An {@link IOException} signaling a problem with Gobblin configuration.
 */
public class ConfigurationException extends IOException {

  /**
   * @param message description of the configuration problem
   */
  public ConfigurationException(String message) {
    super(message);
  }

  /**
   * @param message description of the configuration problem
   * @param e the underlying cause
   */
  public ConfigurationException(String message, Exception e) {
    super(message, e);
  }
}
| 1,969 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/configuration/ImmutableWorkUnitState.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.configuration;
import java.io.DataInput;
import java.io.IOException;
import java.util.Properties;
import org.apache.gobblin.source.extractor.Watermark;
/**
 * An immutable version of {@link WorkUnitState}.
 *
 * <p>Every mutator inherited from {@link WorkUnitState} is overridden to throw
 * {@link UnsupportedOperationException}; only the snapshot captured at construction time is
 * exposed.</p>
 *
 * @author Yinan Li
 */
public class ImmutableWorkUnitState extends WorkUnitState {

  public ImmutableWorkUnitState(WorkUnitState workUnitState) {
    super(workUnitState.getWorkunit(), workUnitState.getJobState());
    super.addAll(workUnitState.getSpecProperties());
  }

  /** Single factory for the exception thrown by every rejected mutation. */
  private static UnsupportedOperationException mutationRejected() {
    return new UnsupportedOperationException();
  }

  @Override
  public void setWorkingState(WorkingState state) {
    throw mutationRejected();
  }

  @Override
  public void setActualHighWatermark(Watermark watermark) {
    throw mutationRejected();
  }

  @Deprecated
  @Override
  public void setHighWaterMark(long value) {
    throw mutationRejected();
  }

  @Override
  public void setProp(String key, Object value) {
    throw mutationRejected();
  }

  @Override
  public void addAll(Properties properties) {
    throw mutationRejected();
  }

  @Override
  public void addAllIfNotExist(Properties properties) {
    throw mutationRejected();
  }

  @Override
  public void overrideWith(Properties properties) {
    throw mutationRejected();
  }

  @Override
  public void setId(String id) {
    throw mutationRejected();
  }

  @Override
  public void readFields(DataInput in) throws IOException {
    throw mutationRejected();
  }

  @Override
  public void backoffActualHighWatermark() {
    throw mutationRejected();
  }

  @Override
  public synchronized void appendToListProp(String key, String value) {
    throw mutationRejected();
  }
}
| 1,970 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/configuration/WorkUnitState.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.configuration;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Properties;
import java.util.Set;
import com.google.common.base.Strings;
import com.google.common.collect.Sets;
import com.google.gson.Gson;
import com.google.gson.JsonElement;
import com.google.gson.JsonParser;
import org.apache.gobblin.broker.gobblin_scopes.GobblinScopeTypes;
import org.apache.gobblin.broker.iface.SharedResourcesBroker;
import org.apache.gobblin.broker.iface.SubscopedBrokerBuilder;
import org.apache.gobblin.source.extractor.Watermark;
import org.apache.gobblin.source.workunit.Extract;
import org.apache.gobblin.source.workunit.ImmutableWorkUnit;
import org.apache.gobblin.source.workunit.WorkUnit;
import javax.annotation.Nullable;
import lombok.Getter;
/**
* This class encapsulates a {@link WorkUnit} instance and additionally holds all the
* task runtime state of that {@link WorkUnit}.
*
* <p>
* Properties set in the encapsulated {@link WorkUnit} can be overridden at runtime,
* with the original values available through the {@link #getWorkunit()} method.
* Getters will return values set at task runtime if available, or the corresponding
* values from encapsulated {@link WorkUnit} if they are not set at task runtime.
* </p>
*
* @author kgoodhop
*/
public class WorkUnitState extends State {
// Prefix under which addFinalConstructState() stores construct-reported final-state properties.
private static final String FINAL_CONSTRUCT_STATE_PREFIX = "construct.final.state.";
// Shared JSON helpers used to (de)serialize watermark values stored as string properties.
private static final JsonParser JSON_PARSER = new JsonParser();
private static final Gson GSON = new Gson();
// Delegates to the wrapped WorkUnit.
public String getOutputFilePath() {
return this.workUnit.getOutputFilePath();
}
/**
 * Runtime state of the {@link WorkUnit}.
 *
 * <p>
 * The final state indicating successfully completed work is COMMITTED.
 * SUCCESSFUL only implies a task has finished, but doesn't imply the work
 * has been committed.
 * </p>
 */
public enum WorkingState {
PENDING,
RUNNING,
SUCCESSFUL,
COMMITTED,
FAILED,
CANCELLED,
SKIPPED
}
// The WorkUnit this runtime state wraps; only exposed via an immutable view (getWorkunit()).
private final WorkUnit workUnit;
// Job-level properties; lowest-priority source in property resolution (see getProp()).
@Getter
private State jobState;
// Task-scoped broker; transient because it cannot be serialized and is null after deserialization.
transient private final SharedResourcesBroker<GobblinScopeTypes> taskBroker;
/**
 * Default constructor used for deserialization.
 */
public WorkUnitState() {
this.workUnit = WorkUnit.createEmpty();
this.jobState = new State();
// Not available on deserialization
this.taskBroker = null;
}
/**
 * Constructor.
 *
 * @param workUnit a {@link WorkUnit} instance based on which a {@link WorkUnitState} instance is constructed
 * @deprecated It is recommended to use {@link #WorkUnitState(WorkUnit, State)} rather than combining properties
 * in the job state into the workunit.
 */
@Deprecated
public WorkUnitState(WorkUnit workUnit) {
this.workUnit = workUnit;
this.jobState = new State();
this.taskBroker = null;
}
/**
 * If creating a {@link WorkUnitState} for use by a task, use {@link #WorkUnitState(WorkUnit, State, SharedResourcesBroker)}
 * instead.
 */
public WorkUnitState(WorkUnit workUnit, State jobState) {
this(workUnit, jobState, buildTaskBroker(null, jobState, workUnit));
}
public WorkUnitState(WorkUnit workUnit, State jobState, SubscopedBrokerBuilder<GobblinScopeTypes, ?> taskBrokerBuilder) {
this(workUnit, jobState, buildTaskBroker(taskBrokerBuilder, jobState, workUnit));
}
public WorkUnitState(WorkUnit workUnit, State jobState, SharedResourcesBroker<GobblinScopeTypes> taskBroker) {
this.workUnit = workUnit;
this.jobState = jobState;
this.taskBroker = taskBroker;
}
// Builds the task broker from the builder, or yields null when no builder is supplied.
// NOTE(review): the jobState and workUnit parameters are currently unused here.
private static SharedResourcesBroker<GobblinScopeTypes> buildTaskBroker(
SubscopedBrokerBuilder<GobblinScopeTypes, ?> taskBrokerBuilder, State jobState, WorkUnit workUnit) {
return taskBrokerBuilder == null ? null : taskBrokerBuilder.build();
}
/**
 * Get a {@link SharedResourcesBroker} scoped for this task.
 *
 * @throws UnsupportedOperationException if no task broker was supplied at construction time
 */
public SharedResourcesBroker<GobblinScopeTypes> getTaskBroker() {
if (this.taskBroker == null) {
throw new UnsupportedOperationException("Task broker is only available within a task. If this exception was thrown "
+ "from within a task, the JobLauncher did not specify a task broker.");
}
return this.taskBroker;
}
/**
 * Get a {@link SharedResourcesBroker} scoped for this task or null if it doesn't exist. This is used for internal calls.
 */
@Nullable public SharedResourcesBroker<GobblinScopeTypes> getTaskBrokerNullable() {
return this.taskBroker;
}
/**
 * Get an {@link ImmutableWorkUnit} that wraps the internal {@link WorkUnit}.
 *
 * @return an {@link ImmutableWorkUnit} that wraps the internal {@link WorkUnit}
 */
public WorkUnit getWorkunit() {
return new ImmutableWorkUnit(this.workUnit);
}
/**
 * Override {@link #workUnit}'s properties with new commonProps and specProps.
 */
public void setWuProperties(Properties commonProps, Properties specProps) {
this.workUnit.setProps(commonProps, specProps);
}
/**
 * Get the current runtime state of the {@link WorkUnit}.
 *
 * @return {@link WorkingState} of the {@link WorkUnit}; PENDING if never set
 */
public WorkingState getWorkingState() {
return WorkingState
.valueOf(getProp(ConfigurationKeys.WORK_UNIT_WORKING_STATE_KEY, WorkingState.PENDING.toString()));
}
/**
 * Set the current runtime state of the {@link WorkUnit}.
 *
 * @param state {@link WorkingState} of the {@link WorkUnit}
 */
public void setWorkingState(WorkingState state) {
setProp(ConfigurationKeys.WORK_UNIT_WORKING_STATE_KEY, state.toString());
}
/**
 * Get the actual high {@link Watermark} as a {@link JsonElement}.
 *
 * @return a {@link JsonElement} representing the actual high {@link Watermark},
 * or {@code null} if the actual high {@link Watermark} is not set.
 */
public JsonElement getActualHighWatermark() {
if (!contains(ConfigurationKeys.WORK_UNIT_STATE_ACTUAL_HIGH_WATER_MARK_KEY)) {
return null;
}
return JSON_PARSER.parse(getProp(ConfigurationKeys.WORK_UNIT_STATE_ACTUAL_HIGH_WATER_MARK_KEY));
}
/**
 * Get the actual high {@link Watermark}. If the {@code WorkUnitState} does not contain the actual high watermark
 * (which may be caused by task failures), the low watermark in the corresponding {@link WorkUnit} will be returned.
 *
 * @param watermarkClass the watermark class for this {@code WorkUnitState}.
 * @param gson a {@link Gson} object used to deserialize the watermark.
 * @return the actual high watermark in this {@code WorkUnitState}. null is returned if this {@code WorkUnitState}
 * does not contain an actual high watermark, and the corresponding {@code WorkUnit} does not contain a low
 * watermark.
 */
public <T extends Watermark> T getActualHighWatermark(Class<T> watermarkClass, Gson gson) {
JsonElement json = getActualHighWatermark();
if (json == null) {
// Fall back to the work unit's low watermark when no actual high watermark was recorded.
json = this.workUnit.getLowWatermark();
if (json == null) {
return null;
}
}
return gson.fromJson(json, watermarkClass);
}
/**
 * Get the actual high {@link Watermark}. If the {@code WorkUnitState} does not contain the actual high watermark
 * (which may be caused by task failures), the low watermark in the corresponding {@link WorkUnit} will be returned.
 *
 * <p>A default {@link Gson} object will be used to deserialize the watermark.</p>
 *
 * @param watermarkClass the watermark class for this {@code WorkUnitState}.
 * @return the actual high watermark in this {@code WorkUnitState}. null is returned if this {@code WorkUnitState}
 * does not contain an actual high watermark, and the corresponding {@code WorkUnit} does not contain a low
 * watermark.
 */
public <T extends Watermark> T getActualHighWatermark(Class<T> watermarkClass) {
return getActualHighWatermark(watermarkClass, GSON);
}
/**
 * This method should set the actual, runtime high {@link Watermark} for this {@link WorkUnitState}. A high
 * {@link Watermark} indicates that all data for the source has been pulled up to a specific point.
 *
 * <p>
 * This method should be called inside the {@link org.apache.gobblin.source.extractor.Extractor} class, during the initialization
 * of the class, before any calls to {@link org.apache.gobblin.source.extractor.Extractor#readRecord(Object)} are executed. This
 * method keeps a local point to the given {@link Watermark} and expects the following invariant to always be upheld.
 * The invariant for this {@link Watermark} is that it should cover all records up to and including the most recent
 * record returned by {@link org.apache.gobblin.source.extractor.Extractor#readRecord(Object)}.
 * </p>
 * <p>
 * The {@link Watermark} set in this method may be polled by the framework multiple times, in order to track the
 * progress of how the {@link Watermark} changes. This is important for reporting percent completion of a
 * {@link org.apache.gobblin.source.workunit.WorkUnit}.
 * </p>
 *
 * TODO - Once we are ready to make a backwards incompatible change to the {@link org.apache.gobblin.source.extractor.Extractor}
 * interface, this method should become part of the {@link org.apache.gobblin.source.extractor.Extractor} interface. For example,
 * a method such as getCurrentHighWatermark() should be added.
 */
public void setActualHighWatermark(Watermark watermark) {
/**
 * TODO
 *
 * Hack until a state-store migration can be done. The watermark is converted to a {@link String} and then stored
 * internally in via a configuration key. Once a state-store migration can be done, the {@link Watermark} can be
 * stored as Binary JSON.
 */
setProp(ConfigurationKeys.WORK_UNIT_STATE_ACTUAL_HIGH_WATER_MARK_KEY, watermark.toJson().toString());
}
/**
 * Backoff the actual high watermark to the low watermark returned by {@link WorkUnit#getLowWatermark()}.
 * A no-op when the work unit has no low watermark.
 */
public void backoffActualHighWatermark() {
JsonElement lowWatermark = this.workUnit.getLowWatermark();
if (lowWatermark == null) {
return;
}
setProp(ConfigurationKeys.WORK_UNIT_STATE_ACTUAL_HIGH_WATER_MARK_KEY, lowWatermark.toString());
}
/**
 * Get the high watermark as set in {@link org.apache.gobblin.source.extractor.Extractor}.
 *
 * @return high watermark
 * @deprecated use {@link #getActualHighWatermark}.
 */
@Deprecated
public long getHighWaterMark() {
return getPropAsLong(ConfigurationKeys.WORK_UNIT_STATE_RUNTIME_HIGH_WATER_MARK,
ConfigurationKeys.DEFAULT_WATERMARK_VALUE);
}
/**
 * Set the high watermark.
 *
 * @param value high watermark
 * @deprecated use {@link #setActualHighWatermark(Watermark)}.
 */
@Deprecated
public void setHighWaterMark(long value) {
setProp(ConfigurationKeys.WORK_UNIT_STATE_RUNTIME_HIGH_WATER_MARK, value);
}
// Merged view of all three property layers. Insertion order gives precedence:
// task-runtime (super) overrides work-unit values, which override job-state values.
@Override
public Properties getProperties() {
Properties props = new Properties();
props.putAll(this.jobState.getProperties());
props.putAll(this.workUnit.getProperties());
props.putAll(super.getProperties());
return props;
}
// Resolution order: task-runtime value first, then the work unit, then the job state.
@Override
public String getProp(String key) {
String value = super.getProp(key);
if (value == null) {
value = this.workUnit.getProp(key);
}
if (value == null) {
value = this.jobState.getProp(key);
}
return value;
}
// Same resolution order as getProp(String); the default applies only at the job-state layer.
@Override
public String getProp(String key, String def) {
String value = super.getProp(key);
if (value == null) {
value = this.workUnit.getProp(key);
}
if (value == null) {
value = this.jobState.getProp(key, def);
}
return value;
}
/**
 * @deprecated Use {@link #getProp(String)}
 */
@Deprecated
@Override
protected String getProperty(String key) {
return getProp(key);
}
/**
 * @deprecated Use {@link #getProp(String, String)}
 */
@Deprecated
@Override
protected String getProperty(String key, String def) {
return getProp(key, def);
}
// Union of property names across all three layers.
@Override
public Set<String> getPropertyNames() {
Set<String> set = Sets.newHashSet(super.getPropertyNames());
set.addAll(this.workUnit.getPropertyNames());
set.addAll(this.jobState.getPropertyNames());
return set;
}
// True if any of the three layers contains the key.
@Override
public boolean contains(String key) {
return super.contains(key) || this.workUnit.contains(key) || this.jobState.contains(key);
}
// Removes the key from all three layers.
@Override
public void removeProp(String key) {
super.removeProp(key);
this.workUnit.removeProp(key);
this.jobState.removeProp(key);
}
// Removes matching keys from all three layers.
@Override
public void removePropsWithPrefix(String prefix) {
super.removePropsWithPrefix(prefix);
this.workUnit.removePropsWithPrefix(prefix);
this.jobState.removePropsWithPrefix(prefix);
}
/**
 * Get the {@link org.apache.gobblin.source.workunit.Extract} associated with the {@link WorkUnit}.
 *
 * @return a defensive copy of the {@link org.apache.gobblin.source.workunit.Extract} associated with the {@link WorkUnit}
 */
public Extract getExtract() {
return new Extract(this.workUnit.getExtract());
}
/**
 * Get properties set in the previous run for the same table as the {@link WorkUnit}.
 *
 * @return properties as a {@link State} object
 */
public State getPreviousTableState() {
return getExtract().getPreviousTableState();
}
public void setJobState(State jobState) {
this.jobState = jobState;
}
// Serialization order (work unit first, then this state's own properties) must match write().
@Override
public void readFields(DataInput in) throws IOException {
this.workUnit.readFields(in);
super.readFields(in);
}
@Override
public void write(DataOutput out) throws IOException {
this.workUnit.write(out);
super.write(out);
}
@Override
public boolean equals(Object object) {
if (!(object instanceof WorkUnitState)) {
return false;
}
WorkUnitState other = (WorkUnitState) object;
return ((this.workUnit == null && other.workUnit == null)
|| (this.workUnit != null && this.workUnit.equals(other.workUnit)))
&& ((this.jobState == null && other.jobState == null)
|| (this.jobState != null && this.jobState.equals(other.jobState)))
&& super.equals(other);
}
// NOTE(review): hashCode omits jobState although equals compares it. The equals/hashCode
// contract still holds (equal objects have equal workUnits and super state), but the two
// methods are asymmetric — confirm whether jobState was left out deliberately.
@Override
public int hashCode() {
final int prime = 31;
int result = super.hashCode();
result = prime * result + (this.workUnit == null ? 0 : this.workUnit.hashCode());
return result;
}
@Override
public String toString() {
return super.toString() + "\nWorkUnit: " + getWorkunit().toString() + "\nExtract: " + getExtract().toString()
+ "\nJobState: " + this.jobState.toString();
}
/**
 * Adds all properties from {@link org.apache.gobblin.configuration.State} to this {@link org.apache.gobblin.configuration.WorkUnitState}.
 *
 * <p>
 * A property with name "property" will be added to this object with the key
 * "{@link #FINAL_CONSTRUCT_STATE_PREFIX}[.<infix>].property"
 * </p>
 *
 * @param infix Optional infix used for the name of the property in the {@link org.apache.gobblin.configuration.WorkUnitState}.
 * @param finalConstructState {@link org.apache.gobblin.configuration.State} for which all properties should be added to this
 * object.
 */
public void addFinalConstructState(String infix, State finalConstructState) {
for (String property : finalConstructState.getPropertyNames()) {
if (Strings.isNullOrEmpty(infix)) {
setProp(FINAL_CONSTRUCT_STATE_PREFIX + property, finalConstructState.getProp(property));
} else {
setProp(FINAL_CONSTRUCT_STATE_PREFIX + infix + "." + property, finalConstructState.getProp(property));
}
}
}
/**
 * Builds a State containing all properties added with {@link #addFinalConstructState}
 * to this {@link org.apache.gobblin.configuration.WorkUnitState}. All such properties will be stripped of
 * {@link #FINAL_CONSTRUCT_STATE_PREFIX} but not of any infixes.
 *
 * <p>
 * For example, if state={sample.property: sampleValue}
 * then
 * <pre>
 * {@code
 * this.addFinalConstructState("infix",state);
 * this.getFinalConstructState();
 * }
 * </pre>
 * will return state={infix.sample.property: sampleValue}
 * </p>
 *
 * @return State containing all properties added with {@link #addFinalConstructState}.
 */
public State getFinalConstructStates() {
State constructState = new State();
for (String property : getPropertyNames()) {
if (property.startsWith(FINAL_CONSTRUCT_STATE_PREFIX)) {
constructState.setProp(property.substring(FINAL_CONSTRUCT_STATE_PREFIX.length()), getProp(property));
}
}
return constructState;
}
}
| 1,971 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/configuration/CombinedWorkUnitAndDatasetStateFunctional.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.configuration;
/**
 * Functional handle for fetching previously-persisted {@link WorkUnitState}s and dataset states,
 * optionally narrowed to a single dataset URN.
 */
@FunctionalInterface
public interface CombinedWorkUnitAndDatasetStateFunctional {

  /**
   * @param datasetUrn the dataset urn; if null or empty, the latest {@link WorkUnitState}s and
   *                   DatasetStates of all datasetUrns in the state store are returned, otherwise
   *                   only the previous {@link WorkUnitState} and DatasetState of that datasetUrn
   * @return an instance of {@link CombinedWorkUnitAndDatasetState}
   * @throws Exception if the lookup fails
   */
  CombinedWorkUnitAndDatasetState getCombinedWorkUnitAndDatasetState(String datasetUrn)
      throws Exception;
}
| 1,972 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/configuration/CombinedWorkUnitAndDatasetState.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.configuration;
import java.util.List;
import java.util.Map;
import lombok.AllArgsConstructor;
import lombok.Getter;
/**
 * A class that encapsulates {@link WorkUnitState} and DatasetStates.
 */
public class CombinedWorkUnitAndDatasetState {

  private List<WorkUnitState> previousWorkUnitStates;
  private Map<String, ? extends SourceState> previousDatasetStatesByUrns;

  public CombinedWorkUnitAndDatasetState(List<WorkUnitState> previousWorkUnitStates,
      Map<String, ? extends SourceState> previousDatasetStatesByUrns) {
    this.previousWorkUnitStates = previousWorkUnitStates;
    this.previousDatasetStatesByUrns = previousDatasetStatesByUrns;
  }

  /** @return the work-unit states from the previous run */
  public List<WorkUnitState> getPreviousWorkUnitStates() {
    return this.previousWorkUnitStates;
  }

  /** @return the previous dataset states keyed by dataset URN */
  public Map<String, ? extends SourceState> getPreviousDatasetStatesByUrns() {
    return this.previousDatasetStatesByUrns;
  }
}
| 1,973 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/configuration/ConfigurationKeys.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.configuration;
import java.nio.charset.Charset;
import java.util.concurrent.TimeUnit;
import com.google.common.base.Charsets;
/**
* A central place for all Gobblin configuration property keys.
*/
public class ConfigurationKeys {
/**
* System configuration properties.
*/
// Default file system URI for all file storage
// Overwritable by more specific configuration properties
public static final String FS_URI_KEY = "fs.uri";
// Local file system URI
public static final String LOCAL_FS_URI = "file:///";
// Comma-separated list of framework jars to include
public static final String FRAMEWORK_JAR_FILES_KEY = "framework.jars";
public static final String PST_TIMEZONE_NAME = "America/Los_Angeles";
/**
* State store configuration properties.
*/
// State store type. References an alias or factory class name
public static final String STATE_STORE_TYPE_KEY = "state.store.type";
public static final String DATASET_STATE_STORE_PREFIX = "dataset";
public static final String DATASET_STATE_STORE_TYPE_KEY = DATASET_STATE_STORE_PREFIX + ".state.store.type";
public static final String STATE_STORE_FACTORY_CLASS_KEY = "state.store.factory.class";
public static final String INTERMEDIATE_STATE_STORE_PREFIX = "intermediate";
public static final String INTERMEDIATE_STATE_STORE_TYPE_KEY = INTERMEDIATE_STATE_STORE_PREFIX + ".state.store.type";
public static final String DEFAULT_STATE_STORE_TYPE = "fs";
public static final String STATE_STORE_TYPE_NOOP = "noop";
// are the job.state files stored using the state store?
public static final String JOB_STATE_IN_STATE_STORE = "state.store.jobStateInStateStore";
public static final boolean DEFAULT_JOB_STATE_IN_STATE_STORE = false;
public static final String CONFIG_RUNTIME_PREFIX = "gobblin.config.runtime.";
// Root directory where task state files are stored
public static final String STATE_STORE_ROOT_DIR_KEY = "state.store.dir";
// File system URI for file-system-based task store
public static final String STATE_STORE_FS_URI_KEY = "state.store.fs.uri";
// Thread pool size for listing dataset state store
public static final String THREADPOOL_SIZE_OF_LISTING_FS_DATASET_STATESTORE =
"state.store.threadpoolSizeOfListingFsDatasetStateStore";
public static final int DEFAULT_THREADPOOL_SIZE_OF_LISTING_FS_DATASET_STATESTORE = 10;
// Enable / disable state store
public static final String STATE_STORE_ENABLED = "state.store.enabled";
public static final String STATE_STORE_COMPRESSED_VALUES_KEY = "state.store.compressedValues";
public static final boolean DEFAULT_STATE_STORE_COMPRESSED_VALUES = true;
// DB state store configuration
public static final String STATE_STORE_DB_JDBC_DRIVER_KEY = "state.store.db.jdbc.driver";
public static final String DEFAULT_STATE_STORE_DB_JDBC_DRIVER = "com.mysql.cj.jdbc.Driver";
public static final String STATE_STORE_DB_URL_KEY = "state.store.db.url";
public static final String STATE_STORE_DB_USER_KEY = "state.store.db.user";
public static final String STATE_STORE_DB_PASSWORD_KEY = "state.store.db.password";
public static final String STATE_STORE_DB_TABLE_KEY = "state.store.db.table";
public static final String DEFAULT_STATE_STORE_DB_TABLE = "gobblin_job_state";
public static final String MYSQL_GET_MAX_RETRIES = "mysql.get.max.retries";
public static final int DEFAULT_MYSQL_GET_MAX_RETRIES = 3;
public static final String DATASETURN_STATESTORE_NAME_PARSER = "state.store.datasetUrnStateStoreNameParser";
/**
* Job scheduler configuration properties.
*/
// Job retriggering
public static final String JOB_RETRIGGERING_ENABLED = "job.retriggering.enabled";
public static final String DEFAULT_JOB_RETRIGGERING_ENABLED = "true";
public static final String LOAD_SPEC_BATCH_SIZE = "load.spec.batch.size";
public static final int DEFAULT_LOAD_SPEC_BATCH_SIZE = 500;
public static final String SKIP_SCHEDULING_FLOWS_AFTER_NUM_DAYS = "skip.scheduling.flows.after.num.days";
public static final int DEFAULT_NUM_DAYS_TO_SKIP_AFTER = 365;
// Mysql Dag Action Store configuration
public static final String MYSQL_DAG_ACTION_STORE_PREFIX = "MysqlDagActionStore.";
public static final String MYSQL_DAG_ACTION_STORE_TABLE_RETENTION_PERIOD_SECONDS_KEY = MYSQL_DAG_ACTION_STORE_PREFIX + "retentionPeriodSeconds";
public static final long DEFAULT_MYSQL_DAG_ACTION_STORE_TABLE_RETENTION_PERIOD_SEC_KEY = 3 * 24 * 60 * 60; // (3 days in seconds)
// Scheduler lease determination store configuration
public static final String MYSQL_LEASE_ARBITER_PREFIX = "MysqlMultiActiveLeaseArbiter";
public static final String MULTI_ACTIVE_SCHEDULER_CONSTANTS_DB_TABLE_KEY = MYSQL_LEASE_ARBITER_PREFIX + ".constantsTable";
public static final String DEFAULT_MULTI_ACTIVE_SCHEDULER_CONSTANTS_DB_TABLE = "gobblin_multi_active_scheduler_constants_store";
public static final String SCHEDULER_LEASE_DETERMINATION_STORE_DB_TABLE_KEY = MYSQL_LEASE_ARBITER_PREFIX + ".schedulerLeaseArbiter.store.db.table";
public static final String DEFAULT_SCHEDULER_LEASE_DETERMINATION_STORE_DB_TABLE = "gobblin_scheduler_lease_determination_store";
public static final String SCHEDULER_LEASE_DETERMINATION_TABLE_RETENTION_PERIOD_MILLIS_KEY = MYSQL_LEASE_ARBITER_PREFIX + ".retentionPeriodMillis";
public static final long DEFAULT_SCHEDULER_LEASE_DETERMINATION_TABLE_RETENTION_PERIOD_MILLIS = 3 * 24 * 60 * 60 * 1000; // (3 days in ms)
// Refers to the event we originally tried to acquire a lease which achieved `consensus` among participants through
// the database
// Consensus event time (millis) preserved by the multi-active lease arbiter for a scheduler trigger
public static final String SCHEDULER_PRESERVED_CONSENSUS_EVENT_TIME_MILLIS_KEY = "preservedConsensusEventTimeMillis";
// Time the reminder event Trigger is supposed to fire from the scheduler
public static final String SCHEDULER_EXPECTED_REMINDER_TIME_MILLIS_KEY = "expectedReminderTimeMillis";
// Event time of flow action to orchestrate using the multi-active lease arbiter
public static final String ORCHESTRATOR_TRIGGER_EVENT_TIME_MILLIS_KEY = "orchestratorTriggerEventTimeMillis";
// Sentinel value indicating the orchestrator trigger event time was never set
public static final String ORCHESTRATOR_TRIGGER_EVENT_TIME_NEVER_SET_VAL = "-1";
// Flags a trigger as a reminder (re-fired) event rather than an original one
public static final String FLOW_IS_REMINDER_EVENT_KEY = "isReminderEvent";
// Tolerance (millis) applied when comparing scheduler event times in the lease arbiter
// NOTE(review): exact comparison/dedup semantics live in the MySQL lease arbiter implementation — confirm there
public static final String SCHEDULER_EVENT_EPSILON_MILLIS_KEY = MYSQL_LEASE_ARBITER_PREFIX + ".epsilonMillis";
public static final int DEFAULT_SCHEDULER_EVENT_EPSILON_MILLIS = 2000;
// Note: linger should be on the order of seconds even though we measure in millis
public static final String SCHEDULER_EVENT_LINGER_MILLIS_KEY = MYSQL_LEASE_ARBITER_PREFIX + ".lingerMillis";
public static final int DEFAULT_SCHEDULER_EVENT_LINGER_MILLIS = 90000;
// Upper bound (millis) on backoff between successive lease-acquisition attempts
public static final String SCHEDULER_MAX_BACKOFF_MILLIS_KEY = MYSQL_LEASE_ARBITER_PREFIX + ".maxBackoffMillis";
public static final int DEFAULT_SCHEDULER_MAX_BACKOFF_MILLIS = 10000;
// Job executor thread pool size
public static final String JOB_EXECUTOR_THREAD_POOL_SIZE_KEY = "jobexecutor.threadpool.size";
public static final int DEFAULT_JOB_EXECUTOR_THREAD_POOL_SIZE = 5;
// Job configuration file monitor polling interval in milliseconds
public static final String JOB_CONFIG_FILE_MONITOR_POLLING_INTERVAL_KEY = "jobconf.monitor.interval";
public static final long DEFAULT_JOB_CONFIG_FILE_MONITOR_POLLING_INTERVAL = 30000;
// Setting the polling interval to this value disables the job configuration file monitor
public static final long DISABLED_JOB_CONFIG_FILE_MONITOR_POLLING_INTERVAL = -1L;
// Directory where all job configuration files are stored WHEN ALL confs reside in local FS.
public static final String JOB_CONFIG_FILE_DIR_KEY = "jobconf.dir";
// Fully qualified path under which all job configuration files are stored
public static final String JOB_CONFIG_FILE_GENERAL_PATH_KEY = "jobconf.fullyQualifiedPath";
// Job configuration file extensions (comma-separated list; see default below)
public static final String JOB_CONFIG_FILE_EXTENSIONS_KEY = "jobconf.extensions";
public static final String DEFAULT_JOB_CONFIG_FILE_EXTENSIONS = "pull,job";
// Whether the scheduler should wait for running jobs to complete during shutdown.
// Note this only applies to jobs scheduled by the built-in Quartz-based job scheduler.
public static final String SCHEDULER_WAIT_FOR_JOB_COMPLETION_KEY = "scheduler.wait.for.job.completion";
public static final String DEFAULT_SCHEDULER_WAIT_FOR_JOB_COMPLETION = Boolean.TRUE.toString();
// Per-task timeout in seconds; default is one hour
public static final String TASK_TIMEOUT_SECONDS = "task.timeout.seconds";
public static final long DEFAULT_TASK_TIMEOUT_SECONDS = 60 * 60;
/**
 * Task executor and state tracker configuration properties.
 */
// Size of the thread pool the task executor uses to run tasks
public static final String TASK_EXECUTOR_THREADPOOL_SIZE_KEY = "taskexecutor.threadpool.size";
// Core size of the task state tracker's thread pool
public static final String TASK_STATE_TRACKER_THREAD_POOL_CORE_SIZE_KEY = "tasktracker.threadpool.coresize";
// Core size of the thread pool used to schedule task retries
public static final String TASK_RETRY_THREAD_POOL_CORE_SIZE_KEY = "taskretry.threadpool.coresize";
public static final int DEFAULT_TASK_EXECUTOR_THREADPOOL_SIZE = 2;
public static final int DEFAULT_TASK_STATE_TRACKER_THREAD_POOL_CORE_SIZE = 1;
public static final int DEFAULT_TASK_RETRY_THREAD_POOL_CORE_SIZE = 1;
/**
 * Common flow configuration properties (identity, scheduling and execution options of a flow).
 */
public static final String FLOW_NAME_KEY = "flow.name";
public static final String FLOW_GROUP_KEY = "flow.group";
public static final String FLOW_EDGE_ID_KEY = "flow.edgeId";
public static final String FLOW_DESCRIPTION_KEY = "flow.description";
public static final String FLOW_EXECUTION_ID_KEY = "flow.executionId";
public static final String FLOW_FAILURE_OPTION = "flow.failureOption";
public static final String FLOW_APPLY_RETENTION = "flow.applyRetention";
public static final String FLOW_APPLY_INPUT_RETENTION = "flow.applyInputRetention";
// Whether multiple executions of the same flow may run concurrently
public static final String FLOW_ALLOW_CONCURRENT_EXECUTION = "flow.allowConcurrentExecution";
public static final String FLOW_EXPLAIN_KEY = "flow.explain";
public static final String FLOW_UNSCHEDULE_KEY = "flow.unschedule";
public static final String FLOW_OWNING_GROUP_KEY = "flow.owningGroup";
public static final String FLOW_SPEC_EXECUTOR = "flow.edge.specExecutors";
/**
 * Common topology configuration properties.
 */
public static final String TOPOLOGY_NAME_KEY = "topology.name";
public static final String TOPOLOGY_GROUP_KEY = "topology.group";
public static final String TOPOLOGY_DESCRIPTION_KEY = "topology.description";
/**
 * Common job configuration properties.
 */
public static final String JOB_NAME_KEY = "job.name";
public static final String JOB_GROUP_KEY = "job.group";
public static final String JOB_TAG_KEY = "job.tag";
public static final String JOB_DESCRIPTION_KEY = "job.description";
// Internal counters tracking the current attempt/generation of a job execution
public static final String JOB_CURRENT_ATTEMPTS = "job.currentAttempts";
public static final String JOB_CURRENT_GENERATION = "job.currentGeneration";
// Job launcher type
public static final String JOB_LAUNCHER_TYPE_KEY = "launcher.type";
// Schedule on which the job runs (consumed by the built-in Quartz-based job scheduler)
public static final String JOB_SCHEDULE_KEY = "job.schedule";
public static final String JOB_LISTENERS_KEY = "job.listeners";
// Type of the job lock
public static final String JOB_LOCK_TYPE = "job.lock.type";
//Directory that stores task staging data and task output data.
public static final String TASK_DATA_ROOT_DIR_KEY = "task.data.root.dir";
// Fully qualified class names for the pluggable pipeline stages of a job
public static final String SOURCE_CLASS_KEY = "source.class";
public static final String CONVERTER_CLASSES_KEY = "converter.classes";
public static final String RECORD_STREAM_PROCESSOR_CLASSES_KEY = "recordStreamProcessor.classes";
public static final String FORK_OPERATOR_CLASS_KEY = "fork.operator.class";
public static final String DEFAULT_FORK_OPERATOR_CLASS = "org.apache.gobblin.fork.IdentityForkOperator";
public static final String JOB_COMMIT_POLICY_KEY = "job.commit.policy";
public static final String DEFAULT_JOB_COMMIT_POLICY = "full";
// If true, commit of different datasets will be performed in parallel
// only turn on if publisher is thread-safe
public static final String PARALLELIZE_DATASET_COMMIT = "job.commit.parallelize";
public static final boolean DEFAULT_PARALLELIZE_DATASET_COMMIT = false;
/** Only applicable if {@link #PARALLELIZE_DATASET_COMMIT} is true. */
public static final String DATASET_COMMIT_THREADS = "job.commit.parallelCommits";
public static final int DEFAULT_DATASET_COMMIT_THREADS = 20;
public static final String WORK_UNIT_RETRY_POLICY_KEY = "workunit.retry.policy";
public static final String WORK_UNIT_RETRY_ENABLED_KEY = "workunit.retry.enabled";
public static final String WORK_UNIT_CREATION_TIME_IN_MILLIS = "workunit.creation.time.in.millis";
public static final String WORK_UNIT_CREATION_AND_RUN_INTERVAL = "workunit.creation.and.run.interval";
public static final String WORK_UNIT_ENABLE_TRACKING_LOGS = "workunit.enableTrackingLogs";
public static final String JOB_DEPENDENCIES = "job.dependencies";
public static final String JOB_FORK_ON_CONCAT = "job.forkOnConcat";
public static final String JOB_RUN_ONCE_KEY = "job.runonce";
public static final String JOB_DISABLED_KEY = "job.disabled";
// Jars/files to ship with the job, keyed by where they reside (local FS vs. HDFS)
public static final String JOB_JAR_FILES_KEY = "job.jars";
public static final String JOB_LOCAL_FILES_KEY = "job.local.files";
public static final String JOB_HDFS_FILES_KEY = "job.hdfs.files";
public static final String JOB_JAR_HDFS_FILES_KEY = "job.hdfs.jars";
public static final String JOB_LOCK_ENABLED_KEY = "job.lock.enabled";
public static final String JOB_MAX_FAILURES_KEY = "job.max.failures";
public static final int DEFAULT_JOB_MAX_FAILURES = 1;
public static final String MAX_TASK_RETRIES_KEY = "task.maxretries";
public static final int DEFAULT_MAX_TASK_RETRIES = 5;
// Interval between task retries, in seconds (default: 5 minutes)
public static final String TASK_RETRY_INTERVAL_IN_SEC_KEY = "task.retry.intervalinsec";
public static final long DEFAULT_TASK_RETRY_INTERVAL_IN_SEC = 300;
public static final String OVERWRITE_CONFIGS_IN_STATESTORE = "overwrite.configs.in.statestore";
public static final boolean DEFAULT_OVERWRITE_CONFIGS_IN_STATESTORE = false;
// Staging-data cleanup options: per task, by the initializer, or of previous jobs' data
public static final String CLEANUP_STAGING_DATA_PER_TASK = "cleanup.staging.data.per.task";
public static final boolean DEFAULT_CLEANUP_STAGING_DATA_PER_TASK = true;
public static final String CLEANUP_STAGING_DATA_BY_INITIALIZER = "cleanup.staging.data.by.initializer";
public static final String CLEANUP_OLD_JOBS_DATA = "cleanup.old.job.data";
public static final boolean DEFAULT_CLEANUP_OLD_JOBS_DATA = false;
// Maximum number of retries when uploading job jars
public static final String MAXIMUM_JAR_COPY_RETRY_TIMES_KEY = JOB_JAR_FILES_KEY + ".uploading.retry.maximum";
public static final String USER_DEFINED_STATIC_STAGING_DIR = "user.defined.static.staging.dir";
public static final String USER_DEFINED_STAGING_DIR_FLAG = "user.defined.staging.dir.flag";
// Bounds on the task executor's history of queued-task times: max entries and max age (millis)
public static final String QUEUED_TASK_TIME_MAX_SIZE = "taskexecutor.queued_task_time.history.max_size";
public static final int DEFAULT_QUEUED_TASK_TIME_MAX_SIZE = 2048;
public static final String QUEUED_TASK_TIME_MAX_AGE = "taskexecutor.queued_task_time.history.max_age";
public static final long DEFAULT_QUEUED_TASK_TIME_MAX_AGE = TimeUnit.HOURS.toMillis(1);
/**
 * Optional property to specify whether existing data in databases can be overwritten during ingestion jobs
 */
public static final String ALLOW_JDBC_RECORD_OVERWRITE = "allow.jdbc.record.overwrite";
/**
 * Optional property to specify a default Authenticator class for a job
 */
public static final String DEFAULT_AUTHENTICATOR_CLASS = "job.default.authenticator.class";
/** Optional; lets the user specify which template to use, inside a .job file */
public static final String JOB_TEMPLATE_PATH = "job.template";
/**
 * Configuration property used only for job configuration file's template.
 * NOTE: the "ATRRIBUTES" misspelling in the constant name is preserved for source compatibility;
 * the key string itself is spelled correctly.
 */
public static final String REQUIRED_ATRRIBUTES_LIST = "gobblin.template.required_attributes";
/**
 * Configuration for emitting job events
 */
public static final String EVENT_METADATA_GENERATOR_CLASS_KEY = "event.metadata.generator.class";
public static final String DEFAULT_EVENT_METADATA_GENERATOR_CLASS_KEY = "noop";
/**
 * Configuration for dynamic configuration generation
 */
public static final String DYNAMIC_CONFIG_GENERATOR_CLASS_KEY = "dynamicConfigGenerator.class";
public static final String DEFAULT_DYNAMIC_CONFIG_GENERATOR_CLASS_KEY = "noop";
/**
 * Configuration properties used internally (set by the framework, not by users).
 */
public static final String JOB_ID_KEY = "job.id";
public static final String JOB_KEY_KEY = "job.key";
public static final String TASK_ID_KEY = "task.id";
public static final String TASK_KEY_KEY = "task.key";
public static final String TASK_START_TIME_MILLIS_KEY = "task.startTimeMillis";
// NOTE: key string uses a capital 'A' ("task.AttemptId"); preserved for backward compatibility
public static final String TASK_ATTEMPT_ID_KEY = "task.AttemptId";
public static final String JOB_CONFIG_FILE_PATH_KEY = "job.config.path";
public static final String TASK_FAILURE_EXCEPTION_KEY = "task.failure.exception";
public static final String TASK_ISSUES_KEY = "task.issues";
public static final String JOB_FAILURE_EXCEPTION_KEY = "job.failure.exception";
public static final String TASK_RETRIES_KEY = "task.retries";
public static final String TASK_IGNORE_CLOSE_FAILURES = "task.ignoreCloseFailures";
//A boolean config to allow skipping task interrupt on cancellation. Useful for example when thread manages
// a Kafka consumer which when interrupted during a poll() leaves the consumer in a corrupt state that prevents
// the consumer being closed subsequently, leading to a potential resource leak.
public static final String TASK_INTERRUPT_ON_CANCEL = "task.interruptOnCancel";
public static final String JOB_FAILURES_KEY = "job.failures";
public static final String JOB_TRACKING_URL_KEY = "job.tracking.url";
public static final String FORK_STATE_KEY = "fork.state";
public static final String JOB_STATE_FILE_PATH_KEY = "job.state.file.path";
public static final String JOB_STATE_DISTRIBUTED_CACHE_NAME = "job.state.distributed.cache.name";
/**
 * Dataset-related configuration properties.
 */
// This property is used to specify the URN of a dataset a job or WorkUnit extracts data for
public static final String DATASET_URN_KEY = "dataset.urn";
// Reserved URN — presumably the job-level watermark spanning all datasets; verify against consumers
public static final String GLOBAL_WATERMARK_DATASET_URN = "__globalDatasetWatermark";
public static final String DEFAULT_DATASET_URN = "";
/**
 * Work unit related configuration properties.
 */
public static final String WORK_UNIT_LOW_WATER_MARK_KEY = "workunit.low.water.mark";
public static final String WORK_UNIT_HIGH_WATER_MARK_KEY = "workunit.high.water.mark";
// Marks a work unit to be skipped instead of executed
public static final String WORK_UNIT_SKIP_KEY = "workunit.skip";
/**
 * Work unit runtime state related configuration properties.
 */
public static final String WORK_UNIT_WORKING_STATE_KEY = "workunit.working.state";
public static final String WORK_UNIT_STATE_RUNTIME_HIGH_WATER_MARK = "workunit.state.runtime.high.water.mark";
public static final String WORK_UNIT_STATE_ACTUAL_HIGH_WATER_MARK_KEY = "workunit.state.actual.high.water.mark";
public static final String WORK_UNIT_DATE_PARTITION_KEY = "workunit.source.date.partition";
public static final String WORK_UNIT_DATE_PARTITION_NAME = "workunit.source.date.partitionName";
// Whether a work-unit generator failure is fatal to the job.
// NOTE(review): the default constant is named FAST_FAIL while the key says IS_FATAL; kept as-is for compatibility
public static final String WORK_UNIT_GENERATOR_FAILURE_IS_FATAL = "workunit.generator.failure.is.fatal";
public static final boolean DEFAULT_WORK_UNIT_FAST_FAIL_ENABLED = true;
/**
 * Task execution properties.
 */
public static final String TASK_SYNCHRONOUS_EXECUTION_MODEL_KEY = "task.execution.synchronousExecutionModel";
public static final boolean DEFAULT_TASK_SYNCHRONOUS_EXECUTION_MODEL = true;
/**
 * Watermark interval related configuration properties.
 */
public static final String WATERMARK_INTERVAL_VALUE_KEY = "watermark.interval.value";
/**
 * Extract related configuration properties.
 */
public static final String EXTRACT_TABLE_TYPE_KEY = "extract.table.type";
public static final String EXTRACT_NAMESPACE_NAME_KEY = "extract.namespace";
public static final String EXTRACT_TABLE_NAME_KEY = "extract.table.name";
public static final String EXTRACT_EXTRACT_ID_KEY = "extract.extract.id";
// Whether this is a full (as opposed to incremental) extract; default is false (string-valued)
public static final String EXTRACT_IS_FULL_KEY = "extract.is.full";
public static final String DEFAULT_EXTRACT_IS_FULL = "false";
public static final String EXTRACT_FULL_RUN_TIME_KEY = "extract.full.run.time";
public static final String EXTRACT_PRIMARY_KEY_FIELDS_KEY = "extract.primary.key.fields";
public static final String EXTRACT_DELTA_FIELDS_KEY = "extract.delta.fields";
public static final String EXTRACT_SCHEMA = "extract.schema";
public static final String EXTRACT_LIMIT_ENABLED_KEY = "extract.limit.enabled";
public static final boolean DEFAULT_EXTRACT_LIMIT_ENABLED = false;
// Time zone used when generating extract IDs; defaults to UTC
public static final String EXTRACT_ID_TIME_ZONE = "extract.extractIdTimeZone";
public static final String DEFAULT_EXTRACT_ID_TIME_ZONE = "UTC";
// Min/max wait time (millis) for the Salesforce bulk API
public static final String EXTRACT_SALESFORCE_BULK_API_MIN_WAIT_TIME_IN_MILLIS_KEY =
"extract.salesforce.bulkApi.minWaitTimeInMillis";
public static final long DEFAULT_EXTRACT_SALESFORCE_BULK_API_MIN_WAIT_TIME_IN_MILLIS = 60 * 1000L; // 1 min
public static final String EXTRACT_SALESFORCE_BULK_API_MAX_WAIT_TIME_IN_MILLIS_KEY =
"extract.salesforce.bulkApi.maxWaitTimeInMillis";
public static final long DEFAULT_EXTRACT_SALESFORCE_BULK_API_MAX_WAIT_TIME_IN_MILLIS = 10 * 60 * 1000L; // 10 min
/**
 * Converter configuration properties.
 */
public static final String CONVERTER_AVRO_DATE_FORMAT = "converter.avro.date.format";
public static final String CONVERTER_AVRO_DATE_TIMEZONE = "converter.avro.date.timezone";
public static final String CONVERTER_AVRO_TIME_FORMAT = "converter.avro.time.format";
public static final String CONVERTER_AVRO_TIMESTAMP_FORMAT = "converter.avro.timestamp.format";
public static final String CONVERTER_AVRO_BINARY_CHARSET = "converter.avro.binary.charset";
// Maximum number of record-conversion failures tolerated before erroring out (default: 0)
public static final String CONVERTER_AVRO_MAX_CONVERSION_FAILURES = "converter.avro.max.conversion.failures";
public static final long DEFAULT_CONVERTER_AVRO_MAX_CONVERSION_FAILURES = 0;
public static final String CONVERTER_CSV_TO_JSON_DELIMITER = "converter.csv.to.json.delimiter";
public static final String CONVERTER_FILTER_FIELD_NAME = "converter.filter.field";
public static final String CONVERTER_FILTER_FIELD_VALUE = "converter.filter.value";
public static final String CONVERTER_IS_EPOCH_TIME_IN_SECONDS = "converter.is.epoch.time.in.seconds";
public static final String CONVERTER_AVRO_EXTRACTOR_FIELD_PATH = "converter.avro.extractor.field.path";
public static final String CONVERTER_STRING_FILTER_PATTERN = "converter.string.filter.pattern";
public static final String CONVERTER_STRING_SPLITTER_DELIMITER = "converter.string.splitter.delimiter";
// NOTE: key string is "shouldITrimResults" (sic); preserved because configs in the wild depend on it
public static final String CONVERTER_STRING_SPLITTER_SHOULD_TRIM_RESULTS =
"converter.string.splitter.shouldITrimResults";
public static final boolean DEFAULT_CONVERTER_STRING_SPLITTER_SHOULD_TRIM_RESULTS = false;
// Enclosing character for CSV fields; default is the NUL character (i.e. no enclosing char)
public static final String CONVERTER_CSV_TO_JSON_ENCLOSEDCHAR = "converter.csv.to.json.enclosedchar";
public static final String DEFAULT_CONVERTER_CSV_TO_JSON_ENCLOSEDCHAR = "\0";
public static final String CONVERTER_AVRO_FIELD_PICK_FIELDS = "converter.avro.fields";
public static final String CONVERTER_AVRO_JDBC_ENTRY_FIELDS_PAIRS = "converter.avro.jdbc.entry_fields_pairs";
public static final String CONVERTER_SKIP_FAILED_RECORD = "converter.skipFailedRecord";
public static final String CONVERTER_AVRO_SCHEMA_KEY = "converter.avroSchema";
public static final String CONVERTER_IGNORE_FIELDS = "converter.ignoreFields";
/**
 * Fork operator configuration properties.
 */
public static final String FORK_BRANCHES_KEY = "fork.branches";
public static final String FORK_BRANCH_NAME_KEY = "fork.branch.name";
public static final String FORK_BRANCH_ID_KEY = "fork.branch.id";
public static final String DEFAULT_FORK_BRANCH_NAME = "fork_";
// Bounded record queue feeding each fork branch: capacity and enqueue timeout
public static final String FORK_RECORD_QUEUE_CAPACITY_KEY = "fork.record.queue.capacity";
public static final int DEFAULT_FORK_RECORD_QUEUE_CAPACITY = 100;
public static final String FORK_RECORD_QUEUE_TIMEOUT_KEY = "fork.record.queue.timeout";
public static final long DEFAULT_FORK_RECORD_QUEUE_TIMEOUT = 1000;
// Unit of the queue timeout above; defaults to milliseconds
public static final String FORK_RECORD_QUEUE_TIMEOUT_UNIT_KEY = "fork.record.queue.timeout.unit";
public static final String DEFAULT_FORK_RECORD_QUEUE_TIMEOUT_UNIT = TimeUnit.MILLISECONDS.name();
// Max minutes to wait for forks to finish.
// NOTE: the "MININUTES" misspelling in the constant name is preserved for source compatibility.
public static final String FORK_MAX_WAIT_MININUTES = "fork.max.wait.minutes";
public static final long DEFAULT_FORK_MAX_WAIT_MININUTES = 60;
// Interval (millis) between checks for fork completion
public static final String FORK_FINISHED_CHECK_INTERVAL = "fork.finished.check.interval";
public static final long DEFAULT_FORK_FINISHED_CHECK_INTERVAL = 1000;
public static final String FORK_CLOSE_WRITER_ON_COMPLETION = "fork.closeWriterOnCompletion";
public static final boolean DEFAULT_FORK_CLOSE_WRITER_ON_COMPLETION = false;
/**
 * Writer configuration properties. All keys share the {@code writer.} prefix.
 */
public static final String WRITER_PREFIX = "writer";
public static final String WRITER_DESTINATION_TYPE_KEY = WRITER_PREFIX + ".destination.type";
public static final String WRITER_OUTPUT_FORMAT_KEY = WRITER_PREFIX + ".output.format";
public static final String WRITER_FILE_SYSTEM_URI = WRITER_PREFIX + ".fs.uri";
// Staging location: records are written here first, then moved to the output dir on commit
public static final String WRITER_STAGING_DIR = WRITER_PREFIX + ".staging.dir";
public static final String WRITER_STAGING_TABLE = WRITER_PREFIX + ".staging.table";
public static final String WRITER_TRUNCATE_STAGING_TABLE = WRITER_PREFIX + ".truncate.staging.table";
public static final String WRITER_OUTPUT_DIR = WRITER_PREFIX + ".output.dir";
public static final String WRITER_BUILDER_CLASS = WRITER_PREFIX + ".builder.class";
public static final String DEFAULT_WRITER_BUILDER_CLASS = "org.apache.gobblin.writer.AvroDataWriterBuilder";
public static final String WRITER_FILE_NAME = WRITER_PREFIX + ".file.name";
public static final String WRITER_FILE_PATH = WRITER_PREFIX + ".file.path";
public static final String WRITER_FILE_PATH_TYPE = WRITER_PREFIX + ".file.path.type";
// Output file ownership and HDFS placement attributes
public static final String WRITER_FILE_OWNER = WRITER_PREFIX + ".file.owner";
public static final String WRITER_FILE_GROUP = WRITER_PREFIX + ".file.group";
public static final String WRITER_FILE_REPLICATION_FACTOR = WRITER_PREFIX + ".file.replication.factor";
public static final String WRITER_FILE_BLOCK_SIZE = WRITER_PREFIX + ".file.block.size";
public static final String WRITER_FILE_PERMISSIONS = WRITER_PREFIX + ".file.permissions";
public static final String WRITER_DIR_PERMISSIONS = WRITER_PREFIX + ".dir.permissions";
public static final String WRITER_BUFFER_SIZE = WRITER_PREFIX + ".buffer.size";
public static final String WRITER_PRESERVE_FILE_NAME = WRITER_PREFIX + ".preserve.file.name";
public static final String WRITER_DEFLATE_LEVEL = WRITER_PREFIX + ".deflate.level";
public static final String WRITER_CODEC_TYPE = WRITER_PREFIX + ".codec.type";
public static final String WRITER_EAGER_INITIALIZATION_KEY = WRITER_PREFIX + ".eager.initialization";
public static final String WRITER_PARTITIONER_CLASS = WRITER_PREFIX + ".partitioner.class";
public static final String WRITER_SKIP_NULL_RECORD = WRITER_PREFIX + ".skipNullRecord";
public static final boolean DEFAULT_WRITER_EAGER_INITIALIZATION = false;
public static final String WRITER_GROUP_NAME = WRITER_PREFIX + ".group.name";
public static final String DEFAULT_WRITER_FILE_BASE_NAME = "part";
// Deflate level 9 = maximum compression; buffer size in bytes
public static final int DEFAULT_DEFLATE_LEVEL = 9;
public static final int DEFAULT_BUFFER_SIZE = 4096;
public static final String DEFAULT_WRITER_FILE_PATH_TYPE = "default";
public static final String SIMPLE_WRITER_DELIMITER = "simple.writer.delimiter";
public static final String SIMPLE_WRITER_PREPEND_SIZE = "simple.writer.prepend.size";
public static final String WRITER_ADD_TASK_TIMESTAMP = WRITER_PREFIX + ".addTaskTimestamp";
// Internal use only - used to send metadata to publisher
public static final String WRITER_METADATA_KEY = WRITER_PREFIX + "._internal.metadata";
public static final String WRITER_PARTITION_PATH_KEY = WRITER_PREFIX + "._internal.partition.path";
/**
 * Writer configuration properties used internally (populated by the framework at runtime).
 */
public static final String WRITER_FINAL_OUTPUT_FILE_PATHS = WRITER_PREFIX + ".final.output.file.paths";
public static final String WRITER_RECORDS_WRITTEN = WRITER_PREFIX + ".records.written";
public static final String WRITER_BYTES_WRITTEN = WRITER_PREFIX + ".bytes.written";
public static final String WRITER_EARLIEST_TIMESTAMP = WRITER_PREFIX + ".earliest.timestamp";
public static final String WRITER_AVERAGE_TIMESTAMP = WRITER_PREFIX + ".average.timestamp";
public static final String WRITER_COUNT_METRICS_FROM_FAILED_TASKS = WRITER_PREFIX + ".jobTaskSummary.countMetricsFromFailedTasks";
/**
 * Configuration properties used by the quality checker.
 */
public static final String QUALITY_CHECKER_PREFIX = "qualitychecker";
// Task-level and row-level policy lists and their types
public static final String TASK_LEVEL_POLICY_LIST = QUALITY_CHECKER_PREFIX + ".task.policies";
public static final String TASK_LEVEL_POLICY_LIST_TYPE = QUALITY_CHECKER_PREFIX + ".task.policy.types";
public static final String ROW_LEVEL_POLICY_LIST = QUALITY_CHECKER_PREFIX + ".row.policies";
public static final String ROW_LEVEL_POLICY_LIST_TYPE = QUALITY_CHECKER_PREFIX + ".row.policy.types";
// File where rows that fail row-level policies are written
public static final String ROW_LEVEL_ERR_FILE = QUALITY_CHECKER_PREFIX + ".row.err.file";
public static final String QUALITY_CHECKER_TIMEZONE = QUALITY_CHECKER_PREFIX + ".timezone";
public static final String DEFAULT_QUALITY_CHECKER_TIMEZONE = PST_TIMEZONE_NAME;
public static final String CLEAN_ERR_DIR = QUALITY_CHECKER_PREFIX + ".clean.err.dir";
public static final boolean DEFAULT_CLEAN_ERR_DIR = false;
/** Set the approximate max number of records to write in err_file for each task. Note the actual number of records
 * written may be anything from 0 to about the value set + 100. */
public static final String ROW_LEVEL_ERR_FILE_RECORDS_PER_TASK =
QUALITY_CHECKER_PREFIX + ".row.errFile.recordsPerTask";
public static final long DEFAULT_ROW_LEVEL_ERR_FILE_RECORDS_PER_TASK = 1000000;
/**
 * Configuration properties used by the row count policies.
 */
public static final String EXTRACTOR_ROWS_EXTRACTED = QUALITY_CHECKER_PREFIX + ".rows.extracted";
public static final String EXTRACTOR_ROWS_EXPECTED = QUALITY_CHECKER_PREFIX + ".rows.expected";
public static final String WRITER_ROWS_WRITTEN = QUALITY_CHECKER_PREFIX + ".rows.written";
public static final String ROW_COUNT_RANGE = QUALITY_CHECKER_PREFIX + ".row.count.range";
/**
 * Configuration properties for the task status.
 */
// Interval (millis) at which task status is reported; default 30s
public static final String TASK_STATUS_REPORT_INTERVAL_IN_MS_KEY = "task.status.reportintervalinms";
public static final long DEFAULT_TASK_STATUS_REPORT_INTERVAL_IN_MS = 30000;
/**
 * Configuration properties for the data publisher. All keys share the {@code data.publisher} prefix.
 */
public static final String DATA_PUBLISHER_PREFIX = "data.publisher";
/**
 * Metadata configuration
 *
 * PUBLISH_WRITER_METADATA_KEY: Whether or not to publish writer-generated metadata
 * PUBLISH_WRITER_METADATA_MERGER_NAME_KEY: Class to use to merge writer-generated metadata.
 */
public static final String DATA_PUBLISH_WRITER_METADATA_KEY = DATA_PUBLISHER_PREFIX + ".metadata.publish.writer";
public static final String DATA_PUBLISH_WRITER_METADATA_MERGER_NAME_KEY =
DATA_PUBLISHER_PREFIX + ".metadata.publish.writer.merger.class";
/**
 * Metadata configuration properties used internally
 */
public static final String DATA_PUBLISH_WRITER_METADATA_MERGER_NAME_DEFAULT =
"org.apache.gobblin.metadata.types.GlobalMetadataJsonMerger";
public static final String DATA_PUBLISHER_METADATA_OUTPUT_DIR = DATA_PUBLISHER_PREFIX + ".metadata.output.dir";
//Metadata String in the configuration file
public static final String DATA_PUBLISHER_METADATA_STR = DATA_PUBLISHER_PREFIX + ".metadata.string";
public static final String DATA_PUBLISHER_METADATA_OUTPUT_FILE = DATA_PUBLISHER_PREFIX + ".metadata.output_file";
/**
 * @deprecated Use {@link #TASK_DATA_PUBLISHER_TYPE} and {@link #JOB_DATA_PUBLISHER_TYPE}.
 */
@Deprecated
public static final String DATA_PUBLISHER_TYPE = DATA_PUBLISHER_PREFIX + ".type";
public static final String JOB_DATA_PUBLISHER_TYPE = DATA_PUBLISHER_PREFIX + ".job.type";
public static final String TASK_DATA_PUBLISHER_TYPE = DATA_PUBLISHER_PREFIX + ".task.type";
public static final String DEFAULT_DATA_PUBLISHER_TYPE = "org.apache.gobblin.publisher.BaseDataPublisher";
public static final String DATA_PUBLISHER_FILE_SYSTEM_URI = DATA_PUBLISHER_PREFIX + ".fs.uri";
public static final String DATA_PUBLISHER_FINAL_DIR = DATA_PUBLISHER_PREFIX + ".final.dir";
// Whether the extract name is appended to the final publish directory (default: true)
public static final String DATA_PUBLISHER_APPEND_EXTRACT_TO_FINAL_DIR =
DATA_PUBLISHER_PREFIX + ".appendExtractToFinalDir";
public static final boolean DEFAULT_DATA_PUBLISHER_APPEND_EXTRACT_TO_FINAL_DIR = true;
public static final String DATA_PUBLISHER_REPLACE_FINAL_DIR = DATA_PUBLISHER_PREFIX + ".replace.final.dir";
public static final String DATA_PUBLISHER_FINAL_NAME = DATA_PUBLISHER_PREFIX + ".final.name";
public static final String DATA_PUBLISHER_OVERWRITE_ENABLED = DATA_PUBLISHER_PREFIX + ".overwrite.enabled";
// @DATA_PUBLISHER_FINAL_DIR is the final publishing root directory
// @DATA_PUBLISHER_FINAL_DIR_GROUP is set at the leaf level (DATA_PUBLISHER_FINAL_DIR/EXTRACT/file.xxx) which is incorrect
// Use @DATA_PUBLISHER_OUTPUT_DIR_GROUP to set group at output dir level @DATA_PUBLISHER_FINAL_DIR/EXTRACT
@Deprecated
public static final String DATA_PUBLISHER_FINAL_DIR_GROUP = DATA_PUBLISHER_PREFIX + ".final.dir.group";
public static final String DATA_PUBLISHER_OUTPUT_DIR_GROUP = DATA_PUBLISHER_PREFIX + ".output.dir.group";
public static final String DATA_PUBLISHER_PERMISSIONS = DATA_PUBLISHER_PREFIX + ".permissions";
public static final String PUBLISH_DATA_AT_JOB_LEVEL = "publish.data.at.job.level";
public static final boolean DEFAULT_PUBLISH_DATA_AT_JOB_LEVEL = true;
public static final String PUBLISHER_DIRS = DATA_PUBLISHER_PREFIX + ".output.dirs";
public static final String DATA_PUBLISHER_CAN_BE_SKIPPED = DATA_PUBLISHER_PREFIX + ".canBeSkipped";
public static final boolean DEFAULT_DATA_PUBLISHER_CAN_BE_SKIPPED = false;
public static final String PUBLISHER_LATEST_FILE_ARRIVAL_TIMESTAMP =
DATA_PUBLISHER_PREFIX + ".latest.file.arrival.timestamp";
/**
 * Dynamically configured Publisher properties used internally
 */
//Dataset-specific final publish location
public static final String DATA_PUBLISHER_DATASET_DIR = DATA_PUBLISHER_PREFIX + ".dataset.dir";
/**
 * Configuration properties used by the extractor.
 */
public static final String SOURCE_ENTITY = "source.entity";
// Whether the schema file lives alongside the data in the source directory
public static final String SCHEMA_IN_SOURCE_DIR = "schema.in.source.dir";
public static final boolean DEFAULT_SCHEMA_IN_SOURCE_DIR = false;
public static final String SCHEMA_FILENAME = "schema.filename";
public static final String DEFAULT_SCHEMA_FILENAME = "metadata.json";
// An optional configuration for extractor's specific implementation to set, which helps data writer
// tune some parameters that are relevant to the record size.
// See the reference GobblinOrcWriter as an example.
public static final String AVG_RECORD_SIZE = "avg.record.size";
// Comma-separated source entity names
public static final String SOURCE_ENTITIES = "source.entities";
public static final String SOURCE_TIMEZONE = "source.timezone";
public static final String SOURCE_SCHEMA = "source.schema";
public static final String SOURCE_MAX_NUMBER_OF_PARTITIONS = "source.max.number.of.partitions";
// Whether to skip the first record (e.g. a header row) of the source data
public static final String SOURCE_SKIP_FIRST_RECORD = "source.skip.first.record";
public static final String SOURCE_COLUMN_NAME_CASE = "source.column.name.case";
public static final String SOURCE_EARLY_STOP_ENABLED = "source.earlyStop.enabled";
public static final boolean DEFAULT_SOURCE_EARLY_STOP_ENABLED = false;
/**
 * Configuration properties used by the QueryBasedExtractor.
 */
public static final String SOURCE_QUERYBASED_WATERMARK_TYPE = "source.querybased.watermark.type";
public static final String SOURCE_QUERYBASED_HOUR_COLUMN = "source.querybased.hour.column";
public static final String SOURCE_QUERYBASED_SKIP_HIGH_WATERMARK_CALC = "source.querybased.skip.high.watermark.calc";
public static final String SOURCE_QUERYBASED_QUERY = "source.querybased.query";
public static final String SOURCE_QUERYBASED_EXCLUDED_COLUMNS = "source.querybased.excluded.columns";
public static final String SOURCE_QUERYBASED_IS_HOURLY_EXTRACT = "source.querybased.hourly.extract";
public static final String SOURCE_QUERYBASED_EXTRACT_TYPE = "source.querybased.extract.type";
public static final String SOURCE_QUERYBASED_PARTITION_INTERVAL = "source.querybased.partition.interval";
// Explicit watermark range for the extract (start/end values)
public static final String SOURCE_QUERYBASED_START_VALUE = "source.querybased.start.value";
public static final String SOURCE_QUERYBASED_END_VALUE = "source.querybased.end.value";
public static final String SOURCE_QUERYBASED_APPEND_MAX_WATERMARK_LIMIT =
"source.querybased.append.max.watermark.limit";
public static final String SOURCE_QUERYBASED_IS_WATERMARK_OVERRIDE = "source.querybased.is.watermark.override";
// Seconds to back up the low watermark by, to re-read a safety window of recent data
public static final String SOURCE_QUERYBASED_LOW_WATERMARK_BACKUP_SECS =
"source.querybased.low.watermark.backup.secs";
public static final String SOURCE_QUERYBASED_SCHEMA = "source.querybased.schema";
public static final String SOURCE_QUERYBASED_FETCH_SIZE = "source.querybased.fetch.size";
public static final String SOURCE_QUERYBASED_IS_SPECIFIC_API_ACTIVE = "source.querybased.is.specific.api.active";
public static final String SOURCE_QUERYBASED_SKIP_COUNT_CALC = "source.querybased.skip.count.calc";
public static final String SOURCE_QUERYBASED_IS_METADATA_COLUMN_CHECK_ENABLED =
"source.querybased.is.metadata.column.check.enabled";
public static final String SOURCE_QUERYBASED_IS_COMPRESSION_ENABLED = "source.querybased.is.compression.enabled";
// JDBC ResultSet fetch size hint passed to the driver
public static final String SOURCE_QUERYBASED_JDBC_RESULTSET_FETCH_SIZE =
"source.querybased.jdbc.resultset.fetch.size";
public static final String SOURCE_QUERYBASED_ALLOW_REMOVE_UPPER_BOUNDS = "source.querybased.allowRemoveUpperBounds";
// Promote unsigned INT columns to BIGINT so their full value range fits (default: false)
public static final String SOURCE_QUERYBASED_PROMOTE_UNSIGNED_INT_TO_BIGINT =
"source.querybased.promoteUnsignedIntToBigInt";
public static final boolean DEFAULT_SOURCE_QUERYBASED_PROMOTE_UNSIGNED_INT_TO_BIGINT = false;
public static final String SOURCE_QUERYBASED_RESET_EMPTY_PARTITION_WATERMARK =
"source.querybased.resetEmptyPartitionWatermark";
public static final boolean DEFAULT_SOURCE_QUERYBASED_RESET_EMPTY_PARTITION_WATERMARK = true;
// Whether identifiers in generated SQL are quoted/delimited (default: false)
public static final String ENABLE_DELIMITED_IDENTIFIER = "enable.delimited.identifier";
public static final boolean DEFAULT_ENABLE_DELIMITED_IDENTIFIER = false;
public static final String SQL_SERVER_CONNECTION_PARAMETERS = "source.querybased.sqlserver.connectionParameters";
/**
 * Configuration properties used by the CopySource.
 */
public static final String COPY_SOURCE_FILESET_WU_GENERATOR_CLASS = "copy.source.fileset.wu.generator.class";
public static final String COPY_EXPECTED_SCHEMA = "gobblin.copy.expectedSchema";
/**
 * Configuration properties used by the FileBasedExtractor
 */
public static final String SOURCE_FILEBASED_DATA_DIRECTORY = "source.filebased.data.directory";
public static final String SOURCE_FILEBASED_PLATFORM = "source.filebased.platform";
public static final String SOURCE_FILEBASED_FILES_TO_PULL = "source.filebased.files.to.pull";
// Cap on the number of files processed in a single run
public static final String SOURCE_FILEBASED_MAX_FILES_PER_RUN = "source.filebased.maxFilesPerRun";
// Snapshot of the source filesystem, used to detect new/changed files between runs
public static final String SOURCE_FILEBASED_FS_SNAPSHOT = "source.filebased.fs.snapshot";
public static final String SOURCE_FILEBASED_FS_URI = "source.filebased.fs.uri";
public static final String SOURCE_FILEBASED_PRESERVE_FILE_NAME = "source.filebased.preserve.file.name";
public static final String SOURCE_FILEBASED_OPTIONAL_DOWNLOADER_CLASS = "source.filebased.downloader.class";
public static final String SOURCE_FILEBASED_ENCRYPTED_CONFIG_PATH = "source.filebased.encrypted";
// If true, a prior snapshot must exist for the run to proceed (default: false)
public static final String SOURCE_FILEBASED_FS_PRIOR_SNAPSHOT_REQUIRED =
"source.filebased.fs.prior.snapshot.required";
public static final boolean DEFAULT_SOURCE_FILEBASED_FS_PRIOR_SNAPSHOT_REQUIRED = false;
/**
 * Configuration properties used internally by the KafkaSource (offset bookkeeping counters).
 */
public static final String OFFSET_TOO_EARLY_COUNT = "offset.too.early.count";
public static final String OFFSET_TOO_LATE_COUNT = "offset.too.late.count";
public static final String FAIL_TO_GET_OFFSET_COUNT = "fail.to.get.offset.count";
/**
 * Configuration properties used internally by the KafkaExtractor (error counters).
 */
public static final String ERROR_PARTITION_COUNT = "error.partition.count";
public static final String ERROR_MESSAGE_UNDECODABLE_COUNT = "error.message.undecodable.count";
/**
* Configuration properties for source connection.
*/
public static final String SOURCE_CONN_PREFIX = "source.conn.";
public static final String SOURCE_CONN_USE_AUTHENTICATION = SOURCE_CONN_PREFIX + "use.authentication";
public static final String SOURCE_CONN_PRIVATE_KEY = SOURCE_CONN_PREFIX + "private.key";
public static final String SOURCE_CONN_KNOWN_HOSTS = SOURCE_CONN_PREFIX + "known.hosts";
public static final String SOURCE_CONN_CLIENT_SECRET = SOURCE_CONN_PREFIX + "client.secret";
public static final String SOURCE_CONN_CLIENT_ID = SOURCE_CONN_PREFIX + "client.id";
public static final String SOURCE_CONN_DOMAIN = SOURCE_CONN_PREFIX + "domain";
public static final String SOURCE_CONN_USERNAME = SOURCE_CONN_PREFIX + "username";
public static final String SOURCE_CONN_PASSWORD = SOURCE_CONN_PREFIX + "password";
public static final String SOURCE_CONN_SECURITY_TOKEN = SOURCE_CONN_PREFIX + "security.token";
public static final String SOURCE_CONN_HOST_NAME = SOURCE_CONN_PREFIX + "host";
public static final String SOURCE_CONN_VERSION = SOURCE_CONN_PREFIX + "version";
public static final String SOURCE_CONN_TIMEOUT = SOURCE_CONN_PREFIX + "timeout";
public static final String SOURCE_CONN_PROPERTIES = SOURCE_CONN_PREFIX + "properties";
public static final String SOURCE_CONN_REST_URL = SOURCE_CONN_PREFIX + "rest.url";
public static final String SOURCE_CONN_USE_PROXY_URL = SOURCE_CONN_PREFIX + "use.proxy.url";
public static final String SOURCE_CONN_USE_PROXY_PORT = SOURCE_CONN_PREFIX + "use.proxy.port";
public static final String SOURCE_CONN_DRIVER = SOURCE_CONN_PREFIX + "driver";
public static final String SOURCE_CONN_PORT = SOURCE_CONN_PREFIX + "port";
public static final int SOURCE_CONN_DEFAULT_PORT = 22;
public static final String SOURCE_CONN_SID = SOURCE_CONN_PREFIX + "sid";
public static final String SOURCE_CONN_REFRESH_TOKEN = SOURCE_CONN_PREFIX + "refresh.token";
public static final String SOURCE_CONN_DECRYPT_CLIENT_SECRET = SOURCE_CONN_PREFIX + "decrypt.client.id.secret";
/**
* Source default configurations.
*/
public static final long DEFAULT_WATERMARK_VALUE = -1;
public static final int DEFAULT_MAX_NUMBER_OF_PARTITIONS = 20;
public static final int DEFAULT_SOURCE_FETCH_SIZE = 1000;
public static final String DEFAULT_WATERMARK_TYPE = "timestamp";
public static final String DEFAULT_LOW_WATERMARK_BACKUP_SECONDS = "1000";
public static final int DEFAULT_CONN_TIMEOUT = 500000;
public static final String ESCAPE_CHARS_IN_COLUMN_NAME = "$,&";
public static final String ESCAPE_CHARS_IN_TABLE_NAME = "$,&";
public static final String DEFAULT_SOURCE_QUERYBASED_WATERMARK_PREDICATE_SYMBOL = "'$WATERMARK'";
public static final String DEFAULT_SOURCE_QUERYBASED_IS_METADATA_COLUMN_CHECK_ENABLED = "true";
public static final String DEFAULT_COLUMN_NAME_CASE = "NOCHANGE";
public static final int DEFAULT_SOURCE_QUERYBASED_JDBC_RESULTSET_FETCH_SIZE = 1000;
public static final String FILEBASED_REPORT_STATUS_ON_COUNT = "filebased.report.status.on.count";
public static final int DEFAULT_FILEBASED_REPORT_STATUS_ON_COUNT = 10000;
public static final String DEFAULT_SOURCE_TIMEZONE = PST_TIMEZONE_NAME;
/**
* Configuration properties used by the Hadoop MR job launcher.
*/
public static final String MR_JOB_ROOT_DIR_KEY = "mr.job.root.dir";
/** Specifies a static location in HDFS to upload jars to. Useful for sharing jars across different Gobblin runs.*/
@Deprecated // Deprecated; use MR_JARS_BASE_DIR instead
public static final String MR_JARS_DIR = "mr.jars.dir";
// dir pointed by MR_JARS_BASE_DIR has month partitioned dirs to store jar files and are cleaned up on a regular basis
// retention feature is not available for dir pointed by MR_JARS_DIR
public static final String MR_JARS_BASE_DIR = "mr.jars.base.dir";
public static final String MR_JOB_MAX_MAPPERS_KEY = "mr.job.max.mappers";
public static final String MR_JOB_MAPPER_FAILURE_IS_FATAL_KEY = "mr.job.map.failure.is.fatal";
public static final String MR_PERSIST_WORK_UNITS_THEN_CANCEL_KEY = "mr.persist.workunits.then.cancel";
public static final String MR_TARGET_MAPPER_SIZE = "mr.target.mapper.size";
public static final String MR_REPORT_METRICS_AS_COUNTERS_KEY = "mr.report.metrics.as.counters";
public static final boolean DEFAULT_MR_REPORT_METRICS_AS_COUNTERS = false;
public static final int DEFAULT_MR_JOB_MAX_MAPPERS = 100;
public static final boolean DEFAULT_MR_JOB_MAPPER_FAILURE_IS_FATAL = false;
public static final boolean DEFAULT_MR_PERSIST_WORK_UNITS_THEN_CANCEL = false;
public static final boolean DEFAULT_ENABLE_MR_SPECULATIVE_EXECUTION = false;
/**
* Configuration properties used by the distributed job launcher.
*/
public static final String TASK_STATE_COLLECTOR_INTERVAL_SECONDS = "task.state.collector.interval.secs";
public static final int DEFAULT_TASK_STATE_COLLECTOR_INTERVAL_SECONDS = 60;
public static final String TASK_STATE_COLLECTOR_HANDLER_CLASS = "task.state.collector.handler.class";
public static final String REPORT_JOB_PROGRESS = "report.job.progress";
public static final boolean DEFAULT_REPORT_JOB_PROGRESS = false;
public static final double DEFAULT_PROGRESS_REPORTING_THRESHOLD = 0.05;
/**
 * Set to true so that the job still proceeds if the TaskStateCollectorService fails.
*/
public static final String JOB_PROCEED_ON_TASK_STATE_COLLECOTR_SERVICE_FAILURE =
"job.proceed.onTaskStateCollectorServiceFailure";
/**
* Configuration properties for email settings.
*/
public static final String ALERT_EMAIL_ENABLED_KEY = "email.alert.enabled";
public static final String NOTIFICATION_EMAIL_ENABLED_KEY = "email.notification.enabled";
public static final String EMAIL_HOST_KEY = "email.host";
public static final String DEFAULT_EMAIL_HOST = "localhost";
public static final String EMAIL_SMTP_PORT_KEY = "email.smtp.port";
public static final String EMAIL_USER_KEY = "email.user";
public static final String EMAIL_PASSWORD_KEY = "email.password";
public static final String EMAIL_FROM_KEY = "email.from";
public static final String EMAIL_TOS_KEY = "email.tos";
/**
* Common metrics configuration properties.
*/
public static final String METRICS_CONFIGURATIONS_PREFIX = "metrics.";
public static final String METRICS_ENABLED_KEY = METRICS_CONFIGURATIONS_PREFIX + "enabled";
public static final String DEFAULT_METRICS_ENABLED = Boolean.toString(true);
public static final String METRICS_REPORT_INTERVAL_KEY = METRICS_CONFIGURATIONS_PREFIX + "report.interval";
public static final String DEFAULT_METRICS_REPORT_INTERVAL = Long.toString(TimeUnit.SECONDS.toMillis(30));
public static final String METRIC_CONTEXT_NAME_KEY = "metrics.context.name";
public static final String METRIC_TIMER_WINDOW_SIZE_IN_MINUTES =
METRICS_CONFIGURATIONS_PREFIX + "timer.window.size.in.minutes";
public static final int DEFAULT_METRIC_TIMER_WINDOW_SIZE_IN_MINUTES = 15;
public static final String METRICS_REPORTING_CONFIGURATIONS_PREFIX = "metrics.reporting";
public static final String METRICS_REPORTING_EVENTS_CONFIGURATIONS_PREFIX =
METRICS_REPORTING_CONFIGURATIONS_PREFIX + ".events";
//Configuration keys to trigger job/task failures on metric reporter instantiation failures. Useful
//when monitoring of Gobblin pipelines critically depend on events and metrics emitted by the metrics
//reporting service running in each container.
public static final String GOBBLIN_TASK_METRIC_REPORTING_FAILURE_FATAL = "gobblin.task.isMetricReportingFailureFatal";
public static final boolean DEFAULT_GOBBLIN_TASK_METRIC_REPORTING_FAILURE_FATAL = false;
public static final String GOBBLIN_TASK_EVENT_REPORTING_FAILURE_FATAL = "gobblin.task.isEventReportingFailureFatal";
public static final boolean DEFAULT_GOBBLIN_TASK_EVENT_REPORTING_FAILURE_FATAL = false;
public static final String GOBBLIN_JOB_METRIC_REPORTING_FAILURE_FATAL = "gobblin.job.isMetricReportingFailureFatal";
public static final boolean DEFAULT_GOBBLIN_JOB_METRIC_REPORTING_FAILURE_FATAL = false;
public static final String GOBBLIN_JOB_EVENT_REPORTING_FAILURE_FATAL = "gobblin.job.isEventReportingFailureFatal";
public static final boolean DEFAULT_GOBBLIN_JOB_EVENT_REPORTING_FAILURE_FATAL = false;
// File-based reporting
public static final String METRICS_REPORTING_FILE_ENABLED_KEY =
METRICS_CONFIGURATIONS_PREFIX + "reporting.file.enabled";
public static final String DEFAULT_METRICS_REPORTING_FILE_ENABLED = Boolean.toString(false);
public static final String METRICS_LOG_DIR_KEY = METRICS_CONFIGURATIONS_PREFIX + "log.dir";
public static final String METRICS_FILE_SUFFIX = METRICS_CONFIGURATIONS_PREFIX + "reporting.file.suffix";
public static final String DEFAULT_METRICS_FILE_SUFFIX = "";
public static final String FAILURE_REPORTING_FILE_ENABLED_KEY = "failure.reporting.file.enabled";
public static final String DEFAULT_FAILURE_REPORTING_FILE_ENABLED = Boolean.toString(true);
public static final String FAILURE_LOG_DIR_KEY = "failure.log.dir";
// JMX-based reporting
public static final String METRICS_REPORTING_JMX_ENABLED_KEY =
METRICS_CONFIGURATIONS_PREFIX + "reporting.jmx.enabled";
public static final String DEFAULT_METRICS_REPORTING_JMX_ENABLED = Boolean.toString(false);
// Kafka-based reporting
public static final String METRICS_REPORTING_KAFKA_ENABLED_KEY =
METRICS_CONFIGURATIONS_PREFIX + "reporting.kafka.enabled";
public static final String DEFAULT_METRICS_REPORTING_KAFKA_ENABLED = Boolean.toString(false);
public static final String METRICS_REPORTING_KAFKA_METRICS_ENABLED_KEY =
METRICS_CONFIGURATIONS_PREFIX + "reporting.kafka.metrics.enabled";
public static final String METRICS_REPORTING_KAFKA_EVENTS_ENABLED_KEY =
METRICS_CONFIGURATIONS_PREFIX + "reporting.kafka.events.enabled";
public static final String DEFAULT_METRICS_REPORTING_KAFKA_REPORTER_CLASS =
"org.apache.gobblin.metrics.kafka.KafkaMetricReporterFactory";
public static final String DEFAULT_EVENTS_REPORTING_KAFKA_REPORTER_CLASS =
"org.apache.gobblin.metrics.kafka.KafkaEventReporterFactory";
public static final String METRICS_REPORTING_KAFKA_FORMAT = METRICS_CONFIGURATIONS_PREFIX + "reporting.kafka.format";
public static final String METRICS_REPORTING_EVENTS_KAFKA_FORMAT =
METRICS_CONFIGURATIONS_PREFIX + "reporting.events.kafka.format";
public static final String METRICS_REPORTING_KAFKAPUSHERKEYS =
METRICS_CONFIGURATIONS_PREFIX + "reporting.kafkaPusherKeys";
public static final String METRICS_REPORTING_EVENTS_KAFKAPUSHERKEYS =
METRICS_CONFIGURATIONS_PREFIX + "reporting.events.kafkaPusherKeys";
public static final String DEFAULT_METRICS_REPORTING_KAFKA_FORMAT = "json";
public static final String METRICS_REPORTING_KAFKA_USE_SCHEMA_REGISTRY =
METRICS_CONFIGURATIONS_PREFIX + "reporting.kafka.avro.use.schema.registry";
public static final String METRICS_REPORTING_EVENTS_KAFKA_AVRO_SCHEMA_ID =
METRICS_CONFIGURATIONS_PREFIX + "reporting.events.kafka.avro.schemaId";
public static final String METRICS_REPORTING_METRICS_KAFKA_AVRO_SCHEMA_ID =
METRICS_CONFIGURATIONS_PREFIX + "reporting.metrics.kafka.avro.schemaId";
public static final String DEFAULT_METRICS_REPORTING_KAFKA_USE_SCHEMA_REGISTRY = Boolean.toString(false);
public static final String METRICS_KAFKA_BROKERS = METRICS_CONFIGURATIONS_PREFIX + "reporting.kafka.brokers";
// Topic used for both event and metric reporting.
// Can be overridden by METRICS_KAFKA_TOPIC_METRICS and METRICS_KAFKA_TOPIC_EVENTS.
public static final String METRICS_KAFKA_TOPIC = METRICS_CONFIGURATIONS_PREFIX + "reporting.kafka.topic.common";
// Topic used only for metric reporting.
public static final String METRICS_KAFKA_TOPIC_METRICS =
METRICS_CONFIGURATIONS_PREFIX + "reporting.kafka.topic.metrics";
// Topic used only for event reporting.
public static final String METRICS_KAFKA_TOPIC_EVENTS =
METRICS_CONFIGURATIONS_PREFIX + "reporting.kafka.topic.events";
// Key related configurations for raw metric and event key value reporters
public static final int DEFAULT_REPORTER_KEY_SIZE = 100;
public static final String METRICS_REPORTING_PUSHERKEYS = METRICS_CONFIGURATIONS_PREFIX + "reporting.pusherKeys";
public static final String METRICS_REPORTING_EVENTS_PUSHERKEYS =
METRICS_REPORTING_EVENTS_CONFIGURATIONS_PREFIX + ".pusherKeys";
//Graphite-based reporting
public static final String METRICS_REPORTING_GRAPHITE_METRICS_ENABLED_KEY =
METRICS_CONFIGURATIONS_PREFIX + "reporting.graphite.metrics.enabled";
public static final String DEFAULT_METRICS_REPORTING_GRAPHITE_METRICS_ENABLED = Boolean.toString(false);
public static final String METRICS_REPORTING_GRAPHITE_EVENTS_ENABLED_KEY =
METRICS_CONFIGURATIONS_PREFIX + "reporting.graphite.events.enabled";
public static final String DEFAULT_METRICS_REPORTING_GRAPHITE_EVENTS_ENABLED = Boolean.toString(false);
public static final String METRICS_REPORTING_GRAPHITE_HOSTNAME =
METRICS_CONFIGURATIONS_PREFIX + "reporting.graphite.hostname";
public static final String METRICS_REPORTING_GRAPHITE_PORT =
METRICS_CONFIGURATIONS_PREFIX + "reporting.graphite.port";
public static final String DEFAULT_METRICS_REPORTING_GRAPHITE_PORT = "2003";
public static final String METRICS_REPORTING_GRAPHITE_EVENTS_PORT =
METRICS_CONFIGURATIONS_PREFIX + "reporting.graphite.events.port";
public static final String METRICS_REPORTING_GRAPHITE_EVENTS_VALUE_AS_KEY =
METRICS_CONFIGURATIONS_PREFIX + "reporting.graphite.events.value.as.key";
public static final String DEFAULT_METRICS_REPORTING_GRAPHITE_EVENTS_VALUE_AS_KEY = Boolean.toString(false);
public static final String METRICS_REPORTING_GRAPHITE_SENDING_TYPE =
METRICS_CONFIGURATIONS_PREFIX + "reporting.graphite.sending.type";
public static final String METRICS_REPORTING_GRAPHITE_PREFIX =
METRICS_CONFIGURATIONS_PREFIX + "reporting.graphite.prefix";
public static final String DEFAULT_METRICS_REPORTING_GRAPHITE_PREFIX = "";
public static final String DEFAULT_METRICS_REPORTING_GRAPHITE_SENDING_TYPE = "TCP";
//InfluxDB-based reporting
public static final String METRICS_REPORTING_INFLUXDB_METRICS_ENABLED_KEY =
METRICS_CONFIGURATIONS_PREFIX + "reporting.influxdb.metrics.enabled";
public static final String DEFAULT_METRICS_REPORTING_INFLUXDB_METRICS_ENABLED = Boolean.toString(false);
public static final String METRICS_REPORTING_INFLUXDB_EVENTS_ENABLED_KEY =
METRICS_CONFIGURATIONS_PREFIX + "reporting.influxdb.events.enabled";
public static final String DEFAULT_METRICS_REPORTING_INFLUXDB_EVENTS_ENABLED = Boolean.toString(false);
public static final String METRICS_REPORTING_INFLUXDB_URL = METRICS_CONFIGURATIONS_PREFIX + "reporting.influxdb.url";
public static final String METRICS_REPORTING_INFLUXDB_DATABASE =
METRICS_CONFIGURATIONS_PREFIX + "reporting.influxdb.database";
public static final String METRICS_REPORTING_INFLUXDB_EVENTS_DATABASE =
METRICS_CONFIGURATIONS_PREFIX + "reporting.influxdb.events.database";
public static final String METRICS_REPORTING_INFLUXDB_USER =
METRICS_CONFIGURATIONS_PREFIX + "reporting.influxdb.user";
public static final String METRICS_REPORTING_INFLUXDB_PASSWORD =
METRICS_CONFIGURATIONS_PREFIX + "reporting.influxdb.password";
public static final String METRICS_REPORTING_INFLUXDB_SENDING_TYPE =
METRICS_CONFIGURATIONS_PREFIX + "reporting.influxdb.sending.type";
public static final String DEFAULT_METRICS_REPORTING_INFLUXDB_SENDING_TYPE = "TCP";
//Custom-reporting
public static final String METRICS_CUSTOM_BUILDERS = METRICS_CONFIGURATIONS_PREFIX + "reporting.custom.builders";
/**
* Rest server configuration properties.
*/
public static final String REST_SERVER_HOST_KEY = "rest.server.host";
public static final String DEFAULT_REST_SERVER_HOST = "localhost";
public static final String REST_SERVER_PORT_KEY = "rest.server.port";
public static final String DEFAULT_REST_SERVER_PORT = "8080";
public static final String REST_SERVER_ADVERTISED_URI_KEY = "rest.server.advertised.uri";
/*
* Admin server configuration properties.
*/
public static final String ADMIN_SERVER_ENABLED_KEY = "admin.server.enabled";
/** The name of the class with the admin interface. The class must implement the
 * AdminWebServerFactory interface. */
public static final String ADMIN_SERVER_FACTORY_CLASS_KEY = "admin.server.factory.type";
public static final String ADMIN_SERVER_HOST_KEY = "admin.server.host";
public static final String DEFAULT_ADMIN_SERVER_HOST = "localhost";
public static final String ADMIN_SERVER_PORT_KEY = "admin.server.port";
public static final String DEFAULT_ADMIN_SERVER_PORT = "8000";
public static final String ADMIN_SERVER_HIDE_JOBS_WITHOUT_TASKS_BY_DEFAULT_KEY =
"admin.server.hide_jobs_without_tasks_by_default.enabled";
public static final String DEFAULT_ADMIN_SERVER_HIDE_JOBS_WITHOUT_TASKS_BY_DEFAULT = "false";
public static final String ADMIN_SERVER_REFRESH_INTERVAL_KEY = "admin.server.refresh_interval";
public static final long DEFAULT_ADMIN_SERVER_REFRESH_INTERVAL = 30000;
public static final String DEFAULT_ADMIN_SERVER_FACTORY_CLASS =
"org.apache.gobblin.admin.DefaultAdminWebServerFactory";
/**
* Kafka job configurations.
*/
public static final String KAFKA_BROKERS = "kafka.brokers";
public static final String KAFKA_BROKERS_TO_SIMPLE_NAME_MAP_KEY = "kafka.brokersToSimpleNameMap";
public static final String KAFKA_SOURCE_WORK_UNITS_CREATION_THREADS = "kafka.source.work.units.creation.threads";
public static final int KAFKA_SOURCE_WORK_UNITS_CREATION_DEFAULT_THREAD_COUNT = 30;
public static final String KAFKA_SOURCE_SHARE_CONSUMER_CLIENT = "kafka.source.shareConsumerClient";
public static final boolean DEFAULT_KAFKA_SOURCE_SHARE_CONSUMER_CLIENT = false;
public static final String KAFKA_SOURCE_AVG_FETCH_TIME_CAP = "kakfa.source.avgFetchTimeCap";
public static final int DEFAULT_KAFKA_SOURCE_AVG_FETCH_TIME_CAP = 100;
public static final String SHARED_KAFKA_CONFIG_PREFIX = "gobblin.kafka.sharedConfig";
/**
* Kafka schema registry HTTP client configuration
*/
public static final String KAFKA_SCHEMA_REGISTRY_HTTPCLIENT_SO_TIMEOUT =
"kafka.schema.registry.httpclient.so.timeout";
public static final String KAFKA_SCHEMA_REGISTRY_HTTPCLIENT_CONN_TIMEOUT =
"kafka.schema.registry.httpclient.conn.timeout";
public static final String KAFKA_SCHEMA_REGISTRY_HTTPCLIENT_METHOD_RETRY_COUNT =
"kafka.schema.registry.httpclient.methodRetryCount";
public static final String KAFKA_SCHEMA_REGISTRY_HTTPCLIENT_REQUEST_RETRY_ENABLED =
"kafka.schema.registry.httpclient.requestRetryEnabled";
public static final String KAFKA_SCHEMA_REGISTRY_HTTPCLIENT_METHOD_RETRY_HANDLER_CLASS =
"kafka.schema.registry.httpclient.methodRetryHandlerClass";
/**
* Kafka schema registry retry configurations
*/
public static final String KAFKA_SCHEMA_REGISTRY_RETRY_TIMES = "kafka.schema.registry.retry.times";
public static final String KAFKA_SCHEMA_REGISTRY_RETRY_INTERVAL_IN_MILLIS =
"kafka.schema.registry.retry.interval.inMillis";
/**
* Job execution info server and history store configuration properties.
*/
// If job execution info server is enabled
public static final String JOB_EXECINFO_SERVER_ENABLED_KEY = "job.execinfo.server.enabled";
public static final String JOB_HISTORY_STORE_ENABLED_KEY = "job.history.store.enabled";
public static final String JOB_HISTORY_STORE_URL_KEY = "job.history.store.url";
public static final String JOB_HISTORY_STORE_JDBC_DRIVER_KEY = "job.history.store.jdbc.driver";
public static final String DEFAULT_JOB_HISTORY_STORE_JDBC_DRIVER = "com.mysql.cj.jdbc.Driver";
public static final String JOB_HISTORY_STORE_USER_KEY = "job.history.store.user";
public static final String DEFAULT_JOB_HISTORY_STORE_USER = "gobblin";
public static final String JOB_HISTORY_STORE_PASSWORD_KEY = "job.history.store.password";
public static final String DEFAULT_JOB_HISTORY_STORE_PASSWORD = "gobblin";
/**
* Password encryption and decryption properties.
*/
public static final String ENCRYPT_KEY_FS_URI = "encrypt.key.fs.uri";
public static final String ENCRYPT_KEY_LOC = "encrypt.key.loc";
public static final String ENCRYPT_USE_STRONG_ENCRYPTOR = "encrypt.use.strong.encryptor";
public static final boolean DEFAULT_ENCRYPT_USE_STRONG_ENCRYPTOR = false;
public static final String NUMBER_OF_ENCRYPT_KEYS = "num.encrypt.keys";
public static final int DEFAULT_NUMBER_OF_MASTER_PASSWORDS = 2;
/**
* Proxy Filesystem operation properties.
*/
public static final String SHOULD_FS_PROXY_AS_USER = "should.fs.proxy.as.user";
public static final boolean DEFAULT_SHOULD_FS_PROXY_AS_USER = false;
public static final String FS_PROXY_AS_USER_NAME = "fs.proxy.as.user.name";
public static final String FS_PROXY_AS_USER_TOKEN_FILE = "fs.proxy.as.user.token.file";
public static final String SUPER_USER_NAME_TO_PROXY_AS_OTHERS = "super.user.name.to.proxy.as.others";
public static final String SUPER_USER_KEY_TAB_LOCATION = "super.user.key.tab.location";
public static final String TOKEN_AUTH = "TOKEN";
public static final String KERBEROS_AUTH = "KERBEROS";
public static final String FS_PROXY_AUTH_METHOD = "fs.proxy.auth.method";
public static final String DEFAULT_FS_PROXY_AUTH_METHOD = TOKEN_AUTH;
public static final String KERBEROS_REALM = "kerberos.realm";
/**
* Azkaban properties.
*/
public static final String AZKABAN_EXECUTION_TIME_RANGE = "azkaban.execution.time.range";
public static final String AZKABAN_EXECUTION_DAYS_LIST = "azkaban.execution.days.list";
public static final String AZKABAN_PROJECT_NAME = "azkaban.flow.projectname";
public static final String AZKABAN_FLOW_ID = "azkaban.flow.flowid";
public static final String AZKABAN_JOB_ID = "azkaban.job.id";
public static final String AZKABAN_EXEC_ID = "azkaban.flow.execid";
public static final String AZKABAN_URL = "azkaban.link.execution.url";
public static final String AZKABAN_FLOW_URL = "azkaban.link.workflow.url";
public static final String AZKABAN_JOB_URL = "azkaban.link.job.url";
public static final String AZKABAN_JOB_EXEC_URL = "azkaban.link.jobexec.url";
public static final String AZKABAN_WEBSERVERHOST = "azkaban.webserverhost";
public static final String AZKABAN_SERVER_NAME = "azkaban.server.name";
/**
* Hive registration properties
*/
public static final String HIVE_REGISTRATION_POLICY = "hive.registration.policy";
public static final String HIVE_REG_PUBLISHER_CLASS = "hive.reg.publisher.class";
public static final String DEFAULT_HIVE_REG_PUBLISHER_CLASS =
"org.apache.gobblin.publisher.HiveRegistrationPublisher";
/**
* Config store properties
*/
public static final String CONFIG_MANAGEMENT_STORE_URI = "gobblin.config.management.store.uri";
public static final String CONFIG_MANAGEMENT_STORE_ENABLED = "gobblin.config.management.store.enabled";
public static final String DEFAULT_CONFIG_MANAGEMENT_STORE_ENABLED = "false";
/**
* Other configuration properties.
*/
public static final String GOBBLIN_RUNTIME_DELIVERY_SEMANTICS = "gobblin.runtime.delivery.semantics";
public static final Charset DEFAULT_CHARSET_ENCODING = Charsets.UTF_8;
public static final String TEST_HARNESS_LAUNCHER_IMPL = "gobblin.testharness.launcher.impl";
public static final int PERMISSION_PARSING_RADIX = 8;
// describes a comma separated list of non transient errors that may come in a gobblin job
// e.g. "invalid_grant,CredentialStoreException"
public static final String GOBBLIN_NON_TRANSIENT_ERRORS = "gobblin.errorMessages.nonTransientErrors";
/**
* Configuration properties related to Flows
*/
public static final String FLOW_RUN_IMMEDIATELY = "flow.runImmediately";
public static final String GOBBLIN_FLOW_SLA_TIME = "gobblin.flow.sla.time";
public static final String GOBBLIN_FLOW_SLA_TIME_UNIT = "gobblin.flow.sla.timeunit";
public static final String DEFAULT_GOBBLIN_FLOW_SLA_TIME_UNIT = "MINUTES";
public static final String GOBBLIN_JOB_START_SLA_TIME = "gobblin.job.start.sla.time";
public static final String GOBBLIN_JOB_START_SLA_TIME_UNIT = "gobblin.job.start.sla.timeunit";
public static final long FALLBACK_GOBBLIN_JOB_START_SLA_TIME = 10L;
public static final String FALLBACK_GOBBLIN_JOB_START_SLA_TIME_UNIT = "MINUTES";
public static final String DATASET_SUBPATHS_KEY = "gobblin.flow.dataset.subPaths";
public static final String DATASET_BASE_INPUT_PATH_KEY = "gobblin.flow.dataset.baseInputPath";
public static final String DATASET_BASE_OUTPUT_PATH_KEY = "gobblin.flow.dataset.baseOutputPath";
public static final String DATASET_COMBINE_KEY = "gobblin.flow.dataset.combine";
public static final String WHITELISTED_EDGE_IDS = "gobblin.flow.whitelistedEdgeIds";
public static final String GOBBLIN_OUTPUT_JOB_LEVEL_METRICS = "gobblin.job.outputJobLevelMetrics";
/**
* Configuration properties related to flowGraphs
*/
public static final String FLOWGRAPH_JAVA_PROPS_EXTENSIONS = "flowGraph.javaPropsExtensions";
public static final String FLOWGRAPH_HOCON_FILE_EXTENSIONS = "flowGraph.hoconFileExtensions";
public static final String DEFAULT_PROPERTIES_EXTENSIONS = "properties";
public static final String DEFAULT_CONF_EXTENSIONS = "conf";
public static final String FLOWGRAPH_POLLING_INTERVAL = "flowGraph.pollingInterval";
public static final String FLOWGRAPH_BASE_DIR = "flowGraph.configBaseDirectory";
public static final String FLOWGRAPH_ABSOLUTE_DIR = "flowGraph.absoluteDirectory";
/***
* Configuration properties related to TopologySpec Store
*/
public static final String TOPOLOGYSPEC_STORE_CLASS_KEY = "topologySpec.store.class";
public static final String TOPOLOGYSPEC_SERDE_CLASS_KEY = "topologySpec.serde.class";
public static final String TOPOLOGYSPEC_STORE_DIR_KEY = "topologySpec.store.dir";
/***
* Configuration properties related to Spec Executor Instance
*/
public static final String SPECEXECUTOR_INSTANCE_URI_KEY = "specExecInstance.uri";
public static final String SPECEXECUTOR_INSTANCE_CAPABILITIES_KEY = "specExecInstance.capabilities";
public static final String SPECEXECUTOR_CONFIGS_PREFIX_KEY = "specExecutor.additional.configs.key";
/***
* Configuration properties related to Spec Producer
*/
public static final String SPEC_PRODUCER_SERIALIZED_FUTURE = "specProducer.serialized.future";
/***
* Configuration properties related to Compaction Suite
*/
public static final String COMPACTION_PREFIX = "compaction.";
public static final String COMPACTION_SUITE_FACTORY = COMPACTION_PREFIX + "suite.factory";
public static final String DEFAULT_COMPACTION_SUITE_FACTORY = "CompactionSuiteBaseFactory";
public static final String COMPACTION_PRIORITIZATION_PREFIX = COMPACTION_PREFIX + "prioritization.";
public static final String COMPACTION_PRIORITIZER_ALIAS = COMPACTION_PRIORITIZATION_PREFIX + "prioritizerAlias";
public static final String COMPACTION_ESTIMATOR = COMPACTION_PRIORITIZATION_PREFIX + "estimator";
/***
* Configuration properties related to Re-compaction
*/
public static String RECOMPACTION_WRITE_TO_NEW_FOLDER = "recompaction.write.to.new.folder";
/**
* Configuration related to ConfigStore based copy/retention
*/
public static final String CONFIG_BASED_PREFIX = "gobblin.configBased";
/**
* Configuration related to the Git based monitoring service
*/
public static final String GIT_MONITOR_REPO_URI = "repositoryUri";
public static final String GIT_MONITOR_REPO_DIR = "repositoryDirectory";
public static final String GIT_MONITOR_CONFIG_BASE_DIR = "configBaseDirectory";
public static final String GIT_MONITOR_POLLING_INTERVAL = "pollingInterval";
public static final String GIT_MONITOR_BRANCH_NAME = "branchName";
//Configuration keys for authentication using HTTPS
public static final String GIT_MONITOR_USERNAME = "username";
public static final String GIT_MONITOR_PASSWORD = "password";
//Configuration keys for authentication using SSH with Public Key
public static final String GIT_MONITOR_SSH_WITH_PUBLIC_KEY_ENABLED = "isSshWithPublicKeyEnabled";
public static final String GIT_MONITOR_SSH_PRIVATE_KEY_PATH = "privateKeyPath";
public static final String GIT_MONITOR_SSH_PRIVATE_KEY_BASE64_ENCODED = "privateKeyBase64";
public static final String GIT_MONITOR_SSH_PASSPHRASE = "passphrase";
public static final String GIT_MONITOR_SSH_STRICT_HOST_KEY_CHECKING_ENABLED = "isStrictHostKeyCheckingEnabled";
public static final String GIT_MONITOR_SSH_KNOWN_HOSTS = "knownHosts";
public static final String GIT_MONITOR_SSH_KNOWN_HOSTS_FILE = "knownHostsFile";
public static final String GIT_MONITOR_JSCH_LOGGER_ENABLED = "isJschLoggerEnabled";
/**
* Configuration related to avro schema check strategy
*/
public static final String AVRO_SCHEMA_CHECK_STRATEGY = "avro.schema.check.strategy";
public static final String AVRO_SCHEMA_CHECK_STRATEGY_DEFAULT =
"org.apache.gobblin.util.schema_check.AvroSchemaCheckDefaultStrategy";
/**
 * Configuration and constant value for GobblinMetadataChangeEvent
*/
public static final String GOBBLIN_METADATA_CHANGE_EVENT_ENABLED = "GobblinMetadataChangeEvent.enabled";
public static final String LIST_DELIMITER_KEY = ",";
public static final String RANGE_DELIMITER_KEY = "-";
/**
* Configuration for emitting task events
*/
public static final String TASK_EVENT_METADATA_GENERATOR_CLASS_KEY = "gobblin.task.event.metadata.generator.class";
public static final String DEFAULT_TASK_EVENT_METADATA_GENERATOR_CLASS_KEY = "nooptask";
/**
* Configuration for sharded directory files
*/
public static final String USE_DATASET_LOCAL_WORK_DIR = "gobblin.useDatasetLocalWorkDir";
public static final String DESTINATION_DATASET_HANDLER_CLASS = "gobblin.destination.datasetHandlerClass";
public static final String DATASET_DESTINATION_PATH = "gobblin.dataset.destination.path";
public static final String TMP_DIR = ".temp";
public static final String TRASH_DIR = ".trash";
public static final String STAGING_DIR_DEFAULT_SUFFIX = "/" + TMP_DIR + "/taskStaging";
public static final String OUTPUT_DIR_DEFAULT_SUFFIX = "/" + TMP_DIR + "/taskOutput";
public static final String ROW_LEVEL_ERR_FILE_DEFAULT_SUFFIX = "/err";
/**
* Troubleshooter configuration
*/
/**
* Disables all troubleshooter functions
* */
public static final String TROUBLESHOOTER_DISABLED = "gobblin.troubleshooter.disabled";
/**
* Disables reporting troubleshooter issues as GobblinTrackingEvents
* */
public static final String TROUBLESHOOTER_DISABLE_EVENT_REPORTING = "gobblin.troubleshooter.disableEventReporting";
/**
* The maximum number of issues that In-memory troubleshooter repository will keep.
*
* This setting can control memory usage of the troubleshooter.
* */
public static final String TROUBLESHOOTER_IN_MEMORY_ISSUE_REPOSITORY_MAX_SIZE = "gobblin.troubleshooter.inMemoryIssueRepository.maxSize";
public static final int DEFAULT_TROUBLESHOOTER_IN_MEMORY_ISSUE_REPOSITORY_MAX_SIZE = 100;
public static final String JOB_METRICS_REPORTER_CLASS_KEY = "gobblin.job.metrics.reporter.class";
public static final String DEFAULT_JOB_METRICS_REPORTER_CLASS = "org.apache.gobblin.runtime.metrics.DefaultGobblinJobMetricReporter";
}
| 1,974 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/configuration/NoopDynamicConfigGenerator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.configuration;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.annotation.Alias;
/**
* NoOp dynamic config generator that returns an empty {@link Config}
*/
@Alias("noop")
public class NoopDynamicConfigGenerator implements DynamicConfigGenerator {
public NoopDynamicConfigGenerator() {
}
public Config generateDynamicConfig(Config config) {
return ConfigFactory.empty();
}
}
| 1,975 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/configuration/State.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.configuration;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.List;
import java.util.Properties;
import java.util.Set;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.base.Splitter;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.ImmutableSortedSet;
import com.google.common.collect.Iterables;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.google.gson.JsonArray;
import com.google.gson.JsonElement;
import com.google.gson.JsonParser;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import org.apache.gobblin.compat.hadoop.TextSerializer;
import org.apache.gobblin.compat.hadoop.WritableShim;
/**
 * A serializable wrapper class that can be persisted for {@link Properties}.
 *
 * <p>
 * Properties live in two layers: {@code commonProperties} (a shared base layer) and
 * {@code specProperties} (instance-specific values). On every read the specific layer
 * overrides the common layer.
 * </p>
 *
 * @author kgoodhop
 */
// NOTE(review): no field named "jsonParser" exists any more (the parser is the static
// JSON_PARSER, which Lombok ignores by default). The stale exclude entry is kept as-is
// so the Lombok-generated equals/hashCode is not perturbed -- confirm before removing.
@EqualsAndHashCode(exclude = {"jsonParser"})
public class State implements WritableShim {

  // Multi-valued (list/set) properties are stored internally as comma-separated strings.
  private static final Joiner LIST_JOINER = Joiner.on(",");
  private static final Splitter LIST_SPLITTER = Splitter.on(",").trimResults().omitEmptyStrings();
  private static final JsonParser JSON_PARSER = new JsonParser();

  // Identifier used for state persistence and logging; null until setId is called.
  private String id;

  // State contains two parts: commonProperties and specProperties (specProperties overrides commonProperties).
  @Getter
  private Properties commonProperties;
  @Getter
  private Properties specProperties;

  /** Creates an empty state with fresh, empty common and specific layers. */
  public State() {
    this.specProperties = new Properties();
    this.commonProperties = new Properties();
  }

  /**
   * Creates a state that adopts (does NOT copy) the given {@link Properties} as its
   * specific layer; later external mutation of {@code properties} is visible here.
   *
   * @param properties properties to use as the specific layer
   */
  public State(Properties properties) {
    this.specProperties = properties;
    this.commonProperties = new Properties();
  }

  /**
   * Copy constructor. The other state's common layer is shared by reference; its
   * effective (merged) properties are copied into this state's specific layer, and
   * spec entries whose value duplicates the common layer are dropped to avoid
   * redundant storage.
   *
   * @param otherState the {@link State} to copy from
   */
  public State(State otherState) {
    this.commonProperties = otherState.getCommonProperties();
    this.specProperties = new Properties();
    this.specProperties.putAll(otherState.getProperties());
    for (Object key : this.commonProperties.keySet()) {
      if (this.specProperties.containsKey(key) && this.commonProperties.get(key).equals(this.specProperties.get(key))) {
        this.specProperties.remove(key);
      }
    }
  }

  /**
   * Return a copy of the underlying {@link Properties} object, with the specific
   * layer merged over the common layer.
   *
   * @return A copy of the underlying {@link Properties} object.
   */
  public Properties getProperties() {
    // a.putAll(b) iterates over the entries of b. Synchronizing on b prevents concurrent modification on b.
    synchronized (this.specProperties) {
      Properties props = new Properties();
      if (this.commonProperties != null) {
        props.putAll(this.commonProperties);
      }
      props.putAll(this.specProperties);
      return props;
    }
  }

  /**
   * Populates this instance with properties of the other instance.
   *
   * @param otherState the other {@link State} instance
   */
  public void addAll(State otherState) {
    // Only pull in common properties the other state has that this one lacks;
    // everything lands in this state's specific layer.
    Properties diffCommonProps = new Properties();
    diffCommonProps.putAll(Maps.difference(this.commonProperties, otherState.commonProperties).entriesOnlyOnRight());
    addAll(diffCommonProps);
    addAll(otherState.specProperties);
  }

  /**
   * Populates this instance with values of a {@link Properties} instance.
   *
   * @param properties a {@link Properties} instance
   */
  public void addAll(Properties properties) {
    this.specProperties.putAll(properties);
  }

  /**
   * Add properties in a {@link State} instance that are not in the current instance.
   *
   * @param otherState a {@link State} instance
   */
  public void addAllIfNotExist(State otherState) {
    addAllIfNotExist(otherState.commonProperties);
    addAllIfNotExist(otherState.specProperties);
  }

  /**
   * Add properties in a {@link Properties} instance that are not in the current instance.
   *
   * @param properties a {@link Properties} instance
   */
  public void addAllIfNotExist(Properties properties) {
    for (String key : properties.stringPropertyNames()) {
      if (!this.specProperties.containsKey(key) && !this.commonProperties.containsKey(key)) {
        this.specProperties.setProperty(key, properties.getProperty(key));
      }
    }
  }

  /**
   * Add properties in a {@link State} instance that are in the current instance.
   *
   * @param otherState a {@link State} instance
   */
  public void overrideWith(State otherState) {
    overrideWith(otherState.commonProperties);
    overrideWith(otherState.specProperties);
  }

  /**
   * Add properties in a {@link Properties} instance that are in the current instance.
   * Overrides go into the specific layer even when the key was defined in the common layer.
   *
   * @param properties a {@link Properties} instance
   */
  public void overrideWith(Properties properties) {
    for (String key : properties.stringPropertyNames()) {
      if (this.specProperties.containsKey(key) || this.commonProperties.containsKey(key)) {
        this.specProperties.setProperty(key, properties.getProperty(key));
      }
    }
  }

  /**
   * Set the id used for state persistence and logging.
   *
   * @param id id of this instance
   */
  public void setId(String id) {
    this.id = id;
  }

  /**
   * Get the id of this instance.
   *
   * @return id of this instance
   */
  public String getId() {
    return this.id;
  }

  /**
   * Set a property.
   *
   * <p>
   * Both key and value are stored as strings.
   * </p>
   *
   * @param key property key
   * @param value property value (its {@code toString()} representation is stored)
   */
  public void setProp(String key, Object value) {
    this.specProperties.put(key, value.toString());
  }

  /**
   * Override existing {@link #commonProperties} and {@link #specProperties}.
   * Both are adopted by reference, not copied.
   *
   * @param commonProperties the new common layer
   * @param specProperties the new specific layer
   */
  public void setProps(Properties commonProperties, Properties specProperties) {
    this.commonProperties = commonProperties;
    this.specProperties = specProperties;
  }

  /**
   * Appends the input value to a list property that can be retrieved with {@link #getPropAsList}.
   *
   * <p>
   * List properties are internally stored as comma separated strings. Adding a value that contains commas (for
   * example "a,b,c") will essentially add multiple values to the property ("a", "b", and "c"). This is
   * similar to the way that {@link org.apache.hadoop.conf.Configuration} works.
   * </p>
   *
   * @param key property key
   * @param value property value (if it includes commas, it will be split by the commas).
   */
  public synchronized void appendToListProp(String key, String value) {
    if (contains(key)) {
      setProp(key, LIST_JOINER.join(getProp(key), value));
    } else {
      setProp(key, value);
    }
  }

  /**
   * Appends the input value to a set property that can be retrieved with {@link #getPropAsSet}.
   *
   * <p>
   * Set properties are internally stored as comma separated strings. Adding a value that contains commas (for
   * example "a,b,c") will essentially add multiple values to the property ("a", "b", and "c"). This is
   * similar to the way that {@link org.apache.hadoop.conf.Configuration} works.
   * </p>
   *
   * @param key property key
   * @param value property value (if it includes commas, it will be split by the commas).
   */
  public synchronized void appendToSetProp(String key, String value) {
    Set<String> set = value == null ? Sets.<String>newHashSet() : Sets.newHashSet(LIST_SPLITTER.splitToList(value));
    if (contains(key)) {
      set.addAll(getPropAsSet(key));
    }
    setProp(key, LIST_JOINER.join(set));
  }

  /**
   * Get the value of a property.
   *
   * @param key property key
   * @return value associated with the key as a string or <code>null</code> if the property is not set
   */
  public String getProp(String key) {
    if (this.specProperties.containsKey(key)) {
      return this.specProperties.getProperty(key);
    }
    return this.commonProperties.getProperty(key);
  }

  /**
   * Get the value of a property, using the given default value if the property is not set.
   *
   * @param key property key
   * @param def default value
   * @return value associated with the key or the default value if the property is not set
   */
  public String getProp(String key, String def) {
    if (this.specProperties.containsKey(key)) {
      return this.specProperties.getProperty(key);
    }
    return this.commonProperties.getProperty(key, def);
  }

  /**
   * Get the value of a comma separated property as a {@link List} of strings.
   *
   * @param key property key
   * @return value associated with the key as a {@link List} of strings
   */
  public List<String> getPropAsList(String key) {
    return LIST_SPLITTER.splitToList(getProp(key));
  }

  /**
   * Get the value of a property as a list of strings, using the given default value if the property is not set.
   *
   * @param key property key
   * @param def default value
   * @return value (the default value if the property is not set) associated with the key as a list of strings
   */
  public List<String> getPropAsList(String key, String def) {
    return LIST_SPLITTER.splitToList(getProp(key, def));
  }

  /**
   * Get the value of a comma separated property as a {@link Set} of strings.
   *
   * @param key property key
   * @return value associated with the key as a {@link Set} of strings
   */
  public Set<String> getPropAsSet(String key) {
    return ImmutableSet.copyOf(LIST_SPLITTER.splitToList(getProp(key)));
  }

  /**
   * Get the value of a comma separated property as a {@link Set} of strings.
   *
   * @param key property key
   * @param def default value
   * @return value (the default value if the property is not set) associated with the key as a {@link Set} of strings
   */
  public Set<String> getPropAsSet(String key, String def) {
    return ImmutableSet.copyOf(LIST_SPLITTER.splitToList(getProp(key, def)));
  }

  /**
   * Get the value of a property as a case insensitive {@link Set} of strings.
   *
   * @param key property key
   * @return value associated with the key as a case insensitive {@link Set} of strings
   */
  public Set<String> getPropAsCaseInsensitiveSet(String key) {
    return ImmutableSortedSet.copyOf(String.CASE_INSENSITIVE_ORDER, LIST_SPLITTER.split(getProp(key)));
  }

  /**
   * Get the value of a property as a case insensitive {@link Set} of strings, using the given default value if the property is not set.
   *
   * @param key property key
   * @param def default value
   * @return value associated with the key as a case insensitive {@link Set} of strings
   */
  public Set<String> getPropAsCaseInsensitiveSet(String key, String def) {
    return ImmutableSortedSet.copyOf(String.CASE_INSENSITIVE_ORDER, LIST_SPLITTER.split(getProp(key, def)));
  }

  /**
   * Get the value of a property as a long integer.
   *
   * @param key property key
   * @return long integer value associated with the key
   */
  public long getPropAsLong(String key) {
    return Long.parseLong(getProp(key));
  }

  /**
   * Get the value of a property as a long integer, using the given default value if the property is not set.
   *
   * @param key property key
   * @param def default value
   * @return long integer value associated with the key or the default value if the property is not set
   */
  public long getPropAsLong(String key, long def) {
    return Long.parseLong(getProp(key, String.valueOf(def)));
  }

  /**
   * Get the value of a property as an integer.
   *
   * @param key property key
   * @return integer value associated with the key
   */
  public int getPropAsInt(String key) {
    return Integer.parseInt(getProp(key));
  }

  /**
   * Get the value of a property as an integer, using the given default value if the property is not set.
   *
   * @param key property key
   * @param def default value
   * @return integer value associated with the key or the default value if the property is not set
   */
  public int getPropAsInt(String key, int def) {
    return Integer.parseInt(getProp(key, String.valueOf(def)));
  }

  /**
   * Get the value of a property as a short.
   *
   * @param key property key
   * @return short value associated with the key
   */
  public short getPropAsShort(String key) {
    return Short.parseShort(getProp(key));
  }

  /**
   * Get the value of a property as a short.
   *
   * @param key property key
   * @param radix radix used to parse the value
   * @return short value associated with the key
   */
  public short getPropAsShortWithRadix(String key, int radix) {
    return Short.parseShort(getProp(key), radix);
  }

  /**
   * Get the value of a property as an short, using the given default value if the property is not set.
   *
   * @param key property key
   * @param def default value
   * @return short value associated with the key or the default value if the property is not set
   */
  public short getPropAsShort(String key, short def) {
    return Short.parseShort(getProp(key, String.valueOf(def)));
  }

  /**
   * Get the value of a property as an short, using the given default value if the property is not set.
   *
   * @param key property key
   * @param def default value
   * @param radix radix used to parse the value
   * @return short value associated with the key or the default value if the property is not set
   */
  public short getPropAsShortWithRadix(String key, short def, int radix) {
    return contains(key) ? Short.parseShort(getProp(key), radix) : def;
  }

  /**
   * Get the value of a property as a double.
   *
   * @param key property key
   * @return double value associated with the key
   */
  public double getPropAsDouble(String key) {
    return Double.parseDouble(getProp(key));
  }

  /**
   * Get the value of a property as a double, using the given default value if the property is not set.
   *
   * @param key property key
   * @param def default value
   * @return double value associated with the key or the default value if the property is not set
   */
  public double getPropAsDouble(String key, double def) {
    return Double.parseDouble(getProp(key, String.valueOf(def)));
  }

  /**
   * Get the value of a property as a boolean.
   *
   * @param key property key
   * @return boolean value associated with the key
   */
  public boolean getPropAsBoolean(String key) {
    return Boolean.parseBoolean(getProp(key));
  }

  /**
   * Get the value of a property as a boolean, using the given default value if the property is not set.
   *
   * @param key property key
   * @param def default value
   * @return boolean value associated with the key or the default value if the property is not set
   */
  public boolean getPropAsBoolean(String key, boolean def) {
    return Boolean.parseBoolean(getProp(key, String.valueOf(def)));
  }

  /**
   * Get the value of a property as a {@link JsonArray}.
   *
   * @param key property key
   * @return {@link JsonArray} value associated with the key
   * @throws IllegalArgumentException if the value parses but is not a JSON array
   */
  public JsonArray getPropAsJsonArray(String key) {
    // Static parser referenced directly (was previously accessed via "this").
    JsonElement jsonElement = JSON_PARSER.parse(getProp(key));
    Preconditions.checkArgument(jsonElement.isJsonArray(),
        "Value for key " + key + " is malformed, it must be a JsonArray: " + jsonElement);
    return jsonElement.getAsJsonArray();
  }

  /**
   * Remove a property if it exists.
   *
   * @param key property key
   */
  public void removeProp(String key) {
    this.specProperties.remove(key);
    if (this.commonProperties.containsKey(key)) {
      // This case should not happen. Copy-on-write: the common layer may be shared
      // with other State instances, so never mutate it in place.
      Properties commonPropsCopy = new Properties();
      commonPropsCopy.putAll(this.commonProperties);
      commonPropsCopy.remove(key);
      this.commonProperties = commonPropsCopy;
    }
  }

  /**
   * Remove all properties with a certain keyPrefix
   *
   * @param prefix key prefix
   */
  public void removePropsWithPrefix(String prefix) {
    this.specProperties.entrySet().removeIf(entry -> ((String) entry.getKey()).startsWith(prefix));
    // Copy-on-write for the (possibly shared) common layer; copy only if a match is found.
    Properties newCommonProperties = null;
    for (Object key : this.commonProperties.keySet()) {
      if (((String) key).startsWith(prefix)) {
        if (newCommonProperties == null) {
          newCommonProperties = new Properties();
          newCommonProperties.putAll(this.commonProperties);
        }
        newCommonProperties.remove(key);
      }
    }
    if (newCommonProperties != null) {
      this.commonProperties = newCommonProperties;
    }
  }

  /**
   * @deprecated Use {@link #getProp(String)}
   */
  @Deprecated
  protected String getProperty(String key) {
    return getProp(key);
  }

  /**
   * @deprecated Use {@link #getProp(String, String)}
   */
  @Deprecated
  protected String getProperty(String key, String def) {
    return getProp(key, def);
  }

  /**
   * Get the names of all the properties set in a {@link Set}.
   *
   * @return names of all the properties set in a {@link Set}
   */
  public Set<String> getPropertyNames() {
    return Sets.newHashSet(
        Iterables.concat(this.commonProperties.stringPropertyNames(), this.specProperties.stringPropertyNames()));
  }

  /**
   * Check if a property is set.
   *
   * @param key property key
   * @return <code>true</code> if the property is set or <code>false</code> otherwise
   */
  public boolean contains(String key) {
    return this.specProperties.containsKey(key) || this.commonProperties.containsKey(key);
  }

  /**
   * Deserializes key/value pairs written by {@link #write(DataOutput)}. Note the
   * asymmetry: everything read lands in the specific layer, regardless of which
   * layer it was written from.
   */
  @Override
  public void readFields(DataInput in)
      throws IOException {
    int numEntries = in.readInt();
    while (numEntries-- > 0) {
      // intern() deduplicates the many repeated keys/values across persisted states.
      String key = TextSerializer.readTextAsString(in).intern();
      String value = TextSerializer.readTextAsString(in).intern();
      this.specProperties.put(key, value);
    }
  }

  /**
   * Serializes both layers as a flat count-prefixed sequence of key/value text pairs.
   */
  @Override
  public void write(DataOutput out)
      throws IOException {
    out.writeInt(this.commonProperties.size() + this.specProperties.size());
    for (Object key : this.commonProperties.keySet()) {
      TextSerializer.writeStringAsText(out, (String) key);
      TextSerializer.writeStringAsText(out, this.commonProperties.getProperty((String) key));
    }
    for (Object key : this.specProperties.keySet()) {
      TextSerializer.writeStringAsText(out, (String) key);
      TextSerializer.writeStringAsText(out, this.specProperties.getProperty((String) key));
    }
  }

  /** Renders both layers for debugging/logging. */
  @Override
  public String toString() {
    return "Common:" + this.commonProperties.toString() + "\n Specific: " + this.specProperties.toString();
  }
}
| 1,976 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/configuration/SourceState.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.configuration;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import org.apache.gobblin.broker.gobblin_scopes.GobblinScopeTypes;
import org.apache.gobblin.broker.iface.SharedResourcesBroker;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.source.workunit.Extract;
import lombok.Getter;
import lombok.Setter;
/**
 * A container for all meta data related to a particular source. This includes all properties
 * defined in job configuration files and all properties from tasks of the previous run.
 *
 * <p>
 * Properties can be overwritten at runtime and persisted upon job completion. Persisted
 * properties will be loaded in the next run and made available to use by the
 * {@link org.apache.gobblin.source.Source}.
 * </p>
 *
 * @author kgoodhop
 */
public class SourceState extends State {
  // JVM-wide registry of handed-out Extracts, used by createExtract to force unique extract IDs.
  private static final Set<Extract> EXTRACT_SET = Sets.newConcurrentHashSet();
  // Formatter for extract IDs: UTC timestamps in "yyyyMMddHHmmss" form (thread-safe, cached).
  private static final DateTimeFormatter DTF =
      DateTimeFormat.forPattern("yyyyMMddHHmmss").withLocale(Locale.US).withZone(DateTimeZone.UTC);
  // Previous run's dataset states keyed by dataset URN; may be lazily materialized (see below).
  private Map<String, SourceState> previousDatasetStatesByUrns;
  // Previous run's work unit states; may be lazily materialized (see below).
  private List<WorkUnitState> previousWorkUnitStates = Lists.newArrayList();
  @Getter
  @Setter
  private SharedResourcesBroker<GobblinScopeTypes> broker;
  // When set, previous states are fetched lazily through this functional instead of
  // being populated eagerly at construction time.
  @Getter
  @Setter
  private CombinedWorkUnitAndDatasetStateFunctional workUnitAndDatasetStateFunctional;
  // Guards against re-running the (potentially expensive) lazy materialization.
  private boolean areWorkUnitStatesMaterialized;
  /**
   * Default constructor.
   */
  public SourceState() {
    this.previousWorkUnitStates = new ArrayList<>();
    this.previousDatasetStatesByUrns = ImmutableMap.of();
  }
  /**
   * Constructor.
   *
   * @param properties job configuration properties
   */
  public SourceState(State properties) {
    super.addAll(properties);
    this.previousWorkUnitStates = new ArrayList<>();
    this.previousDatasetStatesByUrns = ImmutableMap.of();
  }
  /**
   * Constructor.
   *
   * @param properties job configuration properties
   * @param previousWorkUnitStates an {@link Iterable} of {@link WorkUnitState}s of the previous job run;
   *        each is wrapped as immutable before being stored
   */
  public SourceState(State properties, Iterable<WorkUnitState> previousWorkUnitStates) {
    super.addAll(properties);
    this.previousDatasetStatesByUrns = ImmutableMap.of();
    for (WorkUnitState workUnitState : previousWorkUnitStates) {
      this.previousWorkUnitStates.add(new ImmutableWorkUnitState(workUnitState));
    }
  }
  /**
   * Constructor.
   *
   * @param properties job configuration properties
   * @param previousDatasetStatesByUrns {@link SourceState} of the previous job run, keyed by dataset URN
   * @param previousWorkUnitStates an {@link Iterable} of {@link WorkUnitState}s of the previous job run;
   *        each is wrapped as immutable before being stored
   */
  public SourceState(State properties, Map<String, ? extends SourceState> previousDatasetStatesByUrns,
      Iterable<WorkUnitState> previousWorkUnitStates) {
    super.addAll(properties.getProperties());
    this.previousDatasetStatesByUrns = ImmutableMap.copyOf(previousDatasetStatesByUrns);
    for (WorkUnitState workUnitState : previousWorkUnitStates) {
      this.previousWorkUnitStates.add(new ImmutableWorkUnitState(workUnitState));
    }
  }
  /**
   * Get the {@link SourceState} of the previous job run.
   *
   * <p>
   * This is a convenient method for existing jobs that do not use the new feature that allows output data to
   * be committed on a per-dataset basis. Use of this method assumes that the job deals with a single dataset,
   * which uses the default data URN defined by {@link ConfigurationKeys#DEFAULT_DATASET_URN}.
   * </p>
   *
   * @return {@link SourceState} of the previous job run or {@code null} if no previous {@link SourceState} is found
   */
  public SourceState getPreviousSourceState() {
    return getPreviousDatasetState(ConfigurationKeys.DEFAULT_DATASET_URN);
  }
  /**
   * Get the state (in the form of a {@link SourceState}) of a dataset identified by a dataset URN
   * of the previous job run. Useful when dataset state store is enabled and we want to load the latest
   * state of a global watermark dataset.
   *
   * <p>The returned state is an {@link ImmutableSourceState} wrapper, so callers cannot mutate it.</p>
   *
   * @param datasetUrn the dataset URN
   * @return the dataset state (in the form of a {@link SourceState}) of the previous job run
   *         or {@code null} if no previous dataset state is found for the given dataset URN
   */
  public SourceState getPreviousDatasetState(String datasetUrn) {
    if (!this.previousDatasetStatesByUrns.containsKey(datasetUrn)) {
      return null;
    }
    return new ImmutableSourceState(this.previousDatasetStatesByUrns.get(datasetUrn));
  }
  /**
   * Get a {@link Map} from dataset URNs (as specified by {@link ConfigurationKeys#DATASET_URN_KEY})
   * to the {@link SourceState}s with those dataset URNs. When a lazy state functional is configured,
   * the map is materialized upon the first invocation of this method; subsequent calls return the
   * previously materialized map.
   *
   * <p>
   * {@link SourceState}s that do not have {@link ConfigurationKeys#DATASET_URN_KEY} set will be added
   * to the dataset state belonging to {@link ConfigurationKeys#DEFAULT_DATASET_URN}.
   * </p>
   *
   * @return a {@link Map} from dataset URNs to the {@link SourceState}s with those dataset URNs
   */
  public Map<String, SourceState> getPreviousDatasetStatesByUrns() {
    if (this.workUnitAndDatasetStateFunctional != null) {
      materializeWorkUnitAndDatasetStates(null);
    }
    return this.previousDatasetStatesByUrns;
  }
  /**
   * Get a {@link List} of previous {@link WorkUnitState}s. The list is lazily materialized upon invocation of the
   * method by the {@link org.apache.gobblin.source.Source}. Subsequent calls to this method will return the previously
   * materialized map.
   */
  public List<WorkUnitState> getPreviousWorkUnitStates() {
    if (this.workUnitAndDatasetStateFunctional != null) {
      materializeWorkUnitAndDatasetStates(null);
    }
    return this.previousWorkUnitStates;
  }
  /**
   * Get a {@link List} of previous {@link WorkUnitState}s for a given datasetUrn.
   *
   * <p>Note: unlike {@link #getPreviousWorkUnitStates()}, this path does NOT cache the
   * materialized result; each call with a functional configured fetches fresh state.</p>
   *
   * @param datasetUrn the dataset URN to fetch states for
   * @return {@link List} of {@link WorkUnitState}s.
   */
  public List<WorkUnitState> getPreviousWorkUnitStates(String datasetUrn) {
    if (this.workUnitAndDatasetStateFunctional != null) {
      try {
        CombinedWorkUnitAndDatasetState state = this.workUnitAndDatasetStateFunctional.getCombinedWorkUnitAndDatasetState(datasetUrn);
        return state.getPreviousWorkUnitStates();
      } catch (Exception e) {
        throw new RuntimeException(e);
      }
    }
    return this.previousWorkUnitStates;
  }
  /**
   * Get a {@link Map} from dataset URNs (as being specified by {@link ConfigurationKeys#DATASET_URN_KEY}
   * to the {@link WorkUnitState} with the dataset URNs.
   *
   * <p>
   * {@link WorkUnitState}s that do not have {@link ConfigurationKeys#DATASET_URN_KEY} set will be added
   * to the dataset state belonging to {@link ConfigurationKeys#DEFAULT_DATASET_URN}.
   * </p>
   *
   * @return a {@link Map} from dataset URNs to the {@link WorkUnitState} with the dataset URNs
   */
  public Map<String, Iterable<WorkUnitState>> getPreviousWorkUnitStatesByDatasetUrns() {
    Map<String, Iterable<WorkUnitState>> previousWorkUnitStatesByDatasetUrns = Maps.newHashMap();
    if (this.workUnitAndDatasetStateFunctional != null) {
      materializeWorkUnitAndDatasetStates(null);
    }
    for (WorkUnitState workUnitState : this.previousWorkUnitStates) {
      // Work units without an explicit dataset URN are grouped under the default URN.
      String datasetUrn =
          workUnitState.getProp(ConfigurationKeys.DATASET_URN_KEY, ConfigurationKeys.DEFAULT_DATASET_URN);
      if (!previousWorkUnitStatesByDatasetUrns.containsKey(datasetUrn)) {
        previousWorkUnitStatesByDatasetUrns.put(datasetUrn, Lists.newArrayList());
      }
      ((List<WorkUnitState>) previousWorkUnitStatesByDatasetUrns.get(datasetUrn)).add(workUnitState);
    }
    return ImmutableMap.copyOf(previousWorkUnitStatesByDatasetUrns);
  }
  /**
   * A thread-safe method for materializing previous {@link WorkUnitState}s and DatasetStates.
   * Runs at most once: the {@code areWorkUnitStatesMaterialized} flag (checked under this
   * object's monitor) makes subsequent calls no-ops.
   *
   * @param datasetUrn the dataset URN to materialize state for; {@code null} is passed by the
   *        non-URN-specific getters above
   */
  private synchronized void materializeWorkUnitAndDatasetStates(String datasetUrn) {
    if (!this.areWorkUnitStatesMaterialized) {
      try {
        CombinedWorkUnitAndDatasetState workUnitAndDatasetState =
            this.workUnitAndDatasetStateFunctional.getCombinedWorkUnitAndDatasetState(datasetUrn);
        this.previousWorkUnitStates = workUnitAndDatasetState.getPreviousWorkUnitStates();
        this.previousDatasetStatesByUrns =
            (Map<String, SourceState>) workUnitAndDatasetState.getPreviousDatasetStatesByUrns();
        this.areWorkUnitStatesMaterialized = true;
      } catch (Exception e) {
        throw new RuntimeException(e);
      }
    }
  }
  /**
   * Create a new properly populated {@link Extract} instance.
   *
   * <p>
   * This method should always return a new unique {@link Extract} instance.
   * Uniqueness is enforced by bumping the extract-ID timestamp one second at a time
   * until it no longer collides with any previously created {@link Extract}.
   * </p>
   *
   * @param type {@link org.apache.gobblin.source.workunit.Extract.TableType}
   * @param namespace namespace of the table this extract belongs to
   * @param table name of the table this extract belongs to
   * @return a new unique {@link Extract} instance
   *
   * @deprecated Use {@link org.apache.gobblin.source.extractor.extract.AbstractSource#createExtract(
   *org.apache.gobblin.source.workunit.Extract.TableType, String, String)}
   */
  @Deprecated
  public synchronized Extract createExtract(Extract.TableType type, String namespace, String table) {
    Extract extract = new Extract(this, type, namespace, table);
    while (EXTRACT_SET.contains(extract)) {
      if (Strings.isNullOrEmpty(extract.getExtractId())) {
        extract.setExtractId(DTF.print(new DateTime()));
      } else {
        DateTime extractDateTime = DTF.parseDateTime(extract.getExtractId());
        extract.setExtractId(DTF.print(extractDateTime.plusSeconds(1)));
      }
    }
    EXTRACT_SET.add(extract);
    return extract;
  }
  /**
   * Create a new {@link WorkUnit} instance from a given {@link Extract}.
   *
   * @param extract given {@link Extract}
   * @return a new {@link WorkUnit} instance
   *
   * @deprecated Properties in SourceState should not added to a WorkUnit. Having each WorkUnit contain a copy of
   * SourceState is a waste of memory. Use {@link WorkUnit#create(Extract)}.
   */
  @Deprecated
  public WorkUnit createWorkUnit(Extract extract) {
    return new WorkUnit(this, extract);
  }
  /**
   * Serializes this state, including the previous {@link WorkUnitState}s.
   */
  @Override
  public void write(DataOutput out)
      throws IOException {
    write(out, true);
  }
  /**
   * Serializes this state, optionally omitting the previous {@link WorkUnitState}s
   * (a zero count is written in their place so {@link #readFields} stays symmetric).
   *
   * @param out the sink to write to
   * @param writePreviousWorkUnitStates whether to include previous work unit states
   */
  public void write(DataOutput out, boolean writePreviousWorkUnitStates)
      throws IOException {
    if (!writePreviousWorkUnitStates) {
      out.writeInt(0);
    } else {
      out.writeInt(this.previousWorkUnitStates.size());
      for (WorkUnitState state : this.previousWorkUnitStates) {
        state.write(out);
      }
    }
    super.write(out);
  }
  /**
   * Deserializes state written by {@link #write(DataOutput, boolean)}: a count-prefixed
   * list of {@link WorkUnitState}s (wrapped immutable) followed by the base {@link State}.
   */
  @Override
  public void readFields(DataInput in)
      throws IOException {
    int size = in.readInt();
    for (int i = 0; i < size; i++) {
      WorkUnitState workUnitState = new WorkUnitState();
      workUnitState.readFields(in);
      this.previousWorkUnitStates.add(new ImmutableWorkUnitState(workUnitState));
    }
    super.readFields(in);
  }
  @Override
  public boolean equals(Object object) {
    if (!(object instanceof SourceState)) {
      return false;
    }
    SourceState other = (SourceState) object;
    return super.equals(other) && this.previousDatasetStatesByUrns.equals(other.previousDatasetStatesByUrns)
        && this.previousWorkUnitStates.equals(other.previousWorkUnitStates);
  }
  @Override
  public int hashCode() {
    final int prime = 31;
    int result = super.hashCode();
    result = prime * result + this.previousDatasetStatesByUrns.hashCode();
    result = prime * result + this.previousWorkUnitStates.hashCode();
    return result;
  }
  /**
   * An immutable version of {@link SourceState} that disables all methods that may change the
   * internal state of a {@link SourceState}.
   *
   * <p>NOTE(review): only the overridden mutators below are blocked; other inherited mutators
   * (e.g. {@link State#addAllIfNotExist}, {@link State#removeProp}) appear to remain callable --
   * confirm whether that is intentional.</p>
   */
  private static class ImmutableSourceState extends SourceState {
    public ImmutableSourceState(SourceState sourceState) {
      super(sourceState, sourceState.previousDatasetStatesByUrns, sourceState.previousWorkUnitStates);
    }
    @Override
    public void readFields(DataInput in)
        throws IOException {
      throw new UnsupportedOperationException();
    }
    @Override
    public void setId(String id) {
      throw new UnsupportedOperationException();
    }
    @Override
    public void setProp(String key, Object value) {
      throw new UnsupportedOperationException();
    }
    @Override
    public synchronized void appendToListProp(String key, String value) {
      throw new UnsupportedOperationException();
    }
    @Override
    public void addAll(State otherState) {
      throw new UnsupportedOperationException();
    }
    @Override
    public void addAll(Properties properties) {
      throw new UnsupportedOperationException();
    }
  }
}
| 1,977 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/configuration/DynamicConfigGenerator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.configuration;
import com.typesafe.config.Config;
/**
 * For generating dynamic configuration that gets added to the job configuration.
 * These are configuration values that cannot be determined statically at job specification time.
 * One example is the SSL certificate location of a certificate that is fetched at runtime.
 */
public interface DynamicConfigGenerator {

  /**
   * Generate dynamic configuration that should be added to the job configuration.
   *
   * @param config the (static) job configuration used as input for generating the dynamic values
   * @return config object with the dynamic configuration
   */
  Config generateDynamicConfig(Config config);
}
| 1,978 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/password/EncryptedPasswordAuthenticator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.password;
import java.net.Authenticator;
import java.net.PasswordAuthentication;
import java.util.Properties;
import com.google.common.base.Preconditions;
/**
 * {@link Authenticator} that uses a username and password from the provided {@link Properties} to authenticate, and
 * also decrypts the password using {@link PasswordManager}.
 */
public class EncryptedPasswordAuthenticator extends Authenticator {

  public static final String AUTHENTICATOR_USERNAME = "authenticator.username";
  public static final String AUTHENTICATOR_PASSWORD = "authenticator.password";

  private final String username;
  private final String password;

  /**
   * @param props must contain {@value #AUTHENTICATOR_USERNAME} and {@value #AUTHENTICATOR_PASSWORD}; the password
   *              value may be an {@code ENC(...)}-wrapped ciphertext, which is decrypted via {@link PasswordManager}
   * @throws NullPointerException if either property is missing
   */
  public EncryptedPasswordAuthenticator(Properties props) {
    // Fail fast on missing configuration before doing any PasswordManager work (the original
    // checked only after constructing a PasswordManager and attempting decryption).
    String rawUsername = props.getProperty(AUTHENTICATOR_USERNAME);
    String rawPassword = props.getProperty(AUTHENTICATOR_PASSWORD);
    Preconditions.checkNotNull(rawUsername, AUTHENTICATOR_USERNAME + " must be set for EncryptedPasswordAuthenticator");
    Preconditions.checkNotNull(rawPassword, AUTHENTICATOR_PASSWORD + " must be set for EncryptedPasswordAuthenticator");
    this.username = rawUsername;
    // readPassword() decrypts ENC(...)-wrapped values and passes other values through unchanged.
    this.password = PasswordManager.getInstance(props).readPassword(rawPassword);
    Preconditions.checkNotNull(this.password, AUTHENTICATOR_PASSWORD + " must be set for EncryptedPasswordAuthenticator");
  }

  @Override
  protected PasswordAuthentication getPasswordAuthentication() {
    return new PasswordAuthentication(this.username, this.password.toCharArray());
  }
}
| 1,979 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/password/PasswordManager.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.password;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.net.URI;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.jasypt.util.text.BasicTextEncryptor;
import org.jasypt.util.text.StrongTextEncryptor;
import org.jasypt.util.text.TextEncryptor;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Charsets;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.google.common.io.Closer;
import com.google.common.io.LineReader;
import lombok.EqualsAndHashCode;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
/**
 * A class for managing password encryption and decryption. To encrypt or decrypt a password, a master password
 * should be provided which is used as encryption or decryption key.
 * Encryption is done with the single key provided.
 * Decryption is tried with multiple keys to facilitate key rotation.
 * If the master key file provided is /var/tmp/masterKey.txt, decryption is tried with keys at
 * /var/tmp/masterKey.txt, /var/tmp/masterKey.txt.1, /var/tmp/masterKey.txt.2, and so on and so forth till
 * either any such file does not exist or {@code this.NUMBER_OF_ENCRYPT_KEYS} attempts have been made.
 *
 * @author Ziyang Liu
 */
public class PasswordManager {

  private static final Logger LOG = LoggerFactory.getLogger(PasswordManager.class);

  // Bounds for the static cache of PasswordManager instances (one per distinct configuration).
  private static final long CACHE_SIZE = 100;
  private static final long CACHE_EXPIRATION_MIN = 10;
  // Encrypted passwords are wrapped as ENC(<ciphertext>); group 1 captures the ciphertext.
  private static final Pattern PASSWORD_PATTERN = Pattern.compile("ENC\\((.*)\\)");

  private final boolean useStrongEncryptor;
  // FileSystem used to read master password files; stays null if instantiation fails (warning logged below).
  private FileSystem fs;
  // One encryptor per master password file successfully read; decryption tries them in order (key rotation).
  private List<TextEncryptor> encryptors;

  // Cache keyed by (numOfEncryptionKeys, fsURI, masterPasswordFile, useStrongEncryptor); entries expire after
  // CACHE_EXPIRATION_MIN minutes without access, so rotated master password files are eventually re-read.
  private static final LoadingCache<CachedInstanceKey, PasswordManager> CACHED_INSTANCES =
      CacheBuilder.newBuilder().maximumSize(CACHE_SIZE).expireAfterAccess(CACHE_EXPIRATION_MIN, TimeUnit.MINUTES)
          .build(new CacheLoader<CachedInstanceKey, PasswordManager>() {
            @Override
            public PasswordManager load(CachedInstanceKey cacheKey) {
              return new PasswordManager(cacheKey);
            }
          });

  private PasswordManager(CachedInstanceKey cacheKey) {
    this.useStrongEncryptor = cacheKey.useStrongEncryptor;
    try {
      // Prefer an explicitly configured FS URI; otherwise derive the FS from the master password file's path.
      this.fs = cacheKey.fsURI != null ? FileSystem.get(URI.create(cacheKey.fsURI), new Configuration())
          : (cacheKey.masterPasswordFile != null ? new Path(cacheKey.masterPasswordFile).getFileSystem(new Configuration()) : null);
    } catch (IOException e) {
      LOG.warn("Failed to instantiate FileSystem.", e);
    }
    this.encryptors = getEncryptors(cacheKey);
  }

  /**
   * Builds the list of encryptors by probing up to {@code numOfEncryptionKeys} master password files:
   * the base file first, then files with suffixes ".1", ".2", ...
   */
  private List<TextEncryptor> getEncryptors(CachedInstanceKey cacheKey) {
    List<TextEncryptor> encryptors = new ArrayList<>();
    int numOfEncryptionKeys = cacheKey.numOfEncryptionKeys;
    String suffix = "";
    int i = 1;
    if (cacheKey.masterPasswordFile == null || numOfEncryptionKeys < 1) {
      return encryptors;
    }
    Exception exception = null;
    do {
      Path currentMasterPasswordFile = new Path(cacheKey.masterPasswordFile + suffix);
      try (Closer closer = Closer.create()) {
        if (!fs.exists(currentMasterPasswordFile) ||
            fs.getFileStatus(currentMasterPasswordFile).isDirectory()) {
          // NOTE(review): suffix is not advanced on this path, so the next iteration re-probes the same
          // file until the attempt budget is exhausted — confirm this is intended.
          continue;
        }
        InputStream in = closer.register(fs.open(currentMasterPasswordFile));
        // Only the first line of the file is used as the master password.
        String masterPassword = new LineReader(new InputStreamReader(in, Charsets.UTF_8)).readLine();
        TextEncryptor encryptor = useStrongEncryptor ? new StrongTextEncryptor() : new BasicTextEncryptor();
        // setPassword() needs to be called via reflection since the TextEncryptor interface doesn't have this method.
        encryptor.getClass().getMethod("setPassword", String.class).invoke(encryptor, masterPassword);
        encryptors.add(encryptor);
        // Advance to the next rotation suffix only after a successful read.
        suffix = "." + String.valueOf(i);
      } catch (FileNotFoundException fnf) {
        // It is ok for password files not being present
        LOG.warn("Master password file " + currentMasterPasswordFile + " not found.");
      } catch (IOException ioe) {
        // Remember the IOException so we can fail loudly below if *no* file was readable.
        exception = ioe;
        LOG.warn("Master password could not be read from file " + currentMasterPasswordFile);
      } catch (Exception e) {
        // Covers reflection failures and encryptor construction problems.
        LOG.warn("Encryptor could not be instantiated.");
      }
    } while (i++ < numOfEncryptionKeys);

    // Throw exception if could not read any existing password file
    if (encryptors.size() < 1 && exception != null) {
      throw new RuntimeException("Master Password could not be read from any master password file.", exception);
    }
    return encryptors;
  }

  /**
   * Get an instance with no master password, which cannot encrypt or decrypt passwords.
   */
  public static PasswordManager getInstance() {
    try {
      return CACHED_INSTANCES.get(new CachedInstanceKey());
    } catch (ExecutionException e) {
      throw new RuntimeException("Unable to get an instance of PasswordManager", e);
    }
  }

  /**
   * Get an instance. The location of the master password file is provided via "encrypt.key.loc".
   */
  public static PasswordManager getInstance(State state) {
    try {
      return CACHED_INSTANCES
          .get(new CachedInstanceKey(state));
    } catch (ExecutionException e) {
      throw new RuntimeException("Unable to get an instance of PasswordManager", e);
    }
  }

  /**
   * Get an instance. The location of the master password file is provided via "encrypt.key.loc".
   */
  public static PasswordManager getInstance(Properties props) {
    return getInstance(new State(props));
  }

  /**
   * Get an instance. The master password file is given by masterPwdLoc.
   */
  public static PasswordManager getInstance(Path masterPwdLoc) {
    State state = new State();
    state.setProp(ConfigurationKeys.ENCRYPT_KEY_LOC, masterPwdLoc.toString());
    state.setProp(ConfigurationKeys.ENCRYPT_KEY_FS_URI, masterPwdLoc.toUri());
    try {
      return CACHED_INSTANCES
          .get(new CachedInstanceKey(state));
    } catch (ExecutionException e) {
      throw new RuntimeException("Unable to get an instance of PasswordManager", e);
    }
  }

  private static boolean shouldUseStrongEncryptor(State state) {
    return state.getPropAsBoolean(ConfigurationKeys.ENCRYPT_USE_STRONG_ENCRYPTOR,
        ConfigurationKeys.DEFAULT_ENCRYPT_USE_STRONG_ENCRYPTOR);
  }

  /**
   * Encrypt a password. A master password must have been provided in the constructor.
   * @param plain A plain password to be encrypted.
   * @return The encrypted password.
   */
  public String encryptPassword(String plain) {
    Preconditions.checkArgument(this.encryptors.size() > 0,
        "A master password needs to be provided for encrypting passwords.");
    try {
      // Encryption always uses the first (primary) key; rotation applies to decryption only.
      return this.encryptors.get(0).encrypt(plain);
    } catch (Exception e) {
      throw new RuntimeException("Failed to encrypt password", e);
    }
  }

  /**
   * Decrypt an encrypted password. A master password file must have been provided in the constructor.
   * @param encrypted An encrypted password.
   * @return The decrypted password.
   */
  public String decryptPassword(String encrypted) {
    Preconditions.checkArgument(this.encryptors.size() > 0,
        "A master password needs to be provided for decrypting passwords.");
    // Try each key in order; the first that decrypts without throwing wins.
    for (TextEncryptor encryptor : encryptors) {
      try {
        return encryptor.decrypt(encrypted);
      } catch (Exception e) {
        // NOTE(review): this logs the ciphertext (and, below, embeds it in the exception message);
        // confirm that is acceptable for this deployment's log handling.
        LOG.warn("Failed attempt to decrypt secret {}", encrypted, e);
      }
    }
    LOG.error("All {} decrypt attempt(s) failed.", encryptors.size());
    throw new RuntimeException("Failed to decrypt password ENC(" + encrypted + ")");
  }

  /**
   * Decrypt a password if it is an encrypted password (in the form of ENC(.*))
   * and a master password file has been provided in the constructor.
   * Otherwise, return the password as is.
   */
  public String readPassword(String password) {
    if (password == null || encryptors.size() < 1) {
      return password;
    }
    Matcher matcher = PASSWORD_PATTERN.matcher(password);
    if (matcher.find()) {
      return this.decryptPassword(matcher.group(1));
    }
    return password;
  }

  /** Reads the master password from the given file, resolving the FileSystem from the default Configuration. */
  public static Optional<String> getMasterPassword(Path masterPasswordFile) {
    try {
      FileSystem fs = masterPasswordFile.getFileSystem(new Configuration());
      return getMasterPassword(fs, masterPasswordFile);
    } catch (IOException e) {
      throw new RuntimeException("Failed to obtain master password from " + masterPasswordFile, e);
    }
  }

  /**
   * Reads the first line of the given file as the master password.
   * @return {@link Optional#absent()} if the file does not exist or is a directory.
   */
  public static Optional<String> getMasterPassword(FileSystem fs, Path masterPasswordFile) {
    try (Closer closer = Closer.create()) {
      if (!fs.exists(masterPasswordFile) || fs.getFileStatus(masterPasswordFile).isDirectory()) {
        LOG.warn(masterPasswordFile + " does not exist or is not a file. Cannot decrypt any encrypted password.");
        return Optional.absent();
      }
      InputStream in = closer.register(fs.open(masterPasswordFile));
      return Optional.of(new LineReader(new InputStreamReader(in, Charsets.UTF_8)).readLine());
    } catch (IOException e) {
      throw new RuntimeException("Failed to obtain master password from " + masterPasswordFile, e);
    }
  }

  /** Cache key: two States with the same values below share one cached PasswordManager. */
  @EqualsAndHashCode
  private static class CachedInstanceKey {
    int numOfEncryptionKeys;
    String fsURI;
    String masterPasswordFile;
    boolean useStrongEncryptor;

    public CachedInstanceKey(State state) {
      this.numOfEncryptionKeys = state.getPropAsInt(ConfigurationKeys.NUMBER_OF_ENCRYPT_KEYS, ConfigurationKeys.DEFAULT_NUMBER_OF_MASTER_PASSWORDS);
      this.useStrongEncryptor = shouldUseStrongEncryptor(state);
      this.fsURI = state.getProp(ConfigurationKeys.ENCRYPT_KEY_FS_URI);
      this.masterPasswordFile = state.getProp(ConfigurationKeys.ENCRYPT_KEY_LOC);
    }

    // Key for the no-master-password instance (cannot encrypt or decrypt).
    public CachedInstanceKey() {
    }
  }
}
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/records/FlushControlMessageHandler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.records;
import java.io.Flushable;
import java.io.IOException;
import org.apache.gobblin.stream.ControlMessage;
import org.apache.gobblin.stream.FlushControlMessage;
/**
 * Flush control message handler that will flush a {@link Flushable} when handling a {@link FlushControlMessage}
 */
public class FlushControlMessageHandler implements ControlMessageHandler {
  protected Flushable flushable;

  /**
   * Create a flush control message that will flush the given {@link Flushable}
   * @param flushable the flushable to flush when a {@link FlushControlMessage} is received
   */
  public FlushControlMessageHandler(Flushable flushable) {
    this.flushable = flushable;
  }

  @Override
  public void handleMessage(ControlMessage message) {
    // Only FlushControlMessages trigger a flush; every other message type is ignored.
    if (!(message instanceof FlushControlMessage)) {
      return;
    }
    try {
      this.flushable.flush();
    } catch (IOException ioe) {
      throw new RuntimeException("Could not flush when handling FlushControlMessage", ioe);
    }
  }
}
| 1,981 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/records/RecordStreamWithMetadata.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.records;
import java.util.function.Function;
import org.apache.gobblin.metadata.GlobalMetadata;
import org.apache.gobblin.stream.RecordEnvelope;
import org.apache.gobblin.stream.StreamEntity;
import io.reactivex.Flowable;
import lombok.Data;
/**
 * A stream of records along with metadata (e.g. Schema).
 * @param <D> type of record.
 * @param <S> type of schema.
 */
@Data
public class RecordStreamWithMetadata<D, S> {
  // The stream of entities (records plus control messages) and the metadata (e.g. schema) that applies to it.
  private final Flowable<StreamEntity<D>> recordStream;
  private final GlobalMetadata<S> globalMetadata;

  /**
   * @return a new {@link RecordStreamWithMetadata} with a different {@link #recordStream} but same schema.
   */
  public <DO> RecordStreamWithMetadata<DO, S> withRecordStream(Flowable<StreamEntity<DO>> newRecordStream) {
    return withRecordStream(newRecordStream, this.globalMetadata);
  }

  /**
   * @return a new {@link RecordStreamWithMetadata} with a different {@link #recordStream} and {@link #globalMetadata}.
   * @deprecated wraps the bare schema in a {@link GlobalMetadata}; prefer the overload taking a
   *             {@link GlobalMetadata} directly.
   */
  @Deprecated
  public <DO, SO> RecordStreamWithMetadata<DO, SO> withRecordStream(Flowable<StreamEntity<DO>> newRecordStream, SO newSchema) {
    return new RecordStreamWithMetadata<>(newRecordStream, GlobalMetadata.<SO>builder().schema(newSchema).build());
  }

  /**
   * @return a new {@link RecordStreamWithMetadata} with a different {@link #recordStream} and {@link #globalMetadata}.
   */
  public <DO, SO> RecordStreamWithMetadata<DO, SO> withRecordStream(Flowable<StreamEntity<DO>> newRecordStream,
      GlobalMetadata<SO> newGlobalMetadata) {
    return new RecordStreamWithMetadata<>(newRecordStream, newGlobalMetadata);
  }

  /**
   * @return a new {@link RecordStreamWithMetadata} with a different {@link #recordStream} but same schema using a
   * lambda expression on the stream.
   */
  public <DO> RecordStreamWithMetadata<DO, S>
      mapStream(Function<? super Flowable<StreamEntity<D>>, ? extends Flowable<StreamEntity<DO>>> transform) {
    return new RecordStreamWithMetadata<>(transform.apply(this.recordStream), this.globalMetadata);
  }

  /**
   * Apply the input mapping function to {@link RecordEnvelope}s, while letting other kinds of {@link StreamEntity}
   * to pass through.
   */
  public <DO> RecordStreamWithMetadata<DO, S> mapRecords(Function<RecordEnvelope<D>, RecordEnvelope<DO>> transform) {
    return withRecordStream(this.recordStream.map(entity -> {
      if (entity instanceof RecordEnvelope) {
        return transform.apply((RecordEnvelope<D>) entity);
      } else {
        // Unchecked cast: non-record entities are assumed not to actually carry a record payload of
        // type D, so retyping them to StreamEntity<DO> is treated as safe — TODO confirm.
        return (StreamEntity<DO>) entity;
      }
    }));
  }
}
| 1,982 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/records/ControlMessageHandler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.records;
import org.apache.gobblin.stream.ControlMessage;
/**
 * Handles {@link ControlMessage}s in the record stream.
 */
public interface ControlMessageHandler {

  /**
   * handle the input {@link ControlMessage}.
   */
  void handleMessage(ControlMessage message);

  /**
   * A {@link ControlMessageHandler} that does nothing.
   */
  ControlMessageHandler NOOP = new Noop();

  /** No-op implementation backing {@link #NOOP}; silently discards every message. */
  class Noop implements ControlMessageHandler {
    @Override
    public void handleMessage(ControlMessage message) {
      // NOOP
    }
  }
}
| 1,983 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/records/RecordStreamProcessor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.records;
import java.io.IOException;
import org.apache.gobblin.configuration.WorkUnitState;
/**
 * An object that applies some function to a {@link RecordStreamWithMetadata}.
 *
 * For example, converters, quality checkers (filters), etc. are instances of this.
 *
 * @param <SI> input schema type
 * @param <SO> output schema type
 * @param <DI> input data type
 * @param <DO> output data type
 */
public interface RecordStreamProcessor<SI, SO, DI, DO> {

  /**
   * Return a {@link RecordStreamWithMetadata} with the appropriate modifications.
   *
   * @param inputStream the stream (records plus metadata) to transform
   * @param state the work unit state providing configuration for the transformation
   * @throws StreamProcessingException if the stream cannot be processed
   */
  RecordStreamWithMetadata<DO, SO> processStream(RecordStreamWithMetadata<DI, SI> inputStream, WorkUnitState state)
      throws StreamProcessingException;

  /**
   * Exception allowed by {@link #processStream(RecordStreamWithMetadata, WorkUnitState)}.
   */
  class StreamProcessingException extends IOException {
    public StreamProcessingException(String message) {
      super(message);
    }

    public StreamProcessingException(String message, Throwable cause) {
      super(message, cause);
    }

    public StreamProcessingException(Throwable cause) {
      super(cause);
    }
  }
}
| 1,984 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/records/RecordStreamConsumer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.records;
/**
 * An object that consumes a {@link RecordStreamWithMetadata}.
 * @param <S> schema type
 * @param <D> data type
 */
public interface RecordStreamConsumer<S, D> {

  /**
   * Subscribe to the input {@link RecordStreamWithMetadata}.
   *
   * @param stream the stream to consume
   * @throws Exception if consumption of the stream fails
   */
  void consumeRecordStream(RecordStreamWithMetadata<D, S> stream) throws Exception;
}
| 1,985 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/compat | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/compat/hadoop/TextSerializer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compat.hadoop;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
/**
 * Serializes Java Strings similar to a Hadoop Text without depending on the underlying
 * Hadoop libraries. Code is mostly taken from Hadoop 2.3's WritableUtils.
 *
 * The wire format is a zero-compressed variable-length long holding the UTF-8 byte count,
 * followed by the raw UTF-8 bytes.
 */
public class TextSerializer {

  // Utility class: all members are static, so prevent instantiation.
  private TextSerializer() {
  }

  /**
   * Serialize a String using the same logic as a Hadoop Text object.
   *
   * @param stream destination stream
   * @param str string to serialize (encoded as UTF-8)
   * @throws IOException if the underlying stream fails
   */
  public static void writeStringAsText(DataOutput stream, String str) throws IOException {
    byte[] utf8Encoded = str.getBytes(StandardCharsets.UTF_8);
    writeVLong(stream, utf8Encoded.length);
    stream.write(utf8Encoded);
  }

  /**
   * Deserialize a Hadoop Text object into a String.
   *
   * @param in source stream positioned at a vlong length prefix
   * @return the decoded UTF-8 string
   * @throws IOException if the underlying stream fails or ends prematurely
   * @throws ArithmeticException if the encoded length does not fit in an int
   */
  public static String readTextAsString(DataInput in) throws IOException {
    // Fail loudly on lengths that don't fit an int instead of silently truncating the cast.
    int bufLen = Math.toIntExact(readVLong(in));
    byte[] buf = new byte[bufLen];
    in.readFully(buf);
    return new String(buf, StandardCharsets.UTF_8);
  }

  /**
   * From org.apache.hadoop.io.WritableUtis
   *
   * Serializes a long to a binary stream with zero-compressed encoding.
   * For -112 <= i <= 127, only one byte is used with the actual value.
   * For other values of i, the first byte value indicates whether the
   * long is positive or negative, and the number of bytes that follow.
   * If the first byte value v is between -113 and -120, the following long
   * is positive, with number of bytes that follow are -(v+112).
   * If the first byte value v is between -121 and -128, the following long
   * is negative, with number of bytes that follow are -(v+120). Bytes are
   * stored in the high-non-zero-byte-first order.
   *
   * @param stream Binary output stream
   * @param i Long to be serialized
   * @throws java.io.IOException
   */
  private static void writeVLong(DataOutput stream, long i) throws IOException {
    if (i >= -112 && i <= 127) {
      stream.writeByte((byte) i);
      return;
    }

    int len = -112;
    if (i < 0) {
      i ^= -1L; // take one's complement'
      len = -120;
    }

    // Count the number of non-zero bytes to determine the encoded length marker.
    long tmp = i;
    while (tmp != 0) {
      tmp = tmp >> 8;
      len--;
    }

    stream.writeByte((byte) len);

    len = (len < -120) ? -(len + 120) : -(len + 112);

    // Emit the payload bytes high-order first.
    for (int idx = len; idx != 0; idx--) {
      int shiftbits = (idx - 1) * 8;
      long mask = 0xFFL << shiftbits;
      stream.writeByte((byte) ((i & mask) >> shiftbits));
    }
  }

  /**
   * Reads a zero-compressed encoded long from input stream and returns it.
   * @param stream Binary input stream
   * @throws java.io.IOException
   * @return deserialized long from stream.
   */
  private static long readVLong(DataInput stream) throws IOException {
    byte firstByte = stream.readByte();
    int len = decodeVIntSize(firstByte);
    if (len == 1) {
      // Single-byte encoding: the byte IS the value.
      return firstByte;
    }
    long i = 0;
    for (int idx = 0; idx < len - 1; idx++) {
      byte b = stream.readByte();
      i = i << 8;
      i = i | (b & 0xFF);
    }
    // Negative values were stored as one's complement; undo that here.
    return (isNegativeVInt(firstByte) ? (i ^ -1L) : i);
  }

  /**
   * Parse the first byte of a vint/vlong to determine the number of bytes
   * @param value the first byte of the vint/vlong
   * @return the total number of bytes (1 to 9)
   */
  private static int decodeVIntSize(byte value) {
    if (value >= -112) {
      return 1;
    } else if (value < -120) {
      return -119 - value;
    }
    return -111 - value;
  }

  /**
   * Given the first byte of a vint/vlong, determine the sign
   * @param value the first byte
   * @return is the value negative
   */
  private static boolean isNegativeVInt(byte value) {
    return value < -120 || (value >= -112 && value < 0);
  }
}
| 1,986 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/compat | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/compat/hadoop/WritableShim.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compat.hadoop;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
/**
 * An interface that mirrors Hadoop's Writable interface; this allows objects in gobblin-api
 * to implement similar serializers without explicitly depending on Hadoop itself for the
 * interface definition.
 *
 * Note: For deserialization to work, classes must either implement a no-parameter constructor
 * or always pass an item for re-use in the Hadoop deserialize() call.
 */
public interface WritableShim {
  /** Populate this object's fields from the serialized form read off the stream. */
  void readFields(DataInput in) throws IOException;

  /** Write this object's serialized form to the stream. */
  void write(DataOutput out) throws IOException;
}
| 1,987 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/qualitychecker | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/qualitychecker/row/RowLevelPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.qualitychecker.row;
import java.io.Closeable;
import java.io.IOException;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.util.FinalState;
/**
 * A policy that operates on each row
 * and executes a given check
 * @author stakiar
 */
public abstract class RowLevelPolicy implements Closeable, FinalState {
  protected final State state;
  private final Type type;

  public enum Type {
    FAIL, // Fail if the test does not pass
    ERR_FILE, // Write record to error file
    OPTIONAL // The test is optional
  }

  public enum Result {
    PASSED, // The test passed
    FAILED // The test failed
  }

  public RowLevelPolicy(State state, RowLevelPolicy.Type type) {
    this.state = state;
    this.type = type;
  }

  /** No resources held by default; subclasses that hold resources should override. */
  @Override
  public void close() throws IOException {}

  public State getTaskState() {
    return this.state;
  }

  public Type getType() {
    return this.type;
  }

  /** @return the configured error-file location (used when {@link Type#ERR_FILE}); may be null if unset. */
  public String getErrFileLocation() {
    return this.state.getProp(ConfigurationKeys.ROW_LEVEL_ERR_FILE);
  }

  /**
   * Run this policy's check against a single record.
   * @param record the row to evaluate
   * @return {@link Result#PASSED} or {@link Result#FAILED}
   */
  public abstract Result executePolicy(Object record);

  @Override
  public String toString() {
    return this.getClass().getName();
  }

  /**
   * Get final state for this object. By default this returns an empty {@link org.apache.gobblin.configuration.State}, but
   * concrete subclasses can add information that will be added to the task state.
   * @return Empty {@link org.apache.gobblin.configuration.State}.
   */
  @Override
  public State getFinalState() {
    return new State();
  }
}
| 1,988 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/qualitychecker | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/qualitychecker/task/TaskLevelPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.qualitychecker.task;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
/**
 * A quality-check policy evaluated once per task (as opposed to once per record).
 *
 * <p>Concrete subclasses implement {@link #executePolicy()} to run the actual check against the
 * supplied {@link State}.</p>
 */
public abstract class TaskLevelPolicy {

  /** How a failed check affects the task. */
  public enum Type {
    FAIL, // Fail if the test does not pass
    OPTIONAL // The test is optional
  }

  /** Outcome of a single policy execution. */
  public enum Result {
    PASSED, // The test passed
    FAILED // The test failed
  }

  private final State state;
  private final Type type;

  public TaskLevelPolicy(State state, TaskLevelPolicy.Type type) {
    this.state = state;
    this.type = type;
  }

  /** @return the task {@link State} this policy was constructed with */
  public State getTaskState() {
    return state;
  }

  /** @return the enforcement {@link Type} of this policy */
  public Type getType() {
    return type;
  }

  /** Runs the check and reports whether it passed. */
  public abstract Result executePolicy();

  @Override
  public String toString() {
    return getClass().getName();
  }

  /**
   * Convenience accessor for the previous table state.
   *
   * <p>NOTE(review): this unconditionally casts {@link #state} to {@link WorkUnitState}; only call
   * it when the policy was constructed with a {@link WorkUnitState} — confirm with callers.</p>
   */
  public State getPreviousTableState() {
    return ((WorkUnitState) state).getPreviousTableState();
  }
}
| 1,989 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/broker/SimpleScopeType.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.broker;
import java.util.Collection;
import java.util.List;
import java.util.Set;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import org.apache.gobblin.broker.iface.ScopeInstance;
import org.apache.gobblin.broker.iface.ScopeType;
import javax.annotation.Nullable;
import lombok.AllArgsConstructor;
/**
 * Simple {@link ScopeType} topology with only two levels: GLOBAL (the root) and LOCAL (its child).
 */
@AllArgsConstructor
public enum SimpleScopeType implements ScopeType<SimpleScopeType> {
GLOBAL("global"),
LOCAL("local", GLOBAL);
// Scope types considered "local"; membership in this set drives isLocal().
private static final Set<SimpleScopeType> LOCAL_SCOPES = Sets.newHashSet(LOCAL);
// Parent scope types in the topology DAG (empty for the GLOBAL root).
private final List<SimpleScopeType> parentScopes;
// Id of the default scope instance; a null id would mean "no default instance".
private final String defaultId;
SimpleScopeType(String defaultId, SimpleScopeType... parentScopes) {
this.defaultId = defaultId;
this.parentScopes = Lists.newArrayList(parentScopes);
}
/** @return whether this scope type is local, i.e. contained in {@link #LOCAL_SCOPES}. */
@Override
public boolean isLocal() {
return LOCAL_SCOPES.contains(this);
}
/** @return the parent scope types of this scope in the topology. */
@Override
public Collection<SimpleScopeType> parentScopes() {
return this.parentScopes;
}
/** @return the default {@link ScopeInstance} for this type, or null if no default id is set. */
@Nullable
@Override
public ScopeInstance defaultScopeInstance() {
return this.defaultId == null ? null : new SimpleScope(this, this.defaultId);
}
/** @return the root of this topology, always {@link #GLOBAL}. */
@Override
public SimpleScopeType rootScope() {
return GLOBAL;
}
}
| 1,990 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/broker/ResourceEntry.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.broker;
import org.apache.gobblin.broker.iface.SharedResourceFactoryResponse;
/**
 * A {@link SharedResourceFactoryResponse} that wraps an actual instance of a resource.
 */
public interface ResourceEntry<T> extends SharedResourceFactoryResponse<T> {

  /** @return the wrapped resource instance */
  T getResource();

  /**
   * @return whether this entry is still usable. An invalid entry is evicted from the cache,
   * triggering a fresh call to the {@link org.apache.gobblin.broker.iface.SharedResourceFactory}.
   */
  boolean isValid();

  /**
   * Called when the entry is invalidated. Whether the contained resource is closed here depends on
   * the semantics the {@link org.apache.gobblin.broker.iface.SharedResourceFactory} wishes to
   * provide (e.g. whether already acquired objects should be closed).
   *
   * <p>For consistency, the broker runs this method synchronously before a new instance is created
   * for the same key, blocking all requests for that key — so implementations should be reasonably
   * fast.</p>
   */
  void onInvalidate();

  /**
   * Atomically checks validity and returns the resource: if all callers access the resource through
   * this method, the object is returned atomically with respect to any validity state change. This
   * avoids races when getting the resource changes state (e.g. resources usable only a fixed number
   * of times).
   *
   * @return the resource if this entry is valid, otherwise {@code null}
   */
  default T getResourceIfValid() {
    return isValid() ? getResource() : null;
  }
}
| 1,991 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/broker/BrokerConstants.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.broker;
/**
 * Constants used for {@link org.apache.gobblin.broker.iface.SharedResourcesBroker}.
 */
public class BrokerConstants {

  /** Prefix for all broker-related configuration keys. */
  public static final String GOBBLIN_BROKER_CONFIG_PREFIX = "gobblin.broker";

  /** Configuration key listing broker namespaces. */
  public static final String GOBBLIN_BROKER_CONFIG_NAMESPACES = "gobblin.brokerNamespaces";

  /** Constants holder — not meant to be instantiated. */
  private BrokerConstants() {
  }
}
| 1,992 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/broker/ResourceCoordinate.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.broker;
import org.apache.gobblin.broker.iface.ScopeType;
import org.apache.gobblin.broker.iface.SharedResourceFactory;
import org.apache.gobblin.broker.iface.SharedResourceFactoryResponse;
import org.apache.gobblin.broker.iface.SharedResourceKey;
import lombok.Data;
/**
 * A {@link SharedResourceFactoryResponse} that indicates the broker should return the object at a different coordinate
 * (i.e. factory, key, scope combination).
 */
@Data
public class ResourceCoordinate<T, K extends SharedResourceKey, S extends ScopeType<S>> implements SharedResourceFactoryResponse<T> {
// Factory to use when building the resource at the redirected coordinate.
private final SharedResourceFactory<T, K, S> factory;
// Key identifying the resource within that factory.
private final K key;
// Scope at which the resource should be created / looked up.
private final S scope;
}
| 1,993 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/broker/StringNameSharedResourceKey.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.broker;
import java.util.Objects;

import org.apache.gobblin.broker.iface.SharedResourceKey;
/**
 * A {@link SharedResourceKey} identified solely by a string name.
 */
public class StringNameSharedResourceKey implements SharedResourceKey {

  private final String name;

  public StringNameSharedResourceKey(String name) {
    this.name = name;
  }

  /** @return the name itself, used directly as the configuration sub-key. */
  @Override
  public String toConfigurationKey() {
    return name;
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (o == null || getClass() != o.getClass()) {
      return false;
    }
    // Objects.equals covers the null-name case the original hand-rolled check handled.
    return Objects.equals(this.name, ((StringNameSharedResourceKey) o).name);
  }

  @Override
  public int hashCode() {
    // Objects.hashCode(null) == 0, consistent with equals' treatment of a null name.
    return Objects.hashCode(this.name);
  }
}
| 1,994 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/broker/SimpleScope.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.broker;
import org.apache.gobblin.broker.iface.ScopeInstance;
import org.apache.gobblin.broker.iface.ScopeType;
import lombok.Data;
/**
 * A simple {@link ScopeInstance} implementation containing just a {@link ScopeType} and a string id.
 */
@Data
public class SimpleScope<S extends ScopeType<S>> implements ScopeInstance<S> {
// The scope type this instance belongs to.
private final S type;
// Identifier distinguishing this instance from others of the same type.
private final String scopeId;
}
| 1,995 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/broker | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/broker/iface/SubscopedBrokerBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.broker.iface;
import com.typesafe.config.Config;
import org.apache.gobblin.broker.BrokerConstants;
/**
 * A builder used to create a new {@link SharedResourcesBroker} compatible with an existing {@link SharedResourcesBroker}
 * (i.e. guaranteeing objects are correctly shared among scopes).
 */
public interface SubscopedBrokerBuilder<S extends ScopeType<S>, B extends SharedResourcesBroker<S>> {
/**
 * Specify an additional ancestor {@link SharedResourcesBroker}. Useful when a {@link ScopeType} has multiple parents.
 */
SubscopedBrokerBuilder<S, B> withAdditionalParentBroker(SharedResourcesBroker<S> broker);
/**
 * Specify {@link Config} overrides. Note these overrides will only be applicable at the new leaf scope and descendant
 * scopes. {@link Config} entries must start with {@link BrokerConstants#GOBBLIN_BROKER_CONFIG_PREFIX} (any entries
 * not satisfying that condition will be ignored).
 */
SubscopedBrokerBuilder<S, B> withOverridingConfig(Config config);
/**
 * @return the newly built {@link SharedResourcesBroker}.
 */
B build();
}
| 1,996 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/broker | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/broker/iface/NoSuchScopeException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.broker.iface;
/**
 * Exception thrown when trying to access a {@link ScopeInstance} that is not defined in a {@link SharedResourcesBroker}.
 */
public class NoSuchScopeException extends Exception {

  public <S extends ScopeType<S>> NoSuchScopeException(S scope) {
    // Equivalent to String.format("Scope %s is not defined in this %s.", scope, ...):
    // string concatenation applies String.valueOf, matching %s semantics (including null).
    super("Scope " + scope + " is not defined in this "
        + SharedResourcesBroker.class.getSimpleName() + ".");
  }
}
| 1,997 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/broker | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/broker/iface/NotConfiguredException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.broker.iface;
/**
 * Exception thrown by a factory if there is not enough provided configuration to create the
 * shared object.
 */
public class NotConfiguredException extends Exception {

  public NotConfiguredException() {
  }

  public NotConfiguredException(String message) {
    super(message);
  }

  /**
   * Creates an exception with a message and the underlying cause, so the original failure is not
   * lost when a factory wraps a lower-level error.
   */
  public NotConfiguredException(String message, Throwable cause) {
    super(message, cause);
  }

  /** Creates an exception carrying only the underlying cause. */
  public NotConfiguredException(Throwable cause) {
    super(cause);
  }
}
| 1,998 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/broker | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/broker/iface/SharedResourcesBroker.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.broker.iface;
import java.io.Closeable;
import java.io.IOException;
/**
 * A class that provides access to objects shared by multiple components within a process, as well as objects virtually
 * shared among different processes (i.e. objects that synchronize with equivalent objects in other processes).
 *
 * Different parts of an application may require objects that are ideally shared. For example, if multiple objects are
 * writing to the same file, it is convenient to have a single file handle. Instead of passing around all shared objects,
 * using static objects, or doing dependency injection, a {@link SharedResourcesBroker} provides a way to acquire such shared
 * objects as needed, letting the broker manage the lifecycle of the objects.
 *
 * Objects are created using {@link SharedResourceFactory}s,
 * and are specific to a particular {@link ScopeInstance} and
 * {@link SharedResourceKey}. {@link ScopeInstance}s represent a DAG of relevant scopes in the application, for example
 * GLOBAL -> JOB -> TASK. {@link SharedResourceKey}s identify different objects created by the same factory, for example
 * handles to different files could be keyed by the file path.
 *
 * {@link SharedResourcesBroker} guarantees that multiple requests for objects with the same factory,
 * {@link ScopeInstance} and {@link SharedResourceKey} return the same object, even if called for different {@link SharedResourcesBroker}
 * instances. This guarantee requires that new brokers are created using {@link #newSubscopedBuilder(ScopeInstance)} only.
 *
 * @param <S> the {@link ScopeType} tree used by this {@link SharedResourcesBroker}.
 */
public interface SharedResourcesBroker<S extends ScopeType<S>> extends Closeable {
/**
 * @return The lowest defined {@link ScopeInstance} in this {@link SharedResourcesBroker}.
 * This provides the lowest {@link ScopeType} at which the {@link SharedResourcesBroker} can return shared objects.
 */
ScopeInstance<S> selfScope();
/**
 * Get the {@link ScopeInstance} in this broker's topology at the provided {@link ScopeType}.
 * @throws NoSuchScopeException if the input {@link ScopeType} is lower than that of {@link #selfScope()}.
 */
ScopeInstance<S> getScope(S scopeType) throws NoSuchScopeException;
/**
 * Get a shared resource created by the input {@link SharedResourceFactory}. The resource will be shared
 * at least by all brokers with the same {@link #selfScope()}, but the factory may choose to create the resource
 * at a higher scope.
 *
 * @param factory The {@link SharedResourceFactory} used to create the shared object.
 * @param key Identifies different objects from the same factory in the same {@link ScopeInstance}.
 * @param <T> type of object created by the factory.
 * @param <K> type of key accepted by the factory.
 * @return an object of type T.
 * @throws NotConfiguredException
 */
<T, K extends SharedResourceKey> T getSharedResource(SharedResourceFactory<T, K, S> factory, K key)
throws NotConfiguredException;
/**
 * Get a shared resource created by the input {@link SharedResourceFactory} at the {@link ScopeInstance}
 * returned by {@link #getScope} on the input {@link ScopeType}.
 *
 * @param factory The {@link SharedResourceFactory} used to create the shared object.
 * @param key Identifies different objects from the same factory in the same {@link ScopeInstance}.
 * @param scopeType {@link ScopeType} at which the object will be obtained.
 * @param <T> type of object created by the factory.
 * @param <K> type of key accepted by the factory.
 * @return an object of type T.
 * @throws NotConfiguredException
 * @throws NoSuchScopeException
 */
<T, K extends SharedResourceKey> T getSharedResourceAtScope(SharedResourceFactory<T, K, S> factory, K key,
S scopeType) throws NotConfiguredException, NoSuchScopeException;
/**
 * Bind an instance to the input factory, key, and scope, so later lookups return it instead of a
 * factory-created object.
 *
 * @param factory The {@link SharedResourceFactory} used to create the shared object.
 * @param key Identifies different objects from the same factory in the same {@link ScopeInstance}.
 * @param scopeType {@link ScopeType} at which the object will be obtained.
 * @param instance the instance to bind.
 * @param <T> type of object created by the factory.
 * @param <K> type of key accepted by the factory.
 * @throws NoSuchScopeException
 */
<T, K extends SharedResourceKey> void bindSharedResourceAtScope(SharedResourceFactory<T, K, S> factory, K key,
S scopeType, T instance) throws NoSuchScopeException;
/**
 * Close all resources at this and descendant scopes, meaning {@link Closeable}s will be closed and
 * {@link com.google.common.util.concurrent.Service}s will be shut down. Future calls to get the same resource will
 * instantiate a new resource instead.
 *
 * Best practice guideline: this method should only be called by the object who created the {@link SharedResourcesBroker}.
 * Objects or methods which received an already built {@link SharedResourcesBroker} should not call this method.
 * @throws IOException
 */
@Override
void close()
throws IOException;
/**
 * Get a builder to create a descendant {@link SharedResourcesBroker}.
 *
 * @param subscope the {@link ScopeInstance} of the new {@link SharedResourcesBroker}.
 * @return a {@link SubscopedBrokerBuilder}.
 */
SubscopedBrokerBuilder<S, ?> newSubscopedBuilder(ScopeInstance<S> subscope);
}
| 1,999 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.