index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/gobblin/gobblin-runtime/src/test/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-runtime/src/test/java/org/apache/gobblin/runtime/troubleshooter/AutomaticTroubleshooterFactoryTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.troubleshooter;
import java.util.Properties;
import org.testng.annotations.Test;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.util.ConfigUtils;
import static org.junit.Assert.assertTrue;
public class AutomaticTroubleshooterFactoryTest {
@Test
public void willGetNoopTroubleshooterByDefault() {
  // The test module does not depend on the gobblin-troubleshooter module, so the
  // factory should fall back to the no-op implementation. See the main
  // AutomaticTroubleshooterFactory class for details.
  AutomaticTroubleshooter result =
      AutomaticTroubleshooterFactory.createForJob(ConfigUtils.propertiesToConfig(new Properties()));
  assertTrue(result instanceof NoopAutomaticTroubleshooter);
}
@Test
public void willGetNoopTroubleshooterWhenDisabled() {
  // Explicitly disabling the troubleshooter must also yield the no-op implementation.
  Properties props = new Properties();
  props.setProperty(ConfigurationKeys.TROUBLESHOOTER_DISABLED, "true");
  AutomaticTroubleshooter result =
      AutomaticTroubleshooterFactory.createForJob(ConfigUtils.propertiesToConfig(props));
  assertTrue(result instanceof NoopAutomaticTroubleshooter);
}
} | 1,300 |
0 | Create_ds/gobblin/gobblin-runtime/src/test/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-runtime/src/test/java/org/apache/gobblin/runtime/kafka/MockedHighLevelConsumer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.kafka;
import java.util.Map;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.stream.Collectors;
import com.google.common.base.Predicate;
import com.typesafe.config.Config;
import javax.annotation.Nullable;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.kafka.client.DecodeableKafkaRecord;
import org.apache.gobblin.source.extractor.extract.kafka.KafkaPartition;
import org.apache.gobblin.testing.AssertWithBackoff;
@Slf4j
public class MockedHighLevelConsumer extends HighLevelConsumer<byte[], byte[]> {
  /** Raw message payloads observed by {@link #processMessage}, in arrival order. */
  @Getter
  private final BlockingQueue<byte[]> messages;

  /** Last committed offset per partition, captured for test assertions. */
  @Getter
  private final Map<KafkaPartition, Long> committedOffsets;

  public MockedHighLevelConsumer(String topic, Config config, int numThreads) {
    super(topic, config, numThreads);
    this.messages = new LinkedBlockingQueue<>();
    this.committedOffsets = new ConcurrentHashMap<>();
  }

  /**
   * Blocks until exactly {@code n} messages have been received, polling with backoff.
   *
   * @param n expected message count
   * @param timeoutMillis how long to keep retrying before failing the assertion
   * @throws Exception if the expected count is not reached within the timeout
   */
  public void awaitExactlyNMessages(final int n, int timeoutMillis) throws Exception {
    AssertWithBackoff.assertTrue(new Predicate<Void>() {
      @Override
      public boolean apply(@Nullable Void input) {
        return MockedHighLevelConsumer.this.messages.size() == n;
      }
    }, timeoutMillis, n + " messages", log, 2, 1000);
  }

  @Override
  protected void processMessage(DecodeableKafkaRecord<byte[], byte[]> message) {
    this.messages.offer(message.getValue());
  }

  @Override
  protected void commitOffsets(Map<KafkaPartition, Long> partitionOffsets) {
    super.commitOffsets(partitionOffsets);
    // Record the committed offsets directly. The previous implementation built an
    // identity copy via entrySet().stream().collect(Collectors.toMap(...)), which is
    // an unnecessary intermediate map; putAll on the ConcurrentHashMap is equivalent.
    committedOffsets.putAll(partitionOffsets);
  }

  @Override
  public void shutDown() {
    // Widens visibility of the parent's shutDown so tests can stop the consumer.
    super.shutDown();
  }
}
| 1,301 |
0 | Create_ds/gobblin/gobblin-runtime/src/test/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-runtime/src/test/java/org/apache/gobblin/runtime/instance/TestStandardGobblinInstanceDriver.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.instance;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.util.concurrent.AbstractIdleService;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.runtime.api.GobblinInstanceDriver;
import org.apache.gobblin.runtime.api.GobblinInstanceEnvironment;
import org.apache.gobblin.runtime.api.GobblinInstancePlugin;
import org.apache.gobblin.runtime.api.GobblinInstancePluginFactory;
import org.apache.gobblin.runtime.plugins.email.EmailNotificationPlugin;
import org.apache.gobblin.runtime.std.DefaultConfigurableImpl;
import com.google.common.collect.ImmutableMap;
import lombok.AllArgsConstructor;
import lombok.Getter;
/**
* Unit tests for {@link StandardGobblinInstanceDriver}
*/
public class TestStandardGobblinInstanceDriver {
  @Test
  public void testBuilder() {
    // Sys config requests the "fake1" plugin by alias and disables the default
    // email-notification plugin so it does not appear in the plugin list.
    Config sysCfg = ConfigFactory.parseMap(ImmutableMap.<String, Object>builder()
        .put(StandardGobblinInstanceDriver.PLUGINS_FULL_KEY, "fake1")
        .put(EmailNotificationPlugin.EMAIL_NOTIFICATIONS_DISABLED_KEY, Boolean.valueOf(true))
        .build());
    GobblinInstanceEnvironment env = Mockito.mock(GobblinInstanceEnvironment.class);
    Mockito.when(env.getSysConfig())
        .thenReturn(DefaultConfigurableImpl.createFromConfig(sysCfg));

    // "fake2" is added programmatically; "fake1" should be picked up from the config.
    StandardGobblinInstanceDriver.Builder builder = StandardGobblinInstanceDriver.builder()
        .withInstanceEnvironment(env)
        .addPlugin(new FakePluginFactory2());

    List<GobblinInstancePluginFactory> factories = builder.getPlugins();
    Assert.assertEquals(factories.size(), 2);

    Set<Class<?>> factoryClasses = new HashSet<>();
    for (GobblinInstancePluginFactory factory : factories) {
      factoryClasses.add(factory.getClass());
    }
    Assert.assertTrue(factoryClasses.contains(FakePluginFactory1.class));
    Assert.assertTrue(factoryClasses.contains(FakePluginFactory2.class));
  }

  /** Minimal plugin that does nothing on start-up or shutdown. */
  @AllArgsConstructor
  static class FakePlugin extends AbstractIdleService implements GobblinInstancePlugin {
    @Getter final GobblinInstanceDriver instance;

    @Override protected void startUp() throws Exception {
      // No resources to acquire.
    }

    @Override protected void shutDown() throws Exception {
      // No resources to release.
    }
  }

  /** Factory registered under the "fake1" alias; instantiated via the sys config. */
  @Alias("fake1")
  static class FakePluginFactory1 implements GobblinInstancePluginFactory {
    @Override public GobblinInstancePlugin createPlugin(GobblinInstanceDriver instance) {
      return new FakePlugin(instance);
    }
  }

  /** Factory registered under the "fake2" alias; added directly through the builder. */
  @Alias("fake2")
  static class FakePluginFactory2 implements GobblinInstancePluginFactory {
    @Override public GobblinInstancePlugin createPlugin(GobblinInstanceDriver instance) {
      return new FakePlugin(instance);
    }
  }
}
| 1,302 |
0 | Create_ds/gobblin/gobblin-runtime/src/test/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-runtime/src/test/java/org/apache/gobblin/runtime/instance/TestDefaultGobblinInstanceDriverImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.instance;
import java.util.Collections;
import java.util.concurrent.TimeUnit;
import org.mockito.Mockito;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.base.Optional;
import com.google.common.base.Predicate;
import com.google.common.util.concurrent.Service.State;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.broker.SharedResourcesBrokerFactory;
import org.apache.gobblin.broker.gobblin_scopes.GobblinScopeTypes;
import org.apache.gobblin.runtime.api.Configurable;
import org.apache.gobblin.runtime.api.GobblinInstancePluginFactory;
import org.apache.gobblin.runtime.api.JobExecutionLauncher;
import org.apache.gobblin.runtime.api.JobSpec;
import org.apache.gobblin.runtime.api.JobSpecScheduler;
import org.apache.gobblin.runtime.job_catalog.InMemoryJobCatalog;
import org.apache.gobblin.runtime.std.DefaultConfigurableImpl;
import org.apache.gobblin.runtime.std.DefaultJobSpecScheduleImpl;
import org.apache.gobblin.testing.AssertWithBackoff;
/**
* Unit tests for {@link DefaultGobblinInstanceDriverImpl}
*/
public class TestDefaultGobblinInstanceDriverImpl {
@Test
// Verifies that job-catalog additions/updates/removals are forwarded to the scheduler,
// and that the driver's upFlag/uptime metrics track its lifecycle.
public void testScheduling() throws Exception {
final Logger log = LoggerFactory.getLogger(getClass().getName() + ".testScheduling");
final Optional<Logger> loggerOpt = Optional.of(log);
InMemoryJobCatalog jobCatalog = new InMemoryJobCatalog(loggerOpt);
JobSpecScheduler scheduler = Mockito.mock(JobSpecScheduler.class);
JobExecutionLauncher jobLauncher = Mockito.mock(JobExecutionLauncher.class);
Configurable sysConfig = DefaultConfigurableImpl.createFromConfig(ConfigFactory.empty());
// Drive a real StandardGobblinInstanceDriver with mocked scheduler/launcher so we can
// verify the catalog-listener -> scheduler interactions without running actual jobs.
final DefaultGobblinInstanceDriverImpl driver =
new StandardGobblinInstanceDriver("testScheduling", sysConfig, jobCatalog, scheduler,
jobLauncher,
Optional.<MetricContext>absent(),
loggerOpt,
Collections.<GobblinInstancePluginFactory>emptyList(), SharedResourcesBrokerFactory.createDefaultTopLevelBroker(ConfigFactory.empty(),
GobblinScopeTypes.GLOBAL.defaultScopeInstance()));
// Two versions of the same job URI plus a second job: each put should trigger a
// scheduleJob call, and the remove should trigger unscheduleJob.
JobSpec js1_1 = JobSpec.builder("test.job1").withVersion("1").build();
JobSpec js1_2 = JobSpec.builder("test.job1").withVersion("2").build();
JobSpec js2 = JobSpec.builder("test.job2").withVersion("1").build();
driver.startAsync().awaitRunning(1000, TimeUnit.MILLISECONDS);
long startTimeMs = System.currentTimeMillis();
Assert.assertTrue(driver.isRunning());
Assert.assertTrue(driver.isInstrumentationEnabled());
Assert.assertNotNull(driver.getMetricContext());
jobCatalog.put(js1_1);
// Catalog notifications are asynchronous, so metric assertions need retries.
AssertWithBackoff awb = AssertWithBackoff.create().backoffFactor(1.5).maxSleepMs(100)
.timeoutMs(1000).logger(log);
awb.assertTrue(new Predicate<Void>() {
@Override public boolean apply(Void input) {
log.debug("upFlag=" + driver.getMetrics().getUpFlag().getValue().intValue());
return driver.getMetrics().getUpFlag().getValue().intValue() == 1;
}
}, "upFlag==1");
jobCatalog.put(js2);
jobCatalog.put(js1_2);
jobCatalog.remove(js2.getUri());
// NOTE(review): these stubs are configured AFTER the scheduleJob invocations above have
// already happened, so they only affect hypothetical later calls; the verify(...) checks
// below rely on Mockito having recorded the earlier invocations (which returned the mock
// default). Consider moving the when(...) stubs before the jobCatalog.put calls.
Mockito.when(
scheduler.scheduleJob(Mockito.eq(js1_1),
Mockito.any(DefaultGobblinInstanceDriverImpl.JobSpecRunnable.class)))
.thenReturn(DefaultJobSpecScheduleImpl.createNoSchedule(js1_1, null));
Mockito.when(
scheduler.scheduleJob(Mockito.eq(js2),
Mockito.any(DefaultGobblinInstanceDriverImpl.JobSpecRunnable.class)))
.thenReturn(DefaultJobSpecScheduleImpl.createNoSchedule(js2, null));
Mockito.when(
scheduler.scheduleJob(Mockito.eq(js1_2),
Mockito.any(DefaultGobblinInstanceDriverImpl.JobSpecRunnable.class)))
.thenReturn(DefaultJobSpecScheduleImpl.createNoSchedule(js1_2, null));
// Each catalog put must have resulted in exactly one scheduleJob call, and the
// removal in one unscheduleJob call.
Mockito.verify(scheduler).scheduleJob(Mockito.eq(js1_1),
Mockito.any(DefaultGobblinInstanceDriverImpl.JobSpecRunnable.class));
Mockito.verify(scheduler).scheduleJob(Mockito.eq(js2),
Mockito.any(DefaultGobblinInstanceDriverImpl.JobSpecRunnable.class));
Mockito.verify(scheduler).scheduleJob(Mockito.eq(js1_2),
Mockito.any(DefaultGobblinInstanceDriverImpl.JobSpecRunnable.class));
Mockito.verify(scheduler).unscheduleJob(Mockito.eq(js2.getUri()));
final long elapsedMs = System.currentTimeMillis() - startTimeMs;
// Uptime should eventually be at least the elapsed wall-clock time since start.
awb.assertTrue(new Predicate<Void>() {
@Override public boolean apply(Void input) {
long uptimeMs = driver.getMetrics().getUptimeMs().getValue().longValue();
return uptimeMs >= elapsedMs;
}
}, "uptime > elapsedMs");
long uptimeMs = driver.getMetrics().getUptimeMs().getValue().longValue();
// Sanity upper bound: uptime should not wildly exceed elapsed time.
Assert.assertTrue(uptimeMs <= 2 * elapsedMs, "uptime=" + uptimeMs + " elapsedMs=" + elapsedMs);
driver.stopAsync();
driver.awaitTerminated(100, TimeUnit.MILLISECONDS);
Assert.assertEquals(driver.state(), State.TERMINATED);
Assert.assertEquals(driver.getMetrics().getUpFlag().getValue().intValue(), 0);
// Need an assert with retries because Guava service container notifications are async
awb.assertTrue(new Predicate<Void>() {
@Override public boolean apply(Void input) {
log.debug("upTimeMs=" + driver.getMetrics().getUptimeMs().getValue().longValue());
return driver.getMetrics().getUptimeMs().getValue().longValue() == 0;
}
}, "upTimeMs==0");
}
}
| 1,303 |
0 | Create_ds/gobblin/gobblin-runtime/src/test/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-runtime/src/test/java/org/apache/gobblin/runtime/instance/TestStandardGobblinInstanceLauncher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.instance;
import java.net.URI;
import java.util.ArrayList;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.base.Function;
import com.google.common.collect.ImmutableMap;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.runtime.api.GobblinInstanceDriver;
import org.apache.gobblin.runtime.api.JobExecutionDriver;
import org.apache.gobblin.runtime.api.JobExecutionLauncher;
import org.apache.gobblin.runtime.api.JobExecutionMonitor;
import org.apache.gobblin.runtime.api.JobExecutionResult;
import org.apache.gobblin.runtime.api.JobLifecycleListener;
import org.apache.gobblin.runtime.api.JobSpec;
import org.apache.gobblin.runtime.instance.DefaultGobblinInstanceDriverImpl.JobSpecRunnable;
import org.apache.gobblin.runtime.job_exec.JobLauncherExecutionDriver;
import org.apache.gobblin.runtime.job_spec.ResolvedJobSpec;
import org.apache.gobblin.runtime.std.DefaultJobLifecycleListenerImpl;
import org.apache.gobblin.runtime.std.FilteredJobLifecycleListener;
import org.apache.gobblin.runtime.std.JobSpecFilter;
import org.apache.gobblin.testing.AssertWithBackoff;
import org.apache.gobblin.util.test.HelloWorldSource;
import org.apache.gobblin.writer.test.GobblinTestEventBusWriter;
import org.apache.gobblin.writer.test.TestingEventBusAsserter;
/**
* Unit tests for {@link StandardGobblinInstanceLauncher}
*/
public class TestStandardGobblinInstanceLauncher {
@Test
/** Test running of a job when submitted directly to the execution driver*/
public void testDirectToExecutionDriver() throws Exception {
StandardGobblinInstanceLauncher.Builder instanceLauncherBuilder =
StandardGobblinInstanceLauncher.builder()
.withInstanceName("testDirectToExecutionDriver");
instanceLauncherBuilder.driver();
StandardGobblinInstanceLauncher instanceLauncher =
instanceLauncherBuilder.build();
instanceLauncher.startAsync();
instanceLauncher.awaitRunning(5, TimeUnit.SECONDS);
// Job spec is loaded from a classpath resource; the HelloWorld job runs quickly.
JobSpec js1 = JobSpec.builder()
.withConfig(ConfigFactory.parseResources("gobblin/runtime/instance/SimpleHelloWorldJob.jobconf"))
.build();
GobblinInstanceDriver instance = instanceLauncher.getDriver();
final JobExecutionLauncher.StandardMetrics launcherMetrics =
instance.getJobLauncher().getMetrics();
AssertWithBackoff asb = new AssertWithBackoff().timeoutMs(100);
// First launch: launched/completed counters should each reach 1.
// NOTE(review): checkLaunchJob also calls instanceLauncher.stopAsync(); the second
// invocation below relies on launchJob still working after that — confirm intended.
checkLaunchJob(instanceLauncher, js1, instance);
Assert.assertEquals(launcherMetrics.getNumJobsLaunched().getCount(), 1);
Assert.assertEquals(launcherMetrics.getNumJobsCompleted().getCount(), 1);
// Need to use assert with backoff because of race conditions with the callback that updates the
// metrics
asb.assertEquals(new Function<Void, Long>() {
@Override public Long apply(Void input) {
return launcherMetrics.getNumJobsCommitted().getCount();
}
}, 1l, "numJobsCommitted==1");
Assert.assertEquals(launcherMetrics.getNumJobsFailed().getCount(), 0);
Assert.assertEquals(launcherMetrics.getNumJobsRunning().getValue().intValue(), 0);
// Second launch of the same spec: all counters should advance to 2.
checkLaunchJob(instanceLauncher, js1, instance);
Assert.assertEquals(launcherMetrics.getNumJobsLaunched().getCount(), 2);
Assert.assertEquals(launcherMetrics.getNumJobsCompleted().getCount(), 2);
asb.assertEquals(new Function<Void, Long>() {
@Override public Long apply(Void input) {
return launcherMetrics.getNumJobsCommitted().getCount();
}
}, 2l, "numJobsCommitted==2");
Assert.assertEquals(launcherMetrics.getNumJobsFailed().getCount(), 0);
Assert.assertEquals(launcherMetrics.getNumJobsRunning().getValue().intValue(), 0);
}
/**
 * Launches {@code js1} via the instance's job launcher, runs the returned execution
 * driver to completion on the current thread, and verifies the job result plus the
 * instance shutdown metrics.
 *
 * @param instanceLauncher launcher that is stopped once the job finishes
 * @param js1 job spec to execute
 * @param instance driver whose job launcher and metrics are exercised
 */
private void checkLaunchJob(StandardGobblinInstanceLauncher instanceLauncher, JobSpec js1,
    GobblinInstanceDriver instance) throws TimeoutException, InterruptedException, ExecutionException {
  JobExecutionDriver jobDriver = null;
  JobExecutionMonitor monitor = instance.getJobLauncher().launchJob(js1);
  if (monitor instanceof JobLauncherExecutionDriver.JobExecutionMonitorAndDriver) {
    jobDriver = ((JobLauncherExecutionDriver.JobExecutionMonitorAndDriver) monitor).getDriver();
  }
  // Fail fast with a clear message rather than an NPE if the launcher returned an
  // unexpected monitor type.
  Assert.assertNotNull(jobDriver, "Expected launchJob to return a JobExecutionMonitorAndDriver");
  // Run the driver synchronously. The previous code wrapped it in a Thread but called
  // run() instead of start(), which executed on the current thread anyway; calling
  // run() directly makes the synchronous intent explicit and avoids the misleading
  // Thread construction.
  jobDriver.run();
  JobExecutionResult jobResult = jobDriver.get(5, TimeUnit.SECONDS);
  Assert.assertTrue(jobResult.isSuccessful());
  instanceLauncher.stopAsync();
  instanceLauncher.awaitTerminated(5, TimeUnit.SECONDS);
  // After termination the instance should report down (upFlag 0, uptime reset).
  Assert.assertEquals(instance.getMetrics().getUpFlag().getValue().intValue(), 0);
  Assert.assertEquals(instance.getMetrics().getUptimeMs().getValue().longValue(), 0);
}
@Test
/** Test running of a job when submitted directly to the scheduler */
public void testDirectToScheduler() throws Exception {
  StandardGobblinInstanceLauncher.Builder launcherBuilder = StandardGobblinInstanceLauncher.builder()
      .withInstanceName("testDirectToScheduler");
  launcherBuilder.driver();
  StandardGobblinInstanceLauncher launcher = launcherBuilder.build();
  launcher.startAsync();
  launcher.awaitRunning(5, TimeUnit.SECONDS);

  JobSpec helloJob = JobSpec.builder()
      .withConfig(ConfigFactory.parseResources("gobblin/runtime/instance/SimpleHelloWorldJob.jobconf"))
      .build();
  final StandardGobblinInstanceDriver driver =
      (StandardGobblinInstanceDriver) launcher.getDriver();

  // Capture the execution driver that the lifecycle listener observes at job launch.
  final ArrayBlockingQueue<JobExecutionDriver> launchedDrivers = new ArrayBlockingQueue<>(1);
  JobLifecycleListener launchListener = new FilteredJobLifecycleListener(
      JobSpecFilter.eqJobSpecURI(helloJob.getUri()),
      new DefaultJobLifecycleListenerImpl(driver.getLog()) {
        @Override public void onJobLaunch(JobExecutionDriver jobDriver) {
          super.onJobLaunch(jobDriver);
          try {
            launchedDrivers.offer(jobDriver, 5, TimeUnit.SECONDS);
          } catch (InterruptedException e) {
            driver.getLog().error("Offer interrupted.");
          }
        }
      });
  driver.registerWeakJobLifecycleListener(launchListener);

  // Bypass the job catalog: hand the runnable straight to the scheduler.
  JobSpecRunnable helloRunnable = driver.createJobSpecRunnable(helloJob);
  driver.getJobScheduler().scheduleOnce(helloJob, helloRunnable);

  JobExecutionDriver executionDriver = launchedDrivers.poll(10, TimeUnit.SECONDS);
  Assert.assertNotNull(executionDriver);
  JobExecutionResult result = executionDriver.get(5, TimeUnit.SECONDS);
  Assert.assertTrue(result.isSuccessful());

  launcher.stopAsync();
  launcher.awaitTerminated(5, TimeUnit.SECONDS);
}
@Test
/** Test running of a job using the standard path of submitting to the job catalog */
public void testSubmitToJobCatalog() throws Exception {
StandardGobblinInstanceLauncher.Builder instanceLauncherBuilder =
StandardGobblinInstanceLauncher.builder()
.withInstanceName("testSubmitToJobCatalog");
instanceLauncherBuilder.driver();
StandardGobblinInstanceLauncher instanceLauncher =
instanceLauncherBuilder.build();
instanceLauncher.startAsync();
instanceLauncher.awaitRunning(5, TimeUnit.SECONDS);
JobSpec js1 = JobSpec.builder()
.withConfig(ConfigFactory.parseResources("gobblin/runtime/instance/SimpleHelloWorldJob.jobconf"))
.build();
// The HelloWorld job writes its output to a test event bus; subscribe before the job
// runs so no events are missed.
final String eventBusId = js1.getConfig().resolve().getString(GobblinTestEventBusWriter.FULL_EVENTBUSID_KEY);
TestingEventBusAsserter asserter = new TestingEventBusAsserter(eventBusId);
final StandardGobblinInstanceDriver instance =
(StandardGobblinInstanceDriver)instanceLauncher.getDriver();
// Capture the execution driver handed to the lifecycle listener when the job launches.
final ArrayBlockingQueue<JobExecutionDriver> jobDrivers = new ArrayBlockingQueue<>(1);
JobLifecycleListener js1Listener = new FilteredJobLifecycleListener(
JobSpecFilter.eqJobSpecURI(js1.getUri()),
new DefaultJobLifecycleListenerImpl(instance.getLog()) {
@Override public void onJobLaunch(JobExecutionDriver jobDriver) {
super.onJobLaunch(jobDriver);
try {
jobDrivers.offer(jobDriver, 5, TimeUnit.SECONDS);
} catch (InterruptedException e) {
instance.getLog().error("Offer interrupted.");
}
}
});
instance.registerWeakJobLifecycleListener(js1Listener);
// Standard submission path: putting the spec in the catalog triggers scheduling.
instance.getMutableJobCatalog().put(js1);
JobExecutionDriver jobDriver = jobDrivers.poll(10, TimeUnit.SECONDS);
Assert.assertNotNull(jobDriver);
JobExecutionResult jobResult = jobDriver.get(5, TimeUnit.SECONDS);
Assert.assertTrue(jobResult.isSuccessful());
instanceLauncher.stopAsync();
// The job should have emitted one hello event per configured numHellos, in order.
final int numHellos = js1.getConfig().getInt(HelloWorldSource.NUM_HELLOS_FULL_KEY);
ArrayList<String> expectedEvents = new ArrayList<>();
for (int i = 1; i <= numHellos; ++i) {
expectedEvents.add(HelloWorldSource.ExtractorImpl.helloMessage(i));
}
asserter.assertNextValuesEq(expectedEvents);
asserter.close();
instanceLauncher.awaitTerminated(5, TimeUnit.SECONDS);
}
@Test
// Same catalog-submission flow as testSubmitToJobCatalog, but the job spec is built
// from a template URI plus a small override config (numHellos=5).
public void testSubmitWithTemplate() throws Exception {
StandardGobblinInstanceLauncher.Builder instanceLauncherBuilder =
StandardGobblinInstanceLauncher.builder()
.withInstanceName("testSubmitWithTemplate");
instanceLauncherBuilder.driver();
StandardGobblinInstanceLauncher instanceLauncher =
instanceLauncherBuilder.build();
instanceLauncher.startAsync();
instanceLauncher.awaitRunning(5, TimeUnit.SECONDS);
JobSpec js1 = JobSpec.builder()
.withConfig(ConfigFactory.parseMap(ImmutableMap.of("numHellos", "5")))
.withTemplate(new URI("resource:///gobblin/runtime/instance/SimpleHelloWorldJob.template"))
.build();
// Resolve the template to read the effective config (event bus id, numHellos).
ResolvedJobSpec js1Resolved = new ResolvedJobSpec(js1);
final String eventBusId = js1Resolved.getConfig().getString(GobblinTestEventBusWriter.FULL_EVENTBUSID_KEY);
TestingEventBusAsserter asserter = new TestingEventBusAsserter(eventBusId);
final StandardGobblinInstanceDriver instance =
(StandardGobblinInstanceDriver)instanceLauncher.getDriver();
// Capture the execution driver handed to the lifecycle listener when the job launches.
final ArrayBlockingQueue<JobExecutionDriver> jobDrivers = new ArrayBlockingQueue<>(1);
JobLifecycleListener js1Listener = new FilteredJobLifecycleListener(
JobSpecFilter.eqJobSpecURI(js1.getUri()),
new DefaultJobLifecycleListenerImpl(instance.getLog()) {
@Override public void onJobLaunch(JobExecutionDriver jobDriver) {
super.onJobLaunch(jobDriver);
try {
jobDrivers.offer(jobDriver, 5, TimeUnit.SECONDS);
} catch (InterruptedException e) {
instance.getLog().error("Offer interrupted.");
}
}
});
instance.registerWeakJobLifecycleListener(js1Listener);
// Submit the unresolved spec; the instance resolves the template on launch.
instance.getMutableJobCatalog().put(js1);
JobExecutionDriver jobDriver = jobDrivers.poll(10, TimeUnit.SECONDS);
Assert.assertNotNull(jobDriver);
JobExecutionResult jobResult = jobDriver.get(5, TimeUnit.SECONDS);
Assert.assertTrue(jobResult.isSuccessful());
instanceLauncher.stopAsync();
// Expect one ordered hello event per configured numHellos.
final int numHellos = js1Resolved.getConfig().getInt(HelloWorldSource.NUM_HELLOS_FULL_KEY);
ArrayList<String> expectedEvents = new ArrayList<>();
for (int i = 1; i <= numHellos; ++i) {
expectedEvents.add(HelloWorldSource.ExtractorImpl.helloMessage(i));
}
asserter.assertNextValuesEq(expectedEvents);
asserter.close();
instanceLauncher.awaitTerminated(5, TimeUnit.SECONDS);
}
}
| 1,304 |
0 | Create_ds/gobblin/gobblin-runtime/src/test/java/org/apache/gobblin/runtime/instance | Create_ds/gobblin/gobblin-runtime/src/test/java/org/apache/gobblin/runtime/instance/hadoop/TestHadoopConfigLoader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.instance.hadoop;
import org.apache.hadoop.conf.Configuration;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.google.common.collect.ImmutableMap;
/**
* Unit tests for {@link HadoopConfigLoader}
*/
public class TestHadoopConfigLoader {
  @Test
  public void testOverride() {
    // Keys under the "hadoop-inject." prefix are expected to be injected into the
    // Hadoop Configuration (with a trailing ".ROOT" denoting a value for the prefix
    // key itself); keys without the prefix should not appear at all.
    Config testConfig = ConfigFactory.parseMap(ImmutableMap.<String, Object>builder()
        .put("c.d", "1")
        .put("e.f", "2")
        .put("hadoop-inject.a.b.c.ROOT", "2")
        .put("hadoop-inject.a.b.c.d", "3")
        .put("hadoop-inject.e.f", "4")
        .build());
    HadoopConfigLoader configLoader = new HadoopConfigLoader(testConfig);

    Configuration firstConf = configLoader.getConf();
    Assert.assertEquals(firstConf.get("a.b.c"), "2");
    Assert.assertEquals(firstConf.get("a.b.c.d"), "3");
    Assert.assertEquals(firstConf.get("e.f"), "4");

    // Mutating one returned Configuration must not leak into later ones.
    firstConf.set("e.f", "5");
    Assert.assertEquals(firstConf.get("e.f"), "5");

    Configuration secondConf = configLoader.getConf();
    Assert.assertEquals(secondConf.get("a.b.c"), "2");
    Assert.assertEquals(secondConf.get("a.b.c.d"), "3");
    Assert.assertEquals(secondConf.get("e.f"), "4");
  }
}
| 1,305 |
0 | Create_ds/gobblin/gobblin-runtime/src/test/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-runtime/src/test/java/org/apache/gobblin/runtime/job/JobInterruptionPredicateTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.job;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.runtime.JobState;
import org.junit.Assert;
import org.testng.annotations.Test;
import com.google.common.collect.Lists;
import lombok.AllArgsConstructor;
import lombok.Getter;
public class JobInterruptionPredicateTest {
@Test
public void testJobPredicate() {
  // The interrupt callback must fire only once the job-level SQL predicate matches.
  SettableJobProgress progress =
      new SettableJobProgress("job123", 10, 0, 0, JobState.RunningState.RUNNING, new ArrayList<>());
  AtomicBoolean interrupted = new AtomicBoolean(false);
  JobInterruptionPredicate predicate = new JobInterruptionPredicate(progress,
      "SELECT completedTasks > 5 FROM jobProgress", () -> interrupted.set(true), false);

  // completedTasks == 0: predicate does not match yet.
  predicate.runOneIteration();
  Assert.assertFalse(interrupted.get());

  // Crossing the threshold makes the predicate match and triggers the callback.
  progress.completedTasks = 6;
  predicate.runOneIteration();
  Assert.assertTrue(interrupted.get());
}
@Test
public void testTaskPredicate() {
  // The interrupt callback must fire once any task reports a FAILED working state.
  SettableTaskProgress firstTask =
      new SettableTaskProgress("j1", "t1", WorkUnitState.WorkingState.RUNNING, false);
  SettableTaskProgress secondTask =
      new SettableTaskProgress("j1", "t1", WorkUnitState.WorkingState.RUNNING, false);
  SettableJobProgress progress = new SettableJobProgress("job123", 10, 0, 0, JobState.RunningState.RUNNING,
      Lists.newArrayList(firstTask, secondTask));
  AtomicBoolean interrupted = new AtomicBoolean(false);
  JobInterruptionPredicate predicate = new JobInterruptionPredicate(progress,
      "SELECT count(*) > 0 FROM taskProgress WHERE workingState = 'FAILED'",
      () -> interrupted.set(true), false);

  // Both tasks running: no match.
  predicate.runOneIteration();
  Assert.assertFalse(interrupted.get());

  // Fail one task: predicate matches and the callback runs.
  secondTask.workingState = WorkUnitState.WorkingState.FAILED;
  predicate.runOneIteration();
  Assert.assertTrue(interrupted.get());
}
@Test
public void testTaskAndJobPredicate() {
  // A combined predicate (job-level OR task-level, via UNION) should fire on either
  // condition independently.
  SettableTaskProgress firstTask =
      new SettableTaskProgress("j1", "t1", WorkUnitState.WorkingState.RUNNING, false);
  SettableTaskProgress secondTask =
      new SettableTaskProgress("j1", "t1", WorkUnitState.WorkingState.RUNNING, false);
  SettableJobProgress progress = new SettableJobProgress("job123", 10, 0, 0, JobState.RunningState.RUNNING,
      Lists.newArrayList(firstTask, secondTask));
  AtomicBoolean interrupted = new AtomicBoolean(false);
  JobInterruptionPredicate predicate = new JobInterruptionPredicate(progress,
      "SELECT EXISTS(SELECT * FROM (SELECT completedTasks > 5 AS pred FROM jobProgress UNION SELECT count(*) > 0 AS pred FROM taskProgress WHERE workingState = 'FAILED') WHERE pred)",
      () -> interrupted.set(true), false);

  // Neither condition holds yet.
  predicate.runOneIteration();
  Assert.assertFalse(interrupted.get());

  // Task-level condition alone triggers the callback.
  secondTask.workingState = WorkUnitState.WorkingState.FAILED;
  predicate.runOneIteration();
  Assert.assertTrue(interrupted.get());

  // Reset and confirm the predicate no longer matches.
  interrupted.set(false);
  secondTask.workingState = WorkUnitState.WorkingState.RUNNING;
  predicate.runOneIteration();
  Assert.assertFalse(interrupted.get());

  // Job-level condition alone also triggers the callback.
  progress.completedTasks = 6;
  predicate.runOneIteration();
  Assert.assertTrue(interrupted.get());
}
@Getter
@AllArgsConstructor
public static class SettableJobProgress implements JobProgress {
private final String jobId;
private int taskCount;
private int completedTasks;
private long elapsedTime;
private JobState.RunningState runningState;
private List<TaskProgress> taskProgress;
@Override
public JobState.RunningState getState() {
return this.runningState;
}
}
@Getter
@AllArgsConstructor
public static class SettableTaskProgress implements TaskProgress {
private final String jobId;
private final String taskId;
private WorkUnitState.WorkingState workingState;
private boolean isCompleted;
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.api;
import java.util.Properties;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.base.Optional;
import com.google.common.base.Predicate;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.runtime.JobState.RunningState;
import org.apache.gobblin.runtime.std.JobExecutionUpdatable;
import org.apache.gobblin.testing.AssertWithBackoff;
import org.apache.gobblin.util.ExecutorsUtils;
import static org.apache.gobblin.configuration.ConfigurationKeys.JOB_NAME_KEY;
import static org.mockito.Mockito.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
/**
* Unit tests for {@link JobExecutionState}
*/
public class TestJobExecutionState {

  /**
   * Drives the execution state machine through the success path
   * {@code null -> PENDING -> RUNNING -> SUCCESSFUL -> COMMITTED}, asserting at each step that
   * every disallowed transition is rejected (see {@link #assertFailedStateTransition}).
   */
  @Test public void testStateTransitionsSuccess() throws TimeoutException, InterruptedException {
    final Logger log = LoggerFactory.getLogger(getClass().getSimpleName() + ".testStateTransitionsSuccess");
    JobSpec js1 = JobSpec.builder("gobblin:/testStateTransitionsSuccess/job1")
        .withConfig(ConfigFactory.empty()
            .withValue(ConfigurationKeys.JOB_NAME_KEY, ConfigValueFactory.fromAnyRef("myJob")))
        .build();
    JobExecution je1 = JobExecutionUpdatable.createFromJobSpec(js1);
    // NOTE(review): this mock is never registered with the state object (it is constructed with
    // Optional.absent() below), so the callback verification inside assertTransition cannot
    // observe any invocation; see the note on assertTransition.
    final JobExecutionStateListener listener = mock(JobExecutionStateListener.class);
    final JobExecutionState jes1 =
        new JobExecutionState(js1, je1, Optional.<JobExecutionStateListener>absent());
    // Current state is null
    assertFailedStateTransition(jes1, RunningState.RUNNING);
    assertFailedStateTransition(jes1, RunningState.COMMITTED);
    assertFailedStateTransition(jes1, RunningState.SUCCESSFUL);
    assertFailedStateTransition(jes1, RunningState.FAILED);
    assertFailedStateTransition(jes1, RunningState.RUNNING);
    assertFailedStateTransition(jes1, RunningState.CANCELLED);
    assertTransition(jes1, listener, null, RunningState.PENDING, log);
    // Current state is PENDING
    assertFailedStateTransition(jes1, RunningState.PENDING);
    assertFailedStateTransition(jes1, RunningState.COMMITTED);
    assertFailedStateTransition(jes1, RunningState.SUCCESSFUL);
    assertTransition(jes1, listener, RunningState.PENDING, RunningState.RUNNING, log);
    // Current state is RUNNING
    assertFailedStateTransition(jes1, RunningState.PENDING);
    assertFailedStateTransition(jes1, RunningState.COMMITTED);
    assertFailedStateTransition(jes1, RunningState.RUNNING);
    assertTransition(jes1, listener, RunningState.RUNNING, RunningState.SUCCESSFUL, log);
    // Current state is SUCCESSFUL
    assertFailedStateTransition(jes1, RunningState.PENDING);
    assertFailedStateTransition(jes1, RunningState.RUNNING);
    assertFailedStateTransition(jes1, RunningState.SUCCESSFUL);
    assertTransition(jes1, listener, RunningState.SUCCESSFUL, RunningState.COMMITTED, log);
    // Current state is COMMITTED (final): nothing may leave it.
    assertFailedStateTransition(jes1, RunningState.RUNNING);
    assertFailedStateTransition(jes1, RunningState.COMMITTED);
    assertFailedStateTransition(jes1, RunningState.SUCCESSFUL);
    assertFailedStateTransition(jes1, RunningState.FAILED);
    assertFailedStateTransition(jes1, RunningState.RUNNING);
    assertFailedStateTransition(jes1, RunningState.CANCELLED);
  }

  /**
   * Verifies FAILED is reachable from PENDING, RUNNING and SUCCESSFUL, and that FAILED itself
   * is terminal (no further transitions permitted).
   */
  @Test public void testStateTransitionsFailure() throws TimeoutException, InterruptedException {
    final Logger log = LoggerFactory.getLogger(getClass().getSimpleName() + ".testStateTransitionsFailure");
    JobSpec js1 = JobSpec.builder("gobblin:/testStateTransitionsFailure/job1")
        .withConfig(ConfigFactory.empty()
            .withValue(ConfigurationKeys.JOB_NAME_KEY, ConfigValueFactory.fromAnyRef("myJob")))
        .build();
    JobExecution je1 = JobExecutionUpdatable.createFromJobSpec(js1);
    final JobExecutionStateListener listener = mock(JobExecutionStateListener.class);
    final JobExecutionState jes1 =
        new JobExecutionState(js1, je1, Optional.<JobExecutionStateListener>absent());
    // PENDING -> FAILED
    assertTransition(jes1, listener, null, RunningState.PENDING, log);
    assertTransition(jes1, listener, RunningState.PENDING, RunningState.FAILED, log);
    // Current state is FAILED (final)
    assertFailedStateTransition(jes1, RunningState.RUNNING);
    assertFailedStateTransition(jes1, RunningState.COMMITTED);
    assertFailedStateTransition(jes1, RunningState.SUCCESSFUL);
    assertFailedStateTransition(jes1, RunningState.FAILED);
    assertFailedStateTransition(jes1, RunningState.RUNNING);
    assertFailedStateTransition(jes1, RunningState.CANCELLED);
    // RUNNING -> FAILED
    final JobExecutionState jes2 =
        new JobExecutionState(js1, je1, Optional.<JobExecutionStateListener>absent());
    assertTransition(jes2, listener, null, RunningState.PENDING, log);
    assertTransition(jes2, listener, RunningState.PENDING, RunningState.RUNNING, log);
    assertTransition(jes2, listener, RunningState.RUNNING, RunningState.FAILED, log);
    // SUCCESSFUL -> FAILED
    final JobExecutionState je3 =
        new JobExecutionState(js1, je1, Optional.<JobExecutionStateListener>absent());
    assertTransition(je3, listener, null, RunningState.PENDING, log);
    assertTransition(je3, listener, RunningState.PENDING, RunningState.RUNNING, log);
    assertTransition(je3, listener, RunningState.RUNNING, RunningState.SUCCESSFUL, log);
    assertTransition(je3, listener, RunningState.SUCCESSFUL, RunningState.FAILED, log);
  }

  /**
   * Verifies CANCELLED is reachable from PENDING, RUNNING and SUCCESSFUL, and that CANCELLED
   * itself is terminal (no further transitions permitted).
   */
  @Test public void testStateTransitionsCancel() throws TimeoutException, InterruptedException {
    final Logger log = LoggerFactory.getLogger(getClass().getSimpleName() + ".testStateTransitionsCancel");
    JobSpec js1 = JobSpec.builder("gobblin:/testStateTransitionsCancel/job1")
        .withConfig(ConfigFactory.empty()
            .withValue(ConfigurationKeys.JOB_NAME_KEY, ConfigValueFactory.fromAnyRef("myJob")))
        .build();
    JobExecution je1 = JobExecutionUpdatable.createFromJobSpec(js1);
    final JobExecutionStateListener listener = mock(JobExecutionStateListener.class);
    final JobExecutionState jes1 =
        new JobExecutionState(js1, je1, Optional.<JobExecutionStateListener>absent());
    // PENDING -> CANCELLED
    assertTransition(jes1, listener, null, RunningState.PENDING, log);
    assertTransition(jes1, listener, RunningState.PENDING, RunningState.CANCELLED, log);
    // Current state is CANCELLED (final)
    assertFailedStateTransition(jes1, RunningState.RUNNING);
    assertFailedStateTransition(jes1, RunningState.COMMITTED);
    assertFailedStateTransition(jes1, RunningState.SUCCESSFUL);
    assertFailedStateTransition(jes1, RunningState.FAILED);
    assertFailedStateTransition(jes1, RunningState.RUNNING);
    assertFailedStateTransition(jes1, RunningState.CANCELLED);
    // RUNNING -> CANCELLED
    final JobExecutionState jes2 =
        new JobExecutionState(js1, je1, Optional.<JobExecutionStateListener>absent());
    assertTransition(jes2, listener, null, RunningState.PENDING, log);
    assertTransition(jes2, listener, RunningState.PENDING, RunningState.RUNNING, log);
    assertTransition(jes2, listener, RunningState.RUNNING, RunningState.CANCELLED, log);
    // SUCCESSFUL -> CANCELLED
    final JobExecutionState je3 =
        new JobExecutionState(js1, je1, Optional.<JobExecutionStateListener>absent());
    assertTransition(je3, listener, null, RunningState.PENDING, log);
    assertTransition(je3, listener, RunningState.PENDING, RunningState.RUNNING, log);
    assertTransition(je3, listener, RunningState.RUNNING, RunningState.SUCCESSFUL, log);
    assertTransition(je3, listener, RunningState.SUCCESSFUL, RunningState.CANCELLED, log);
  }

  /** Asserts that moving {@code jes1} to {@code newState} is rejected with an {@link IllegalStateException}. */
  private void assertFailedStateTransition(final JobExecutionState jes1, RunningState newState) {
    try {
      jes1.setRunningState(newState);
      Assert.fail("Exception expected");
    }
    catch (IllegalStateException e) {
      // OK
    }
  }

  /**
   * Performs the {@code fromState -> toState} transition and asserts that the new state took
   * effect.
   *
   * <p>NOTE(review): the backoff predicate below swallows every {@link Throwable} thrown by
   * {@code verify(...)} and unconditionally returns {@code true}, so the listener-callback
   * verification can never fail (and the callers never register the listener anyway). Only the
   * direct {@code getRunningState()} assertion is effective. TODO: confirm whether the predicate
   * was meant to return {@code false} when verification fails.
   */
  private void assertTransition(final JobExecutionState jes1,
      final JobExecutionStateListener listener,
      final RunningState fromState, final RunningState toState,
      final Logger log) throws TimeoutException, InterruptedException {
    jes1.setRunningState(toState);
    Assert.assertEquals(jes1.getRunningState(), toState);
    AssertWithBackoff.assertTrue(new Predicate<Void>() {
      @Override public boolean apply(Void input) {
        try {
          verify(listener).onStatusChange(eq(jes1), eq(fromState), eq(toState));
        }
        catch (Throwable t) {
          // ignore
        }
        return true;
      }
    }, 50, "expecting state callback", log, 2.0, 10);
  }

  /**
   * Exercises {@code awaitForState}/{@code awaitForDone}: a background daemon thread blocks on
   * {@code awaitForDone(0)} (no timeout) and should unblock only once the job reaches a
   * terminal state.
   */
  @Test public void testAwait() throws InterruptedException {
    final Logger log = LoggerFactory.getLogger(getClass().getSimpleName() + ".testAwait");
    Properties properties = new Properties();
    properties.setProperty(JOB_NAME_KEY, "jobname");
    JobSpec js1 = JobSpec.builder("gobblin:/testAwaitForDone/job1")
        .withConfigAsProperties(properties)
        .build();
    JobExecution je1 = JobExecutionUpdatable.createFromJobSpec(js1);
    final JobExecutionState jes1 =
        new JobExecutionState(js1, je1, Optional.<JobExecutionStateListener>absent());
    final AtomicBoolean doneDetected = new AtomicBoolean(false);
    ThreadFactory doneThreadFactory =
        ExecutorsUtils.newDaemonThreadFactory(Optional.of(log), Optional.of("doneDetectionThread"));
    // Blocks until the job is done, then records whether the final state reports isDone().
    Thread doneDetectionThread = doneThreadFactory.newThread(new Runnable() {
      @Override public void run() {
        try {
          jes1.awaitForDone(0);
        } catch (InterruptedException | TimeoutException e) {
          log.error("Error detected: " + e);
        }
        doneDetected.set(jes1.getRunningState().isDone());
      }
    });
    doneDetectionThread.start();
    // Waiting for a state that is never reached must time out, after at least the requested 10ms.
    long startTime = System.currentTimeMillis();
    try {
      jes1.awaitForState(RunningState.RUNNING, 10);
      Assert.fail("Timeout expected");
    } catch (TimeoutException e) {
      long now = System.currentTimeMillis();
      Assert.assertTrue(now - startTime >= 10, "Insufficient wait: " + (now - startTime));
    }
    // Once the job actually reaches RUNNING, awaiting that state must succeed within the timeout.
    jes1.switchToPending();
    jes1.switchToRunning();
    try {
      jes1.awaitForState(RunningState.RUNNING, 10);
      Assert.assertEquals(jes1.getRunningState(), RunningState.RUNNING);
    } catch (TimeoutException e) {
      Assert.fail("Timeout: ");
    }
    // RUNNING is not terminal, so the done-detection thread must still be blocked.
    Assert.assertTrue(doneDetectionThread.isAlive());
    // FAILED is terminal: the thread should unblock and observe a done state.
    jes1.switchToFailed();
    doneDetectionThread.join(50);
    Assert.assertFalse(doneDetectionThread.isAlive());
    Assert.assertTrue(doneDetected.get());
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.api;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Properties;
import org.apache.commons.lang3.SerializationUtils;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;
import org.apache.gobblin.configuration.ConfigurationKeys;
/**
* Unit tests for {@link JobSpec}
*/
public class TestJobSpec {

  /**
   * Exercises {@link JobSpec.Builder}: defaulted attributes, Properties-based configuration,
   * Config-based configuration, and URI derivation from job group/name.
   */
  @Test
  public void testBuilder() throws URISyntaxException {
    // Bare builder: everything defaulted except the URI.
    JobSpec defaultSpec = new JobSpec.Builder("test:job").build();
    Assert.assertEquals(defaultSpec.getUri().toString(), "test:job");
    Assert.assertEquals(defaultSpec.getVersion(), "1");
    Assert.assertNotNull(defaultSpec.getDescription());
    Assert.assertTrue(defaultSpec.getDescription().contains("test:job"));
    Assert.assertEquals(defaultSpec.getConfig().entrySet().size(), 0);
    Assert.assertEquals(defaultSpec.getConfigAsProperties().size(), 0);

    // Properties supplied to the builder must surface as typed Config values.
    Properties props = new Properties();
    props.put("a1", "a_value");
    props.put("a2.b", "1");
    props.put("a2.c.d", "12.34");
    props.put("a2.c.d2", "true");
    JobSpec propsSpec = new JobSpec.Builder("test:job2")
        .withVersion("2")
        .withDescription("A test job")
        .withConfigAsProperties(props)
        .build();
    Assert.assertEquals(propsSpec.getUri().toString(), "test:job2");
    Assert.assertEquals(propsSpec.getVersion(), "2");
    Assert.assertEquals(propsSpec.getDescription(), "A test job");
    Assert.assertEquals(propsSpec.getConfig().getString("a1"), "a_value");
    Assert.assertEquals(propsSpec.getConfig().getLong("a2.b"), 1L);
    Assert.assertEquals(propsSpec.getConfig().getDouble("a2.c.d"), 12.34);
    Assert.assertTrue(propsSpec.getConfig().getBoolean("a2.c.d2"));

    // A Config supplied to the builder must surface as string-valued Properties.
    Config cfg = ConfigFactory.empty()
        .withValue("a1", ConfigValueFactory.fromAnyRef("some_string"))
        .withValue("a2.b", ConfigValueFactory.fromAnyRef(-1))
        .withValue("a2.c.d", ConfigValueFactory.fromAnyRef(1.2))
        .withValue("a2.e.f", ConfigValueFactory.fromAnyRef(true));
    JobSpec configSpec = new JobSpec.Builder("test:job")
        .withVersion("3")
        .withDescription("A test job")
        .withConfig(cfg)
        .build();
    Assert.assertEquals(configSpec.getUri().toString(), "test:job");
    Assert.assertEquals(configSpec.getVersion(), "3");
    Assert.assertEquals(configSpec.getDescription(), "A test job");
    Assert.assertEquals(configSpec.getConfigAsProperties().getProperty("a1"), "some_string");
    Assert.assertEquals(configSpec.getConfigAsProperties().getProperty("a2.b"), "-1");
    Assert.assertEquals(configSpec.getConfigAsProperties().getProperty("a2.c.d"), "1.2");
    Assert.assertEquals(configSpec.getConfigAsProperties().getProperty("a2.e.f"), "true");

    // With no explicit URI, the URI is derived from job group/name under the default catalog scheme.
    Config cfg2 = ConfigFactory.empty()
        .withValue(ConfigurationKeys.JOB_NAME_KEY, ConfigValueFactory.fromAnyRef("myJob"))
        .withValue(ConfigurationKeys.JOB_GROUP_KEY, ConfigValueFactory.fromAnyRef("myGroup"))
        .withValue(ConfigurationKeys.JOB_DESCRIPTION_KEY, ConfigValueFactory.fromAnyRef("Awesome job"));
    JobSpec derivedUriSpec = new JobSpec.Builder().withConfig(cfg2).build();
    Assert.assertEquals(derivedUriSpec.getUri(), new URI(JobSpec.Builder.DEFAULT_JOB_CATALOG_SCHEME +
        ":/myGroup/myJob"));
    Assert.assertEquals(derivedUriSpec.getDescription(), "Awesome job");

    // ... and an explicit job catalog URI overrides the default scheme.
    JobSpec customCatalogSpec = new JobSpec.Builder().withConfig(cfg2).withJobCatalogURI("my-jobs:/").build();
    Assert.assertEquals(customCatalogSpec.getUri(), new URI("my-jobs:/myGroup/myJob"));
  }

  /** A {@link JobSpec} must survive a Java serialization round trip with all attributes intact. */
  @Test
  public void testSerDe() {
    // Round-trip a spec with defaulted attributes.
    JobSpec emptySpec = new JobSpec.Builder("test:job").build();
    byte[] serializedBytes = SerializationUtils.serialize(emptySpec);
    JobSpec emptyCopy = SerializationUtils.deserialize(serializedBytes);
    Assert.assertEquals(emptyCopy.getUri().toString(), emptySpec.getUri().toString());
    Assert.assertEquals(emptyCopy.getVersion(), emptySpec.getVersion());
    Assert.assertNotNull(emptyCopy.getDescription());
    Assert.assertTrue(emptyCopy.getDescription().contains(emptySpec.getDescription()));
    Assert.assertEquals(emptyCopy.getConfig().entrySet().size(), 0);
    Assert.assertEquals(emptyCopy.getConfigAsProperties().size(), 0);

    // Round-trip a spec with explicit version, description and config.
    Properties props = new Properties();
    props.put("a1", "a_value");
    props.put("a2.b", "1");
    props.put("a2.c.d", "12.34");
    props.put("a2.c.d2", "true");
    JobSpec fullSpec = new JobSpec.Builder("test:job2")
        .withVersion("2")
        .withDescription("A test job")
        .withConfigAsProperties(props)
        .build();
    serializedBytes = SerializationUtils.serialize(fullSpec);
    JobSpec fullCopy = SerializationUtils.deserialize(serializedBytes);
    Assert.assertEquals(fullCopy.getUri().toString(), fullSpec.getUri().toString());
    Assert.assertEquals(fullCopy.getVersion(), fullSpec.getVersion());
    Assert.assertEquals(fullCopy.getDescription(), fullSpec.getDescription());
    Assert.assertEquals(fullCopy.getConfig().getString("a1"), "a_value");
    Assert.assertEquals(fullCopy.getConfig().getLong("a2.b"), 1L);
    Assert.assertEquals(fullCopy.getConfig().getDouble("a2.c.d"), 12.34);
    Assert.assertTrue(fullCopy.getConfig().getBoolean("a2.c.d2"));
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.api;
import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.ExecutionException;
import java.util.stream.Collectors;
import org.apache.commons.lang3.tuple.Pair;
import org.testng.Assert;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
import com.google.common.io.Files;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;
import org.apache.gobblin.util.ConfigUtils;
public class FsSpecProducerTest {
private FsSpecProducer _fsSpecProducer;
private FsSpecConsumer _fsSpecConsumer;
private Config _config;
private File workDir;
@BeforeMethod
public void setUp()
throws IOException {
this.workDir = Files.createTempDir();
this.workDir.deleteOnExit();
Config config = ConfigFactory.empty().withValue(FsSpecConsumer.SPEC_PATH_KEY, ConfigValueFactory.fromAnyRef(
this.workDir.getAbsolutePath()));
this._fsSpecProducer = new FsSpecProducer(config);
this._fsSpecConsumer = new FsSpecConsumer(config);
this._config = config;
}
private JobSpec createTestJobSpec() throws URISyntaxException {
return createTestJobSpec("testJob");
}
private JobSpec createTestJobSpec(String jobSpecUri) throws URISyntaxException {
Properties properties = new Properties();
properties.put("key1", "val1");
properties.put("key2", "val2");
//Introduce a key which is a prefix of another key and ensure it is correctly handled in the code
properties.put("key3.1", "val3");
properties.put("key3.1.1", "val4");
JobSpec jobSpec = JobSpec.builder(jobSpecUri)
.withConfig(ConfigUtils.propertiesToConfig(properties))
.withVersion("1")
.withDescription("")
.withTemplate(new URI("FS:///")).build();
return jobSpec;
}
@Test
public void testAddSpec()
throws URISyntaxException, ExecutionException, InterruptedException, IOException {
this._fsSpecProducer.addSpec(createTestJobSpec());
// Add some random files(with non-avro extension name) into the folder observed by consumer, they shall not be picked up.
File randomFile = new File(workDir, "random");
Assert.assertTrue(randomFile.createNewFile());
randomFile.deleteOnExit();
List<Pair<SpecExecutor.Verb, Spec>> jobSpecs = this._fsSpecConsumer.changedSpecs().get();
Assert.assertEquals(jobSpecs.size(), 1);
Assert.assertEquals(jobSpecs.get(0).getLeft(), SpecExecutor.Verb.ADD);
Assert.assertEquals(jobSpecs.get(0).getRight().getUri().toString(), "testJob");
Assert.assertEquals(((JobSpec) jobSpecs.get(0).getRight()).getConfig().getString("key1"), "val1");
Assert.assertEquals(((JobSpec) jobSpecs.get(0).getRight()).getConfig().getString("key2"), "val2");
Assert.assertEquals(((JobSpec) jobSpecs.get(0).getRight()).getConfig().getString("key3.1" + ConfigUtils.STRIP_SUFFIX), "val3");
Assert.assertEquals(((JobSpec) jobSpecs.get(0).getRight()).getConfig().getString("key3.1.1"), "val4");
jobSpecs.clear();
// If there are other jobSpec in .avro files added by testSpecProducer, they shall still be found.
this._fsSpecProducer.addSpec(createTestJobSpec("newTestJob"));
jobSpecs = this._fsSpecConsumer.changedSpecs().get();
Assert.assertEquals(jobSpecs.size(), 2);
Assert.assertEquals(jobSpecs.get(0).getLeft(), SpecExecutor.Verb.ADD);
Assert.assertEquals(jobSpecs.get(1).getLeft(), SpecExecutor.Verb.ADD);
List<String> uriList = jobSpecs.stream().map(s -> s.getRight().getUri().toString()).collect(Collectors.toList());
Assert.assertTrue(uriList.contains( "testJob"));
Assert.assertTrue(uriList.contains( "newTestJob"));
}
@Test (dependsOnMethods = "testAddSpec")
public void testUpdateSpec() throws ExecutionException, InterruptedException, URISyntaxException {
this._fsSpecProducer.updateSpec(createTestJobSpec());
List<Pair<SpecExecutor.Verb, Spec>> jobSpecs = this._fsSpecConsumer.changedSpecs().get();
Assert.assertEquals(jobSpecs.size(), 1);
Assert.assertEquals(jobSpecs.get(0).getLeft(), SpecExecutor.Verb.UPDATE);
Assert.assertEquals(jobSpecs.get(0).getRight().getUri().toString(), "testJob");
Assert.assertEquals(((JobSpec) jobSpecs.get(0).getRight()).getConfig().getString("key1"), "val1");
Assert.assertEquals(((JobSpec) jobSpecs.get(0).getRight()).getConfig().getString("key2"), "val2");
Assert.assertEquals(((JobSpec) jobSpecs.get(0).getRight()).getConfig().getString("key3.1" + ConfigUtils.STRIP_SUFFIX), "val3");
Assert.assertEquals(((JobSpec) jobSpecs.get(0).getRight()).getConfig().getString("key3.1.1"), "val4");
}
@Test (dependsOnMethods = "testUpdateSpec")
public void testDeleteSpec() throws URISyntaxException, ExecutionException, InterruptedException {
Properties headers = new Properties();
headers.put("headerProp1", "headerValue1");
this._fsSpecProducer.deleteSpec(new URI("testDeleteJob"), headers);
List<Pair<SpecExecutor.Verb, Spec>> jobSpecs = this._fsSpecConsumer.changedSpecs().get();
Assert.assertEquals(jobSpecs.size(), 1);
Assert.assertEquals(jobSpecs.get(0).getLeft(), SpecExecutor.Verb.DELETE);
Assert.assertEquals(jobSpecs.get(0).getRight().getUri().toString(), "testDeleteJob");
}
} | 1,309 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.api;
import com.typesafe.config.Config;
import java.io.IOException;
import java.sql.SQLException;
import java.sql.Timestamp;
import java.util.Optional;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.config.ConfigBuilder;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.metastore.testing.ITestMetastoreDatabase;
import org.apache.gobblin.metastore.testing.TestMetastoreDatabaseFactory;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import static org.apache.gobblin.runtime.api.MysqlMultiActiveLeaseArbiter.*;
@Slf4j
public class MysqlMultiActiveLeaseArbiterTest {
  // Two trigger events within EPSILON millis of each other are treated as the same event.
  private static final int EPSILON = 10000;
  // Slightly more than EPSILON; sleeping this long makes the next event register as distinct.
  private static final int MORE_THAN_EPSILON = (int) (EPSILON * 1.1);
  // Duration a lease stays valid; sleeping this long lets an existing lease expire.
  private static final int LINGER = 50000;
  private static final String USER = "testUser";
  private static final String PASSWORD = "testPassword";
  private static final String TABLE = "mysql_multi_active_lease_arbiter_store";
  private static final String flowGroup = "testFlowGroup";
  private static final String flowName = "testFlowName";
  private static final String flowExecutionId = "12345677";
  // The following are considered unique because they correspond to different flow action types
  private static DagActionStore.DagAction launchDagAction =
      new DagActionStore.DagAction(flowGroup, flowName, flowExecutionId, DagActionStore.FlowActionType.LAUNCH);
  private static DagActionStore.DagAction resumeDagAction =
      new DagActionStore.DagAction(flowGroup, flowName, flowExecutionId, DagActionStore.FlowActionType.RESUME);
  // Shared base event time for all lease attempts in these tests.
  private static final long eventTimeMillis = System.currentTimeMillis();
  private static final Timestamp dummyTimestamp = new Timestamp(99999);
  private MysqlMultiActiveLeaseArbiter mysqlMultiActiveLeaseArbiter;
  private String formattedAcquireLeaseIfMatchingAllStatement =
      String.format(CONDITIONALLY_ACQUIRE_LEASE_IF_MATCHING_ALL_COLS_STATEMENT, TABLE);
  private String formattedAcquireLeaseIfFinishedStatement =
      String.format(CONDITIONALLY_ACQUIRE_LEASE_IF_FINISHED_LEASING_STATEMENT, TABLE);
// The setup functionality verifies that the initialization of the tables is done correctly and verifies any SQL
// syntax errors.
@BeforeClass
public void setUp() throws Exception {
ITestMetastoreDatabase testDb = TestMetastoreDatabaseFactory.get();
Config config = ConfigBuilder.create()
.addPrimitive(ConfigurationKeys.SCHEDULER_EVENT_EPSILON_MILLIS_KEY, EPSILON)
.addPrimitive(ConfigurationKeys.SCHEDULER_EVENT_LINGER_MILLIS_KEY, LINGER)
.addPrimitive(ConfigurationKeys.MYSQL_LEASE_ARBITER_PREFIX + "." + ConfigurationKeys.STATE_STORE_DB_URL_KEY, testDb.getJdbcUrl())
.addPrimitive(ConfigurationKeys.MYSQL_LEASE_ARBITER_PREFIX + "." + ConfigurationKeys.STATE_STORE_DB_USER_KEY, USER)
.addPrimitive(ConfigurationKeys.MYSQL_LEASE_ARBITER_PREFIX + "." + ConfigurationKeys.STATE_STORE_DB_PASSWORD_KEY, PASSWORD)
.addPrimitive(ConfigurationKeys.SCHEDULER_LEASE_DETERMINATION_STORE_DB_TABLE_KEY, TABLE)
.build();
this.mysqlMultiActiveLeaseArbiter = new MysqlMultiActiveLeaseArbiter(config);
}
  /*
  Tests all cases of trying to acquire a lease (CASES 1-6 detailed below) for a flow action event with one
  participant involved. The test is timing-sensitive: it relies on Thread.sleep() relative to the
  EPSILON (same-event window) and LINGER (lease validity) constants, and the cases build on each
  other, so they must run in order.
  */
  // TODO: refactor this to break it into separate test cases as much is possible
  @Test
  public void testAcquireLeaseSingleParticipant() throws Exception {
    // Tests CASE 1 of acquire lease for a flow action event not present in DB
    MultiActiveLeaseArbiter.LeaseAttemptStatus firstLaunchStatus =
        mysqlMultiActiveLeaseArbiter.tryAcquireLease(launchDagAction, eventTimeMillis, false);
    Assert.assertTrue(firstLaunchStatus instanceof MultiActiveLeaseArbiter.LeaseObtainedStatus);
    MultiActiveLeaseArbiter.LeaseObtainedStatus firstObtainedStatus =
        (MultiActiveLeaseArbiter.LeaseObtainedStatus) firstLaunchStatus;
    // Acquisition is recorded at or after the event time.
    Assert.assertTrue(firstObtainedStatus.getEventTimeMillis() <=
        firstObtainedStatus.getLeaseAcquisitionTimestamp());
    Assert.assertTrue(firstObtainedStatus.getFlowAction().equals(
        new DagActionStore.DagAction(flowGroup, flowName, String.valueOf(firstObtainedStatus.getEventTimeMillis()),
            DagActionStore.FlowActionType.LAUNCH)));
    // Verify that different DagAction types for the same flow can have leases at the same time
    DagActionStore.DagAction killDagAction = new
        DagActionStore.DagAction(flowGroup, flowName, flowExecutionId, DagActionStore.FlowActionType.KILL);
    MultiActiveLeaseArbiter.LeaseAttemptStatus killStatus =
        mysqlMultiActiveLeaseArbiter.tryAcquireLease(killDagAction, eventTimeMillis, false);
    Assert.assertTrue(killStatus instanceof MultiActiveLeaseArbiter.LeaseObtainedStatus);
    MultiActiveLeaseArbiter.LeaseObtainedStatus killObtainedStatus =
        (MultiActiveLeaseArbiter.LeaseObtainedStatus) killStatus;
    Assert.assertTrue(
        killObtainedStatus.getLeaseAcquisitionTimestamp() >= killObtainedStatus.getEventTimeMillis());
    // Tests CASE 2 of acquire lease for a flow action event that already has a valid lease for the same event in db
    // Very little time should have passed if this test directly follows the one above so this call will be considered
    // the same as the previous event
    MultiActiveLeaseArbiter.LeaseAttemptStatus secondLaunchStatus =
        mysqlMultiActiveLeaseArbiter.tryAcquireLease(launchDagAction, eventTimeMillis, false);
    Assert.assertTrue(secondLaunchStatus instanceof MultiActiveLeaseArbiter.LeasedToAnotherStatus);
    MultiActiveLeaseArbiter.LeasedToAnotherStatus secondLeasedToAnotherStatus =
        (MultiActiveLeaseArbiter.LeasedToAnotherStatus) secondLaunchStatus;
    Assert.assertEquals(firstObtainedStatus.getEventTimeMillis(), secondLeasedToAnotherStatus.getEventTimeMillis());
    Assert.assertTrue(secondLeasedToAnotherStatus.getMinimumLingerDurationMillis() > 0);
    // Tests CASE 3 of trying to acquire a lease for a distinct flow action event, while the previous event's lease is
    // valid
    // Allow enough time to pass for this trigger to be considered distinct, but not enough time so the lease expires
    Thread.sleep(MORE_THAN_EPSILON);
    MultiActiveLeaseArbiter.LeaseAttemptStatus thirdLaunchStatus =
        mysqlMultiActiveLeaseArbiter.tryAcquireLease(launchDagAction, eventTimeMillis, false);
    Assert.assertTrue(thirdLaunchStatus instanceof MultiActiveLeaseArbiter.LeasedToAnotherStatus);
    MultiActiveLeaseArbiter.LeasedToAnotherStatus thirdLeasedToAnotherStatus =
        (MultiActiveLeaseArbiter.LeasedToAnotherStatus) thirdLaunchStatus;
    Assert.assertTrue(thirdLeasedToAnotherStatus.getEventTimeMillis() > firstObtainedStatus.getEventTimeMillis());
    Assert.assertTrue(thirdLeasedToAnotherStatus.getMinimumLingerDurationMillis() < LINGER);
    // Tests CASE 4 of lease out of date
    // Sleeping a full LINGER period lets the existing lease expire so a new one can be obtained.
    Thread.sleep(LINGER);
    MultiActiveLeaseArbiter.LeaseAttemptStatus fourthLaunchStatus =
        mysqlMultiActiveLeaseArbiter.tryAcquireLease(launchDagAction, eventTimeMillis, false);
    Assert.assertTrue(fourthLaunchStatus instanceof MultiActiveLeaseArbiter.LeaseObtainedStatus);
    MultiActiveLeaseArbiter.LeaseObtainedStatus fourthObtainedStatus =
        (MultiActiveLeaseArbiter.LeaseObtainedStatus) fourthLaunchStatus;
    Assert.assertTrue(fourthObtainedStatus.getEventTimeMillis() > eventTimeMillis + LINGER);
    Assert.assertTrue(fourthObtainedStatus.getEventTimeMillis()
        <= fourthObtainedStatus.getLeaseAcquisitionTimestamp());
    // Tests CASE 5 of no longer leasing the same event in DB
    // done immediately after previous lease obtainment so should be marked as the same event
    Assert.assertTrue(mysqlMultiActiveLeaseArbiter.recordLeaseSuccess(fourthObtainedStatus));
    Assert.assertTrue(System.currentTimeMillis() - fourthObtainedStatus.getEventTimeMillis() < EPSILON);
    MultiActiveLeaseArbiter.LeaseAttemptStatus fifthLaunchStatus =
        mysqlMultiActiveLeaseArbiter.tryAcquireLease(launchDagAction, eventTimeMillis, false);
    Assert.assertTrue(fifthLaunchStatus instanceof MultiActiveLeaseArbiter.NoLongerLeasingStatus);
    // Tests CASE 6 of no longer leasing a distinct event in DB
    // Wait so this event is considered distinct and a new lease will be acquired
    Thread.sleep(MORE_THAN_EPSILON);
    MultiActiveLeaseArbiter.LeaseAttemptStatus sixthLaunchStatus =
        mysqlMultiActiveLeaseArbiter.tryAcquireLease(launchDagAction, eventTimeMillis, false);
    Assert.assertTrue(sixthLaunchStatus instanceof MultiActiveLeaseArbiter.LeaseObtainedStatus);
    MultiActiveLeaseArbiter.LeaseObtainedStatus sixthObtainedStatus =
        (MultiActiveLeaseArbiter.LeaseObtainedStatus) sixthLaunchStatus;
    Assert.assertTrue(sixthObtainedStatus.getEventTimeMillis()
        <= sixthObtainedStatus.getLeaseAcquisitionTimestamp());
  }
/*
Tests attemptLeaseIfNewRow() method to ensure a new row is inserted if no row matches the primary key in the table.
If such a row does exist, the method should disregard the resulting SQL error and return 0 rows updated, indicating
the lease was not acquired.
Note: this isolates and tests CASE 1 in which another participant could have acquired the lease between the time
the read was done and subsequent write was carried out
*/
  @Test (dependsOnMethods = "testAcquireLeaseSingleParticipant")
  public void testAcquireLeaseIfNewRow() throws IOException {
    // Inserting the first time should update 1 row (no row exists yet for resumeDagAction's key)
    Assert.assertEquals(mysqlMultiActiveLeaseArbiter.attemptLeaseIfNewRow(resumeDagAction), 1);
    // Inserting the second time should not update any rows: the duplicate-key SQL error is
    // disregarded internally and reported as 0 rows updated, i.e. the lease was not acquired
    Assert.assertEquals(mysqlMultiActiveLeaseArbiter.attemptLeaseIfNewRow(resumeDagAction), 0);
  }
/*
Tests CONDITIONALLY_ACQUIRE_LEASE_IF_MATCHING_ALL_COLS_STATEMENT to ensure insertion is not completed if another
  participant updated the table between the prior read and the attempted insertion.
  Note: this isolates and tests CASE 4, in which a flow action event has an out-of-date lease, so a participant
  attempts a new one, provided the table's eventTimestamp and leaseAcquisitionTimestamp values are unchanged.
*/
  @Test (dependsOnMethods = "testAcquireLeaseIfNewRow")
  public void testConditionallyAcquireLeaseIfFMatchingAllColsStatement() throws IOException {
    // Snapshot the row's current eventTimestamp and leaseAcquisitionTimestamp to use as the
    // expected values in the conditional update below
    MysqlMultiActiveLeaseArbiter.SelectInfoResult selectInfoResult =
        mysqlMultiActiveLeaseArbiter.getRowInfo(resumeDagAction);
    // The following insert will fail since the eventTimestamp does not match
    int numRowsUpdated = mysqlMultiActiveLeaseArbiter.attemptLeaseIfExistingRow(
        formattedAcquireLeaseIfMatchingAllStatement, resumeDagAction, true, true,
        dummyTimestamp, new Timestamp(selectInfoResult.getLeaseAcquisitionTimeMillis().get()));
    Assert.assertEquals(numRowsUpdated, 0);
    // The following insert will fail since the leaseAcquisitionTimestamp does not match
    numRowsUpdated = mysqlMultiActiveLeaseArbiter.attemptLeaseIfExistingRow(
        formattedAcquireLeaseIfMatchingAllStatement, resumeDagAction, true, true,
        new Timestamp(selectInfoResult.getEventTimeMillis()), dummyTimestamp);
    Assert.assertEquals(numRowsUpdated, 0);
    // This insert should work since the values match all the columns
    numRowsUpdated = mysqlMultiActiveLeaseArbiter.attemptLeaseIfExistingRow(
        formattedAcquireLeaseIfMatchingAllStatement, resumeDagAction, true, true,
        new Timestamp(selectInfoResult.getEventTimeMillis()),
        new Timestamp(selectInfoResult.getLeaseAcquisitionTimeMillis().get()));
    Assert.assertEquals(numRowsUpdated, 1);
  }
/*
Tests CONDITIONALLY_ACQUIRE_LEASE_IF_FINISHED_LEASING_STATEMENT to ensure the insertion will only succeed if another
participant has not updated the eventTimestamp state since the prior read.
Note: This isolates and tests CASE 6 during which current participant saw a distinct flow action event had completed
its prior lease, encouraging the current participant to acquire a lease for its event.
*/
  @Test (dependsOnMethods = "testConditionallyAcquireLeaseIfFMatchingAllColsStatement")
  public void testConditionallyAcquireLeaseIfFinishedLeasingStatement()
      throws IOException, InterruptedException, SQLException {
    // Mark the resume action lease from above as completed by fabricating a LeaseObtainedStatus
    MysqlMultiActiveLeaseArbiter.SelectInfoResult selectInfoResult =
        mysqlMultiActiveLeaseArbiter.getRowInfo(resumeDagAction);
    // The fabricated status must carry the flowExecutionId matching the row's stored event time
    DagActionStore.DagAction updatedResumeDagAction = resumeDagAction.updateFlowExecutionId(
        selectInfoResult.getEventTimeMillis());
    boolean markedSuccess = mysqlMultiActiveLeaseArbiter.recordLeaseSuccess(new LeaseObtainedStatus(
        updatedResumeDagAction, selectInfoResult.getLeaseAcquisitionTimeMillis().get()));
    Assert.assertTrue(markedSuccess);
    // Ensure no NPE results from calling this after a lease has been completed and acquisition timestamp val is NULL
    mysqlMultiActiveLeaseArbiter.evaluateStatusAfterLeaseAttempt(1, resumeDagAction,
        Optional.empty(), false);
    // The following insert will fail since eventTimestamp does not match the expected
    int numRowsUpdated = mysqlMultiActiveLeaseArbiter.attemptLeaseIfExistingRow(
        formattedAcquireLeaseIfFinishedStatement, resumeDagAction, true, false,
        dummyTimestamp, null);
    Assert.assertEquals(numRowsUpdated, 0);
    // This insert does match since we utilize the right eventTimestamp (the statement only checks
    // eventTimestamp for the "finished leasing" case, hence the null acquisition timestamp)
    numRowsUpdated = mysqlMultiActiveLeaseArbiter.attemptLeaseIfExistingRow(
        formattedAcquireLeaseIfFinishedStatement, resumeDagAction, true, false,
        new Timestamp(selectInfoResult.getEventTimeMillis()), null);
    Assert.assertEquals(numRowsUpdated, 1);
  }
/*
Tests calling `tryAcquireLease` for an older reminder event which should be immediately returned as `NoLongerLeasing`
*/
@Test (dependsOnMethods = "testConditionallyAcquireLeaseIfFinishedLeasingStatement")
public void testOlderReminderEventAcquireLease() throws IOException {
// Read database to obtain existing db eventTimeMillis and use it to construct an older event
MysqlMultiActiveLeaseArbiter.SelectInfoResult selectInfoResult =
mysqlMultiActiveLeaseArbiter.getRowInfo(resumeDagAction);
long olderEventTimestamp = selectInfoResult.getEventTimeMillis() - 1;
LeaseAttemptStatus attemptStatus =
mysqlMultiActiveLeaseArbiter.tryAcquireLease(resumeDagAction, olderEventTimestamp, true);
Assert.assertTrue(attemptStatus instanceof NoLongerLeasingStatus);
}
/*
Tests calling `tryAcquireLease` for a reminder event for which a valid lease exists in the database. We don't expect
this case to occur because the reminderEvent should be triggered after the lease expires, but ensure it's handled
correctly anyway.
*/
@Test (dependsOnMethods = "testOlderReminderEventAcquireLease")
public void testReminderEventAcquireLeaseOnValidLease() throws IOException {
// Read database to obtain existing db eventTimeMillis and re-use it for the reminder event time
MysqlMultiActiveLeaseArbiter.SelectInfoResult selectInfoResult =
mysqlMultiActiveLeaseArbiter.getRowInfo(resumeDagAction);
LeaseAttemptStatus attemptStatus =
mysqlMultiActiveLeaseArbiter.tryAcquireLease(resumeDagAction, selectInfoResult.getEventTimeMillis(), true);
Assert.assertTrue(attemptStatus instanceof LeasedToAnotherStatus);
LeasedToAnotherStatus leasedToAnotherStatus = (LeasedToAnotherStatus) attemptStatus;
Assert.assertEquals(leasedToAnotherStatus.getEventTimeMillis(), selectInfoResult.getEventTimeMillis());
}
/*
Tests calling `tryAcquireLease` for a reminder event whose lease has expired in the database and should successfully
acquire a new lease
*/
  @Test (dependsOnMethods = "testReminderEventAcquireLeaseOnValidLease")
  public void testReminderEventAcquireLeaseOnInvalidLease() throws IOException, InterruptedException {
    // Read database to obtain existing db eventTimeMillis and wait enough time for the lease to expire
    MysqlMultiActiveLeaseArbiter.SelectInfoResult selectInfoResult =
        mysqlMultiActiveLeaseArbiter.getRowInfo(resumeDagAction);
    Thread.sleep(LINGER);
    LeaseAttemptStatus attemptStatus =
        mysqlMultiActiveLeaseArbiter.tryAcquireLease(resumeDagAction, selectInfoResult.getEventTimeMillis(), true);
    // The expired lease should be replaced by a fresh one owned by this participant
    Assert.assertTrue(attemptStatus instanceof LeaseObtainedStatus);
    LeaseObtainedStatus obtainedStatus = (LeaseObtainedStatus) attemptStatus;
    // Both timestamps must advance past the values recorded before the lease expired
    Assert.assertTrue(obtainedStatus.getEventTimeMillis() > selectInfoResult.getEventTimeMillis());
    Assert.assertTrue(obtainedStatus.getLeaseAcquisitionTimestamp() > selectInfoResult.getLeaseAcquisitionTimeMillis().get().longValue());
  }
/*
Tests calling `tryAcquireLease` for a reminder event whose lease has completed in the database and should return
`NoLongerLeasing` status.
Note: that we wait for enough time to pass that the event would have been considered distinct for a non-reminder case
to ensure that the comparison made for reminder events is against the preserved event time not current time in db
*/
  @Test (dependsOnMethods = "testReminderEventAcquireLeaseOnInvalidLease")
  public void testReminderEventAcquireLeaseOnCompletedLease() throws IOException, InterruptedException {
    // Mark the resume action lease from above as completed by fabricating a LeaseObtainedStatus
    MysqlMultiActiveLeaseArbiter.SelectInfoResult selectInfoResult =
        mysqlMultiActiveLeaseArbiter.getRowInfo(resumeDagAction);
    // The fabricated status must carry the flowExecutionId matching the row's stored event time
    DagActionStore.DagAction updatedResumeDagAction = resumeDagAction.updateFlowExecutionId(
        selectInfoResult.getEventTimeMillis());
    boolean markedSuccess = mysqlMultiActiveLeaseArbiter.recordLeaseSuccess(new LeaseObtainedStatus(
        updatedResumeDagAction, selectInfoResult.getLeaseAcquisitionTimeMillis().get()));
    Assert.assertTrue(markedSuccess);
    // Sleep enough time for the event to have been considered distinct
    Thread.sleep(MORE_THAN_EPSILON);
    // Now have a reminder event check-in on the completed lease; the reminder compares against the
    // preserved event time, so it must see the lease as completed and report NoLongerLeasing
    LeaseAttemptStatus attemptStatus =
        mysqlMultiActiveLeaseArbiter.tryAcquireLease(resumeDagAction, selectInfoResult.getEventTimeMillis(), true);
    Assert.assertTrue(attemptStatus instanceof NoLongerLeasingStatus);
  }
}
| 1,310 |
0 | Create_ds/gobblin/gobblin-runtime/src/test/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-runtime/src/test/java/org/apache/gobblin/runtime/api/SecureJobTemplateTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.api;
import java.net.URI;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import lombok.extern.slf4j.Slf4j;
@Slf4j
public class SecureJobTemplateTest {
  @Test
  public void test() {
    // A secure template permits user overrides only for the two listed property names.
    SecureJobTemplate template = newSecureTemplate();
    // Case 1: non-overridable keys are stripped; only the allowed key survives.
    Config userConfig = ConfigFactory.parseMap(ImmutableMap.of(
        "someProperty", "foo",
        "my.overridable.property1", "bar"
    ));
    Config filtered = SecureJobTemplate.filterUserConfig(template, userConfig, log);
    Assert.assertEquals(filtered.entrySet().size(), 1);
    Assert.assertEquals(filtered.getString("my.overridable.property1"), "bar");
    Assert.assertFalse(filtered.hasPath("someProperty"));
    Assert.assertFalse(filtered.hasPath("my.overridable.property2"));
    // Case 2: an overridable key acts as a prefix, so every entry under it is kept.
    userConfig = ConfigFactory.parseMap(ImmutableMap.of(
        "someProperty", "foo",
        "my.overridable.property1.key1", "bar",
        "my.overridable.property1.key2", "baz"
    ));
    filtered = SecureJobTemplate.filterUserConfig(template, userConfig, log);
    Assert.assertEquals(filtered.entrySet().size(), 2);
    Assert.assertEquals(filtered.getString("my.overridable.property1.key1"), "bar");
    Assert.assertEquals(filtered.getString("my.overridable.property1.key2"), "baz");
    // Case 3: multiple overridable keys may be supplied together.
    userConfig = ConfigFactory.parseMap(ImmutableMap.of(
        "someProperty", "foo",
        "my.overridable.property1", "bar",
        "my.overridable.property2", "baz"
    ));
    filtered = SecureJobTemplate.filterUserConfig(template, userConfig, log);
    Assert.assertEquals(filtered.entrySet().size(), 2);
    Assert.assertEquals(filtered.getString("my.overridable.property1"), "bar");
    Assert.assertEquals(filtered.getString("my.overridable.property2"), "baz");
  }
  /** Builds a mocked {@link SecureJobTemplate} allowing overrides of exactly two properties. */
  private static SecureJobTemplate newSecureTemplate() {
    SecureJobTemplate template = Mockito.mock(SecureJobTemplate.class);
    Mockito.when(template.getUri()).thenReturn(URI.create("my://template"));
    Mockito.when(template.isSecure()).thenReturn(true);
    Mockito.when(template.overridableProperties())
        .thenReturn(Lists.newArrayList("my.overridable.property1", "my.overridable.property2"));
    return template;
  }
}
| 1,311 |
0 | Create_ds/gobblin/gobblin-runtime/src/test/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-runtime/src/test/java/org/apache/gobblin/runtime/spec_executorInstance/MockedSpecExecutor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.spec_executorInstance;
import java.net.URI;
import java.util.Properties;
import java.util.concurrent.Future;
import org.mockito.Mockito;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.runtime.api.Spec;
import org.apache.gobblin.runtime.api.SpecExecutor;
import org.apache.gobblin.runtime.api.SpecProducer;
import org.apache.gobblin.util.CompletedFuture;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.when;
/**
 * A {@link SpecExecutor} for tests whose producer is a Mockito mock: add-spec calls complete
 * immediately with success and serde calls return trivially, so no real work is performed.
 */
public class MockedSpecExecutor extends InMemorySpecExecutor {
  // Assigned exactly once in the constructor and never replaced, hence final.
  private final SpecProducer<Spec> mockedSpecProducer;
  public MockedSpecExecutor(Config config) {
    super(config);
    this.mockedSpecProducer = Mockito.mock(SpecProducer.class);
    // Every mutating call resolves to an already-completed successful future
    when(mockedSpecProducer.addSpec(any())).thenReturn(new CompletedFuture(Boolean.TRUE, null));
    when(mockedSpecProducer.serializeAddSpecResponse(any())).thenReturn("");
    when(mockedSpecProducer.deserializeAddSpecResponse(any())).thenReturn(new CompletedFuture(Boolean.TRUE, null));
  }
  /**
   * Convenience factory: creates a mocked executor whose instance URI config key is set to
   * {@code uri}.
   */
  public static SpecExecutor createDummySpecExecutor(URI uri) {
    Properties properties = new Properties();
    properties.setProperty(ConfigurationKeys.SPECEXECUTOR_INSTANCE_URI_KEY, uri.toString());
    return new MockedSpecExecutor(ConfigFactory.parseProperties(properties));
  }
  /** Returns the mocked producer wrapped in an already-completed future. */
  @Override
  public Future<? extends SpecProducer<Spec>> getProducer(){
    return new CompletedFuture<>(this.mockedSpecProducer, null);
  }
}
| 1,312 |
0 | Create_ds/gobblin/gobblin-runtime/src/test/java/org/apache/gobblin/runtime/messaging | Create_ds/gobblin/gobblin-runtime/src/test/java/org/apache/gobblin/runtime/messaging/data/DynamicWorkUnitSerdeTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.messaging.data;
import com.google.gson.Gson;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import lombok.extern.java.Log;
import org.testng.annotations.Test;
import static org.testng.AssertJUnit.*;
@Log
@Test
public class DynamicWorkUnitSerdeTest {
  /** Round-trips a message through the serde, verifying concrete type and equality survive. */
  @Test
  public void testSerialization() {
    DynamicWorkUnitMessage msg = SplitWorkUnitMessage.builder()
        .workUnitId("workUnitId")
        .laggingTopicPartitions(Arrays.asList("topic-1","topic-2"))
        .build();
    byte[] serializedMsg = DynamicWorkUnitSerde.serialize(msg);
    DynamicWorkUnitMessage deserializedMsg = DynamicWorkUnitSerde.deserialize(serializedMsg);
    assertTrue(deserializedMsg instanceof SplitWorkUnitMessage);
    assertEquals(msg, deserializedMsg);
  }
  /**
   * Bytes produced without {@code DynamicWorkUnitSerde#serialize} must be rejected by
   * {@code deserialize} with a {@code DynamicWorkUnitDeserializationException}.
   */
  @Test(expectedExceptions = DynamicWorkUnitDeserializationException.class)
  public void testSerializationFails() {
    DynamicWorkUnitMessage msg = SplitWorkUnitMessage.builder()
        .workUnitId("workUnitId")
        .laggingTopicPartitions(Arrays.asList("topic-1","topic-2"))
        .build();
    // Serializing without using the DynamicWorkUnitSerde#serialize method should cause a runtime exception
    // when deserializing
    Gson gson = new Gson();
    byte[] serializedMsg = gson.toJson(msg).getBytes(StandardCharsets.UTF_8);
    try {
      // Return value intentionally discarded; only the thrown exception matters here
      DynamicWorkUnitSerde.deserialize(serializedMsg);
    } catch (DynamicWorkUnitDeserializationException e) {
      log.info("Successfully threw exception when failing to deserialize. exception=" + e);
      throw e;
    }
  }
}
| 1,313 |
0 | Create_ds/gobblin/gobblin-runtime/src/test/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-runtime/src/test/java/org/apache/gobblin/runtime/mapreduce/MRJobLauncherTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.mapreduce;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.util.Collection;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.jboss.byteman.contrib.bmunit.BMNGRunner;
import org.jboss.byteman.contrib.bmunit.BMRule;
import org.jboss.byteman.contrib.bmunit.BMRules;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.google.common.collect.ImmutableMap;
import com.google.common.io.Files;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.capability.Capability;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.DynamicConfigGenerator;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.metastore.FsStateStore;
import org.apache.gobblin.metastore.StateStore;
import org.apache.gobblin.metastore.testing.ITestMetastoreDatabase;
import org.apache.gobblin.metastore.testing.TestMetastoreDatabaseFactory;
import org.apache.gobblin.metrics.GobblinMetrics;
import org.apache.gobblin.publisher.DataPublisher;
import org.apache.gobblin.runtime.AbstractJobLauncher;
import org.apache.gobblin.runtime.JobContext;
import org.apache.gobblin.runtime.JobLauncherTestHelper;
import org.apache.gobblin.runtime.JobState;
import org.apache.gobblin.util.limiter.BaseLimiterType;
import org.apache.gobblin.util.limiter.DefaultLimiterFactory;
import org.apache.gobblin.writer.Destination;
import org.apache.gobblin.writer.WriterOutputFormat;
/**
* Unit test for {@link MRJobLauncher}.
*/
@Test(groups = { "gobblin.runtime.mapreduce", "gobblin.runtime" }, singleThreaded=true)
public class MRJobLauncherTest extends BMNGRunner {
private final Logger testLogger = LoggerFactory.getLogger(MRJobLauncherTest.class);
private Properties launcherProps;
private JobLauncherTestHelper jobLauncherTestHelper;
private ITestMetastoreDatabase testMetastoreDatabase;
  @BeforeClass
  public void startUp() throws Exception {
    this.testLogger.info("startUp: in");
    // Spin up an embedded metastore DB shared by all tests in this class
    testMetastoreDatabase = TestMetastoreDatabaseFactory.get();
    this.launcherProps = new Properties();
    // Base launcher configuration comes from a classpath resource
    try (InputStream propsReader = getClass().getClassLoader().getResourceAsStream("gobblin.mr-test.properties")) {
      this.launcherProps.load(propsReader);
    }
    this.launcherProps.setProperty(ConfigurationKeys.JOB_HISTORY_STORE_ENABLED_KEY, "true");
    this.launcherProps.setProperty(ConfigurationKeys.METRICS_ENABLED_KEY, "true");
    this.launcherProps.setProperty(ConfigurationKeys.METRICS_REPORTING_FILE_ENABLED_KEY, "false");
    // Point the job history store at the embedded test database
    this.launcherProps.setProperty(ConfigurationKeys.JOB_HISTORY_STORE_URL_KEY,
        testMetastoreDatabase.getJdbcUrl());
    StateStore<JobState.DatasetState> datasetStateStore =
        new FsStateStore<>(this.launcherProps.getProperty(ConfigurationKeys.STATE_STORE_FS_URI_KEY),
            this.launcherProps.getProperty(ConfigurationKeys.STATE_STORE_ROOT_DIR_KEY), JobState.DatasetState.class);
    this.jobLauncherTestHelper = new JobLauncherTestHelper(this.launcherProps, datasetStateStore);
    // Other tests may not clean up properly, clean up outputDir and stagingDir or some of these tests might fail.
    String outputDir = this.launcherProps.getProperty(ConfigurationKeys.WRITER_OUTPUT_DIR);
    String stagingDir = this.launcherProps.getProperty(ConfigurationKeys.WRITER_STAGING_DIR);
    FileUtils.deleteDirectory(new File(outputDir));
    FileUtils.deleteDirectory(new File(stagingDir));
    this.testLogger.info("startUp: out");
  }
@Test
public void testLaunchJob() throws Exception {
final Logger log = LoggerFactory.getLogger(getClass().getName() + ".testLaunchJob");
log.info("in");
Properties jobProps = loadJobProps();
jobProps.setProperty(ConfigurationKeys.JOB_NAME_KEY,
jobProps.getProperty(ConfigurationKeys.JOB_NAME_KEY) + "-testLaunchJob");
try {
this.jobLauncherTestHelper.runTest(jobProps);
} finally {
this.jobLauncherTestHelper.deleteStateStore(jobProps.getProperty(ConfigurationKeys.JOB_NAME_KEY));
}
log.info("out");
}
@Test
public void testCleanUpMrJarsBaseDir() throws Exception {
File tmpDirectory = Files.createTempDir();
tmpDirectory.deleteOnExit();
FileSystem fs = FileSystem.get(new Configuration());
String baseJarDir = tmpDirectory.getAbsolutePath();
fs.mkdirs(new Path(baseJarDir, "2023-01"));
fs.mkdirs(new Path(baseJarDir, "2022-12"));
fs.mkdirs(new Path(baseJarDir, "2023-02"));
MRJobLauncher.cleanUpOldJarsDirIfRequired(FileSystem.get(new Configuration()), new Path(tmpDirectory.getPath()));
Assert.assertFalse(fs.exists(new Path(baseJarDir, "2022-12")));
Assert.assertTrue(fs.exists(new Path(baseJarDir, "2023-01")));
Assert.assertTrue(fs.exists(new Path(baseJarDir, "2023-02")));
fs.delete(new Path(baseJarDir), true);
}
  @Test
  public void testNumOfWorkunits() throws Exception {
    Properties jobProps = loadJobProps();
    JobContext jobContext;
    jobProps.setProperty(ConfigurationKeys.JOB_NAME_KEY,
        jobProps.getProperty(ConfigurationKeys.JOB_NAME_KEY) + "-testNumOfWorkunits");
    try {
      jobContext = this.jobLauncherTestHelper.runTest(jobProps);
    } finally {
      this.jobLauncherTestHelper.deleteStateStore(jobProps.getProperty(ConfigurationKeys.JOB_NAME_KEY));
    }
    // Without skipping, the job state reports all 4 work units
    Assert.assertEquals(jobContext.getJobState().getPropAsInt(AbstractJobLauncher.NUM_WORKUNITS), 4);
    // Re-run with work-unit skipping enabled; only 2 work units should remain counted
    jobProps.setProperty(ConfigurationKeys.WORK_UNIT_SKIP_KEY, Boolean.TRUE.toString());
    try {
      jobContext = this.jobLauncherTestHelper.runTest(jobProps);
    } finally {
      this.jobLauncherTestHelper.deleteStateStore(jobProps.getProperty(ConfigurationKeys.JOB_NAME_KEY));
    }
    Assert.assertEquals(jobContext.getJobState().getPropAsInt(AbstractJobLauncher.NUM_WORKUNITS), 2);
  }
  @Test
  public void testLaunchJobWithConcurrencyLimit() throws Exception {
    final Logger log = LoggerFactory.getLogger(getClass().getName() + ".testLaunchJobWithConcurrencyLimit");
    log.info("in");
    Properties jobProps = loadJobProps();
    jobProps.setProperty(ConfigurationKeys.JOB_NAME_KEY,
        jobProps.getProperty(ConfigurationKeys.JOB_NAME_KEY) + "-testLaunchJobWithConcurrencyLimit");
    // Run the same job with mapper caps both below and above the number of work units
    jobProps.setProperty(ConfigurationKeys.MR_JOB_MAX_MAPPERS_KEY, "2");
    this.jobLauncherTestHelper.runTest(jobProps);
    jobProps.setProperty(ConfigurationKeys.MR_JOB_MAX_MAPPERS_KEY, "3");
    this.jobLauncherTestHelper.runTest(jobProps);
    jobProps.setProperty(ConfigurationKeys.MR_JOB_MAX_MAPPERS_KEY, "5");
    try {
      this.jobLauncherTestHelper.runTest(jobProps);
    } finally {
      // State store cleanup happens once, after the final run
      this.jobLauncherTestHelper.deleteStateStore(jobProps.getProperty(ConfigurationKeys.JOB_NAME_KEY));
    }
    log.info("out");
  }
@Test
public void testLaunchJobWithPullLimit() throws Exception {
int limit = 10;
Properties jobProps = loadJobProps();
jobProps.setProperty(ConfigurationKeys.JOB_NAME_KEY,
jobProps.getProperty(ConfigurationKeys.JOB_NAME_KEY) + "-testLaunchJobWithPullLimit");
jobProps.setProperty(ConfigurationKeys.EXTRACT_LIMIT_ENABLED_KEY, Boolean.TRUE.toString());
jobProps.setProperty(DefaultLimiterFactory.EXTRACT_LIMIT_TYPE_KEY, BaseLimiterType.COUNT_BASED.toString());
jobProps.setProperty(DefaultLimiterFactory.EXTRACT_LIMIT_COUNT_LIMIT_KEY, Integer.toString(10));
try {
this.jobLauncherTestHelper.runTestWithPullLimit(jobProps, limit);
} finally {
this.jobLauncherTestHelper.deleteStateStore(jobProps.getProperty(ConfigurationKeys.JOB_NAME_KEY));
}
}
@Test
public void testLaunchJobWithMultiWorkUnit() throws Exception {
Properties jobProps = loadJobProps();
jobProps.setProperty(ConfigurationKeys.JOB_NAME_KEY,
jobProps.getProperty(ConfigurationKeys.JOB_NAME_KEY) + "-testLaunchJobWithMultiWorkUnit");
jobProps.setProperty("use.multiworkunit", Boolean.toString(true));
try {
this.jobLauncherTestHelper.runTest(jobProps);
} finally {
this.jobLauncherTestHelper.deleteStateStore(jobProps.getProperty(ConfigurationKeys.JOB_NAME_KEY));
}
}
  // Placed in the "ignore" group so it is excluded from default test runs
  @Test(groups = { "ignore" })
  public void testCancelJob() throws Exception {
    // Launches a job and exercises the cancellation path via the helper
    this.jobLauncherTestHelper.runTestWithCancellation(loadJobProps());
  }
  @Test
  public void testLaunchJobWithFork() throws Exception {
    Properties jobProps = loadJobProps();
    jobProps.setProperty(ConfigurationKeys.JOB_NAME_KEY,
        jobProps.getProperty(ConfigurationKeys.JOB_NAME_KEY) + "-testLaunchJobWithFork");
    jobProps.setProperty(ConfigurationKeys.CONVERTER_CLASSES_KEY, "org.apache.gobblin.test.TestConverter2");
    // Run with two fork branches; each branch-scoped key below is suffixed ".0" / ".1"
    jobProps.setProperty(ConfigurationKeys.FORK_BRANCHES_KEY, "2");
    // Per-branch row-level and task-level quality policies (all optional)
    jobProps.setProperty(ConfigurationKeys.ROW_LEVEL_POLICY_LIST + ".0",
        "org.apache.gobblin.policies.schema.SchemaRowCheckPolicy");
    jobProps.setProperty(ConfigurationKeys.ROW_LEVEL_POLICY_LIST + ".1",
        "org.apache.gobblin.policies.schema.SchemaRowCheckPolicy");
    jobProps.setProperty(ConfigurationKeys.ROW_LEVEL_POLICY_LIST_TYPE + ".0", "OPTIONAL");
    jobProps.setProperty(ConfigurationKeys.ROW_LEVEL_POLICY_LIST_TYPE + ".1", "OPTIONAL");
    jobProps.setProperty(ConfigurationKeys.TASK_LEVEL_POLICY_LIST + ".0",
        "org.apache.gobblin.policies.count.RowCountPolicy,org.apache.gobblin.policies.schema.SchemaCompatibilityPolicy");
    jobProps.setProperty(ConfigurationKeys.TASK_LEVEL_POLICY_LIST + ".1",
        "org.apache.gobblin.policies.count.RowCountPolicy,org.apache.gobblin.policies.schema.SchemaCompatibilityPolicy");
    jobProps.setProperty(ConfigurationKeys.TASK_LEVEL_POLICY_LIST_TYPE + ".0", "OPTIONAL,OPTIONAL");
    jobProps.setProperty(ConfigurationKeys.TASK_LEVEL_POLICY_LIST_TYPE + ".1", "OPTIONAL,OPTIONAL");
    // Both branches write Avro to HDFS
    jobProps.setProperty(ConfigurationKeys.WRITER_OUTPUT_FORMAT_KEY + ".0", WriterOutputFormat.AVRO.name());
    jobProps.setProperty(ConfigurationKeys.WRITER_OUTPUT_FORMAT_KEY + ".1", WriterOutputFormat.AVRO.name());
    jobProps.setProperty(ConfigurationKeys.WRITER_DESTINATION_TYPE_KEY + ".0", Destination.DestinationType.HDFS.name());
    jobProps.setProperty(ConfigurationKeys.WRITER_DESTINATION_TYPE_KEY + ".1", Destination.DestinationType.HDFS.name());
    // Both branches reuse the job-level staging/output/publish directories
    jobProps.setProperty(ConfigurationKeys.WRITER_STAGING_DIR + ".0",
        jobProps.getProperty(ConfigurationKeys.WRITER_STAGING_DIR));
    jobProps.setProperty(ConfigurationKeys.WRITER_STAGING_DIR + ".1",
        jobProps.getProperty(ConfigurationKeys.WRITER_STAGING_DIR));
    jobProps.setProperty(ConfigurationKeys.WRITER_OUTPUT_DIR + ".0",
        jobProps.getProperty(ConfigurationKeys.WRITER_OUTPUT_DIR));
    jobProps.setProperty(ConfigurationKeys.WRITER_OUTPUT_DIR + ".1",
        jobProps.getProperty(ConfigurationKeys.WRITER_OUTPUT_DIR));
    jobProps.setProperty(ConfigurationKeys.DATA_PUBLISHER_FINAL_DIR + ".0",
        jobProps.getProperty(ConfigurationKeys.DATA_PUBLISHER_FINAL_DIR));
    jobProps.setProperty(ConfigurationKeys.DATA_PUBLISHER_FINAL_DIR + ".1",
        jobProps.getProperty(ConfigurationKeys.DATA_PUBLISHER_FINAL_DIR));
    try {
      this.jobLauncherTestHelper.runTestWithFork(jobProps);
    } finally {
      this.jobLauncherTestHelper.deleteStateStore(jobProps.getProperty(ConfigurationKeys.JOB_NAME_KEY));
    }
  }
/**
* Byteman test that ensures the {@link MRJobLauncher} successfully cleans up all staging data even
* when an exception is thrown in the {@link MRJobLauncher#countersToMetrics(GobblinMetrics)} method.
* The {@link BMRule} is to inject an {@link IOException} when the
* {@link MRJobLauncher#countersToMetrics(GobblinMetrics)} method is called.
*/
  @Test
  @BMRule(name = "testJobCleanupOnError", targetClass = "org.apache.gobblin.runtime.mapreduce.MRJobLauncher",
      targetMethod = "countersToMetrics(GobblinMetrics)", targetLocation = "AT ENTRY", condition = "true",
      action = "throw new IOException(\"Exception for testJobCleanupOnError\")")
  public void testJobCleanupOnError() throws IOException {
    Properties props = loadJobProps();
    try {
      this.jobLauncherTestHelper.runTest(props);
      Assert.fail("Byteman is not configured properly, the runTest method should have throw an exception");
    } catch (Exception e) {
      // The job should throw an exception, ignore it
    } finally {
      this.jobLauncherTestHelper.deleteStateStore(props.getProperty(ConfigurationKeys.JOB_NAME_KEY));
    }
    // Despite the injected failure, staging (and output, if present) must be left empty
    Assert.assertTrue(props.containsKey(ConfigurationKeys.WRITER_STAGING_DIR));
    Assert.assertTrue(props.containsKey(ConfigurationKeys.WRITER_OUTPUT_DIR));
    File stagingDir = new File(props.getProperty(ConfigurationKeys.WRITER_STAGING_DIR));
    File outputDir = new File(props.getProperty(ConfigurationKeys.WRITER_OUTPUT_DIR));
    Assert.assertEquals(FileUtils.listFiles(stagingDir, null, true).size(), 0);
    if (outputDir.exists()) {
      Assert.assertEquals(FileUtils.listFiles(outputDir, null, true).size(), 0);
    }
  }
// This test uses byteman to check that the ".suc" files are recorded in the task state store for successful
// tasks when there are some task failures.
// static variable to count the number of task success marker files written in this test case.
// NOTE: it must remain public static — the injected Byteman action below references it by its
// fully-qualified name, so renaming or narrowing it would silently break the rule.
public static int sucCount1 = 0;
@Test
@BMRules(rules = {
    // Increment sucCount1 each time FsStateStore.put is called with a table name ($2) ending in ".suc".
    @BMRule(name = "saveSuccessCount", targetClass = "org.apache.gobblin.metastore.FsStateStore",
        targetMethod = "put", targetLocation = "AT ENTRY", condition = "$2.endsWith(\".suc\")",
        action = "org.apache.gobblin.runtime.mapreduce.MRJobLauncherTest.sucCount1 = org.apache.gobblin.runtime.mapreduce.MRJobLauncherTest.sucCount1 + 1")
})
public void testLaunchJobWithMultiWorkUnitAndFaultyExtractor() throws Exception {
  Properties jobProps = loadJobProps();
  // Suffix the job name so this test's state-store entries do not collide with other tests'.
  jobProps.setProperty(ConfigurationKeys.JOB_NAME_KEY,
      jobProps.getProperty(ConfigurationKeys.JOB_NAME_KEY) + "-testLaunchJobWithMultiWorkUnitAndFaultyExtractor");
  jobProps.setProperty("use.multiworkunit", Boolean.toString(true));
  try {
    this.jobLauncherTestHelper.runTestWithCommitSuccessfulTasksPolicy(jobProps);
    // three of the 4 tasks should have succeeded, so 3 suc files should have been written
    Assert.assertEquals(sucCount1, 3);
  } finally {
    this.jobLauncherTestHelper.deleteStateStore(jobProps.getProperty(ConfigurationKeys.JOB_NAME_KEY));
  }
}
// This test case checks that if a ".suc" task state file exists for a task then it is skipped.
// This test also checks that ".suc" files are not written when there are no task failures.
// static variables accessed by byteman in this test case.
// NOTE: both must stay public static — the injected rule actions below reference them by
// fully-qualified name.
public static WorkUnitState wus = null;
public static int sucCount2 = 0;
@Test
@BMRules(rules = {
    // Capture a WorkUnitState for the task whose id ends with "_1" so the injected ".suc"
    // marker (rule "writeSuccessFile" below) can be built from it.
    @BMRule(name = "getWorkUnitState", targetClass = "org.apache.gobblin.runtime.GobblinMultiTaskAttempt",
        targetMethod = "runWorkUnits", targetLocation = "AFTER WRITE $taskId", condition = "$taskId.endsWith(\"_1\")",
        action = "org.apache.gobblin.runtime.mapreduce.MRJobLauncherTest.wus = new org.apache.gobblin.configuration.WorkUnitState($workUnit, $0.jobState)"),
    // Count every ".suc" marker written to the task state store.
    @BMRule(name = "saveSuccessCount", targetClass = "org.apache.gobblin.metastore.FsStateStore",
        targetMethod = "put", targetLocation = "AT ENTRY", condition = "$2.endsWith(\".suc\")",
        action = "org.apache.gobblin.runtime.mapreduce.MRJobLauncherTest.sucCount2 = org.apache.gobblin.runtime.mapreduce.MRJobLauncherTest.sucCount2 + 1"),
    // Fail fast if the dynamic config generated by TestDynamicConfigGenerator did not reach
    // the mapper-side job state.
    @BMRule(name = "checkProp", targetClass = "org.apache.gobblin.runtime.mapreduce.MRJobLauncher$TaskRunner",
        targetMethod = "setup", targetLocation = "AT EXIT",
        condition = "!$0.jobState.getProp(\"DynamicKey1\").equals(\"DynamicValue1\")",
        action = "throw new RuntimeException(\"could not find key\")"),
    // Inject a ".suc" marker for task "_1" so the launcher treats it as already succeeded
    // in a prior attempt and skips it.
    @BMRule(name = "writeSuccessFile", targetClass = "org.apache.gobblin.runtime.GobblinMultiTaskAttempt",
        targetMethod = "taskSuccessfulInPriorAttempt", targetLocation = "AFTER WRITE $taskStateStore",
        condition = "$1.endsWith(\"_1\")",
        action = "$taskStateStore.put($0.jobId, $1 + \".suc\", new org.apache.gobblin.runtime.TaskState(org.apache.gobblin.runtime.mapreduce.MRJobLauncherTest.wus))")
})
public void testLaunchJobWithMultiWorkUnitAndSucFile() throws Exception {
  Properties jobProps = loadJobProps();
  // Suffix the job name so this test's state-store entries do not collide with other tests'.
  jobProps.setProperty(ConfigurationKeys.JOB_NAME_KEY,
      jobProps.getProperty(ConfigurationKeys.JOB_NAME_KEY) + "-testLaunchJobWithMultiWorkUnitAndSucFile");
  jobProps.setProperty("use.multiworkunit", Boolean.toString(true));
  jobProps.setProperty("dynamicConfigGenerator.class",
      "org.apache.gobblin.runtime.mapreduce.MRJobLauncherTest$TestDynamicConfigGenerator");
  try {
    this.jobLauncherTestHelper.runTestWithSkippedTask(jobProps, "_1");
    // no failures, so the only success file written is the injected one
    Assert.assertEquals(sucCount2, 1);
  } finally {
    this.jobLauncherTestHelper.deleteStateStore(jobProps.getProperty(ConfigurationKeys.JOB_NAME_KEY));
  }
}
/** Verifies that a job spanning multiple datasets runs end to end. */
@Test
public void testLaunchJobWithMultipleDatasets() throws Exception {
  Properties props = loadJobProps();
  String jobName = props.getProperty(ConfigurationKeys.JOB_NAME_KEY) + "-testLaunchJobWithMultipleDatasets";
  props.setProperty(ConfigurationKeys.JOB_NAME_KEY, jobName);
  try {
    this.jobLauncherTestHelper.runTestWithMultipleDatasets(props);
  } finally {
    // Always clear this job's state-store entries, even on failure.
    this.jobLauncherTestHelper.deleteStateStore(jobName);
  }
}
/**
 * Seems setting mapreduce.map.failures.maxpercent=100 does not prevent Hadoop2's LocalJobRunner from
 * failing and aborting a job if any mapper task fails. Aborting the job causes its working directory
 * to be deleted in {@link GobblinOutputCommitter}, which further fails this test since all the output
 * {@link org.apache.gobblin.runtime.TaskState}s are deleted. There may be a bug in Hadoop2's LocalJobRunner.
 *
 * Also applicable to the two tests below.
 */
@Test(groups = { "ignore" })
public void testLaunchJobWithCommitSuccessfulTasksPolicy() throws Exception {
  Properties props = loadJobProps();
  String jobName =
      props.getProperty(ConfigurationKeys.JOB_NAME_KEY) + "-testLaunchJobWithCommitSuccessfulTasksPolicy";
  props.setProperty(ConfigurationKeys.JOB_NAME_KEY, jobName);
  try {
    this.jobLauncherTestHelper.runTestWithCommitSuccessfulTasksPolicy(props);
  } finally {
    // Always clear this job's state-store entries, even on failure.
    this.jobLauncherTestHelper.deleteStateStore(jobName);
  }
}
/** Multiple datasets with a faulty extractor; expected to fail overall (no partial commit). */
@Test(groups = { "ignore" })
public void testLaunchJobWithMultipleDatasetsAndFaultyExtractor() throws Exception {
  Properties props = loadJobProps();
  String jobName =
      props.getProperty(ConfigurationKeys.JOB_NAME_KEY) + "-testLaunchJobWithMultipleDatasetsAndFaultyExtractor";
  props.setProperty(ConfigurationKeys.JOB_NAME_KEY, jobName);
  try {
    // false => do not use the partial-commit policy
    this.jobLauncherTestHelper.runTestWithMultipleDatasetsAndFaultyExtractor(props, false);
  } finally {
    this.jobLauncherTestHelper.deleteStateStore(jobName);
  }
}
/** Multiple datasets with a faulty extractor, but with the partial-commit policy enabled. */
@Test(groups = { "ignore" })
public void testLaunchJobWithMultipleDatasetsAndFaultyExtractorAndPartialCommitPolicy() throws Exception {
  Properties props = loadJobProps();
  String jobName = props.getProperty(ConfigurationKeys.JOB_NAME_KEY)
      + "-testLaunchJobWithMultipleDatasetsAndFaultyExtractorAndPartialCommitPolicy";
  props.setProperty(ConfigurationKeys.JOB_NAME_KEY, jobName);
  try {
    // true => use the partial-commit policy
    this.jobLauncherTestHelper.runTestWithMultipleDatasetsAndFaultyExtractor(props, true);
  } finally {
    this.jobLauncherTestHelper.deleteStateStore(jobName);
  }
}
/**
 * A publisher that does not advertise thread safety must be instantiated once per dataset;
 * with four datasets the instance count should be exactly four.
 */
@Test
public void testLaunchJobWithNonThreadsafeDataPublisher() throws Exception {
  final Logger log = LoggerFactory.getLogger(getClass().getName() + ".testLaunchJobWithNonThreadsafeDataPublisher");
  log.info("in");
  Properties props = loadJobProps();
  String jobName =
      props.getProperty(ConfigurationKeys.JOB_NAME_KEY) + "-testLaunchJobWithNonThreadsafeDataPublisher";
  props.setProperty(ConfigurationKeys.JOB_NAME_KEY, jobName);
  props.setProperty(ConfigurationKeys.JOB_DATA_PUBLISHER_TYPE, TestNonThreadsafeDataPublisher.class.getName());
  // Reset the shared counter so this test observes only its own publisher instances.
  TestNonThreadsafeDataPublisher.instantiatedCount.set(0);
  try {
    this.jobLauncherTestHelper.runTestWithMultipleDatasets(props);
  } finally {
    this.jobLauncherTestHelper.deleteStateStore(jobName);
  }
  // A different publisher is used for each dataset
  Assert.assertEquals(TestNonThreadsafeDataPublisher.instantiatedCount.get(), 4);
  log.info("out");
}
/**
 * A publisher that advertises thread safety may be shared, so exactly one instance should
 * be created regardless of how many datasets the job spans.
 */
@Test
public void testLaunchJobWithThreadsafeDataPublisher() throws Exception {
  final Logger log = LoggerFactory.getLogger(getClass().getName() + ".testLaunchJobWithThreadsafeDataPublisher");
  log.info("in");
  Properties props = loadJobProps();
  String jobName =
      props.getProperty(ConfigurationKeys.JOB_NAME_KEY) + "-testLaunchJobWithThreadsafeDataPublisher";
  props.setProperty(ConfigurationKeys.JOB_NAME_KEY, jobName);
  props.setProperty(ConfigurationKeys.JOB_DATA_PUBLISHER_TYPE, TestThreadsafeDataPublisher.class.getName());
  // Reset the shared counter so this test observes only its own publisher instances.
  TestThreadsafeDataPublisher.instantiatedCount.set(0);
  try {
    this.jobLauncherTestHelper.runTestWithMultipleDatasets(props);
  } finally {
    this.jobLauncherTestHelper.deleteStateStore(jobName);
  }
  // The same publisher is used for all the data sets
  Assert.assertEquals(TestThreadsafeDataPublisher.instantiatedCount.get(), 1);
  log.info("out");
}
/** Closes the shared test metastore database once all tests have run. */
@AfterClass(alwaysRun = true)
public void tearDown() throws IOException {
  if (testMetastoreDatabase == null) {
    return;
  }
  testMetastoreDatabase.close();
}
/**
 * Loads the base MR test job configuration from the bundled pull file, overlays the
 * launcher properties, and points the source at the four bundled test Avro files.
 */
public Properties loadJobProps() throws IOException {
  Properties props = new Properties();
  try (InputStream in = getClass().getClassLoader().getResourceAsStream("mr-job-conf/GobblinMRTest.pull")) {
    props.load(in);
  }
  props.putAll(this.launcherProps);
  // Build "gobblin-test/resource/source/test.avro.0,...,test.avro.3".
  StringBuilder sourceFiles = new StringBuilder();
  for (int i = 0; i < 4; i++) {
    if (i > 0) {
      sourceFiles.append(',');
    }
    sourceFiles.append("gobblin-test/resource/source/test.avro.").append(i);
  }
  props.setProperty(JobLauncherTestHelper.SOURCE_FILE_LIST_KEY, sourceFiles.toString());
  return props;
}
/** A {@link DynamicConfigGenerator} that always emits the single test key/value pair. */
public static class TestDynamicConfigGenerator implements DynamicConfigGenerator {
  public TestDynamicConfigGenerator() {
  }

  @Override
  public Config generateDynamicConfig(Config config) {
    // The input config is deliberately ignored; tests only look for this one entry.
    return ConfigFactory.parseMap(
        ImmutableMap.of(JobLauncherTestHelper.DYNAMIC_KEY1, JobLauncherTestHelper.DYNAMIC_VALUE1));
  }
}
/**
 * A {@link DataPublisher} used to count how many publisher instances a job creates.
 * It reports {@link DataPublisher#REUSABLE} but not THREADSAFE, so the launcher must
 * create a separate instance per dataset.
 */
public static class TestNonThreadsafeDataPublisher extends DataPublisher {
  // for counting how many times the object is instantiated in the test case
  static AtomicInteger instantiatedCount = new AtomicInteger(0);

  public TestNonThreadsafeDataPublisher(State state) {
    super(state);
    instantiatedCount.incrementAndGet();
  }

  @Override
  public void initialize() throws IOException {
    // Nothing to set up for this test publisher.
  }

  @Override
  public void publishData(Collection<? extends WorkUnitState> states) throws IOException {
    // Upon successfully committing the data to the final output directory, set states
    // of successful tasks to COMMITTED, leaving states of unsuccessful ones unchanged.
    // This makes sense to the COMMIT_ON_PARTIAL_SUCCESS policy.
    for (WorkUnitState workUnitState : states) {
      workUnitState.setWorkingState(WorkUnitState.WorkingState.COMMITTED);
    }
  }

  @Override
  public void publishMetadata(Collection<? extends WorkUnitState> states) throws IOException {
    // Metadata publishing is a no-op for this test publisher.
  }

  @Override
  public void close() throws IOException {
    // No resources held.
  }

  @Override
  public boolean supportsCapability(Capability c, Map<String, Object> properties) {
    return DataPublisher.REUSABLE == c;
  }
}
/**
 * Same counting publisher as {@link TestNonThreadsafeDataPublisher}, but additionally
 * advertises THREADSAFE so the launcher may share a single instance across all datasets.
 */
public static class TestThreadsafeDataPublisher extends TestNonThreadsafeDataPublisher {
  public TestThreadsafeDataPublisher(State state) {
    super(state);
  }

  @Override
  public boolean supportsCapability(Capability c, Map<String, Object> properties) {
    return Capability.THREADSAFE == c || DataPublisher.REUSABLE == c;
  }
}
}
| 1,314 |
0 | Create_ds/gobblin/gobblin-runtime/src/test/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-runtime/src/test/java/org/apache/gobblin/runtime/mapreduce/GobblinWorkUnitsInputFormatTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.mapreduce;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.net.URI;
import java.util.List;
import java.util.Set;
import org.apache.commons.io.output.ByteArrayOutputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemTestUtils;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
/** Unit tests for {@link GobblinWorkUnitsInputFormat}. */
public class GobblinWorkUnitsInputFormatTest {

  @Test
  public void testGetSplits()
      throws Exception {
    URI baseUri = new URI(GobblinWorkUnitsInputFormatTest.class.getSimpleName() + "://testGetSplits");
    Configuration configuration = new Configuration();
    Path workUnitsDir = new Path(new Path(baseUri), "/workUnits");
    FileStatus[] statuses = registerMockFileSystem(baseUri, configuration, workUnitsDir, 20);

    GobblinWorkUnitsInputFormat inputFormat = new GobblinWorkUnitsInputFormat();
    Job job = Job.getInstance(configuration);
    FileInputFormat.addInputPath(job, workUnitsDir);

    List<InputSplit> splits = inputFormat.getSplits(job);
    // Without a mapper cap, every work unit file becomes its own split.
    Assert.assertEquals(splits.size(), 20);
    verifyPaths(splits, statuses);
  }

  @Test
  public void testGetSplitsMaxSize()
      throws Exception {
    URI baseUri = new URI(GobblinWorkUnitsInputFormatTest.class.getSimpleName() + "://testGetSplitsMaxSize");
    Configuration configuration = new Configuration();
    Path workUnitsDir = new Path(new Path(baseUri), "/workUnits");
    FileStatus[] statuses = registerMockFileSystem(baseUri, configuration, workUnitsDir, 20);

    GobblinWorkUnitsInputFormat inputFormat = new GobblinWorkUnitsInputFormat();
    Job job = Job.getInstance(configuration);
    FileInputFormat.addInputPath(job, workUnitsDir);
    // Capping the mapper count forces the 20 files to be packed into fewer splits.
    GobblinWorkUnitsInputFormat.setMaxMappers(job, 6);

    List<InputSplit> splits = inputFormat.getSplits(job);
    Assert.assertTrue(splits.size() < 6);
    verifyPaths(splits, statuses);
  }

  @Test
  public void testSplit() throws Exception {
    // A GobblinSplit must survive a Writable serialization round trip unchanged.
    List<String> paths = Lists.newArrayList("/path1", "/path2");
    GobblinWorkUnitsInputFormat.GobblinSplit split = new GobblinWorkUnitsInputFormat.GobblinSplit(paths);

    ByteArrayOutputStream os = new ByteArrayOutputStream();
    split.write(new DataOutputStream(os));

    ByteArrayInputStream is = new ByteArrayInputStream(os.toByteArray());
    GobblinWorkUnitsInputFormat.GobblinSplit deserSplit = new GobblinWorkUnitsInputFormat.GobblinSplit();
    deserSplit.readFields(new DataInputStream(is));

    Assert.assertEquals(split, deserSplit);
  }

  @Test
  public void testRecordReader()
      throws Exception {
    List<String> paths = Lists.newArrayList("/path1", "/path2");
    GobblinWorkUnitsInputFormat.GobblinSplit split = new GobblinWorkUnitsInputFormat.GobblinSplit(paths);
    GobblinWorkUnitsInputFormat inputFormat = new GobblinWorkUnitsInputFormat();
    RecordReader<LongWritable, Text> recordReader =
        inputFormat.createRecordReader(split, new TaskAttemptContextImpl(new Configuration(), new TaskAttemptID("a", 1,
            TaskType.MAP, 1, 1)));

    // The reader enumerates (index, path) pairs in split order, then reports exhaustion.
    recordReader.nextKeyValue();
    Assert.assertEquals(recordReader.getCurrentKey().get(), 0);
    Assert.assertEquals(recordReader.getCurrentValue().toString(), "/path1");
    recordReader.nextKeyValue();
    Assert.assertEquals(recordReader.getCurrentKey().get(), 1);
    Assert.assertEquals(recordReader.getCurrentValue().toString(), "/path2");
    Assert.assertFalse(recordReader.nextKeyValue());
  }

  /**
   * Creates {@code numFiles} file statuses under {@code workUnitsDir}, registers a mocked
   * {@link FileSystem} for {@code baseUri} in {@code configuration} that lists those statuses
   * and qualifies paths as-is, and returns the statuses.
   *
   * <p>Shared by the getSplits tests, which previously duplicated this setup verbatim.
   */
  private FileStatus[] registerMockFileSystem(URI baseUri, Configuration configuration, Path workUnitsDir,
      int numFiles) throws Exception {
    FileSystem fs = Mockito.mock(FileSystem.class);
    FileStatus[] statuses = createFileStatuses(numFiles, workUnitsDir);
    Mockito.when(fs.listStatus(workUnitsDir)).thenReturn(statuses);
    Mockito.when(fs.makeQualified(Mockito.any(Path.class))).thenAnswer(new Answer<Path>() {
      @Override
      public Path answer(InvocationOnMock invocation)
          throws Throwable {
        // The mock performs no qualification: echo the input path unchanged.
        return (Path) invocation.getArguments()[0];
      }
    });
    FileSystemTestUtils.addFileSystemForTest(baseUri, configuration, fs);
    return statuses;
  }

  /** Asserts that the splits' paths exactly cover the statuses' paths. */
  private void verifyPaths(List<InputSplit> splits, FileStatus[] statuses) {
    Set<String> splitPaths = Sets.newHashSet();
    for (InputSplit split : splits) {
      splitPaths.addAll(((GobblinWorkUnitsInputFormat.GobblinSplit) split).getPaths());
    }
    Set<String> statusPaths = Sets.newHashSet();
    for (FileStatus status : statuses) {
      statusPaths.add(status.getPath().toString());
    }
    Assert.assertEquals(splitPaths, statusPaths);
  }

  /** Creates {@code howMany} zero-length file statuses named file0..file(n-1) under {@code basePath}. */
  private FileStatus[] createFileStatuses(int howMany, Path basePath) {
    FileStatus[] statuses = new FileStatus[howMany];
    for (int i = 0; i < howMany; i++) {
      statuses[i] = new FileStatus(0, false, 0, 0, 0, new Path(basePath, "file" + i));
    }
    return statuses;
  }
}
| 1,315 |
0 | Create_ds/gobblin/gobblin-runtime/src/test/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-runtime/src/test/java/org/apache/gobblin/runtime/mapreduce/MRTaskFactoryTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.mapreduce;
import com.google.common.base.Charsets;
import com.google.common.base.Splitter;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.io.Files;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.runtime.api.JobExecutionResult;
import org.apache.gobblin.runtime.embedded.EmbeddedGobblin;
import org.apache.gobblin.runtime.task.TaskUtils;
import org.apache.gobblin.source.Source;
import org.apache.gobblin.source.extractor.Extractor;
import org.apache.gobblin.source.workunit.WorkUnit;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.StringTokenizer;
import lombok.extern.slf4j.Slf4j;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.log4j.AppenderSkeleton;
import org.apache.log4j.Level;
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;
import org.apache.log4j.spi.LoggingEvent;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
 * Tests that {@link MRTaskFactory} tasks can run full MapReduce jobs (a word count per
 * input directory) inside an embedded Gobblin job.
 */
@Slf4j
public class MRTaskFactoryTest {

  @Test
  public void test() throws Exception {
    // Two input "jobs", each a directory of small text files to word-count independently.
    File inputSuperPath = Files.createTempDir();
    inputSuperPath.deleteOnExit();
    File outputSuperPath = Files.createTempDir();
    outputSuperPath.deleteOnExit();

    File job1Dir = new File(inputSuperPath, "job1");
    Assert.assertTrue(job1Dir.mkdir());
    writeFileWithContent(job1Dir, "file1", "word1 word1 word2");
    writeFileWithContent(job1Dir, "file2", "word2 word2 word2");

    File job2Dir = new File(inputSuperPath, "job2");
    Assert.assertTrue(job2Dir.mkdir());
    writeFileWithContent(job2Dir, "file1", "word1 word2 word2");

    // Capture MRTask's log output so we can assert the MR tracking URL is logged.
    TestAppender testAppender = new TestAppender();
    Logger logger = LogManager.getLogger(MRTask.class.getName());
    logger.setLevel(Level.INFO);
    logger.addAppender(testAppender);

    EmbeddedGobblin embeddedGobblin = new EmbeddedGobblin("WordCounter")
        .setConfiguration(ConfigurationKeys.SOURCE_CLASS_KEY, MRWordCountSource.class.getName())
        .setConfiguration(MRWordCountSource.INPUT_DIRECTORIES_KEY,
            job1Dir.getAbsolutePath() + "," + job2Dir.getAbsolutePath())
        .setConfiguration(MRWordCountSource.OUTPUT_LOCATION, outputSuperPath.getAbsolutePath());

    JobExecutionResult result;
    try {
      result = embeddedGobblin.run();
    } finally {
      // Detach the appender even if the run throws, so it cannot leak into other tests.
      logger.removeAppender(testAppender);
    }
    Assert.assertTrue(result.isSuccessful());
    Assert.assertTrue(testAppender.events.stream().anyMatch(e -> e.getRenderedMessage()
        .startsWith("MR tracking URL http://localhost:8080/ for job WordCount_job1")));

    // Verify the per-directory word counts produced by the single reducer.
    File output1 = new File(new File(outputSuperPath, "job1"), "part-r-00000");
    Assert.assertTrue(output1.exists());
    Map<String, Integer> counts = parseCounts(output1);
    Assert.assertEquals((int) counts.get("word1"), 2);
    Assert.assertEquals((int) counts.get("word2"), 4);

    File output2 = new File(new File(outputSuperPath, "job2"), "part-r-00000");
    Assert.assertTrue(output2.exists());
    counts = parseCounts(output2);
    Assert.assertEquals((int) counts.get("word1"), 1);
    Assert.assertEquals((int) counts.get("word2"), 2);
  }

  /** Parses a reducer output file of tab-separated {@code word<TAB>count} lines into a map. */
  private Map<String, Integer> parseCounts(File file) throws IOException {
    Map<String, Integer> counts = Maps.newHashMap();
    for (String line : Files.readLines(file, Charsets.UTF_8)) {
      List<String> split = Splitter.on("\t").splitToList(line);
      counts.put(split.get(0), Integer.parseInt(split.get(1)));
    }
    return counts;
  }

  /** Creates {@code fileName} under {@code dir} with the given content. */
  private void writeFileWithContent(File dir, String fileName, String content) throws IOException {
    File file = new File(dir, fileName);
    Assert.assertTrue(file.createNewFile());
    Files.write(content, file, Charsets.UTF_8);
  }

  /**
   * A {@link Source} that emits one MR-task work unit per configured input directory; each
   * work unit runs a classic word-count MR job writing to {@code output.location/<dirName>}.
   */
  public static class MRWordCountSource implements Source<String, String> {
    public static final String INPUT_DIRECTORIES_KEY = "input.directories";
    public static final String OUTPUT_LOCATION = "output.location";

    @Override
    public List<WorkUnit> getWorkunits(SourceState state) {
      List<String> dirs = Splitter.on(",").splitToList(state.getProp(INPUT_DIRECTORIES_KEY));
      String outputBase = state.getProp(OUTPUT_LOCATION);

      List<WorkUnit> workUnits = Lists.newArrayList();
      for (String dir : dirs) {
        try {
          Path input = new Path(dir);
          Path output = new Path(outputBase, input.getName());

          WorkUnit workUnit = new WorkUnit();
          // Route this work unit to MRTaskFactory so it is executed as an MR job.
          TaskUtils.setTaskFactoryClass(workUnit, MRTaskFactory.class);

          Configuration conf = new Configuration();
          Job job = Job.getInstance(conf, "WordCount_" + input.getName());
          job.setJarByClass(MRTaskFactoryTest.class);
          job.setMapperClass(TokenizerMapper.class);
          job.setCombinerClass(IntSumReducer.class);
          job.setReducerClass(IntSumReducer.class);
          job.setOutputKeyClass(Text.class);
          job.setOutputValueClass(IntWritable.class);
          // Single reducer so the test can read a deterministic part-r-00000 file.
          job.setNumReduceTasks(1);
          FileInputFormat.addInputPath(job, input);
          FileOutputFormat.setOutputPath(job, output);

          MRTask.serializeJobToState(workUnit, job);
          workUnits.add(workUnit);
        } catch (IOException ioe) {
          log.error("Failed to create MR job for " + dir, ioe);
        }
      }
      return workUnits;
    }

    @Override
    public Extractor<String, String> getExtractor(WorkUnitState state) throws IOException {
      throw new UnsupportedOperationException();
    }

    @Override
    public void shutdown(SourceState state) {
      throw new UnsupportedOperationException();
    }
  }

  // This is taken directly from
  // https://hadoop.apache.org/docs/stable/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html
  public static class TokenizerMapper
      extends Mapper<Object, Text, Text, IntWritable> {

    private final static IntWritable one = new IntWritable(1);
    private Text word = new Text();

    public void map(Object key, Text value, Mapper.Context context
    ) throws IOException, InterruptedException {
      StringTokenizer itr = new StringTokenizer(value.toString());
      while (itr.hasMoreTokens()) {
        word.set(itr.nextToken());
        context.write(word, one);
      }
    }
  }

  // This is taken directly from
  // https://hadoop.apache.org/docs/stable/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html
  public static class IntSumReducer
      extends Reducer<Text, IntWritable, Text, IntWritable> {
    private IntWritable result = new IntWritable();

    public void reduce(Text key, Iterable<IntWritable> values,
        Context context
    ) throws IOException, InterruptedException {
      int sum = 0;
      for (IntWritable val : values) {
        sum += val.get();
      }
      result.set(sum);
      context.write(key, result);
    }
  }

  /**
   * Collects logging events in memory. Declared static so instances do not hold a hidden
   * reference to the enclosing test object.
   */
  private static class TestAppender extends AppenderSkeleton {
    final List<LoggingEvent> events = new ArrayList<>();

    public void close() {
    }

    public boolean requiresLayout() {
      return false;
    }

    @Override
    protected void append(LoggingEvent event) {
      events.add(event);
    }
  }
}
| 1,316 |
0 | Create_ds/gobblin/gobblin-runtime/src/test/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-runtime/src/test/java/org/apache/gobblin/runtime/mapreduce/GobblinOutputCommitterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.mapreduce;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobStatus;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.google.common.collect.Lists;
import com.google.common.io.Closer;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.source.workunit.MultiWorkUnit;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.util.ForkOperatorUtils;
/**
 * Unit tests for {@link GobblinOutputFormat}
 */
@Test(groups = { "gobblin.runtime.mapreduce" })
public class GobblinOutputCommitterTest {

  private Configuration conf;
  private FileSystem fs;
  // Every staging/output directory created in setup; testAbortJob asserts they all get deleted.
  private List<Path> stagingDirs;

  private static final Path OUTPUT_PATH = new Path("gobblin-test/output-format-test");
  private static final String STAGING_DIR_NAME = "staging";
  private static final String OUTPUT_DIR_NAME = "output";
  private static final String JOB_NAME = "GobblinOutputFormatTest";

  @BeforeClass
  public void setupWorkUnitFiles() throws IOException {
    this.conf = new Configuration();
    this.fs = FileSystem.getLocal(this.conf);
    this.stagingDirs = Lists.newArrayList();

    // Create a list of WorkUnits to serialize
    WorkUnit wu1 = createAndSetWorkUnit("wu1");
    WorkUnit wu2 = createAndSetWorkUnit("wu2");
    WorkUnit wu3 = createAndSetWorkUnit("wu3");
    WorkUnit wu4 = createAndSetWorkUnit("wu4");

    // Create a MultiWorkUnit to serialize
    MultiWorkUnit mwu1 = MultiWorkUnit.createEmpty();
    mwu1.setProp(ConfigurationKeys.TASK_ID_KEY, System.nanoTime());
    mwu1.addWorkUnits(Arrays.asList(wu3, wu4));

    Path inputDir = new Path(new Path(OUTPUT_PATH, JOB_NAME), "input");

    // Write each WorkUnit to a separate file under inputDir; Closer guarantees all streams
    // are closed even if one write fails.
    Closer closer = Closer.create();
    try {
      wu1.write(closer.register(this.fs
          .create(new Path(inputDir, wu1.getProp(ConfigurationKeys.TASK_ID_KEY) + Path.SEPARATOR + "_").suffix("wu"))));
      wu2.write(closer.register(this.fs
          .create(new Path(inputDir, wu2.getProp(ConfigurationKeys.TASK_ID_KEY) + Path.SEPARATOR + "_").suffix("wu"))));
      mwu1.write(closer.register(this.fs.create(
          new Path(inputDir, mwu1.getProp(ConfigurationKeys.TASK_ID_KEY) + Path.SEPARATOR + "_").suffix("mwu"))));
    } finally {
      closer.close();
    }
  }

  @Test()
  public void testAbortJob() throws IOException {
    // Make sure all the staging dirs have been created
    for (Path stagingDir : this.stagingDirs) {
      Assert.assertTrue(this.fs.exists(stagingDir));
    }

    // Cleanup the staging dirs. Use a distinct local Configuration so the shared
    // this.conf field is not shadowed or mutated.
    Configuration jobConf = new Configuration();
    jobConf.set(ConfigurationKeys.FS_URI_KEY, ConfigurationKeys.LOCAL_FS_URI);
    jobConf.set(ConfigurationKeys.MR_JOB_ROOT_DIR_KEY, OUTPUT_PATH.toString());
    jobConf.set(ConfigurationKeys.JOB_NAME_KEY, JOB_NAME);
    new GobblinOutputCommitter().abortJob(Job.getInstance(jobConf), JobStatus.State.RUNNING);

    // Make sure all the staging dirs have been deleted
    for (Path stagingDir : this.stagingDirs) {
      Assert.assertFalse(this.fs.exists(stagingDir));
    }
  }

  /**
   * Helper method to create a {@link WorkUnit}, set its staging directories, and create the staging directories on the
   * local fs
   * @param workUnitName is the name of the {@link WorkUnit} to create
   * @return the {@link WorkUnit} that was created
   * @throws IOException if a staging or output directory cannot be created
   */
  private WorkUnit createAndSetWorkUnit(String workUnitName) throws IOException {
    WorkUnit wu = WorkUnit.createEmpty();
    wu.setProp(ForkOperatorUtils.getPropertyNameForBranch(ConfigurationKeys.TASK_ID_KEY, 1, 0), System.nanoTime());

    Path wuStagingDir =
        new Path(OUTPUT_PATH, JOB_NAME + Path.SEPARATOR + workUnitName + Path.SEPARATOR + STAGING_DIR_NAME);
    wu.setProp(ForkOperatorUtils.getPropertyNameForBranch(ConfigurationKeys.WRITER_STAGING_DIR, 1, 0),
        wuStagingDir.toString());
    this.fs.mkdirs(wuStagingDir);
    this.stagingDirs.add(wuStagingDir);

    Path wuOutputDir =
        new Path(OUTPUT_PATH, JOB_NAME + Path.SEPARATOR + workUnitName + Path.SEPARATOR + OUTPUT_DIR_NAME);
    wu.setProp(ForkOperatorUtils.getPropertyNameForBranch(ConfigurationKeys.WRITER_OUTPUT_DIR, 1, 0),
        wuOutputDir.toString());
    this.fs.mkdirs(wuOutputDir);
    this.stagingDirs.add(wuOutputDir);

    return wu;
  }

  @AfterClass
  public void deleteWorkUnitFiles() throws IOException {
    this.fs.delete(OUTPUT_PATH, true);
  }
}
| 1,317 |
0 | Create_ds/gobblin/gobblin-runtime/src/test/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-runtime/src/test/java/org/apache/gobblin/runtime/spec_catalog/FlowCatalogTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.spec_catalog;
import com.google.common.base.Optional;
import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import java.io.File;
import java.net.URI;
import java.util.Collection;
import java.util.Map;
import java.util.Properties;
import org.apache.commons.io.FileUtils;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.exception.QuotaExceededException;
import org.apache.gobblin.runtime.api.FlowSpec;
import org.apache.gobblin.runtime.api.Spec;
import org.apache.gobblin.runtime.api.SpecCatalogListener;
import org.apache.gobblin.runtime.api.SpecExecutor;
import org.apache.gobblin.runtime.api.SpecNotFoundException;
import org.apache.gobblin.runtime.app.ServiceBasedAppLauncher;
import org.apache.gobblin.runtime.spec_executorInstance.InMemorySpecExecutor;
import org.apache.gobblin.service.ServiceConfigKeys;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.PathUtils;
import org.apache.hadoop.fs.Path;
import static org.mockito.Mockito.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
public class FlowCatalogTest {
private static final Logger logger = LoggerFactory.getLogger(FlowCatalog.class);
private static Gson gson = new GsonBuilder().setPrettyPrinting().create();
private static final String SPEC_STORE_PARENT_DIR = "/tmp";
private static final String SPEC_STORE_DIR = "/tmp/flowTestSpecStore";
private static final String SPEC_GROUP_DIR = "/tmp/flowTestSpecStore/flowTestGroupDir";
private static final String SPEC_DESCRIPTION = "Test Flow Spec";
private static final String SPEC_VERSION = FlowSpec.Builder.DEFAULT_VERSION;
private static final String UNCOMPILABLE_FLOW = "uncompilableFlow";
private ServiceBasedAppLauncher serviceLauncher;
private FlowCatalog flowCatalog;
private FlowSpec flowSpec;
private SpecCatalogListener mockListener;
@BeforeClass
public void setup() throws Exception {
File specStoreDir = new File(SPEC_STORE_DIR);
if (specStoreDir.exists()) {
FileUtils.deleteDirectory(specStoreDir);
}
Properties properties = new Properties();
properties.put("specStore.fs.dir", SPEC_STORE_DIR);
this.serviceLauncher = new ServiceBasedAppLauncher(properties, "FlowCatalogTest");
this.flowCatalog = new FlowCatalog(ConfigUtils.propertiesToConfig(properties),
Optional.of(logger));
this.mockListener = mock(SpecCatalogListener.class);
when(mockListener.getName()).thenReturn(ServiceConfigKeys.GOBBLIN_SERVICE_JOB_SCHEDULER_LISTENER_CLASS);
when(mockListener.onAddSpec(any())).thenReturn(new AddSpecResponse(""));
this.flowCatalog.addListener(mockListener);
this.serviceLauncher.addService(flowCatalog);
// Start Catalog
this.serviceLauncher.start();
// Create Spec to play with
this.flowSpec = initFlowSpec(SPEC_STORE_DIR);
}
/**
* Create FlowSpec with default URI
*/
public static FlowSpec initFlowSpec(String specStore) {
return initFlowSpec(specStore, computeFlowSpecURI(), "flowName");
}
public static FlowSpec initFlowSpec(String specStore, URI uri){
return initFlowSpec(specStore, uri, "flowName");
}
/**
* Create FLowSpec with specified URI and SpecStore location.
*/
public static FlowSpec initFlowSpec(String specStore, URI uri, String flowName){
return initFlowSpec(specStore, uri, flowName, "", ConfigFactory.empty(), false);
}
public static FlowSpec initFlowSpec(String specStore, URI uri, String flowName, String flowGroup, Config additionalConfigs, boolean isAdhoc) {
Properties properties = new Properties();
properties.put(ConfigurationKeys.FLOW_NAME_KEY, flowName);
properties.put(ConfigurationKeys.FLOW_GROUP_KEY, flowGroup);
properties.put("job.name", flowName);
properties.put("job.group", flowGroup);
properties.put("specStore.fs.dir", specStore);
properties.put("specExecInstance.capabilities", "source:destination");
if (!isAdhoc) {
properties.put("job.schedule", "0 2 3 ? * 2-6");
}
Config defaults = ConfigUtils.propertiesToConfig(properties);
Config config = additionalConfigs.withFallback(defaults);
SpecExecutor specExecutorInstanceProducer = new InMemorySpecExecutor(config);
FlowSpec.Builder flowSpecBuilder = null;
flowSpecBuilder = FlowSpec.builder(uri)
.withConfig(config)
.withDescription(SPEC_DESCRIPTION)
.withVersion(SPEC_VERSION)
.withTemplate(URI.create("templateURI"));
return flowSpecBuilder.build();
}
@AfterClass
public void cleanUp() throws Exception {
// Shutdown Catalog
this.serviceLauncher.stop();
File specStoreDir = new File(SPEC_STORE_DIR);
if (specStoreDir.exists()) {
FileUtils.deleteDirectory(specStoreDir);
}
}
@Test
public void createFlowSpec() throws Throwable {
// List Current Specs
Collection<Spec> specs = flowCatalog.getSpecs();
logger.info("[Before Create] Number of specs: " + specs.size());
int i=0;
for (Spec spec : specs) {
FlowSpec flowSpec = (FlowSpec) spec;
logger.info("[Before Create] Spec " + i++ + ": " + gson.toJson(flowSpec));
}
Assert.assertEquals(specs.size(), 0, "Spec store should be empty before addition");
// Create and add Spec
this.flowCatalog.put(flowSpec);
// List Specs after adding
specs = flowCatalog.getSpecs();
logger.info("[After Create] Number of specs: " + specs.size());
i = 0;
for (Spec spec : specs) {
flowSpec = (FlowSpec) spec;
logger.info("[After Create] Spec " + i++ + ": " + gson.toJson(flowSpec));
}
Assert.assertEquals(specs.size(), 1, "Spec store should contain 1 Spec after addition");
Assert.assertEquals(flowCatalog.getSize(), 1, "Spec store should contain 1 Spec after addition");
}
@Test (dependsOnMethods = "createFlowSpec")
void testExist() throws Exception {
Assert.assertTrue(flowCatalog.exists(flowSpec.getUri()));
}
@Test (dependsOnMethods = "testExist")
public void deleteFlowSpec() throws SpecNotFoundException {
// List Current Specs
Collection<Spec> specs = flowCatalog.getSpecs();
logger.info("[Before Delete] Number of specs: " + specs.size());
int i=0;
for (Spec spec : specs) {
FlowSpec flowSpec = (FlowSpec) spec;
logger.info("[Before Delete] Spec " + i++ + ": " + gson.toJson(flowSpec));
}
Assert.assertEquals(specs.size(), 1, "Spec store should initially have 1 Spec before deletion");
Assert.assertEquals(flowCatalog.getSize(), 1, "Spec store should initially have 1 Spec before deletion");
this.flowCatalog.remove(flowSpec.getUri());
// List Specs after adding
specs = flowCatalog.getSpecs();
logger.info("[After Delete] Number of specs: " + specs.size());
i = 0;
for (Spec spec : specs) {
flowSpec = (FlowSpec) spec;
logger.info("[After Delete] Spec " + i++ + ": " + gson.toJson(flowSpec));
}
Assert.assertEquals(specs.size(), 0, "Spec store should be empty after deletion");
Assert.assertEquals(flowCatalog.getSize(), 0, "Spec store should be empty after deletion");
}
@Test (dependsOnMethods = "deleteFlowSpec")
public void testRejectBadFlow() throws Throwable {
Collection<Spec> specs = flowCatalog.getSpecs();
logger.info("[Before Create] Number of specs: " + specs.size());
int i=0;
for (Spec spec : specs) {
FlowSpec flowSpec = (FlowSpec) spec;
logger.info("[Before Create] Spec " + i++ + ": " + gson.toJson(flowSpec));
}
Assert.assertEquals(specs.size(), 0, "Spec store should be empty before addition");
// Create and add Spec
FlowSpec badSpec = initFlowSpec(SPEC_STORE_DIR, computeFlowSpecURI(), "badFlow");
// Assume that spec is rejected
when(this.mockListener.onAddSpec(any())).thenReturn(new AddSpecResponse(null));
Map<String, AddSpecResponse> response = this.flowCatalog.put(badSpec);
// Spec should be rejected from being stored
specs = flowCatalog.getSpecs();
Assert.assertEquals(specs.size(), 0);
Assert.assertEquals(flowCatalog.getSize(), 0);
}
@Test (dependsOnMethods = "testRejectBadFlow")
public void testRejectMissingListener() throws Throwable {
flowCatalog.removeListener(this.mockListener);
Collection<Spec> specs = flowCatalog.getSpecs();
logger.info("[Before Create] Number of specs: " + specs.size());
int i=0;
for (Spec spec : specs) {
FlowSpec flowSpec = (FlowSpec) spec;
logger.info("[Before Create] Spec " + i++ + ": " + gson.toJson(flowSpec));
}
Assert.assertEquals(specs.size(), 0, "Spec store should be empty before addition");
// Create and add Spec
Map<String, AddSpecResponse> response = this.flowCatalog.put(flowSpec);
// Spec should be rejected from being stored
specs = flowCatalog.getSpecs();
Assert.assertEquals(specs.size(), 0);
Assert.assertEquals(flowCatalog.getSize(), 0);
}
@Test (dependsOnMethods = "testRejectMissingListener")
public void testRejectQuotaExceededFlow() {
Collection<Spec> specs = flowCatalog.getSpecs();
logger.info("[Before Create] Number of specs: " + specs.size());
int i=0;
for (Spec spec : specs) {
FlowSpec flowSpec = (FlowSpec) spec;
logger.info("[Before Create] Spec " + i++ + ": " + gson.toJson(flowSpec));
}
Assert.assertEquals(specs.size(), 0, "Spec store should be empty before addition");
// Create and add Spec
FlowSpec badSpec = initFlowSpec(SPEC_STORE_DIR, computeFlowSpecURI(), "badFlow");
// Assume that spec is rejected
when(this.mockListener.onAddSpec(any())).thenThrow(new RuntimeException(new QuotaExceededException("error")));
try {
Map<String, AddSpecResponse> response = this.flowCatalog.put(badSpec);
} catch (Throwable e) {
Assert.assertTrue(e instanceof QuotaExceededException);
}
// Spec should be rejected from being stored
specs = flowCatalog.getSpecs();
Assert.assertEquals(specs.size(), 0);
}
public static URI computeFlowSpecURI() {
// Make sure this is relative
URI uri = PathUtils.relativizePath(new Path(SPEC_GROUP_DIR), new Path(SPEC_STORE_PARENT_DIR)).toUri();
return uri;
}
} | 1,318 |
0 | Create_ds/gobblin/gobblin-runtime/src/test/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-runtime/src/test/java/org/apache/gobblin/runtime/spec_catalog/TopologyCatalogTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.spec_catalog;
import java.io.File;
import java.net.URI;
import java.util.Collection;
import java.util.Properties;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.google.common.base.Optional;
import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import com.typesafe.config.Config;
import org.apache.gobblin.runtime.api.FlowSpec;
import org.apache.gobblin.runtime.api.Spec;
import org.apache.gobblin.runtime.api.SpecExecutor;
import org.apache.gobblin.runtime.api.TopologySpec;
import org.apache.gobblin.runtime.app.ServiceBasedAppLauncher;
import org.apache.gobblin.runtime.spec_executorInstance.InMemorySpecExecutor;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.PathUtils;
public class TopologyCatalogTest {
private static final Logger logger = LoggerFactory.getLogger(TopologyCatalog.class);
private static Gson gson = new GsonBuilder().setPrettyPrinting().create();
private static final String SPEC_STORE_PARENT_DIR = "/tmp";
private static final String SPEC_STORE_DIR = "/tmp/topologyTestSpecStore";
private static final String SPEC_DESCRIPTION = "Test Topology Spec";
private static final String SPEC_VERSION = FlowSpec.Builder.DEFAULT_VERSION;
private ServiceBasedAppLauncher serviceLauncher;
private TopologyCatalog topologyCatalog;
private TopologySpec topologySpec;
@BeforeClass
public void setup() throws Exception {
File specStoreDir = new File(SPEC_STORE_DIR);
if (specStoreDir.exists()) {
FileUtils.deleteDirectory(specStoreDir);
}
Properties properties = new Properties();
properties.put("specStore.fs.dir", SPEC_STORE_DIR);
this.serviceLauncher = new ServiceBasedAppLauncher(properties, "TopologyCatalogTest");
this.topologyCatalog = new TopologyCatalog(ConfigUtils.propertiesToConfig(properties),
Optional.of(logger));
this.serviceLauncher.addService(topologyCatalog);
// Start Catalog
this.serviceLauncher.start();
// Create Spec to play with
this.topologySpec = initTopologySpec();
}
private TopologySpec initTopologySpec() {
Properties properties = new Properties();
properties.put("specStore.fs.dir", SPEC_STORE_DIR);
properties.put("specExecInstance.capabilities", "source:destination");
Config config = ConfigUtils.propertiesToConfig(properties);
SpecExecutor specExecutorInstanceProducer = new InMemorySpecExecutor(config);
TopologySpec.Builder topologySpecBuilder = TopologySpec.builder(computeTopologySpecURI())
.withConfig(config)
.withDescription(SPEC_DESCRIPTION)
.withVersion(SPEC_VERSION)
.withSpecExecutor(specExecutorInstanceProducer);
return topologySpecBuilder.build();
}
@AfterClass
public void cleanUp() throws Exception {
// Shutdown Catalog
this.serviceLauncher.stop();
File specStoreDir = new File(SPEC_STORE_DIR);
if (specStoreDir.exists()) {
FileUtils.deleteDirectory(specStoreDir);
}
}
@Test
public void createTopologySpec() {
// List Current Specs
Collection<Spec> specs = topologyCatalog.getSpecs();
logger.info("[Before Create] Number of specs: " + specs.size());
int i=0;
for (Spec spec : specs) {
TopologySpec topologySpec = (TopologySpec) spec;
logger.info("[Before Create] Spec " + i++ + ": " + gson.toJson(topologySpec));
}
Assert.assertTrue(specs.size() == 0, "Spec store should be empty before addition");
// Create and add Spec
this.topologyCatalog.put(topologySpec);
// List Specs after adding
specs = topologyCatalog.getSpecs();
logger.info("[After Create] Number of specs: " + specs.size());
i = 0;
for (Spec spec : specs) {
topologySpec = (TopologySpec) spec;
logger.info("[After Create] Spec " + i++ + ": " + gson.toJson(topologySpec));
}
Assert.assertTrue(specs.size() == 1, "Spec store should contain 1 Spec after addition");
}
@Test (dependsOnMethods = "createTopologySpec")
public void deleteTopologySpec() {
// List Current Specs
Collection<Spec> specs = topologyCatalog.getSpecs();
logger.info("[Before Delete] Number of specs: " + specs.size());
int i=0;
for (Spec spec : specs) {
TopologySpec topologySpec = (TopologySpec) spec;
logger.info("[Before Delete] Spec " + i++ + ": " + gson.toJson(topologySpec));
}
Assert.assertTrue(specs.size() == 1, "Spec store should initially have 1 Spec before deletion");
this.topologyCatalog.remove(topologySpec.getUri());
// List Specs after adding
specs = topologyCatalog.getSpecs();
logger.info("[After Create] Number of specs: " + specs.size());
i = 0;
for (Spec spec : specs) {
topologySpec = (TopologySpec) spec;
logger.info("[After Create] Spec " + i++ + ": " + gson.toJson(topologySpec));
}
Assert.assertTrue(specs.size() == 0, "Spec store should be empty after deletion");
}
public URI computeTopologySpecURI() {
// Make sure this is relative
URI uri = PathUtils.relativizePath(new Path(SPEC_STORE_DIR), new Path(SPEC_STORE_PARENT_DIR)).toUri();
return uri;
}
} | 1,319 |
0 | Create_ds/gobblin/gobblin-runtime/src/test/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-runtime/src/test/java/org/apache/gobblin/runtime/embedded/EmbeddedGobblinTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.embedded;
import java.util.ArrayList;
import java.util.List;
import org.testng.Assert;
import org.testng.annotations.Test;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.runtime.api.JobExecutionResult;
import org.apache.gobblin.runtime.template.ResourceBasedJobTemplate;
import org.apache.gobblin.util.test.HelloWorldSource;
import org.apache.gobblin.writer.test.GobblinTestEventBusWriter;
import org.apache.gobblin.writer.test.TestingEventBusAsserter;
public class EmbeddedGobblinTest {

  /**
   * Runs a job built from the hello-world template and verifies every greeting arrives
   * on the test event bus.
   */
  @Test
  public void testRunWithTemplate() throws Exception {
    String eventBusId = this.getClass().getName();
    int numHellos = 5;

    // try-with-resources ensures the asserter is closed even when an assertion fails mid-test
    // (the original leaked it on failure).
    try (TestingEventBusAsserter asserter = new TestingEventBusAsserter(eventBusId)) {
      EmbeddedGobblin embeddedGobblin =
          new EmbeddedGobblin("TestJob").setTemplate(ResourceBasedJobTemplate.forResourcePath("templates/hello-world.template"));
      embeddedGobblin.setConfiguration(ConfigurationKeys.WRITER_BUILDER_CLASS, GobblinTestEventBusWriter.Builder.class.getName());
      embeddedGobblin.setConfiguration(GobblinTestEventBusWriter.FULL_EVENTBUSID_KEY, eventBusId);
      embeddedGobblin.setConfiguration(HelloWorldSource.NUM_HELLOS_FULL_KEY, Integer.toString(numHellos));
      JobExecutionResult result = embeddedGobblin.run();
      Assert.assertTrue(result.isSuccessful());

      ArrayList<String> expectedEvents = new ArrayList<>();
      for (int i = 1; i <= numHellos; ++i) {
        expectedEvents.add(HelloWorldSource.ExtractorImpl.helloMessage(i));
      }
      asserter.assertNextValuesEq(expectedEvents);
    }
  }

  /**
   * Runs a job defined by an on-disk job file and verifies the expected greetings arrive
   * on the test event bus.
   */
  @Test
  public void testRunWithJobFile() throws Exception {
    String eventBusId = this.getClass().getName() + ".jobFileTest";

    try (TestingEventBusAsserter asserter = new TestingEventBusAsserter(eventBusId)) {
      EmbeddedGobblin embeddedGobblin =
          new EmbeddedGobblin("TestJob").jobFile(getClass().getResource("/testJobs/helloWorld.conf").getPath());
      embeddedGobblin.setConfiguration(ConfigurationKeys.WRITER_BUILDER_CLASS, GobblinTestEventBusWriter.Builder.class.getName());
      embeddedGobblin.setConfiguration(GobblinTestEventBusWriter.FULL_EVENTBUSID_KEY, eventBusId);
      JobExecutionResult result = embeddedGobblin.run();
      Assert.assertTrue(result.isSuccessful());

      // helloWorld.conf is expected to emit greetings 1..10 — TODO confirm against the job file.
      ArrayList<String> expectedEvents = new ArrayList<>();
      for (int i = 1; i <= 10; ++i) {
        expectedEvents.add(HelloWorldSource.ExtractorImpl.helloMessage(i));
      }
      asserter.assertNextValuesEq(expectedEvents);
    }
  }

  /**
   * Verifies that distributed jars come back ordered by ascending priority value.
   */
  @Test
  public void testDistributedJars() throws Exception {
    EmbeddedGobblin embeddedGobblin = new EmbeddedGobblin("Test");
    embeddedGobblin.distributeJarWithPriority("myJar", 0);
    embeddedGobblin.distributeJarWithPriority("myJar2", -100);
    embeddedGobblin.distributeJarWithPriority("myJar3", 10);

    List<String> jars = embeddedGobblin.getPrioritizedDistributedJars();
    // Lowest priority value first, highest last.
    Assert.assertEquals(jars.get(0), "myJar2");
    Assert.assertEquals(jars.get(jars.size() - 1), "myJar3");
  }
}
| 1,320 |
0 | Create_ds/gobblin/gobblin-runtime/src/test/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-runtime/src/test/java/org/apache/gobblin/runtime/dag_action_store/MysqlDagActionStoreTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.dag_action_store;
import java.io.IOException;
import java.sql.SQLException;
import java.util.Collection;
import java.util.HashSet;
import org.apache.gobblin.runtime.api.DagActionStore;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.typesafe.config.Config;
import org.apache.gobblin.config.ConfigBuilder;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.metastore.testing.ITestMetastoreDatabase;
import org.apache.gobblin.metastore.testing.TestMetastoreDatabaseFactory;
public class MysqlDagActionStoreTest {
private static final String USER = "testUser";
private static final String PASSWORD = "testPassword";
private static final String TABLE = "dag_action_store";
private static final String flowGroup = "testFlowGroup";
private static final String flowName = "testFlowName";
private static final String flowExecutionId = "12345677";
private static final String flowExecutionId_2 = "12345678";
private static final String flowExecutionId_3 = "12345679";
private MysqlDagActionStore mysqlDagActionStore;
@BeforeClass
public void setUp() throws Exception {
ITestMetastoreDatabase testDb = TestMetastoreDatabaseFactory.get();
Config config = ConfigBuilder.create()
.addPrimitive("MysqlDagActionStore." + ConfigurationKeys.STATE_STORE_DB_URL_KEY, testDb.getJdbcUrl())
.addPrimitive("MysqlDagActionStore." + ConfigurationKeys.STATE_STORE_DB_USER_KEY, USER)
.addPrimitive("MysqlDagActionStore." + ConfigurationKeys.STATE_STORE_DB_PASSWORD_KEY, PASSWORD)
.addPrimitive("MysqlDagActionStore." + ConfigurationKeys.STATE_STORE_DB_TABLE_KEY, TABLE)
.build();
this.mysqlDagActionStore = new MysqlDagActionStore(config);
}
@Test
public void testAddAction() throws Exception {
this.mysqlDagActionStore.addDagAction(flowGroup, flowName, flowExecutionId, DagActionStore.FlowActionType.KILL);
//Should not be able to add KILL again when previous one exist
Assert.expectThrows(IOException.class,
() -> this.mysqlDagActionStore.addDagAction(flowGroup, flowName, flowExecutionId, DagActionStore.FlowActionType.KILL));
//Should be able to add a RESUME action for same execution as well as KILL for another execution of the flow
this.mysqlDagActionStore.addDagAction(flowGroup, flowName, flowExecutionId, DagActionStore.FlowActionType.RESUME);
this.mysqlDagActionStore.addDagAction(flowGroup, flowName, flowExecutionId_2, DagActionStore.FlowActionType.KILL);
}
@Test(dependsOnMethods = "testAddAction")
public void testExists() throws Exception {
Assert.assertTrue(this.mysqlDagActionStore.exists(flowGroup, flowName, flowExecutionId, DagActionStore.FlowActionType.KILL));
Assert.assertTrue(this.mysqlDagActionStore.exists(flowGroup, flowName, flowExecutionId, DagActionStore.FlowActionType.RESUME));
Assert.assertTrue(this.mysqlDagActionStore.exists(flowGroup, flowName, flowExecutionId_2, DagActionStore.FlowActionType.KILL));
Assert.assertFalse(this.mysqlDagActionStore.exists(flowGroup, flowName, flowExecutionId_3, DagActionStore.FlowActionType.RESUME));
Assert.assertFalse(this.mysqlDagActionStore.exists(flowGroup, flowName, flowExecutionId_3, DagActionStore.FlowActionType.KILL));
}
@Test(dependsOnMethods = "testExists")
public void testGetActions() throws IOException {
Collection<DagActionStore.DagAction> dagActions = this.mysqlDagActionStore.getDagActions();
Assert.assertEquals(3, dagActions.size());
HashSet<DagActionStore.DagAction> set = new HashSet<>();
set.add(new DagActionStore.DagAction(flowGroup, flowName, flowExecutionId, DagActionStore.FlowActionType.KILL));
set.add(new DagActionStore.DagAction(flowGroup, flowName, flowExecutionId, DagActionStore.FlowActionType.RESUME));
set.add(new DagActionStore.DagAction(flowGroup, flowName, flowExecutionId_2, DagActionStore.FlowActionType.KILL));
Assert.assertEquals(dagActions, set);
}
@Test(dependsOnMethods = "testGetActions")
public void testDeleteAction() throws IOException, SQLException {
this.mysqlDagActionStore.deleteDagAction(
new DagActionStore.DagAction(flowGroup, flowName, flowExecutionId, DagActionStore.FlowActionType.KILL));
Assert.assertEquals(this.mysqlDagActionStore.getDagActions().size(), 2);
Assert.assertFalse(this.mysqlDagActionStore.exists(flowGroup, flowName, flowExecutionId, DagActionStore.FlowActionType.KILL));
Assert.assertTrue(this.mysqlDagActionStore.exists(flowGroup, flowName, flowExecutionId, DagActionStore.FlowActionType.RESUME));
Assert.assertTrue(this.mysqlDagActionStore.exists(flowGroup, flowName, flowExecutionId_2, DagActionStore.FlowActionType.KILL));
}
} | 1,321 |
0 | Create_ds/gobblin/gobblin-runtime/src/test/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-runtime/src/test/java/org/apache/gobblin/runtime/job_monitor/MockedKafkaJobMonitor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.job_monitor;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import javax.annotation.Nullable;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import com.google.common.base.Charsets;
import com.google.common.base.Predicate;
import com.google.common.base.Splitter;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.runtime.api.JobSpec;
import org.apache.gobblin.runtime.api.MutableJobCatalog;
import org.apache.gobblin.runtime.api.SpecExecutor;
import org.apache.gobblin.testing.AssertWithBackoff;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
@Slf4j
public class MockedKafkaJobMonitor extends KafkaJobMonitor {
private static final Splitter SPLITTER_COMMA = Splitter.on(",");
private static final Splitter SPLITTER_COLON = Splitter.on(":");
public static final String REMOVE = "remove";
@Getter
private final Map<URI, JobSpec> jobSpecs;
public static MockedKafkaJobMonitor create(String topic, Config config) {
return new MockedKafkaJobMonitor(topic, config, Maps.<URI, JobSpec>newConcurrentMap());
}
private MockedKafkaJobMonitor(String topic, Config config, Map<URI, JobSpec> jobSpecs) {
super(topic, createMockCatalog(jobSpecs), config);
this.jobSpecs = jobSpecs;
}
private static MutableJobCatalog createMockCatalog(final Map<URI, JobSpec> jobSpecs) {
MutableJobCatalog jobCatalog = Mockito.mock(MutableJobCatalog.class);
Mockito.doAnswer(new Answer<Void>() {
@Override
public Void answer(InvocationOnMock invocation)
throws Throwable {
JobSpec jobSpec = (JobSpec) invocation.getArguments()[0];
jobSpecs.put(jobSpec.getUri(), jobSpec);
return null;
}
}).when(jobCatalog).put(Mockito.any(JobSpec.class));
Mockito.doAnswer(new Answer<Void>() {
@Override
public Void answer(InvocationOnMock invocation)
throws Throwable {
URI uri = (URI) invocation.getArguments()[0];
jobSpecs.remove(uri);
return null;
}
}).when(jobCatalog).remove(Mockito.any(URI.class));
return jobCatalog;
}
@Override
public Collection<JobSpec> parseJobSpec(byte[] message)
throws IOException {
try {
String messageString = new String(message, Charsets.UTF_8);
List<JobSpec> jobSpecs = Lists.newArrayList();
for (String oneInstruction : SPLITTER_COMMA.split(messageString)) {
List<String> tokens = SPLITTER_COLON.splitToList(oneInstruction);
if (tokens.get(0).equals(REMOVE)) {
URI uri = new URI(tokens.get(1));
JobSpec jobSpec = new JobSpec.Builder(uri).withConfig(ConfigFactory.empty())
.withMetadata(ImmutableMap.of(SpecExecutor.VERB_KEY, SpecExecutor.Verb.DELETE.name())).build();
jobSpecs.add(jobSpec);
} else {
URI uri = new URI(tokens.get(0));
String version = tokens.get(1);
JobSpec jobSpec = new JobSpec.Builder(uri).withConfig(ConfigFactory.empty()).withVersion(version)
.withMetadata(ImmutableMap.of(SpecExecutor.VERB_KEY, SpecExecutor.Verb.ADD.name())).build();
jobSpecs.add(jobSpec);
}
}
return jobSpecs;
} catch (URISyntaxException use) {
throw new IOException(use);
}
}
@Override
public void shutDown() {
super.shutDown();
}
public void awaitExactlyNSpecs(final int n) throws Exception {
AssertWithBackoff.assertTrue(new Predicate<Void>() {
@Override
public boolean apply(@Nullable Void input) {
return MockedKafkaJobMonitor.this.jobSpecs.size() == n;
}
}, 30000, n + " specs", log, 2, 1000);
}
}
| 1,322 |
0 | Create_ds/gobblin/gobblin-runtime/src/test/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-runtime/src/test/java/org/apache/gobblin/runtime/job_exec/TestJobLauncherExecutionDriver.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.job_exec;
import java.io.File;
import java.io.IOException;
import org.apache.commons.io.FileUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.io.Files;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.runtime.JobLauncherFactory;
import org.apache.gobblin.runtime.api.JobExecution;
import org.apache.gobblin.runtime.api.JobExecutionMonitor;
import org.apache.gobblin.runtime.api.JobSpec;
import org.apache.gobblin.runtime.local.LocalJobLauncher;
import org.apache.gobblin.runtime.mapreduce.MRJobLauncher;
import org.apache.gobblin.util.test.TestingSource;
/**
 * Unit tests for {@link JobLauncherExecutionDriver}
 */
public class TestJobLauncherExecutionDriver {

  @Test
  public void testConstructor() throws IOException, InterruptedException {
    File tmpTestDir = Files.createTempDir();
    try {
      File localJobRootDir = new File(tmpTestDir, "localJobRoot");
      Assert.assertTrue(localJobRootDir.mkdir());
      final Logger log = LoggerFactory.getLogger(getClass().getSimpleName() + ".testConstructor");

      // First launch: LOCAL launcher type.
      Config jobConf1 = baseJobConfig("myJob", localJobRootDir);
      JobSpec jobSpec1 = JobSpec.builder().withConfig(jobConf1).build();
      JobLauncherExecutionDriver.Launcher launcher =
          new JobLauncherExecutionDriver.Launcher()
              .withJobLauncherType(JobLauncherFactory.JobLauncherType.LOCAL)
              .withLog(log);
      JobLauncherExecutionDriver jled = extractDriver(launcher.launchJob(jobSpec1));
      Assert.assertTrue(jled.getLegacyLauncher() instanceof LocalJobLauncher);
      JobExecution jex1 = jled.getJobExecution();
      Assert.assertEquals(jex1.getJobSpecURI(), jobSpec1.getUri());
      Assert.assertEquals(jex1.getJobSpecVersion(), jobSpec1.getVersion());

      // Gap so the second launch gets a launch timestamp >= the first (asserted below).
      Thread.sleep(5000);

      // Second launch: MAPREDUCE launcher type.
      File mrJobRootDir = new File(tmpTestDir, "mrJobRoot");
      Assert.assertTrue(mrJobRootDir.mkdir());
      Config jobConf2 = baseJobConfig("myJob2", mrJobRootDir)
          .withValue(ConfigurationKeys.MR_JOB_ROOT_DIR_KEY,
              ConfigValueFactory.fromAnyRef(mrJobRootDir.getPath()));
      JobSpec jobSpec2 = JobSpec.builder().withConfig(jobConf2).build();
      jled = extractDriver(launcher
          .withJobLauncherType(JobLauncherFactory.JobLauncherType.MAPREDUCE)
          .launchJob(jobSpec2));
      Assert.assertTrue(jled.getLegacyLauncher() instanceof MRJobLauncher);
      JobExecution jex2 = jled.getJobExecution();
      Assert.assertEquals(jex2.getJobSpecURI(), jobSpec2.getUri());
      Assert.assertEquals(jex2.getJobSpecVersion(), jobSpec2.getVersion());
      Assert.assertTrue(jex2.getLaunchTimeMillis() >= jex1.getLaunchTimeMillis());
    }
    finally {
      FileUtils.deleteDirectory(tmpTestDir);
    }
  }

  /** Builds the job configuration shared by both launcher flavors of this test. */
  private static Config baseJobConfig(String jobName, File stateStoreRootDir) {
    return ConfigFactory.empty()
        .withValue(ConfigurationKeys.JOB_NAME_KEY, ConfigValueFactory.fromAnyRef(jobName))
        .withValue(ConfigurationKeys.JOB_GROUP_KEY, ConfigValueFactory.fromAnyRef("myGroup"))
        .withValue(ConfigurationKeys.JOB_DESCRIPTION_KEY,
            ConfigValueFactory.fromAnyRef("Awesome job"))
        .withValue(ConfigurationKeys.STATE_STORE_ROOT_DIR_KEY,
            ConfigValueFactory.fromAnyRef(stateStoreRootDir.getPath()))
        .withValue(ConfigurationKeys.SOURCE_CLASS_KEY,
            ConfigValueFactory.fromAnyRef(TestingSource.class.getName()))
        .withValue(ConfigurationKeys.JOB_LOCK_ENABLED_KEY, ConfigValueFactory.fromAnyRef(false));
  }

  /**
   * Extracts the underlying {@link JobLauncherExecutionDriver} from the monitor returned by
   * {@code launchJob}. Fails with a clear assertion message instead of the NullPointerException
   * the previous code produced when the monitor was of an unexpected type (the driver variable
   * stayed {@code null} and was dereferenced on the next line).
   */
  private static JobLauncherExecutionDriver extractDriver(JobExecutionMonitor monitor) {
    Assert.assertTrue(monitor instanceof JobLauncherExecutionDriver.JobExecutionMonitorAndDriver,
        "Unexpected monitor type: " + monitor.getClass().getName());
    return ((JobLauncherExecutionDriver.JobExecutionMonitorAndDriver) monitor).getDriver();
  }
}
| 1,323 |
0 | Create_ds/gobblin/gobblin-runtime/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/test/java/org/apache/gobblin/scheduler/JobConfigFileMonitorTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.scheduler;
import java.io.File;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.io.Reader;
import java.nio.file.Files;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.apache.commons.io.FileUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.google.common.base.Function;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.google.common.util.concurrent.ServiceManager;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.testing.AssertWithBackoff;
/**
 * Unit tests for the job configuration file monitor in {@link org.apache.gobblin.scheduler.JobScheduler}.
 *
 * @author Yinan Li
 */
@Test(enabled=false, groups = {"gobblin.scheduler"})
public class JobConfigFileMonitorTest {

  private static final String JOB_CONFIG_FILE_DIR = "gobblin-test/resource/job-conf";

  private String jobConfigDir;
  private ServiceManager serviceManager;
  private JobScheduler jobScheduler;
  private File newJobConfigFile;

  /** Polls the scheduler for the number of currently scheduled jobs. */
  private class GetNumScheduledJobs implements Function<Void, Integer> {
    @Override
    public Integer apply(Void input) {
      return JobConfigFileMonitorTest.this.jobScheduler.getScheduledJobs().size();
    }
  }

  @BeforeClass
  public void setUp() throws Exception {
    this.jobConfigDir =
        Files.createTempDirectory(String.format("gobblin-test_%s_job-conf", this.getClass().getSimpleName()))
            .toString();
    FileUtils.forceDeleteOnExit(new File(this.jobConfigDir));
    FileUtils.copyDirectory(new File(JOB_CONFIG_FILE_DIR), new File(jobConfigDir));

    Properties properties = new Properties();
    try (Reader schedulerPropsReader = new FileReader("gobblin-test/resource/gobblin.test.properties")) {
      properties.load(schedulerPropsReader);
    }
    properties.setProperty(ConfigurationKeys.JOB_CONFIG_FILE_DIR_KEY, jobConfigDir);
    properties.setProperty(ConfigurationKeys.JOB_CONFIG_FILE_GENERAL_PATH_KEY, jobConfigDir);
    properties.setProperty(ConfigurationKeys.JOB_CONFIG_FILE_MONITOR_POLLING_INTERVAL_KEY, "1000");
    properties.setProperty(ConfigurationKeys.METRICS_ENABLED_KEY, "false");

    SchedulerService quartzService = new SchedulerService(new Properties());
    this.jobScheduler = new JobScheduler(properties, quartzService);
    this.serviceManager = new ServiceManager(Lists.newArrayList(quartzService, this.jobScheduler));
    this.serviceManager.startAsync().awaitHealthy(10, TimeUnit.SECONDS);
  }

  /** Loads job properties from {@code file}, closing the reader even on failure. */
  private static Properties loadJobProps(File file) throws IOException {
    Properties jobProps = new Properties();
    try (Reader reader = new FileReader(file)) {
      jobProps.load(reader);
    }
    return jobProps;
  }

  /** Stores {@code jobProps} to {@code file}, closing the writer even on failure. */
  private static void storeJobProps(File file, Properties jobProps) throws IOException {
    try (FileWriter writer = new FileWriter(file)) {
      jobProps.store(writer, null);
    }
  }

  @Test (enabled=false)
  public void testAddNewJobConfigFile() throws Exception {
    final Logger log = LoggerFactory.getLogger("testAddNewJobConfigFile");
    log.info("testAddNewJobConfigFile: start");
    AssertWithBackoff assertWithBackoff = AssertWithBackoff.create().logger(log).timeoutMs(15000);
    assertWithBackoff.assertEquals(new GetNumScheduledJobs(), 3, "3 scheduled jobs");

    /* Set a time gap, to let the monitor recognize the "3-file" status as old status,
    so that new added file can be discovered */
    Thread.sleep(1000);

    // Create a new job configuration file by making a copy of an existing
    // one and giving a different job name
    Properties jobProps = loadJobProps(new File(this.jobConfigDir, "GobblinTest1.pull"));
    jobProps.setProperty(ConfigurationKeys.JOB_NAME_KEY, "Gobblin-test-new");
    this.newJobConfigFile = new File(this.jobConfigDir, "Gobblin-test-new.pull");
    storeJobProps(this.newJobConfigFile, jobProps);

    assertWithBackoff.assertEquals(new GetNumScheduledJobs(), 4, "4 scheduled jobs");

    Set<String> jobNames = Sets.newHashSet(this.jobScheduler.getScheduledJobs());
    Set<String> expectedJobNames =
        ImmutableSet.<String>builder()
            .add("GobblinTest1", "GobblinTest2", "GobblinTest3", "Gobblin-test-new")
            .build();
    Assert.assertEquals(jobNames, expectedJobNames);
    log.info("testAddNewJobConfigFile: end");
  }

  @Test(enabled=false, dependsOnMethods = {"testAddNewJobConfigFile"})
  public void testChangeJobConfigFile()
      throws Exception {
    final Logger log = LoggerFactory.getLogger("testChangeJobConfigFile");
    log.info("testChangeJobConfigFile: start");
    Assert.assertEquals(this.jobScheduler.getScheduledJobs().size(), 4);

    // Make a change to the new job configuration file
    Properties jobProps = loadJobProps(this.newJobConfigFile);
    jobProps.setProperty(ConfigurationKeys.JOB_COMMIT_POLICY_KEY, "partial");
    jobProps.setProperty(ConfigurationKeys.JOB_NAME_KEY, "Gobblin-test-new2");
    storeJobProps(this.newJobConfigFile, jobProps);

    AssertWithBackoff.create()
        .logger(log)
        .timeoutMs(30000)
        .assertEquals(new GetNumScheduledJobs(), 4, "4 scheduled jobs");

    final Set<String> expectedJobNames =
        ImmutableSet.<String>builder()
            .add("GobblinTest1", "GobblinTest2", "GobblinTest3", "Gobblin-test-new2")
            .build();
    AssertWithBackoff.create()
        .logger(log)
        .timeoutMs(30000)
        .assertEquals(new Function<Void, Set<String>>() {
          @Override public Set<String> apply(Void input) {
            return Sets.newHashSet(JobConfigFileMonitorTest.this.jobScheduler.getScheduledJobs());
          }
        }, expectedJobNames, "Job change detected");
    log.info("testChangeJobConfigFile: end");
  }

  @Test(enabled=false, dependsOnMethods = {"testChangeJobConfigFile"})
  public void testUnscheduleJob()
      throws Exception {
    final Logger log = LoggerFactory.getLogger("testUnscheduleJob");
    log.info("testUnscheduleJob: start");
    Assert.assertEquals(this.jobScheduler.getScheduledJobs().size(), 4);

    // Disable the new job by setting job.disabled=true
    Properties jobProps = loadJobProps(this.newJobConfigFile);
    jobProps.setProperty(ConfigurationKeys.JOB_DISABLED_KEY, "true");
    storeJobProps(this.newJobConfigFile, jobProps);

    AssertWithBackoff.create()
        .logger(log)
        .timeoutMs(7500)
        .assertEquals(new GetNumScheduledJobs(), 3, "3 scheduled jobs");

    Set<String> jobNames = Sets.newHashSet(this.jobScheduler.getScheduledJobs());
    Assert.assertEquals(jobNames.size(), 3);
    Assert.assertTrue(jobNames.contains("GobblinTest1"));
    Assert.assertTrue(jobNames.contains("GobblinTest2"));
    Assert.assertTrue(jobNames.contains("GobblinTest3"));
    log.info("testUnscheduleJob: end");
  }

  @AfterClass
  public void tearDown()
      throws TimeoutException, IOException {
    if (jobConfigDir != null) {
      FileUtils.forceDelete(new File(jobConfigDir));
    }
    this.serviceManager.stopAsync().awaitStopped(30, TimeUnit.SECONDS);
  }
}
| 1,324 |
0 | Create_ds/gobblin/gobblin-runtime/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/test/java/org/apache/gobblin/scheduler/JobSchedulerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.scheduler;
import com.google.common.base.Optional;
import java.util.Properties;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.junit.Assert;
import org.quartz.JobKey;
import org.quartz.Trigger;
import org.testng.annotations.Test;
public class JobSchedulerTest {

  /**
   * Builds two triggers for the same job key and job properties, one with an extra suffix.
   * Verifies the resulting trigger keys are distinct and that the suffix appears at the end
   * of the second trigger's key name.
   */
  @Test
  public void testCreateUniqueTriggersForJob() {
    JobKey key = new JobKey("flow123", "groupA");

    Properties props = new Properties();
    props.put(ConfigurationKeys.JOB_NAME_KEY, "flow123");
    props.put(ConfigurationKeys.JOB_GROUP_KEY, "groupA");
    props.put(ConfigurationKeys.JOB_SCHEDULE_KEY, "0/2 * * * * ?");

    Trigger plainTrigger = JobScheduler.createTriggerForJob(key, props, Optional.absent());
    Trigger suffixedTrigger = JobScheduler.createTriggerForJob(key, props, Optional.of("suffix"));

    Assert.assertFalse(plainTrigger.getKey().equals(suffixedTrigger.getKey()));
    Assert.assertTrue(suffixedTrigger.getKey().getName().endsWith("suffix"));
  }
}
| 1,325 |
0 | Create_ds/gobblin/gobblin-runtime/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/test/java/org/apache/gobblin/task/FailsWithExceptionTaskFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.task;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.publisher.DataPublisher;
import org.apache.gobblin.runtime.JobState;
import org.apache.gobblin.runtime.TaskContext;
import org.apache.gobblin.runtime.task.TaskFactory;
import org.apache.gobblin.runtime.task.TaskIFace;
import org.apache.gobblin.runtime.task.TaskUtils;
import org.apache.gobblin.source.extractor.Extractor;
import org.apache.gobblin.source.workunit.WorkUnit;
/**
 * A {@link TaskFactory} whose tasks unconditionally throw, used by tests to exercise
 * job failure handling for custom tasks.
 */
public class FailsWithExceptionTaskFactory implements TaskFactory {

  @Override
  public TaskIFace createTask(TaskContext taskContext) {
    return new ThrowingTask();
  }

  @Override
  public DataPublisher createDataPublisher(JobState.DatasetState datasetState) {
    // These tasks never publish anything.
    return null;
  }

  /** A source producing a single work unit bound to this task factory. */
  public static class Source implements org.apache.gobblin.source.Source<String, String> {

    @Override
    public List<WorkUnit> getWorkunits(SourceState state) {
      WorkUnit workUnit = new WorkUnit();
      TaskUtils.setTaskFactoryClass(workUnit, FailsWithExceptionTaskFactory.class);
      return Collections.singletonList(workUnit);
    }

    @Override
    public Extractor<String, String> getExtractor(WorkUnitState state)
        throws IOException {
      return null;
    }

    @Override
    public void shutdown(SourceState state) {
      // Nothing to clean up.
    }
  }

  /** Task implementation that fails immediately when run; every other hook is a no-op. */
  private static class ThrowingTask implements TaskIFace {

    @Override
    public void run() {
      throw new IllegalArgumentException("I always fail with an exception!");
    }

    @Override
    public void commit() {
    }

    @Override
    public State getPersistentState() {
      return null;
    }

    @Override
    public State getExecutionMetadata() {
      return null;
    }

    @Override
    public WorkUnitState.WorkingState getWorkingState() {
      return null;
    }

    @Override
    public void shutdown() {
    }

    @Override
    public boolean awaitShutdown(long timeoutMillis)
        throws InterruptedException {
      return false;
    }

    @Override
    public String getProgress() {
      return null;
    }

    @Override
    public boolean isSpeculativeExecutionSafe() {
      return false;
    }
  }
}
| 1,326 |
0 | Create_ds/gobblin/gobblin-runtime/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/test/java/org/apache/gobblin/task/EventBusPublishingTaskFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.task;
import java.io.IOException;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Queue;
import org.testng.Assert;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.Lists;
import com.google.common.collect.Queues;
import com.google.common.collect.SetMultimap;
import com.google.common.eventbus.EventBus;
import com.google.common.eventbus.Subscribe;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.publisher.DataPublisher;
import org.apache.gobblin.publisher.NoopPublisher;
import org.apache.gobblin.runtime.JobState;
import org.apache.gobblin.runtime.TaskContext;
import org.apache.gobblin.runtime.TaskState;
import org.apache.gobblin.runtime.task.BaseAbstractTask;
import org.apache.gobblin.runtime.task.TaskFactory;
import org.apache.gobblin.runtime.task.TaskIFace;
import org.apache.gobblin.runtime.task.TaskUtils;
import org.apache.gobblin.source.extractor.Extractor;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.writer.test.TestingEventBuses;
import lombok.Data;
import lombok.EqualsAndHashCode;
import lombok.Getter;
/**
 * A {@link TaskFactory} for tests: the tasks it creates post {@link Event}s to a shared
 * {@link TestingEventBuses} event bus at each lifecycle step (run, commit, publish, and
 * previous-state replay), so tests can observe task execution from outside the job.
 */
public class EventBusPublishingTaskFactory implements TaskFactory {
  // Work-unit / task-state property holding the numeric task id (also used as the event id).
  public static final String TASK_ID_KEY = "MyFactory.task.id";
  // Property naming the TestingEventBuses bus to post to; posting is skipped when absent.
  public static final String EVENTBUS_ID_KEY = "eventbus.id";
  // Event type names posted at the corresponding lifecycle points.
  public static final String RUN_EVENT = "run";
  public static final String COMMIT_EVENT = "commit";
  public static final String PUBLISH_EVENT = "publish";
  public static final String PREVIOUS_STATE_EVENT = "previousState";

  /** Creates a {@link Task} wired to the event bus named in the task state, if one is set. */
  @Override
  public TaskIFace createTask(TaskContext taskContext) {
    String taskId = taskContext.getTaskState().getProp(TASK_ID_KEY);
    EventBus eventBus = null;
    if (taskContext.getTaskState().contains(EVENTBUS_ID_KEY)) {
      String eventbusId = taskContext.getTaskState().getProp(EVENTBUS_ID_KEY);
      eventBus = TestingEventBuses.getEventBus(eventbusId);
    }
    return new Task(taskContext, taskId, eventBus);
  }

  /** Creates a {@link Publisher}; the bus id is read from the dataset's first task state. */
  @Override
  public DataPublisher createDataPublisher(JobState.DatasetState datasetState) {
    EventBus eventBus = null;
    if (datasetState.getTaskStates().get(0).contains(EVENTBUS_ID_KEY)) {
      eventBus = TestingEventBuses.getEventBus(datasetState.getTaskStates().get(0).getProp(EVENTBUS_ID_KEY));
    }
    return new Publisher(datasetState, eventBus);
  }

  /** Accumulates events posted to the bus so tests can assert on what was seen. */
  public static class EventListener {
    @Getter
    private final Queue<Event> events = Queues.newArrayDeque();

    @Subscribe
    public void listen(Event event) {
      this.events.add(event);
    }

    /** Aggregates the events collected so far into a (event type -> set of task ids) multimap. */
    public SetMultimap<String, Integer> getEventsSeenMap() {
      SetMultimap<String, Integer> seenEvents = HashMultimap.create();
      for (EventBusPublishingTaskFactory.Event event : this.events) {
        seenEvents.put(event.getType(), event.getId());
      }
      return seenEvents;
    }
  }

  /**
   * Source that emits {@value NUM_TASKS_KEY} work units bound to this factory, and replays
   * any persisted per-task state from previous runs as {@code previousState} events.
   */
  public static class Source implements org.apache.gobblin.source.Source<String, String> {
    // Job property controlling how many work units (and hence tasks) to create.
    public static final String NUM_TASKS_KEY = "num.tasks";

    @Override
    public List<WorkUnit> getWorkunits(SourceState state) {
      int numTasks = state.getPropAsInt(NUM_TASKS_KEY);
      String eventBusId = state.getProp(EVENTBUS_ID_KEY);
      EventBus eventBus = TestingEventBuses.getEventBus(eventBusId);

      // Post one previousState event per task state persisted by an earlier run, so tests
      // can verify that state was actually saved and reloaded.
      Map<String, SourceState> previousStates = state.getPreviousDatasetStatesByUrns();
      for (Map.Entry<String, SourceState> entry : previousStates.entrySet()) {
        JobState.DatasetState datasetState = (JobState.DatasetState) entry.getValue();
        for (TaskState taskState : datasetState.getTaskStates()) {
          if (taskState.contains(Task.PERSISTENT_STATE) && eventBus != null) {
            eventBus.post(new Event(PREVIOUS_STATE_EVENT, taskState.getPropAsInt(Task.PERSISTENT_STATE)));
          }
        }
      }

      List<WorkUnit> workUnits = Lists.newArrayList();
      for (int i = 0; i < numTasks; i++) {
        workUnits.add(createWorkUnit(i, eventBusId));
      }
      return workUnits;
    }

    /** Builds one work unit whose task id is {@code wuNumber} and bus is {@code eventBusId}. */
    protected WorkUnit createWorkUnit(int wuNumber, String eventBusId) {
      WorkUnit workUnit = new WorkUnit();
      TaskUtils.setTaskFactoryClass(workUnit, EventBusPublishingTaskFactory.class);
      workUnit.setProp(EVENTBUS_ID_KEY, eventBusId);
      workUnit.setProp(TASK_ID_KEY, wuNumber);
      return workUnit;
    }

    // Extraction never happens: tasks come from the factory, not from an extractor.
    @Override
    public Extractor<String, String> getExtractor(WorkUnitState state) throws IOException {
      throw new UnsupportedOperationException();
    }

    @Override
    public void shutdown(SourceState state) {
      // Do nothing
    }
  }

  /**
   * Task that posts a {@code run} event, a {@code commit} event, and records its id in both
   * its persistent state and execution metadata for later verification by {@link Publisher}.
   */
  @Data
  @EqualsAndHashCode(callSuper = false)
  public static class Task extends BaseAbstractTask {
    // Key under which the task writes its id into persistent state (replayed on the next run).
    public static final String PERSISTENT_STATE = "persistent.state";
    // Key under which the task writes its id into execution metadata.
    public static final String EXECUTION_METADATA = "execution.metadata";
    private final String taskId;
    private final EventBus eventBus;

    public Task(TaskContext taskContext, String taskId, EventBus eventBus) {
      super(taskContext);
      this.taskId = taskId;
      this.eventBus = eventBus;
    }

    @Override
    public void run() {
      if (this.eventBus != null) {
        this.eventBus.post(new Event(RUN_EVENT, Integer.parseInt(this.taskId)));
      }
      super.run();
    }

    @Override
    public void commit() {
      if (this.eventBus != null) {
        this.eventBus.post(new Event(COMMIT_EVENT, Integer.parseInt(this.taskId)));
      }
      super.commit();
    }

    @Override
    public State getPersistentState() {
      State state = super.getPersistentState();
      state.setProp(PERSISTENT_STATE, this.taskId);
      return state;
    }

    @Override
    public State getExecutionMetadata() {
      State state = super.getExecutionMetadata();
      state.setProp(EXECUTION_METADATA, this.taskId);
      return state;
    }
  }

  /**
   * Publisher that checks each work-unit state carries the matching persistent-state and
   * execution-metadata values, then posts a {@code publish} event per task.
   */
  public static class Publisher extends NoopPublisher {
    private final EventBus eventBus;

    public Publisher(State state, EventBus eventBus) {
      super(state);
      this.eventBus = eventBus;
    }

    @Override
    public void publishData(Collection<? extends WorkUnitState> states) throws IOException {
      for (WorkUnitState state : states) {
        String taskId = state.getProp(TASK_ID_KEY);
        // Verify the values written by Task.getExecutionMetadata/getPersistentState round-tripped.
        Assert.assertEquals(state.getProp(Task.EXECUTION_METADATA), taskId);
        Assert.assertEquals(state.getProp(Task.PERSISTENT_STATE), taskId);
        if (this.eventBus != null) {
          this.eventBus.post(new Event(PUBLISH_EVENT, Integer.parseInt(state.getProp(TASK_ID_KEY))));
        }
      }
      super.publishData(states);
    }
  }

  /** Immutable event payload: a lifecycle type (one of the *_EVENT constants) and a task id. */
  @Data
  public static class Event {
    private final String type;
    private final int id;
  }
}
| 1,327 |
0 | Create_ds/gobblin/gobblin-runtime/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/test/java/org/apache/gobblin/task/CustomTaskTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.task;
import java.io.File;
import java.util.Set;
import java.util.UUID;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.SetMultimap;
import com.google.common.collect.Sets;
import com.google.common.eventbus.EventBus;
import com.google.common.io.Files;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.runtime.api.JobExecutionResult;
import org.apache.gobblin.runtime.embedded.EmbeddedGobblin;
import org.apache.gobblin.writer.test.TestingEventBuses;
/**
 * Tests running custom (non extract/convert/write) tasks through a {@link org.apache.gobblin.runtime.task.TaskFactory}
 * with {@link EmbeddedGobblin}, observing task lifecycle events on a shared testing event bus.
 */
public class CustomTaskTest {

  @Test(timeOut = 30000)
  public void testTaskFailsWithException() throws Exception {
    // Test that the job runner fails with a reasonable amount of time if a custom task throws an exception
    JobExecutionResult result =
        new EmbeddedGobblin("alwaysThrowsJob").setConfiguration(ConfigurationKeys.SOURCE_CLASS_KEY, FailsWithExceptionTaskFactory.Source.class.getName())
            .run();
    Assert.assertFalse(result.isSuccessful());
  }

  @Test
  public void testCustomTask() throws Exception {
    String eventBusId = UUID.randomUUID().toString();
    EventBusPublishingTaskFactory.EventListener listener = new EventBusPublishingTaskFactory.EventListener();
    EventBus eventBus = TestingEventBuses.getEventBus(eventBusId);
    eventBus.register(listener);

    JobExecutionResult result =
        new EmbeddedGobblin("testJob").setConfiguration(ConfigurationKeys.SOURCE_CLASS_KEY, EventBusPublishingTaskFactory.Source.class.getName())
            .setConfiguration(EventBusPublishingTaskFactory.Source.NUM_TASKS_KEY, "10").setConfiguration(EventBusPublishingTaskFactory.EVENTBUS_ID_KEY, eventBusId)
            .run();
    Assert.assertTrue(result.isSuccessful());

    // Use the listener's own aggregation helper instead of re-implementing the
    // event -> id multimap by hand, and the declared event-name constants instead of literals.
    SetMultimap<String, Integer> seenEvents = listener.getEventsSeenMap();
    Set<Integer> expected = Sets.newHashSet(0, 1, 2, 3, 4, 5, 6, 7, 8, 9);
    // Every task id should have been run, committed, and published.
    Assert.assertEquals(seenEvents.get(EventBusPublishingTaskFactory.RUN_EVENT), expected);
    Assert.assertEquals(seenEvents.get(EventBusPublishingTaskFactory.COMMIT_EVENT), expected);
    Assert.assertEquals(seenEvents.get(EventBusPublishingTaskFactory.PUBLISH_EVENT), expected);
  }

  @Test
  public void testStatePersistence() throws Exception {
    File stateStore = Files.createTempDir();
    stateStore.deleteOnExit();

    String eventBusId = UUID.randomUUID().toString();
    EventBusPublishingTaskFactory.EventListener listener = new EventBusPublishingTaskFactory.EventListener();
    EventBus eventBus = TestingEventBuses.getEventBus(eventBusId);
    eventBus.register(listener);

    EmbeddedGobblin embeddedGobblin = new EmbeddedGobblin("testJob")
        .setConfiguration(EventBusPublishingTaskFactory.EVENTBUS_ID_KEY, eventBusId)
        .setConfiguration(ConfigurationKeys.SOURCE_CLASS_KEY, EventBusPublishingTaskFactory.Source.class.getName())
        .setConfiguration(EventBusPublishingTaskFactory.Source.NUM_TASKS_KEY, "2")
        .setConfiguration(ConfigurationKeys.STATE_STORE_ENABLED, Boolean.toString(true))
        .setConfiguration(ConfigurationKeys.STATE_STORE_ROOT_DIR_KEY, stateStore.getAbsolutePath());

    // First run: no prior state exists, so no previous-state events should be replayed.
    JobExecutionResult result = embeddedGobblin.run();
    Assert.assertTrue(result.isSuccessful());
    SetMultimap<String, Integer> seenEvents = listener.getEventsSeenMap();
    Assert.assertEquals(seenEvents.get(EventBusPublishingTaskFactory.PREVIOUS_STATE_EVENT).size(), 0);

    // Second run: the source should replay the state persisted by both tasks of the first run.
    result = embeddedGobblin.run();
    Assert.assertTrue(result.isSuccessful());
    seenEvents = listener.getEventsSeenMap();
    Assert.assertEquals(seenEvents.get(EventBusPublishingTaskFactory.PREVIOUS_STATE_EVENT), Sets.newHashSet(0, 1));
  }
}
| 1,328 |
0 | Create_ds/gobblin/gobblin-runtime/src/test/java/org/apache/gobblin/service | Create_ds/gobblin/gobblin-runtime/src/test/java/org/apache/gobblin/service/monitoring/FlowStatusGeneratorTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.monitoring;
import java.util.Arrays;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.collect.Lists;
import org.apache.gobblin.service.ExecutionStatus;
import org.apache.gobblin.test.matchers.service.monitoring.FlowStatusMatch;
import org.apache.gobblin.test.matchers.service.monitoring.JobStatusMatch;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.mockito.Mockito.when;
public class FlowStatusGeneratorTest {
@Test
public void testIsFlowRunning() {
JobStatusRetriever jobStatusRetriever = Mockito.mock(JobStatusRetriever.class);
String flowName = "testName";
String flowGroup = "testGroup";
when(jobStatusRetriever.getLatestExecutionIdsForFlow(flowName, flowGroup, 1)).thenReturn(null);
FlowStatusGenerator flowStatusGenerator = new FlowStatusGenerator(jobStatusRetriever);
Assert.assertFalse(flowStatusGenerator.isFlowRunning(flowName, flowGroup));
//If a flow is COMPILED, isFlowRunning() should return true.
long flowExecutionId = 1234L;
when(jobStatusRetriever.getLatestExecutionIdsForFlow(flowName, flowGroup, 1)).thenReturn(
Lists.newArrayList(flowExecutionId));
JobStatus jobStatus = JobStatus.builder().flowGroup(flowGroup).flowName(flowName).flowExecutionId(flowExecutionId)
.jobName(JobStatusRetriever.NA_KEY).jobGroup(JobStatusRetriever.NA_KEY).eventName("COMPILED").build();
Iterator<JobStatus> jobStatusIterator = Lists.newArrayList(jobStatus).iterator();
when(jobStatusRetriever.getJobStatusesForFlowExecution(flowName, flowGroup, flowExecutionId)).thenReturn(jobStatusIterator);
Assert.assertTrue(flowStatusGenerator.isFlowRunning(flowName, flowGroup));
//JobStatuses should be ignored, only the flow level status matters.
String job1 = "job1";
String job2 = "job2";
String job3 = "job3";
JobStatus jobStatus1 = JobStatus.builder().flowGroup(flowGroup).flowName(flowName).flowExecutionId(flowExecutionId)
.jobName(job1).eventName("COMPLETE").build();
JobStatus jobStatus2 = JobStatus.builder().flowGroup(flowGroup).flowName(flowName).flowExecutionId(flowExecutionId)
.jobName(job2).eventName("FAILED").build();
JobStatus jobStatus3 = JobStatus.builder().flowGroup(flowGroup).flowName(flowName).flowExecutionId(flowExecutionId)
.jobName(job3).eventName("CANCELLED").build();
JobStatus flowStatus = JobStatus.builder().flowGroup(flowGroup).flowName(flowName).flowExecutionId(flowExecutionId)
.jobName(JobStatusRetriever.NA_KEY).jobGroup(JobStatusRetriever.NA_KEY).eventName("CANCELLED").build();
jobStatusIterator = Lists.newArrayList(jobStatus1, jobStatus2, jobStatus3, flowStatus).iterator();
when(jobStatusRetriever.getJobStatusesForFlowExecution(flowName, flowGroup, flowExecutionId)).thenReturn(jobStatusIterator);
Assert.assertFalse(flowStatusGenerator.isFlowRunning(flowName, flowGroup));
flowStatus = JobStatus.builder().flowGroup(flowGroup).flowName(flowName).flowExecutionId(flowExecutionId)
.jobName(JobStatusRetriever.NA_KEY).jobGroup(JobStatusRetriever.NA_KEY).eventName("RUNNING").build();
jobStatusIterator = Lists.newArrayList(jobStatus1, jobStatus2, jobStatus3, flowStatus).iterator();
when(jobStatusRetriever.getJobStatusesForFlowExecution(flowName, flowGroup, flowExecutionId)).thenReturn(jobStatusIterator);
when(jobStatusRetriever.getDagManagerEnabled()).thenReturn(true);
Assert.assertTrue(flowStatusGenerator.isFlowRunning(flowName, flowGroup));
}
  /**
   * Verifies {@code FlowStatusGenerator.getFlowStatusesAcrossGroup}: with no tag constraint all job
   * statuses of the flow are returned; with a tag constraint only the matching tagged job (plus the
   * flow-level "pseudo" job status) is returned.
   */
  @Test
  public void testGetFlowStatusesAcrossGroup() {
    final long JOB_EXEC_ID = 987L;
    JobStatusRetriever jobStatusRetriever = Mockito.mock(JobStatusRetriever.class);
    // setup: one flow...
    String flowGroup = "myFlowGroup";
    int countPerFlowName = 2;
    String flowName1 = "flowName1";
    long flowExecutionId1 = 111L;
    ExecutionStatus flowStatus1 = ExecutionStatus.ORCHESTRATED;
    // ...with two jobs, each (differently) tagged.
    String f0Js1Status = ExecutionStatus.COMPLETE.name();
    String f0Js1Tag = "step-1";
    String f0Js1JobGroup1 = "job-group-x";
    String f0Js1JobName1 = "job-name-a";
    // f1Js0 is the flow-level "pseudo" job status (jobName/jobGroup == NA_KEY)
    JobStatus f1Js0 = createFlowJobStatus(flowGroup, flowName1, flowExecutionId1, flowStatus1);
    JobStatus f1Js1 = createJobStatus(flowGroup, flowName1, flowExecutionId1,
        f0Js1Status, f0Js1Tag, f0Js1JobGroup1, f0Js1JobName1, JOB_EXEC_ID);
    String f0Js2Status = ExecutionStatus.FAILED.name();
    String f0Js2Tag = "step-2";
    String f0Js2JobGroup1 = "job-group-y";
    String f0Js2JobName1 = "job-name-b";
    JobStatus f1Js2 = createJobStatus(flowGroup, flowName1, flowExecutionId1,
        f0Js2Status, f0Js2Tag, f0Js2JobGroup1, f0Js2JobName1, JOB_EXEC_ID);
    // IMPORTANT: result invariants to honor - ordered by ascending flowName, all of same flowName adjacent, therein descending flowExecutionId
    // NOTE: Three copies of FlowStatus are needed for repeated use, due to mutable, non-rewinding `Iterator FlowStatus.getJobStatusIterator`
    FlowStatus flowStatus = createFlowStatus(flowGroup, flowName1, flowExecutionId1, Arrays.asList(f1Js0, f1Js1, f1Js2), jobStatusRetriever);
    FlowStatus flowStatus2 = createFlowStatus(flowGroup, flowName1, flowExecutionId1, Arrays.asList(f1Js0, f1Js1, f1Js2), jobStatusRetriever);
    FlowStatus flowStatus3 = createFlowStatus(flowGroup, flowName1, flowExecutionId1, Arrays.asList(f1Js0, f1Js1, f1Js2), jobStatusRetriever);
    // Multi-value thenReturn: Mockito hands out one single-use FlowStatus per stubbed invocation,
    // matching the three getFlowStatusesAcrossGroup calls below.
    Mockito.when(jobStatusRetriever.getFlowStatusesForFlowGroupExecutions("myFlowGroup", 2))
        .thenReturn(Collections.singletonList(flowStatus), Collections.singletonList(flowStatus2), Collections.singletonList(flowStatus3)); // (for three invocations)
    FlowStatusGenerator flowStatusGenerator = new FlowStatusGenerator(jobStatusRetriever);
    JobStatusMatch.Dependent f0jsmDep1 = JobStatusMatch.Dependent.ofTagged(f0Js1JobGroup1, f0Js1JobName1, JOB_EXEC_ID, f0Js1Status, f0Js1Tag);
    JobStatusMatch.Dependent f0jsmDep2 = JobStatusMatch.Dependent.ofTagged(f0Js2JobGroup1, f0Js2JobName1, JOB_EXEC_ID, f0Js2Status, f0Js2Tag);
    // verify all jobs returned when no tag constraint
    List<FlowStatus> flowStatusesResult = flowStatusGenerator.getFlowStatusesAcrossGroup(flowGroup, countPerFlowName, null);
    Assert.assertEquals(flowStatusesResult.size(), 1);
    assertThat(flowStatusesResult.get(0), FlowStatusMatch.withDependentJobStatuses(flowGroup, flowName1, flowExecutionId1, flowStatus1,
        Arrays.asList(f0jsmDep1, f0jsmDep2)));
    // verify 'flow pseudo status' plus first job returned against first job's tag
    List<FlowStatus> flowStatusesResult2 = flowStatusGenerator.getFlowStatusesAcrossGroup(flowGroup, countPerFlowName, f0Js1Tag);
    Assert.assertEquals(flowStatusesResult2.size(), 1);
    assertThat(flowStatusesResult2.get(0), FlowStatusMatch.withDependentJobStatuses(flowGroup, flowName1, flowExecutionId1, flowStatus1,
        Arrays.asList(f0jsmDep1)));
    // verify 'flow pseudo status' plus second job returned against second job's tag
    List<FlowStatus> flowStatusesResult3 = flowStatusGenerator.getFlowStatusesAcrossGroup(flowGroup, countPerFlowName, f0Js2Tag);
    Assert.assertEquals(flowStatusesResult3.size(), 1);
    assertThat(flowStatusesResult3.get(0), FlowStatusMatch.withDependentJobStatuses(flowGroup, flowName1, flowExecutionId1, flowStatus1,
        Arrays.asList(f0jsmDep2)));
  }
private FlowStatus createFlowStatus(String flowGroup, String flowName, long flowExecutionId, List<JobStatus> jobStatuses, JobStatusRetriever jobStatusRetriever) {
return new FlowStatus(flowName, flowGroup, flowExecutionId, jobStatuses.iterator(),
JobStatusRetriever.getFlowStatusFromJobStatuses(jobStatusRetriever.getDagManagerEnabled(), jobStatuses.iterator()));
}
private JobStatus createFlowJobStatus(String flowGroup, String flowName, long flowExecutionId, ExecutionStatus status) {
return createJobStatus(flowGroup, flowName, flowExecutionId, status.name(), null,
JobStatusRetriever.NA_KEY, JobStatusRetriever.NA_KEY, 0L);
}
private JobStatus createJobStatus(String flowGroup, String flowName, long flowExecutionId, String eventName,
String jobTag, String jobGroup, String jobName, long jobExecutionId) {
return JobStatus.builder().flowGroup(flowGroup).flowName(flowName).flowExecutionId(flowExecutionId)
.eventName(eventName).jobTag(jobTag)
.jobGroup(jobGroup).jobName(jobName).jobExecutionId(jobExecutionId).build();
}
} | 1,329 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.performance;
import java.util.List;
import java.util.concurrent.TimeUnit;
import org.testng.Assert;
import com.google.common.collect.Lists;
import com.google.common.eventbus.EventBus;
import com.google.common.eventbus.Subscribe;
import org.apache.gobblin.runtime.embedded.EmbeddedGobblin;
import org.apache.gobblin.util.test.FastSequentialSource;
import org.apache.gobblin.writer.test.GobblinTestEventBusWriter;
import org.apache.gobblin.writer.test.TestingEventBuses;
/**
 * Runs performance tests on Gobblin.
 */
public class PerformanceTest {
  // Baseline the source and writer individually first, then the full pipeline, so the pipeline
  // number can be interpreted relative to its components.
  public static void main(String[] args) throws Exception {
    testSourceThroughput();
    testWriterThroughput();
    testGobblinThroughput();
  }
  /**
   * Test the throughput of a Gobblin pipeline with trivial source and writers and no converters / forks, etc.
   */
  public static void testGobblinThroughput() throws Exception {
    String eventBusId = PerformanceTest.class.getName();
    EmbeddedGobblin embeddedGobblin = new EmbeddedGobblin("PerformanceTest")
        .setTemplate("resource:///templates/performanceTest.template")
        .setConfiguration(GobblinTestEventBusWriter.FULL_EVENTBUSID_KEY, eventBusId);
    // Register before running so the RunSummary event emitted by the writer is not missed.
    EventHandler eventHandler = new EventHandler();
    TestingEventBuses.getEventBus(eventBusId).register(eventHandler);
    embeddedGobblin.run();
    // Exactly one summary expected: the template runs a single task (assumption based on the
    // template config — confirm against performanceTest.template).
    Assert.assertEquals(eventHandler.runSummaries.size(), 1);
    GobblinTestEventBusWriter.RunSummary runSummary = eventHandler.runSummaries.get(0);
    System.out.println(String.format("Task processed %d records in %d millis, qps: %f", runSummary.getRecordsWritten(),
        runSummary.getTimeElapsedMillis(),
        (double) runSummary.getRecordsWritten() * 1000 / runSummary.getTimeElapsedMillis()));
  }
  /**
   * Test the throughput of the source used on {@link #testGobblinThroughput()} to prove it is not a bottleneck.
   */
  public static void testSourceThroughput() throws Exception {
    FastSequentialSource.FastSequentialExtractor extractor =
        new FastSequentialSource.FastSequentialExtractor(10000000, 10);
    Long thisRecord;
    // lastRecord is used below as the record count; assumes the extractor emits an increasing
    // sequence ending at the number of records produced — TODO confirm against FastSequentialSource.
    long lastRecord = 0;
    long startTime = System.nanoTime();
    while (true) {
      thisRecord = extractor.readRecord(null);
      if (thisRecord == null) {
        // null signals exhaustion of the source
        break;
      } else {
        lastRecord = thisRecord;
      }
      // do nothing
    }
    long elapsedMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime);
    System.out.println(String.format("Source produced %d records in %d millis, qps: %f", lastRecord, elapsedMillis,
        (double) lastRecord * 1000 / elapsedMillis));
  }
  /**
   * Test the throughput of the writer used on {@link #testGobblinThroughput()} to prove it is not a bottleneck.
   */
  public static void testWriterThroughput() throws Exception {
    EventBus eventBus = new EventBus();
    EventHandler eventHandler = new EventHandler();
    eventBus.register(eventHandler);
    GobblinTestEventBusWriter writer = new GobblinTestEventBusWriter(eventBus, GobblinTestEventBusWriter.Mode.COUNTING);
    long records = 0;
    // Bound the run both by record count (10M) and wall clock (10s), whichever hits first.
    long endAt = System.currentTimeMillis() + 10000;
    long startTime = System.nanoTime();
    for (records = 0; records < 10000000 && System.currentTimeMillis() < endAt; records++) {
      writer.write(records);
    }
    // Elapsed time is captured before commit(), so only the write loop is measured.
    long elapsedMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime);
    writer.commit();
    // commit() publishes the RunSummary event; verify the writer counted every record written.
    Assert.assertEquals(eventHandler.runSummaries.get(0).getRecordsWritten(), records);
    System.out.println(String.format("Writer consumed %d records in %d millis, qps: %f", records, elapsedMillis,
        (double) records * 1000 / elapsedMillis));
  }
  /** Collects RunSummary events published on the test event bus. */
  private static class EventHandler {
    List<GobblinTestEventBusWriter.RunSummary> runSummaries = Lists.newArrayList();
    @Subscribe
    public void registerCount(TestingEventBuses.Event event) {
      this.runSummaries.add((GobblinTestEventBusWriter.RunSummary) event.getValue());
    }
  }
}
| 1,330 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import java.io.Closeable;
import java.io.IOException;
import java.lang.reflect.Method;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.sql.Types;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Function;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import org.apache.calcite.DataContext;
import org.apache.calcite.adapter.java.JavaTypeFactory;
import org.apache.calcite.linq4j.AbstractEnumerable;
import org.apache.calcite.linq4j.Enumerable;
import org.apache.calcite.linq4j.Enumerator;
import org.apache.calcite.linq4j.Linq4j;
import org.apache.calcite.rel.type.RelDataType;
import org.apache.calcite.rel.type.RelDataTypeFactory;
import org.apache.calcite.rel.type.RelDataTypeField;
import org.apache.calcite.rel.type.RelDataTypeFieldImpl;
import org.apache.calcite.rel.type.RelDataTypeImpl;
import org.apache.calcite.rex.RexNode;
import org.apache.calcite.schema.ProjectableFilterableTable;
import org.apache.calcite.schema.Schema;
import org.apache.calcite.schema.SchemaFactory;
import org.apache.calcite.schema.SchemaPlus;
import org.apache.calcite.schema.Table;
import org.apache.calcite.schema.impl.AbstractSchema;
import org.apache.calcite.schema.impl.AbstractTable;
import com.google.common.base.Joiner;
import com.google.common.base.Splitter;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.collect.Lists;
import lombok.Data;
/**
 * A predicate evaluator that uses an interface to define a table schema and can evaluate SQL statements on instances of
 * that interface. See {@link ReflectivePredicateEvaluatorTest} for examples.
 *
 * Note all evaluated statements must return a single row with a single boolean column.
 *
 * Usage:
 * ReflectivePredicateEvaluator<MyInterface> evaluator = new ReflectivePredicateEvaluator<>(MyInterface.class, "SELECT ... FROM myInterface");
 * evaluator.evaluate(instance1, instance2, ...); // use the statement provided in constructor
 * -- or --
 * evaluator.evaluate("SELECT ... FROM myInterface", instance1, instance2, ...);
 */
public class ReflectivePredicateEvaluator implements Closeable {

  private static final String REFERENCE_INTERFACES = "refInterface";
  private static final String OPERATOR_ID = "operatorId";
  // Optional JavaBean "get" prefix: group(1) captures the first letter after "get" (null when the
  // method name has no such prefix), group(2) the remainder.
  private static final Pattern FIELD_NAME_EXTRACTOR = Pattern.compile("(?:get([A-Z]))?(.+)");

  private static final String MODEL_PATTERN = "{"
      + "version: 1, defaultSchema: 'MAIN',"
      + "schemas: ["
      + "{name: 'MAIN', type: 'custom', factory: '%s', operand: {%s: '%s', %s: '%d'}}"
      + "]}";
  private static final String CONNECT_STRING_PATTERN = "jdbc:calcite:model=inline:%s";

  // Weak-valued registry that lets the Calcite-instantiated schema/table objects locate their
  // owning evaluator by id (Calcite can only pass strings through the model operand).
  private static final Cache<Integer, ReflectivePredicateEvaluator> REGISTRY = CacheBuilder.newBuilder().weakValues().build();
  private static final AtomicInteger IDENTIFIER = new AtomicInteger();

  private final List<Class<?>> referenceInterfaces;
  private final int identifier;
  private final Connection conn;
  private final PreparedStatement stmnt;
  private final String sql;
  // Objects currently under evaluation; read by PETable.scan via REGISTRY. Guarded by the
  // synchronized block in evaluate(List, String).
  private volatile List<Object> objects;

  /**
   * @param sql The default SQL expression to run in this evaluator.
   * @param referenceInterfaces The interface that will be used to generate the table schema.
   * @throws SQLException
   */
  public ReflectivePredicateEvaluator(String sql, Class<?>... referenceInterfaces) throws SQLException {
    this.referenceInterfaces = Lists.newArrayList(referenceInterfaces);
    this.sql = sql;
    this.identifier = IDENTIFIER.getAndIncrement();
    REGISTRY.put(this.identifier, this);
    String model = computeModel();
    String connectString = String.format(CONNECT_STRING_PATTERN, model);
    this.conn =
        DriverManager.getConnection(connectString);
    this.stmnt = prepareStatement(sql);
  }

  /** Prepares and validates {@code sql}, closing the statement if validation fails. */
  private PreparedStatement prepareStatement(String sql) throws SQLException {
    PreparedStatement stmnt = null;
    try {
      stmnt = this.conn.prepareStatement(sql);
      validateSql(stmnt, sql);
      return stmnt;
    } catch (Throwable t) {
      if (stmnt != null) {
        try {
          stmnt.close();
        } catch (SQLException closeExc) {
          // Don't let a failure to close mask the original error.
          t.addSuppressed(closeExc);
        }
      }
      throw t;
    }
  }

  /** Builds the inline Calcite model JSON pointing back to {@link PESchemaFactory}. */
  private String computeModel() {
    return String.format(MODEL_PATTERN, PESchemaFactory.class.getName(), REFERENCE_INTERFACES,
        Joiner.on(",").join(this.referenceInterfaces.stream().map(Class::getName).collect(Collectors.toList())),
        OPERATOR_ID, this.identifier);
  }

  /** Enforces the single-boolean-column contract on the prepared statement's result metadata. */
  private void validateSql(PreparedStatement stmnt, String sql) throws SQLException {
    ResultSetMetaData metaData = stmnt.getMetaData();
    if (metaData.getColumnCount() != 1 || metaData.getColumnType(1) != Types.BOOLEAN) {
      throw new IllegalArgumentException("Statement is expected to return a single boolean column. Provided statement: " + sql);
    }
  }

  /**
   * Evaluate the default predicate on the list of provided objects.
   * @throws SQLException
   */
  public boolean evaluate(Object... objects) throws SQLException{
    return evaluate(Lists.newArrayList(objects), null);
  }

  /**
   * Evaluate an ad-hoc predicate on the list of provided objects.
   * Note {@link #evaluate(Object...)} is preferable as it only does validation of the expression once.
   * @throws SQLException
   */
  public boolean evaluate(String sql, Object... objects) throws SQLException{
    return evaluate(Lists.newArrayList(objects), sql);
  }

  /**
   * Evaluate the default predicate on the list of provided objects.
   * @throws SQLException
   */
  public boolean evaluate(List<Object> objects) throws SQLException {
    return evaluate(objects, null);
  }

  /**
   * Evaluate an ad-hoc predicate on the list of provided objects.
   * Note {@link #evaluate(Object[])} is preferable as it only does validation of the expression once.
   * @param sql ad-hoc statement, or null to use the default provided at construction
   * @return true iff every returned row is true (at least one row is required)
   * @throws SQLException
   */
  public boolean evaluate(List<Object> objects, String sql) throws SQLException {
    // Synchronized because `this.objects` is shared state read by PETable.scan during execution.
    synchronized (this) {
      String actualSql = sql == null ? this.sql : sql;
      PreparedStatement actualStmnt = null;
      try {
        actualStmnt = sql == null ? this.stmnt : prepareStatement(sql);
        this.objects = objects;
        actualStmnt.execute();
        ResultSet rs = actualStmnt.getResultSet();
        if (!rs.next()) {
          throw new IllegalArgumentException("Expected at least one returned row. SQL evaluated: " + actualSql);
        }
        boolean result = true;
        do {
          result &= rs.getBoolean(1);
        } while (rs.next());
        return result;
      } finally {
        // Only ad-hoc statements are closed; the default statement is reused across calls.
        if (sql != null && actualStmnt != null) {
          actualStmnt.close();
        }
      }
    }
  }

  @Override
  public void close()
      throws IOException {
    try {
      if (this.stmnt != null) {
        this.stmnt.close();
      }
      if (this.conn != null) {
        this.conn.close();
      }
    } catch (SQLException exc) {
      throw new IOException("Failed to close " + ReflectivePredicateEvaluator.class.getSimpleName(), exc);
    }
  }

  /**
   * Calcite {@link SchemaFactory} used for the evaluator.
   * This class is public because Calcite uses reflection to instantiate it, there is no reason to use it anywhere else
   * in Gobblin.
   */
  public static class PESchemaFactory implements SchemaFactory {
    @Override
    public Schema create(SchemaPlus parentSchema, String name, Map<String, Object> operand) {
      try {
        List<Class<?>> referenceInterfaces = new ArrayList<>();
        for (String iface : Splitter.on(",").splitToList(operand.get(REFERENCE_INTERFACES).toString())) {
          referenceInterfaces.add(Class.forName(iface));
        }
        int operatorIdentifier = Integer.parseInt(operand.get(OPERATOR_ID).toString());
        return new AbstractSchema() {
          @Override
          protected Map<String, Table> getTableMap() {
            // One table per reference interface, named after the interface (upper-cased).
            HashMap<String, Table> map = new HashMap<>();
            for (Class<?> iface : referenceInterfaces) {
              map.put(iface.getSimpleName().toUpperCase(),
                  new PETable(iface, operatorIdentifier));
            }
            return map;
          }
        };
      } catch (ReflectiveOperationException roe) {
        throw new RuntimeException(roe);
      }
    }
  }

  /** A Calcite table backed by reflective access to instances of {@code referenceInterface}. */
  @Data
  private static class PETable extends AbstractTable implements ProjectableFilterableTable {
    private final Class<?> referenceInterface;
    private final int operatorIdentifier;

    private volatile boolean initialized = false;
    private RelDataType rowType;
    // One extractor per field, index-aligned with the field positions in `rowType`.
    private List<Function<Object, Object>> methodsForFields = new ArrayList<>();

    @Override
    public Enumerable<Object[]> scan(DataContext root, List<RexNode> filters, int[] projects) {
      ReflectivePredicateEvaluator evaluator = REGISTRY.getIfPresent(this.operatorIdentifier);
      if (evaluator == null) {
        // The registry holds weak values; fail clearly instead of NPE-ing if the owner was collected.
        throw new IllegalStateException("No live " + ReflectivePredicateEvaluator.class.getSimpleName()
            + " registered under id " + this.operatorIdentifier);
      }
      List<Object> list = evaluator.objects;
      final int[] actualProjects = resolveProjects(projects);
      Enumerator<Object[]> enumerator = Linq4j.enumerator(list.stream()
          // Only instances of this table's interface belong to this table.
          .filter(o -> referenceInterface.isAssignableFrom(o.getClass()))
          .map(
              m -> {
                Object[] res = new Object[actualProjects.length];
                for (int i = 0; i < actualProjects.length; i++) {
                  res[i] = methodsForFields.get(actualProjects[i]).apply(m);
                }
                return res;
              }
          ).collect(Collectors.toList()));
      return new AbstractEnumerable<Object[]>() {
        @Override
        public Enumerator<Object[]> enumerator() {
          return enumerator;
        }
      };
    }

    /** A null projection means "all columns"; expand it to the identity projection. */
    private int[] resolveProjects(int[] projects) {
      if (projects == null) {
        projects = new int[methodsForFields.size()];
        for (int i = 0; i < projects.length; i++) {
          projects[i] = i;
        }
      }
      return projects;
    }

    @Override
    public RelDataType getRowType(RelDataTypeFactory typeFactory) {
      initialize((JavaTypeFactory) typeFactory);
      return this.rowType;
    }

    /** Lazily derives the row type and field extractors from the interface's no-arg methods. */
    private synchronized void initialize(JavaTypeFactory typeFactory) {
      if (this.initialized) {
        return;
      }
      this.methodsForFields = new ArrayList<>();
      List<RelDataTypeField> fields = new ArrayList<>();
      for (Method method : this.referenceInterface.getMethods()) {
        if (method.getParameterCount() == 0) {
          String fieldName = computeFieldName(method.getName());
          if (fieldName != null) {
            this.methodsForFields.add(extractorForMethod(method));
            Class<?> retType = method.getReturnType();
            if (retType.isEnum()) {
              // Enums are exposed to SQL as strings.
              retType = String.class;
            }
            fields.add(new RelDataTypeFieldImpl(fieldName.toUpperCase(), fields.size(), typeFactory.createType(retType)));
          }
        }
      }
      this.rowType = new MyDataType(fields, referenceInterface);
      this.initialized = true;
    }

    /** Wraps a reflective getter; enum values are converted to their string form. */
    private Function<Object, Object> extractorForMethod(Method method) {
      return o -> {
        try {
          Object ret = method.invoke(o);
          // Null-safe: a null enum value previously caused an NPE on toString().
          return (ret != null && method.getReturnType().isEnum()) ? ret.toString() : ret;
        } catch (ReflectiveOperationException roe) {
          throw new RuntimeException(roe);
        }
      };
    }
  }

  /** Minimal {@link RelDataTypeImpl} whose digest is the reference interface's name. */
  private static class MyDataType extends RelDataTypeImpl {
    private final String typeString;

    public MyDataType(List<? extends RelDataTypeField> fieldList, Class<?> refInterface) {
      super(fieldList);
      this.typeString = refInterface.getName();
      computeDigest();
    }

    @Override
    protected void generateTypeString(StringBuilder sb, boolean withDetail) {
      sb.append(typeString);
    }
  }

  /**
   * Derives a field name from a method name: "getFoo" becomes "Foo"; any other name is used as-is.
   * BUG FIX: when the optional "get" prefix did not match, group(1) was null and the old
   * concatenation produced names literally prefixed with "null" (e.g. "isActive" -> "nullisActive").
   */
  private static String computeFieldName(String methodName) {
    Matcher matcher = FIELD_NAME_EXTRACTOR.matcher(methodName);
    if (matcher.matches()) {
      return matcher.group(1) == null ? matcher.group(2) : matcher.group(1) + matcher.group(2);
    }
    return null;
  }
}
| 1,331 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import com.zaxxer.hikari.HikariDataSource;
import java.io.Closeable;
import java.io.IOException;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import javax.sql.DataSource;
import org.slf4j.Logger;
/**
 * Many database stores require common functionality that can be stored in a utility class. The functionality
 * includes executing prepared statements on a data source object and SQL queries at fixed intervals.
 * The caller of the class MUST maintain ownership of the {@link DataSource} and close this instance when the
 * {@link DataSource} is about to be closed as well. Both are to be done only once this instance will no longer be used.
 */
public class DBStatementExecutor implements Closeable {

  private final DataSource dataSource;
  private final Logger log;
  /** Executors created by {@link #repeatSqlCommandExecutionAtInterval}; shut down in {@link #close()}. */
  private final ArrayList<ScheduledThreadPoolExecutor> scheduledExecutors;

  public DBStatementExecutor(DataSource dataSource, Logger log) {
    this.dataSource = dataSource;
    this.log = log;
    this.scheduledExecutors = new ArrayList<>();
  }

  /** `j.u.Function` variant for an operation that may @throw IOException or SQLException: preserves method signature checked exceptions */
  @FunctionalInterface
  public interface CheckedFunction<T, R> {
    R apply(T t) throws IOException, SQLException;
  }

  /**
   * Abstracts the recurring pattern around resource management and exception re-mapping.
   *
   * @param sql SQL text prepared against a fresh connection from the data source
   * @param f operation run against the prepared statement; its result is returned
   * @param shouldCommit whether to commit the connection after {@code f} completes
   * @return whatever {@code f} returns
   * @throws IOException wrapping any {@link SQLException} encountered
   */
  public <T> T withPreparedStatement(String sql, CheckedFunction<PreparedStatement, T> f, boolean shouldCommit)
      throws IOException {
    try (Connection connection = dataSource.getConnection();
        PreparedStatement statement = connection.prepareStatement(sql)) {
      T result = f.apply(statement);
      if (shouldCommit) {
        connection.commit();
      }
      // NOTE: try-with-resources closes `statement` and `connection`; the previous explicit
      // statement.close() here was redundant.
      return result;
    } catch (SQLException e) {
      // BUG FIX: guard the cast — a non-Hikari DataSource previously threw ClassCastException
      // here, masking the original SQLException.
      String connectionTestQuery = (dataSource instanceof HikariDataSource)
          ? ((HikariDataSource) dataSource).getConnectionTestQuery()
          : "(unavailable: not a HikariDataSource)";
      log.warn("Received SQL exception that can result from invalid connection. Checking if validation query is set {} "
          + "Exception is {}", connectionTestQuery, e);
      throw new IOException(e);
    }
  }

  /**
   * Repeats execution of a SQL command at a fixed interval while the service is running. The first execution of the
   * command is immediate.
   * @param sqlCommand SQL string
   * @param interval frequency with which command will run
   * @param timeUnit unit of time for interval
   */
  public void repeatSqlCommandExecutionAtInterval(String sqlCommand, long interval, TimeUnit timeUnit) {
    ScheduledThreadPoolExecutor executor = new ScheduledThreadPoolExecutor(1);
    Runnable task = () -> {
      try {
        withPreparedStatement(sqlCommand,
            preparedStatement -> {
              int numRowsAffected = preparedStatement.executeUpdate();
              if (numRowsAffected != 0) {
                log.info("{} rows affected by SQL command: {}", numRowsAffected, sqlCommand);
              }
              return numRowsAffected;
            }, true);
      } catch (IOException e) {
        // Log and swallow so a single failed run does not cancel the periodic schedule.
        log.error("Failed to execute SQL command: {}", sqlCommand, e);
      }
    };
    executor.scheduleAtFixedRate(task, 0, interval, timeUnit);
    this.scheduledExecutors.add(executor);
  }

  /**
   * Call before closing the data source object associated with this instance to also shut down any executors expecting
   * to be run on the data source.
   */
  @Override
  public void close() {
    for (ScheduledThreadPoolExecutor executor : this.scheduledExecutors) {
      executor.shutdownNow();
    }
  }
}
| 1,332 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import java.io.IOException;
import java.net.URISyntaxException;
import java.util.Collection;
import java.util.List;
import java.util.Properties;
import java.util.Set;
import org.apache.commons.configuration.ConfigurationException;
import org.apache.gobblin.runtime.api.JobSpec;
import org.apache.gobblin.runtime.job_spec.JobSpecResolver;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Function;
import com.google.common.base.Splitter;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.typesafe.config.Config;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.runtime.api.JobTemplate;
import org.apache.gobblin.runtime.api.SpecNotFoundException;
import org.apache.gobblin.runtime.job_catalog.PackagedTemplatesJobCatalogDecorator;
import org.apache.gobblin.runtime.template.ResourceBasedJobTemplate;
import org.apache.gobblin.util.filesystem.PathAlterationObserverScheduler;
import org.apache.gobblin.util.filesystem.PathAlterationListener;
import org.apache.gobblin.util.filesystem.PathAlterationObserver;
/**
* A utility class used by the scheduler.
*
* @author Yinan Li
*/
public class SchedulerUtils {
private static final Logger LOGGER = LoggerFactory.getLogger(SchedulerUtils.class);
// Extension of properties files
public static final String JOB_PROPS_FILE_EXTENSION = "properties";
/**
* Load job configuration from job configuration files stored in general file system,
* located by Path
* @param sysProps Gobblin framework configuration properties
* @return a list of job configurations in the form of {@link java.util.Properties}
*/
public static List<Properties> loadGenericJobConfigs(Properties sysProps, JobSpecResolver resolver)
throws ConfigurationException, IOException {
Path rootPath = new Path(sysProps.getProperty(ConfigurationKeys.JOB_CONFIG_FILE_GENERAL_PATH_KEY));
PullFileLoader loader = new PullFileLoader(rootPath, rootPath.getFileSystem(new Configuration()),
getJobConfigurationFileExtensions(sysProps), PullFileLoader.DEFAULT_HOCON_PULL_FILE_EXTENSIONS);
Config sysConfig = ConfigUtils.propertiesToConfig(sysProps);
Collection<Config> configs =
loader.loadPullFilesRecursively(rootPath, sysConfig, true);
List<Properties> jobConfigs = Lists.newArrayListWithCapacity(configs.size());
for (Config config : configs) {
try {
jobConfigs.add(resolveTemplate(ConfigUtils.configToProperties(config), resolver));
} catch (IOException ioe) {
LOGGER.error("Could not parse job config at " + ConfigUtils.getString(config,
ConfigurationKeys.JOB_CONFIG_FILE_PATH_KEY, "Unknown path"), ioe);
}
}
return jobConfigs;
}
/**
* Load job configurations from job configuration files affected by changes to the given common properties file.
* From a general file system.
* @param sysProps Gobblin framework configuration properties
* @param commonPropsPath the path of common properties file with changes
* @param jobConfigPathDir the path for root job configuration file directory
* @return a list of job configurations in the form of {@link java.util.Properties}
*/
public static List<Properties> loadGenericJobConfigs(Properties sysProps, Path commonPropsPath,
Path jobConfigPathDir, JobSpecResolver resolver)
throws ConfigurationException, IOException {
PullFileLoader loader = new PullFileLoader(jobConfigPathDir, jobConfigPathDir.getFileSystem(new Configuration()),
getJobConfigurationFileExtensions(sysProps), PullFileLoader.DEFAULT_HOCON_PULL_FILE_EXTENSIONS);
Config sysConfig = ConfigUtils.propertiesToConfig(sysProps);
Collection<Config> configs =
loader.loadPullFilesRecursively(commonPropsPath.getParent(), sysConfig, true);
List<Properties> jobConfigs = Lists.newArrayListWithCapacity(configs.size());
for (Config config : configs) {
try {
jobConfigs.add(resolveTemplate(ConfigUtils.configToProperties(config), resolver));
} catch (IOException ioe) {
LOGGER.error("Could not parse job config at " + ConfigUtils.getString(config,
ConfigurationKeys.JOB_CONFIG_FILE_PATH_KEY, "Unknown path"), ioe);
}
}
return jobConfigs;
}
/**
* Load a given job configuration file from a general file system.
*
* @param sysProps Gobblin framework configuration properties
* @param jobConfigPath job configuration file to be loaded
* @param jobConfigPathDir root job configuration file directory
* @return a job configuration in the form of {@link java.util.Properties}
*/
public static Properties loadGenericJobConfig(Properties sysProps, Path jobConfigPath, Path jobConfigPathDir,
JobSpecResolver resolver) throws ConfigurationException, IOException {
PullFileLoader loader = new PullFileLoader(jobConfigPathDir, jobConfigPathDir.getFileSystem(new Configuration()),
getJobConfigurationFileExtensions(sysProps), PullFileLoader.DEFAULT_HOCON_PULL_FILE_EXTENSIONS);
Config sysConfig = ConfigUtils.propertiesToConfig(sysProps);
Config config = loader.loadPullFile(jobConfigPath, sysConfig, true);
return resolveTemplate(ConfigUtils.configToProperties(config), resolver);
}
/**
* Add {@link PathAlterationObserverScheduler}s for the given
* root directory and any nested subdirectories under the root directory to the given
* {@link PathAlterationObserverScheduler}.
*
* @param monitor a {@link PathAlterationObserverScheduler}
* @param listener a {@link org.apache.gobblin.util.filesystem.PathAlterationListener}
* @param rootDirPath root directory
*/
public static void addPathAlterationObserver(PathAlterationObserverScheduler monitor, PathAlterationListener listener,
Path rootDirPath)
throws IOException {
PathAlterationObserver observer = new PathAlterationObserver(rootDirPath);
observer.addListener(listener);
monitor.addObserver(observer);
}
/**
 * Parse the set of recognized job configuration file extensions from the framework properties.
 *
 * <p>The extensions are read as a comma-separated list from
 * {@link ConfigurationKeys#JOB_CONFIG_FILE_EXTENSIONS_KEY}, falling back to the framework default,
 * and are lower-cased so extension matching is case-insensitive.</p>
 *
 * @param properties Gobblin framework configuration properties
 * @return an immutable set of lower-cased file extensions
 */
private static Set<String> getJobConfigurationFileExtensions(Properties properties) {
  Iterable<String> jobConfigFileExtensionsIterable = Splitter.on(",")
      .omitEmptyStrings()
      .trimResults()
      .split(properties.getProperty(ConfigurationKeys.JOB_CONFIG_FILE_EXTENSIONS_KEY,
          ConfigurationKeys.DEFAULT_JOB_CONFIG_FILE_EXTENSIONS));
  // Lambda replaces the former anonymous Function class; Guava's Function is a SAM interface.
  return ImmutableSet.copyOf(
      Iterables.transform(jobConfigFileExtensionsIterable, input -> null != input ? input.toLowerCase() : ""));
}
/**
 * Resolve the job template referenced by {@link ConfigurationKeys#JOB_TEMPLATE_PATH}, if any,
 * producing the fully-resolved job properties.
 *
 * @param jobProps raw job properties, possibly referencing a template
 * @param resolver resolver that materializes the {@link JobSpec} against its template
 * @return the resolved properties, or {@code jobProps} unchanged when no template is referenced
 * @throws IOException if the template cannot be located, loaded, or resolved
 */
private static Properties resolveTemplate(Properties jobProps, JobSpecResolver resolver) throws IOException {
  // If there is no job template, do not spend resources creating a new JobSpec
  if (!jobProps.containsKey(ConfigurationKeys.JOB_TEMPLATE_PATH)) {
    return jobProps;
  }
  try {
    String templatePath = jobProps.getProperty(ConfigurationKeys.JOB_TEMPLATE_PATH);
    JobTemplate template =
        ResourceBasedJobTemplate.forResourcePath(templatePath, new PackagedTemplatesJobCatalogDecorator());
    JobSpec spec = JobSpec.builder()
        .withConfig(ConfigUtils.propertiesToConfig(jobProps))
        .withTemplate(template)
        .build();
    return resolver.resolveJobSpec(spec).getConfigAsProperties();
  } catch (JobTemplate.TemplateException | SpecNotFoundException | URISyntaxException exc) {
    throw new IOException(exc);
  }
}
}
| 1,333 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/PoolBasedLimiter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime;
/**
 * {@inheritDoc}
 *
 * <p>Thin shim kept only so that existing references to the old package continue to compile;
 * all behavior is inherited from the relocated implementation.</p>
 *
 * @deprecated This class has been moved to {@link org.apache.gobblin.util.limiter.PoolBasedLimiter}.
 */
@Deprecated
public class PoolBasedLimiter extends org.apache.gobblin.util.limiter.PoolBasedLimiter {
  // Delegates directly to the relocated implementation's constructor.
  public PoolBasedLimiter(int poolSize) {
    super(poolSize);
  }
}
| 1,334 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/HiveRegTaskStateCollectorServiceHandlerImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime;
import java.io.IOException;
import java.util.Collection;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.publisher.HiveRegistrationPublisher;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
/**
 * A {@link TaskStateCollectorServiceHandler} implementation that executes Hive registration on the
 * driver level, registering all {@link TaskState}s as soon as they become available.
 * Since {@link TaskStateCollectorService} is by default invoked once a minute, a batch of Hive
 * registration that completes within a minute has its latency hidden by the gap between two runs
 * of {@link TaskStateCollectorService}.
 */
@Slf4j
public class HiveRegTaskStateCollectorServiceHandlerImpl implements TaskStateCollectorServiceHandler {

  private static final String TASK_COLLECTOR_SERVICE_PREFIX = "task.collector.service";
  private static final String HIVE_REG_PUBLISHER_CLASS = "hive.reg.publisher.class";
  private static final String HIVE_REG_PUBLISHER_CLASS_KEY =
      TASK_COLLECTOR_SERVICE_PREFIX + "." + HIVE_REG_PUBLISHER_CLASS;
  private static final String DEFAULT_HIVE_REG_PUBLISHER_CLASS =
      "org.apache.gobblin.publisher.HiveRegistrationPublisher";

  // Publisher that performs the actual Hive registration work.
  private HiveRegistrationPublisher hiveRegHandler;

  /**
   * Instantiate the configured {@link HiveRegistrationPublisher} via its longest matching constructor.
   *
   * @param jobState job state supplying the publisher class name and the constructor argument
   */
  public HiveRegTaskStateCollectorServiceHandlerImpl(JobState jobState) {
    String className = jobState.getProp(HIVE_REG_PUBLISHER_CLASS_KEY, DEFAULT_HIVE_REG_PUBLISHER_CLASS);
    try {
      this.hiveRegHandler = (HiveRegistrationPublisher) GobblinConstructorUtils
          .invokeLongestConstructor(Class.forName(className), jobState);
    } catch (ReflectiveOperationException e) {
      throw new RuntimeException("Could not instantiate HiveRegistrationPublisher " + className, e);
    }
  }

  /** Register the given task states with Hive by delegating to the publisher. */
  @Override
  public void handle(Collection<? extends WorkUnitState> taskStates)
      throws IOException {
    this.hiveRegHandler.publishData(taskStates);
  }

  /** Release resources held by the underlying publisher. */
  @Override
  public void close()
      throws IOException {
    this.hiveRegHandler.close();
  }
}
| 1,335 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/GobblinMultiTaskAttempt.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime;
import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.lang.management.MemoryMXBean;
import java.lang.management.MemoryUsage;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.Callable;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Predicate;
import org.apache.commons.math3.util.Pair;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.github.rholder.retry.RetryException;
import com.github.rholder.retry.Retryer;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Function;
import com.google.common.base.Optional;
import com.google.common.collect.Iterators;
import com.google.common.collect.Lists;
import com.typesafe.config.Config;
import javax.annotation.Nullable;
import lombok.Getter;
import lombok.Setter;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.broker.gobblin_scopes.GobblinScopeTypes;
import org.apache.gobblin.broker.gobblin_scopes.TaskScopeInstance;
import org.apache.gobblin.broker.iface.SharedResourcesBroker;
import org.apache.gobblin.broker.iface.SubscopedBrokerBuilder;
import org.apache.gobblin.commit.CommitStep;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.metastore.StateStore;
import org.apache.gobblin.metrics.Tag;
import org.apache.gobblin.metrics.event.EventSubmitter;
import org.apache.gobblin.metrics.event.JobEvent;
import org.apache.gobblin.runtime.api.TaskEventMetadataGenerator;
import org.apache.gobblin.runtime.task.TaskFactory;
import org.apache.gobblin.runtime.task.TaskIFaceWrapper;
import org.apache.gobblin.runtime.task.TaskUtils;
import org.apache.gobblin.runtime.troubleshooter.InMemoryIssueRepository;
import org.apache.gobblin.runtime.troubleshooter.IssueRepository;
import org.apache.gobblin.runtime.troubleshooter.TroubleshooterException;
import org.apache.gobblin.runtime.util.JobMetrics;
import org.apache.gobblin.runtime.util.TaskMetrics;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.Either;
import org.apache.gobblin.util.ExecutorsUtils;
import org.apache.gobblin.util.TaskEventMetadataUtils;
import org.apache.gobblin.util.executors.IteratorExecutor;
import org.apache.gobblin.util.retry.RetryerFactory;
import static org.apache.gobblin.util.retry.RetryerFactory.RETRY_INTERVAL_MS;
import static org.apache.gobblin.util.retry.RetryerFactory.RETRY_TIME_OUT_MS;
/**
* Attempt of running multiple {@link Task}s generated from a list of{@link WorkUnit}s.
* A {@link GobblinMultiTaskAttempt} is usually a unit of workunits that are assigned to one container.
*/
@Alpha
public class GobblinMultiTaskAttempt {
/**
 * An enumeration of policies on when a {@link GobblinMultiTaskAttempt} will be committed.
 */
public enum CommitPolicy {
  /**
   * Commit {@link GobblinMultiTaskAttempt} immediately after running is done.
   */
  IMMEDIATE,
  /**
   * Do not commit the {@link GobblinMultiTaskAttempt} here; leave committing to a
   * user-customized launcher (which must then be speculative-execution safe).
   */
  CUSTOMIZED
}
// Suffix of the marker entry written for tasks that succeeded in a failed attempt,
// letting a re-attempt skip them (see taskSuccessfulInPriorAttempt()).
private static final String TASK_STATE_STORE_SUCCESS_MARKER_SUFFIX = ".suc";
// Logger suffixed with the container id so concurrent attempts are distinguishable in logs.
private final Logger log;
// Work units this attempt is responsible for; consumed exactly once by runWorkUnits().
private final Iterator<WorkUnit> workUnits;
private final String jobId;
// Unique id of this attempt ("<class name>.<job id>"); used as the creator tag for job metrics.
private final String attemptId;
private final JobState jobState;
private final TaskStateTracker taskStateTracker;
private final TaskExecutor taskExecutor;
// Container id when running in a containerized environment; absent otherwise.
private final Optional<String> containerIdOptional;
// State store for persisting task states; absent for launchers that do not track task state.
private final Optional<StateStore<TaskState>> taskStateStoreOptional;
private final SharedResourcesBroker<GobblinScopeTypes> jobBroker;
private final TaskEventMetadataGenerator taskEventMetadataGenerator;
// Polled between latch waits in run(); returning true triggers graceful task interruption.
@Setter
private Predicate<GobblinMultiTaskAttempt> interruptionPredicate = (gmta) -> false;
// Tasks created by runWorkUnits(); empty until run() executes.
private List<Task> tasks;
// Set by shutdownTasks(); if flipped before task creation, runWorkUnits() short-circuits.
@Getter
private volatile AtomicBoolean stopped = new AtomicBoolean(false);
// Repository of troubleshooter issues attached to task states at commit time; may be null.
private final IssueRepository issueRepository;
/**
 * Additional commit steps that may be added by different launcher, and can be environment specific.
 * Usually it should be clean-up steps, which are always executed at the end of {@link #commit()}.
 */
private List<CommitStep> cleanupCommitSteps;
/**
 * Convenience constructor that collects issues into a fresh {@link InMemoryIssueRepository}.
 */
public GobblinMultiTaskAttempt(Iterator<WorkUnit> workUnits, String jobId, JobState jobState,
    TaskStateTracker taskStateTracker, TaskExecutor taskExecutor, Optional<String> containerIdOptional,
    Optional<StateStore<TaskState>> taskStateStoreOptional, SharedResourcesBroker<GobblinScopeTypes> jobBroker) {
  this(workUnits, jobId, jobState, taskStateTracker, taskExecutor, containerIdOptional, taskStateStoreOptional,
      jobBroker, new InMemoryIssueRepository());
}
/**
 * Primary constructor.
 *
 * @param workUnits work units to execute in this attempt; consumed lazily by {@link #run()}
 * @param jobId id of the job these work units belong to
 * @param jobState job-level state, consulted for configuration (e.g. commit pool size)
 * @param taskStateTracker tracker notified of task registration and completion
 * @param taskExecutor executor the created tasks are submitted to
 * @param containerIdOptional container id, when running containerized; absent otherwise
 * @param taskStateStoreOptional state store for persisting task states; may be absent
 * @param jobBroker job-scoped broker used to build per-task subscoped brokers
 * @param issueRepository repository whose issues are attached to task states at commit
 */
public GobblinMultiTaskAttempt(Iterator<WorkUnit> workUnits, String jobId, JobState jobState,
    TaskStateTracker taskStateTracker, TaskExecutor taskExecutor, Optional<String> containerIdOptional,
    Optional<StateStore<TaskState>> taskStateStoreOptional, SharedResourcesBroker<GobblinScopeTypes> jobBroker,
    IssueRepository issueRepository) {
  super();
  this.workUnits = workUnits;
  this.jobId = jobId;
  this.issueRepository = issueRepository;
  // Attempt id doubles as the creator tag for job-level metrics (see cleanMetrics()).
  this.attemptId = this.getClass().getName() + "." + this.jobId;
  this.jobState = jobState;
  this.taskStateTracker = taskStateTracker;
  this.taskExecutor = taskExecutor;
  this.containerIdOptional = containerIdOptional;
  this.taskStateStoreOptional = taskStateStoreOptional;
  this.log =
      LoggerFactory.getLogger(GobblinMultiTaskAttempt.class.getName() + "-" + containerIdOptional.or("noattempt"));
  this.jobBroker = jobBroker;
  this.tasks = new ArrayList<>();
  this.taskEventMetadataGenerator = TaskEventMetadataUtils.getTaskEventMetadataGenerator(jobState);
}
/**
 * Run {@link #workUnits} assigned in this attempt.
 *
 * <p>Submits each work unit as a task, then polls a latch every 10 seconds until all tasks
 * complete, the thread is interrupted, or {@link #interruptionPredicate} fires.</p>
 *
 * @throws IOException if at least one task could not be submitted (via {@code TaskCreationException})
 * @throws InterruptedException if interrupted while shutting down tasks after an interruption
 */
public void run()
    throws IOException, InterruptedException {
  if (!this.workUnits.hasNext()) {
    log.warn("No work units to run in container " + containerIdOptional.or(""));
    return;
  }
  // Latch counts up as tasks are created and down as they finish.
  CountUpAndDownLatch countDownLatch = new CountUpAndDownLatch(0);
  Pair<List<Task>, Boolean> executionResult = runWorkUnits(countDownLatch);
  this.tasks = executionResult.getFirst();
  // The task attempt has already been stopped and the task list is empty. This indicates that a cancel has been
  // invoked prior to creation of underlying Gobblin tasks. In a normal scenario, where a cancel is invoked after
  // successful task creation, the task list is guaranteed to be non-empty and we shouldn't enter the following block.
  if (this.tasks.isEmpty() && this.stopped.get()) {
    return;
  }
  // Indicating task submission failure, propagating exception as it should be noticeable to job launcher.
  // Submission failure could be task-creation failure, or state-tracker failed to be scheduled so that the actual
  // task isn't submitted into the executor.
  if (!executionResult.getSecond()) {
    throw new TaskCreationException("Failing in submitting at least one task before execution.");
  }
  log.info("Waiting for submitted tasks of job {} to complete in container {}...", jobId, containerIdOptional.or(""));
  try {
    while (countDownLatch.getCount() > 0) {
      // Allow an external condition (e.g. container preemption) to cut the wait short.
      if (this.interruptionPredicate.test(this)) {
        log.info("Interrupting task execution due to satisfied predicate.");
        interruptTaskExecution(countDownLatch);
        break;
      }
      long totalTasks = countDownLatch.totalParties.get();
      long runningTasks = countDownLatch.getCount();
      log.info(String.format("%d out of %d tasks of job %s are running in container %s. %d tasks finished.",
          runningTasks, totalTasks, jobId, containerIdOptional.or(""), totalTasks - runningTasks));
      // Re-check the predicate every 10 seconds; await returns true once all tasks are done.
      if (countDownLatch.await(10, TimeUnit.SECONDS)) {
        break;
      }
    }
  } catch (InterruptedException interrupt) {
    log.info("Job interrupted by InterruptedException.");
    interruptTaskExecution(countDownLatch);
  }
  log.info("All assigned tasks of job {} have completed in container {}", jobId, containerIdOptional.or(""));
}
/**
 * Attempt a graceful shutdown of all tasks; if they do not finish within 5 seconds,
 * forcefully shut down the task executor.
 */
private void interruptTaskExecution(CountDownLatch countDownLatch)
    throws InterruptedException {
  log.info("Job interrupted. Attempting a graceful shutdown of the job.");
  for (Task runningTask : this.tasks) {
    runningTask.shutdown();
  }
  boolean finishedGracefully = countDownLatch.await(5, TimeUnit.SECONDS);
  if (finishedGracefully) {
    return;
  }
  log.warn("Graceful shutdown of job timed out. Killing all outstanding tasks.");
  try {
    this.taskExecutor.shutDown();
  } catch (Throwable t) {
    throw new RuntimeException("Failed to shutdown task executor.", t);
  }
}
/**
 * Commit {@link #tasks} by 1. calling {@link Task#commit()} in parallel; 2. executing any additional
 * {@link CommitStep}; 3. persisting the task state store.
 *
 * <p>Issue reporting, state-store persistence and cleanup steps run in a {@code finally} block so
 * they execute even when committing fails or is interrupted.</p>
 *
 * @throws IOException if persisting task states reveals a failed task
 */
public void commit()
    throws IOException {
  if (this.tasks == null || this.tasks.isEmpty()) {
    log.warn("No tasks to be committed in container " + containerIdOptional.or(""));
    return;
  }
  // Each callable simply delegates to Task#commit(); lambdas replace the former
  // anonymous Function/Callable classes.
  Iterator<Callable<Void>> callableIterator =
      Iterators.transform(this.tasks.iterator(), task -> (Callable<Void>) () -> {
        task.commit();
        return null;
      });
  try {
    List<Either<Void, ExecutionException>> executionResults =
        new IteratorExecutor<>(callableIterator, this.getTaskCommitThreadPoolSize(),
            ExecutorsUtils.newDaemonThreadFactory(Optional.of(log), Optional.of("Task-committing-pool-%d")))
            .executeAndGetResults();
    IteratorExecutor.logFailures(executionResults, log, 10);
  } catch (InterruptedException ie) {
    log.error("Committing of tasks interrupted. Aborting.");
    // Restore the interrupt status so callers up the stack can observe the interruption.
    Thread.currentThread().interrupt();
    throw new RuntimeException(ie);
  } finally {
    reportTaskIssues();
    persistTaskStateStore();
    if (this.cleanupCommitSteps != null) {
      for (CommitStep cleanupCommitStep : this.cleanupCommitSteps) {
        log.info("Executing additional commit step.");
        cleanupCommitStep.execute();
      }
    }
  }
}
/**
 * Copy all issues collected by the troubleshooter into each task's state so they are persisted
 * alongside the task. Best-effort: failures are logged and swallowed.
 */
private void reportTaskIssues() {
  if (issueRepository == null) {
    log.info("Automatic troubleshooting is not configured for this task. "
        + "Make sure to pass issue repository to turn it on.");
    return;
  }
  try {
    for (Task task : this.tasks) {
      task.getTaskState().setTaskIssues(issueRepository.getAll());
    }
  } catch (TroubleshooterException e) {
    log.warn("Failed to save task issues", e);
  }
}
/**
 * Shut down all running tasks managed by this instance: signal shutdown to every task, wait up
 * to one second per task, then cancel whatever is still running and mark this attempt stopped.
 * TODO: Call this from the right place.
 */
public synchronized void shutdownTasks()
    throws InterruptedException {
  log.info("Shutting down tasks");
  this.tasks.forEach(Task::shutdown);
  for (Task task : this.tasks) {
    task.awaitShutdown(1000);
  }
  for (Task task : this.tasks) {
    boolean cancelled = task.cancel();
    if (cancelled) {
      log.info("Task {} cancelled.", task.getTaskId());
    } else {
      log.info("Task {} could not be cancelled.", task.getTaskId());
    }
  }
  this.stopped.set(true);
}
/**
 * Write each task's final state to the task state store, and fail this attempt with an
 * {@link IOException} if any task failed.
 *
 * <p>On failure, states of tasks that did succeed are additionally written under the
 * success-marker suffix so that a re-attempt can skip them
 * (see {@link #taskSuccessfulInPriorAttempt(String)}).</p>
 */
private void persistTaskStateStore()
    throws IOException {
  if (!this.taskStateStoreOptional.isPresent()) {
    log.info("Task state store does not exist.");
    return;
  }
  StateStore<TaskState> taskStateStore = this.taskStateStoreOptional.get();
  for (Task task : this.tasks) {
    String taskId = task.getTaskId();
    // Delete the task state file for the task if it already exists.
    // This usually happens if the task is retried upon failure.
    if (taskStateStore.exists(jobId, taskId + AbstractJobLauncher.TASK_STATE_STORE_TABLE_SUFFIX)) {
      taskStateStore.delete(jobId, taskId + AbstractJobLauncher.TASK_STATE_STORE_TABLE_SUFFIX);
    }
  }
  boolean hasTaskFailure = false;
  for (Task task : tasks) {
    log.info("Writing task state for task " + task.getTaskId());
    taskStateStore.put(task.getJobId(), task.getTaskId() + AbstractJobLauncher.TASK_STATE_STORE_TABLE_SUFFIX,
        task.getTaskState());
    if (task.getTaskState().getWorkingState() == WorkUnitState.WorkingState.FAILED) {
      hasTaskFailure = true;
    }
  }
  if (hasTaskFailure) {
    String errorMsg = String.format("Tasks in container %s failed", containerIdOptional.or(""));
    for (Task task : tasks) {
      // Surface the last recorded task failure exception in the error message, if any.
      if (task.getTaskState().contains(ConfigurationKeys.TASK_FAILURE_EXCEPTION_KEY)) {
        errorMsg = String.format("Task failed: %s (Gobblin task id %s, container id %s)",
            task.getTaskState().getProp(ConfigurationKeys.TASK_FAILURE_EXCEPTION_KEY),
            task.getTaskId(), containerIdOptional.or(""));
      }
      // If there are task failures then the tasks may be reattempted. Save a copy of the task state that is used
      // to filter out successful tasks on subsequent attempts.
      if (task.getTaskState().getWorkingState() == WorkUnitState.WorkingState.SUCCESSFUL
          || task.getTaskState().getWorkingState() == WorkUnitState.WorkingState.COMMITTED) {
        taskStateStore
            .put(task.getJobId(), task.getTaskId() + TASK_STATE_STORE_SUCCESS_MARKER_SUFFIX, task.getTaskState());
      }
    }
    throw new IOException(errorMsg);
  }
}
/**
 * @return {@code true} only when every task reports it is safe for speculative execution
 */
public boolean isSpeculativeExecutionSafe() {
  for (Task task : tasks) {
    if (task.isSpeculativeExecutionSafe()) {
      continue;
    }
    log.info("One task is not safe for speculative execution.");
    return false;
  }
  log.info("All tasks are safe for speculative execution.");
  return true;
}
/**
 * Size of the thread pool used to commit tasks in parallel; configurable via
 * {@link ConfigurationKeys#TASK_EXECUTOR_THREADPOOL_SIZE_KEY}.
 * Note: redundant {@code final} modifier dropped — private methods cannot be overridden.
 */
private int getTaskCommitThreadPoolSize() {
  return Integer.parseInt(this.jobState.getProp(ConfigurationKeys.TASK_EXECUTOR_THREADPOOL_SIZE_KEY,
      Integer.toString(ConfigurationKeys.DEFAULT_TASK_EXECUTOR_THREADPOOL_SIZE)));
}
/**
 * Register an environment-specific cleanup step to be executed at the end of {@link #commit()}.
 * The backing list is created lazily on first use.
 */
public void addCleanupCommitStep(CommitStep commitStep) {
  if (this.cleanupCommitSteps != null) {
    this.cleanupCommitSteps.add(commitStep);
  } else {
    this.cleanupCommitSteps = Lists.newArrayList(commitStep);
  }
}
/**
 * Determine whether the task completed successfully in a prior attempt by consulting the
 * success marker in the task state store.
 *
 * @param taskId task id to check
 * @return whether the task was processed successfully in a prior attempt
 */
private boolean taskSuccessfulInPriorAttempt(String taskId) {
  if (!this.taskStateStoreOptional.isPresent()) {
    return false;
  }
  StateStore<TaskState> stateStore = this.taskStateStoreOptional.get();
  try {
    boolean succeededBefore = stateStore.exists(jobId, taskId + TASK_STATE_STORE_SUCCESS_MARKER_SUFFIX);
    if (succeededBefore) {
      // Skip tasks that executed successfully in a previous attempt.
      log.info("Skipping task {} that successfully executed in a prior attempt.", taskId);
    }
    return succeededBefore;
  } catch (IOException e) {
    // An error while consulting the state store is treated as "not previously successful".
    return false;
  }
}
/**
 * Run a given list of {@link WorkUnit}s of a job.
 *
 * <p>
 * This method assumes that the given list of {@link WorkUnit}s have already been flattened and
 * each {@link WorkUnit} contains the task ID in the property {@link ConfigurationKeys#TASK_ID_KEY}.
 * </p>
 *
 * @param countDownLatch a {@link java.util.concurrent.CountDownLatch} waited on for job completion
 * @return a list of {@link Task}s from the {@link WorkUnit}s, as well as if there's a failure in task creation
 * which should be handled separately to avoid silently starving on certain workunit.
 */
private synchronized Pair<List<Task>, Boolean> runWorkUnits(CountUpAndDownLatch countDownLatch) {
  List<Task> tasks = Lists.newArrayList();
  //Has the task-attempt already been cancelled? This can happen for instance when a cancellation has been invoked on
  // the GobblinMultiTaskAttempt instance (e.g. in the case of Helix task cancellation) before the Gobblin tasks
  // have been submitted to the underlying task executor.
  if (this.stopped.get()) {
    return new Pair<>(tasks, false);
  }
  // A flag indicating if there are any tasks not submitted successfully.
  // Caller of this method should handle tasks with submission failures accordingly.
  boolean areAllTasksSubmitted = true;
  while (this.workUnits.hasNext()) {
    WorkUnit workUnit = this.workUnits.next();
    String taskId = workUnit.getProp(ConfigurationKeys.TASK_ID_KEY);
    // skip tasks that executed successfully in a prior attempt
    if (taskSuccessfulInPriorAttempt(taskId)) {
      continue;
    }
    // Each task gets its own subscoped broker keyed by task id.
    SubscopedBrokerBuilder<GobblinScopeTypes, ?> taskBrokerBuilder =
        this.jobBroker.newSubscopedBuilder(new TaskScopeInstance(taskId));
    WorkUnitState workUnitState = new WorkUnitState(workUnit, this.jobState, taskBrokerBuilder);
    workUnitState.setId(taskId);
    workUnitState.setProp(ConfigurationKeys.JOB_ID_KEY, this.jobId);
    workUnitState.setProp(ConfigurationKeys.TASK_ID_KEY, taskId);
    workUnitState.setProp(ConfigurationKeys.TASK_START_TIME_MILLIS_KEY, Long.toString(System.currentTimeMillis()));
    if (this.containerIdOptional.isPresent()) {
      workUnitState.setProp(ConfigurationKeys.TASK_ATTEMPT_ID_KEY, this.containerIdOptional.get());
    }
    // Create a new task from the work unit and submit the task to run.
    // If an exception occurs here then the count down latch is decremented
    // to avoid being stuck waiting for a task that was not created and submitted successfully.
    Task task = null;
    try {
      // Count up first so the latch accounts for this task even if creation fails midway.
      countDownLatch.countUp();
      task = createTaskWithRetry(workUnitState, countDownLatch);
      this.taskStateTracker.registerNewTask(task);
      task.setTaskFuture(this.taskExecutor.submit(task));
      tasks.add(task);
    } catch (Throwable e) {
      if (e instanceof OutOfMemoryError) {
        log.error("Encountering memory error in task creation/execution stage, please investigate memory usage:", e);
        printMemoryUsage();
      }
      if (task == null) {
        if (e instanceof RetryException) {
          // Indicating task being null due to failure in creation even after retrying.
          areAllTasksSubmitted = false;
        }
        // task could not be created, so directly count down
        countDownLatch.countDown();
        log.error("Could not create task for workunit {}", workUnit, e);
      } else if (!task.hasTaskFuture()) {
        // Task was created and may have been registered, but not submitted, so call the
        // task state tracker task run completion directly since the task cancel does nothing if not submitted
        this.taskStateTracker.onTaskRunCompletion(task);
        areAllTasksSubmitted = false;
        log.error("Could not submit task for workunit {}", workUnit, e);
      } else {
        // task was created and submitted, but failed later, so cancel the task to decrement the CountDownLatch
        task.cancel();
        log.error("Failure after task submitted for workunit {}", workUnit, e);
      }
    }
  }
  // Emit a TASKS_SUBMITTED event carrying the number of tasks actually submitted.
  EventSubmitter.Builder eventSubmitterBuilder = new EventSubmitter.Builder(JobMetrics.get(this.jobId, new JobMetrics.CreatorTag(this.attemptId)).getMetricContext(),
      "gobblin.runtime");
  eventSubmitterBuilder.addMetadata(this.taskEventMetadataGenerator.getMetadata(jobState, JobEvent.TASKS_SUBMITTED));
  eventSubmitterBuilder.build().submit(JobEvent.TASKS_SUBMITTED, "tasksCount", Integer.toString(tasks.size()));
  return new Pair<>(tasks, areAllTasksSubmitted);
}
/** Log current heap and non-heap memory usage, for diagnosing {@link OutOfMemoryError}s. */
private void printMemoryUsage() {
  MemoryMXBean memoryBean = ManagementFactory.getMemoryMXBean();
  logMemoryUsage("Heap Memory", memoryBean.getHeapMemoryUsage());
  logMemoryUsage("Non-heap Memory", memoryBean.getNonHeapMemoryUsage());
}

/** Log one titled, column-aligned block of init/used/committed/max figures. */
private void logMemoryUsage(String title, MemoryUsage usage) {
  String format = "%-15s%-15s%-15s%-15s";
  this.log.info(title);
  this.log.info(String.format(format, "init", "used", "Committed", "max"));
  this.log.info(String.format(format, usage.getInit(), usage.getUsed(), usage.getCommitted(), usage.getMax()));
}
/** Build the runnable {@link Task} for a work unit, honoring a custom {@link TaskFactory} when configured. */
private Task createTaskRunnable(WorkUnitState workUnitState, CountDownLatch countDownLatch) {
  final TaskContext taskContext = new TaskContext(workUnitState);
  Optional<TaskFactory> taskFactoryOpt = TaskUtils.getTaskFactory(workUnitState);
  if (!taskFactoryOpt.isPresent()) {
    return new Task(taskContext, this.taskStateTracker, this.taskExecutor, Optional.of(countDownLatch));
  }
  return new TaskIFaceWrapper(taskFactoryOpt.get().createTask(taskContext), taskContext, countDownLatch,
      this.taskStateTracker);
}
/**
 * Create a {@link Task} with retries: task initialization may touch flaky external systems
 * that can heal on retry, so wrap creation in a {@link Retryer} for fault tolerance.
 *
 * @param workUnitState state of the work unit the task is created for
 * @param countDownLatch latch the created task counts down on completion
 * @return the created task
 * @throws RetryException if creation still fails after the configured retries
 */
@VisibleForTesting
Task createTaskWithRetry(WorkUnitState workUnitState, CountDownLatch countDownLatch) throws RetryException {
  // Default retry policy: retry for up to 1 minute, every 2 seconds; job properties may override.
  Properties defaultRetryConfig = new Properties();
  defaultRetryConfig.setProperty(RETRY_TIME_OUT_MS, TimeUnit.MINUTES.toMillis(1L) + "");
  defaultRetryConfig.setProperty(RETRY_INTERVAL_MS, TimeUnit.SECONDS.toMillis(2L) + "");
  Config config = ConfigUtils.propertiesToConfig(this.jobState.getProperties())
      .withFallback(ConfigUtils.propertiesToConfig(defaultRetryConfig));
  Retryer<Task> retryer = RetryerFactory.newInstance(config);
  // An "effectively final" variable for counting how many retries have been done, for logging.
  final AtomicInteger counter = new AtomicInteger(0);
  try {
    // Lambda replaces the former anonymous Callable; incrementAndGet == increment-then-get.
    return retryer.call(() -> {
      log.info(String.format("Task creation attempt %s", counter.incrementAndGet()));
      return createTaskRunnable(workUnitState, countDownLatch);
    });
  } catch (ExecutionException ee) {
    // Former message ended with a dangling ", "; fixed while preserving the cause.
    throw new RuntimeException("Failure in executing retryer", ee);
  }
}
/**
 * Run all work units and, under {@link CommitPolicy#IMMEDIATE}, commit them directly; otherwise
 * require the task context to be safe for speculative execution.
 */
public void runAndOptionallyCommitTaskAttempt(CommitPolicy multiTaskAttemptCommitPolicy)
    throws IOException, InterruptedException {
  run();
  if (multiTaskAttemptCommitPolicy.equals(GobblinMultiTaskAttempt.CommitPolicy.IMMEDIATE)) {
    this.log.info("Will commit tasks directly.");
    commit();
    return;
  }
  if (!isSpeculativeExecutionSafe()) {
    throw new RuntimeException(
        "Speculative execution is enabled. However, the task context is not safe for speculative execution.");
  }
}
/**
 * <p> During the task execution, the fork/task instances will create metric contexts (fork, task, job, container)
 * along the hierarchy up to the root metric context. Although root metric context has a weak reference to
 * those metric contexts, they are meanwhile cached by GobblinMetricsRegistry. Here we will remove all those
 * strong references from the cache so the contexts can be reclaimed by GC when the JVM runs low on memory.
 *
 * <p> Task metrics are cleaned by iterating all tasks. Job level metrics cleaning needs some caveat: the
 * removal only succeeds when initiated by the creator of the job-level metrics, i.e. a task trying to remove a
 * {@link JobMetrics} created by others has no effect. This is handled by
 * {@link JobMetrics#attemptRemove(String, Tag)}.
 */
public void cleanMetrics() {
  for (Task task : tasks) {
    TaskMetrics.remove(task);
    JobMetrics.attemptRemove(this.jobId, new JobMetrics.CreatorTag(task.getTaskId()));
  }
  JobMetrics.attemptRemove(this.jobId, new JobMetrics.CreatorTag(this.attemptId));
}
/**
 * FIXME this method is provided for backwards compatibility in the LocalJobLauncher since it does
 * not access the task state store. This should be addressed as all task executions should be
 * updating the task state.
 *
 * @param jobContext context supplying job id, state, broker and issue repository
 * @param workUnits the work units to run
 * @param multiTaskAttemptCommitPolicy whether to commit immediately after running
 * @return the attempt, after it has run (and possibly committed)
 */
public static GobblinMultiTaskAttempt runWorkUnits(JobContext jobContext, Iterator<WorkUnit> workUnits,
    TaskStateTracker taskStateTracker, TaskExecutor taskExecutor, CommitPolicy multiTaskAttemptCommitPolicy)
    throws IOException, InterruptedException {
  GobblinMultiTaskAttempt multiTaskAttempt =
      new GobblinMultiTaskAttempt(workUnits, jobContext.getJobId(), jobContext.getJobState(), taskStateTracker,
          taskExecutor, Optional.<String>absent(), Optional.<StateStore<TaskState>>absent(),
          jobContext.getJobBroker(), jobContext.getIssueRepository());
  multiTaskAttempt.runAndOptionallyCommitTaskAttempt(multiTaskAttemptCommitPolicy);
  return multiTaskAttempt;
}
/**
 * Run a given list of {@link WorkUnit}s of a job.
 *
 * <p>
 * This method creates {@link GobblinMultiTaskAttempt} to actually run the {@link Task}s of the {@link WorkUnit}s, and optionally commit.
 * </p>
 *
 * @param jobId the job ID
 * @param containerId id of the container running this attempt
 * @param jobState job-level state
 * @param workUnits the given list of {@link WorkUnit}s to submit to run
 * @param taskStateTracker a {@link TaskStateTracker} for task state tracking
 * @param taskExecutor a {@link TaskExecutor} for task execution
 * @param taskStateStore a {@link StateStore} for storing {@link TaskState}s
 * @param multiTaskAttemptCommitPolicy {@link GobblinMultiTaskAttempt.CommitPolicy} for committing {@link GobblinMultiTaskAttempt}
 * @param jobBroker job-scoped broker used to build per-task brokers
 * @param issueRepository repository whose issues are attached to task states
 * @param interruptionPredicate polled during execution; returning true interrupts the attempt
 * @throws IOException if there's something wrong with any IO operations
 * @throws InterruptedException if the task execution gets cancelled
 */
public static GobblinMultiTaskAttempt runWorkUnits(String jobId, String containerId, JobState jobState,
    List<WorkUnit> workUnits, TaskStateTracker taskStateTracker, TaskExecutor taskExecutor,
    StateStore<TaskState> taskStateStore, CommitPolicy multiTaskAttemptCommitPolicy,
    SharedResourcesBroker<GobblinScopeTypes> jobBroker, IssueRepository issueRepository,
    Predicate<GobblinMultiTaskAttempt> interruptionPredicate)
    throws IOException, InterruptedException {
  // dump the work unit if tracking logs are enabled
  if (jobState.getPropAsBoolean(ConfigurationKeys.WORK_UNIT_ENABLE_TRACKING_LOGS)) {
    Logger log = LoggerFactory.getLogger(GobblinMultiTaskAttempt.class.getName());
    log.info("Work unit tracking log: {}", workUnits);
  }
  GobblinMultiTaskAttempt multiTaskAttempt =
      new GobblinMultiTaskAttempt(workUnits.iterator(), jobId, jobState, taskStateTracker, taskExecutor,
          Optional.of(containerId), Optional.of(taskStateStore), jobBroker, issueRepository);
  multiTaskAttempt.setInterruptionPredicate(interruptionPredicate);
  multiTaskAttempt.runAndOptionallyCommitTaskAttempt(multiTaskAttemptCommitPolicy);
  return multiTaskAttempt;
}
/**
 * Get the number of {@link Task}s created so far.
 *
 * @return the current size of the internal task list
 */
public int getNumTasksCreated() {
return this.tasks.size();
}
}
| 1,336 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/NoopEventMetadataGenerator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime;
import java.util.Map;
import com.google.common.collect.ImmutableMap;
import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.metrics.event.EventName;
import org.apache.gobblin.runtime.api.EventMetadataGenerator;
@Alias("noop")
public class NoopEventMetadataGenerator implements EventMetadataGenerator{
public Map<String, String> getMetadata(JobContext jobContext, EventName eventName) {
return ImmutableMap.of();
}
}
| 1,337 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/CombinedWorkUnitAndDatasetStateGenerator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import org.apache.gobblin.configuration.CombinedWorkUnitAndDatasetState;
import org.apache.gobblin.configuration.CombinedWorkUnitAndDatasetStateFunctional;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.metastore.DatasetStateStore;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableMap;
/**
 * A class that returns previous {@link WorkUnitState}s and {@link JobState.DatasetState}s from the state store
 * as a {@link CombinedWorkUnitAndDatasetState}.
 */
public class CombinedWorkUnitAndDatasetStateGenerator implements CombinedWorkUnitAndDatasetStateFunctional {

  // Both fields are set once in the constructor and never reassigned.
  private final DatasetStateStore datasetStateStore;
  private final String jobName;

  /**
   * Constructor.
   *
   * @param datasetStateStore the dataset state store to read previous states from
   * @param jobName the job name whose states are looked up
   */
  public CombinedWorkUnitAndDatasetStateGenerator(DatasetStateStore datasetStateStore, String jobName) {
    this.datasetStateStore = datasetStateStore;
    this.jobName = jobName;
  }

  /**
   * Returns the previous work unit states and dataset states for this generator's job.
   *
   * <p>If {@code datasetUrn} is null or empty, the latest states of all dataset URNs of the job are
   * returned; otherwise only the latest state for the given URN is returned (empty results when the
   * state store has no state for that URN).</p>
   *
   * @param datasetUrn the dataset URN to look up, or null/empty to look up all datasets of the job
   * @return the combined work unit states and dataset states
   * @throws Exception if the underlying state store lookup fails
   */
  @Override
  public CombinedWorkUnitAndDatasetState getCombinedWorkUnitAndDatasetState(String datasetUrn)
      throws Exception {
    Map<String, JobState.DatasetState> datasetStateMap = ImmutableMap.of();
    List<WorkUnitState> workUnitStates = new ArrayList<>();
    if (Strings.isNullOrEmpty(datasetUrn)) {
      // No URN given: fetch the latest state for every dataset of this job.
      datasetStateMap = this.datasetStateStore.getLatestDatasetStatesByUrns(this.jobName);
      workUnitStates = JobState.workUnitStatesFromDatasetStates(datasetStateMap.values());
    } else {
      JobState.DatasetState datasetState =
          (JobState.DatasetState) this.datasetStateStore.getLatestDatasetState(this.jobName, datasetUrn);
      if (datasetState != null) {
        datasetStateMap = ImmutableMap.of(datasetUrn, datasetState);
        workUnitStates = JobState.workUnitStatesFromDatasetStates(Arrays.asList(datasetState));
      }
    }
    return new CombinedWorkUnitAndDatasetState(workUnitStates, datasetStateMap);
  }
}
| 1,338 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/LimitingExtractorDecorator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime;
import java.io.IOException;
import java.util.List;
import java.util.Properties;
import java.util.Set;
import java.util.TreeMap;
import com.google.common.base.Splitter;
import com.google.common.collect.ImmutableMap;
import com.google.common.io.Closer;
import org.apache.gobblin.metrics.event.EventSubmitter;
import org.apache.gobblin.runtime.util.TaskMetrics;
import org.apache.gobblin.source.extractor.DataRecordException;
import org.apache.gobblin.source.extractor.Extractor;
import org.apache.gobblin.source.extractor.limiter.LimiterConfigurationKeys;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.util.Decorator;
import org.apache.gobblin.util.limiter.Limiter;
/**
 * A decorator class for {@link Extractor} that uses a {@link Limiter} on data record extraction.
 *
 * <p>
 * The fact that the {@link Limiter} is passed in as a parameter to the constructor
 * {@link LimitingExtractorDecorator#LimitingExtractorDecorator(Extractor, Limiter, TaskState)}
 * means multiple {@link LimitingExtractorDecorator}s can share a single {@link Limiter}
 * or each individual {@link LimitingExtractorDecorator} has its own {@link Limiter}.
 * The first case is useful for throttling above the task level, e.g., at the job level.
 * </p>
 *
 * @param <S> output schema type
 * @param <D> output record type
 *
 * @author Yinan Li
 */
public class LimitingExtractorDecorator<S, D> implements Extractor<S, D>, Decorator {

  public static final String LIMITER_STOP_EVENT_NAME = "PrematureExtractorStop";
  public static final String LIMITER_STOP_CAUSE_KEY = "limiterStopCause";
  public static final String LIMITER_STOP_CAUSE_VALUE = "LimiterPermitAcquireFailure";

  private final Extractor<S, D> extractor;
  private final Limiter limiter;
  private final TaskState taskState;
  private final EventSubmitter eventSubmitter;

  public LimitingExtractorDecorator(Extractor<S, D> extractor, Limiter limiter, TaskState state) {
    this.extractor = extractor;
    this.limiter = limiter;
    this.taskState = state;
    // The limiter's lifecycle is tied to this decorator: started here, stopped in close().
    this.limiter.start();
    this.eventSubmitter =
        new EventSubmitter.Builder(TaskMetrics.get(taskState).getMetricContext(), "gobblin.runtime.task").build();
  }

  @Override
  public Object getDecoratedObject() {
    return this.extractor;
  }

  @Override
  public S getSchema() throws IOException {
    return this.extractor.getSchema();
  }

  /**
   * Compose metadata to report when the limiter fails to acquire a permit.
   * The metadata key list is passed from the source layer. A prefix match is used because some work units
   * ({@link org.apache.gobblin.source.workunit.MultiWorkUnit}) have a packing strategy which can append
   * an additional string after the key name.
   *
   * @return String map representing all the metadata to report; empty if no report keys are configured
   */
  private ImmutableMap<String, String> getLimiterStopMetadata() {
    WorkUnit workUnit = this.taskState.getWorkunit();
    Properties properties = workUnit.getProperties();
    String metadataKeyList = properties.getProperty(LimiterConfigurationKeys.LIMITER_REPORT_KEY_LIST,
        LimiterConfigurationKeys.DEFAULT_LIMITER_REPORT_KEY_LIST);
    List<String> keyList = Splitter.on(',').omitEmptyStrings().trimResults()
        .splitToList(metadataKeyList);
    if (keyList.isEmpty()) {
      return ImmutableMap.of();
    }
    // Sort property names so each configured key can be prefix-matched via a sub-map range scan.
    Set<String> names = properties.stringPropertyNames();
    TreeMap<String, String> orderedProperties = new TreeMap<>();
    for (String name : names) {
      orderedProperties.put(name, properties.getProperty(name));
    }
    ImmutableMap.Builder<String, String> builder = ImmutableMap.builder();
    for (String oldKey : keyList) {
      // [oldKey, oldKey + Character.MAX_VALUE) covers every property name starting with oldKey.
      builder.putAll(orderedProperties.subMap(oldKey, oldKey + Character.MAX_VALUE));
    }
    builder.put(LIMITER_STOP_CAUSE_KEY, LIMITER_STOP_CAUSE_VALUE);
    return builder.build();
  }

  /** Submits a {@value #LIMITER_STOP_EVENT_NAME} event, but only when there is metadata to report. */
  private void submitLimiterStopMetadataEvents() {
    ImmutableMap<String, String> metaData = this.getLimiterStopMetadata();
    if (!metaData.isEmpty()) {
      this.eventSubmitter.submit(LIMITER_STOP_EVENT_NAME, metaData);
    }
  }

  /**
   * Reads the next record, first acquiring a permit from the {@link Limiter}.
   *
   * @return the next record, or {@code null} when the limiter refuses a permit (extraction stops early)
   * @throws IOException if reading fails or the thread is interrupted while waiting for a permit
   */
  @Override
  public D readRecord(@Deprecated D reuse) throws DataRecordException, IOException {
    try (Closer closer = Closer.create()) {
      if (closer.register(this.limiter.acquirePermits(1)) != null) {
        return this.extractor.readRecord(reuse);
      }
      // Permit denied: report why extraction is stopping prematurely, then signal end-of-data.
      submitLimiterStopMetadataEvents();
      return null;
    } catch (InterruptedException ie) {
      // Restore the interrupt flag before translating to an IOException.
      Thread.currentThread().interrupt();
      throw new IOException("Interrupted while trying to acquire the next permit", ie);
    }
  }

  @Override
  public long getExpectedRecordCount() {
    return this.extractor.getExpectedRecordCount();
  }

  @Deprecated
  @Override
  public long getHighWatermark() {
    return this.extractor.getHighWatermark();
  }

  @Override
  public void close() throws IOException {
    try {
      this.extractor.close();
    } finally {
      // Stop the limiter even if closing the wrapped extractor throws.
      this.limiter.stop();
    }
  }
}
| 1,339 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/AbstractTaskStateTracker.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime;
import java.util.Properties;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.slf4j.Logger;
import org.slf4j.MDC;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.AbstractIdleService;
import com.google.common.util.concurrent.ListeningScheduledExecutorService;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.metrics.GobblinMetrics;
import org.apache.gobblin.util.ExecutorsUtils;
/**
 * An abstract implementation of {@link TaskStateTracker} that provides basic common functionality for
 * platform-specific implementations.
 *
 * <p>This base class owns the scheduled thread pool used to periodically run {@link TaskMetricsUpdater}s;
 * the pool's lifecycle follows the {@link AbstractIdleService} startUp/shutDown hooks.</p>
 *
 * @author Yinan Li
 */
public abstract class AbstractTaskStateTracker extends AbstractIdleService implements TaskStateTracker {
// This is used to schedule and run task metrics updaters
private final ListeningScheduledExecutorService taskMetricsUpdaterExecutor;
private final Logger logger;
/**
 * @param coreThreadPoolSize core size of the metrics-updater scheduler thread pool; must be positive
 * @param logger logger supplied by the concrete implementation; also used by the executor's logging decorator
 */
public AbstractTaskStateTracker(int coreThreadPoolSize, Logger logger) {
Preconditions.checkArgument(coreThreadPoolSize > 0, "Thread pool size should be positive");
this.taskMetricsUpdaterExecutor = ExecutorsUtils.loggingDecorator(
new ScheduledThreadPoolExecutor(coreThreadPoolSize,
ExecutorsUtils.newThreadFactory(Optional.of(logger), Optional.of("TaskStateTracker-%d"))));
this.logger = logger;
}
/**
 * Builds a tracker whose pool size is read from
 * {@link ConfigurationKeys#TASK_STATE_TRACKER_THREAD_POOL_CORE_SIZE_KEY} in the given {@link Properties},
 * falling back to the configured default.
 */
public AbstractTaskStateTracker(Properties properties, Logger logger) {
this(Integer.parseInt(properties.getProperty(ConfigurationKeys.TASK_STATE_TRACKER_THREAD_POOL_CORE_SIZE_KEY,
Integer.toString(ConfigurationKeys.DEFAULT_TASK_STATE_TRACKER_THREAD_POOL_CORE_SIZE))), logger);
}
/**
 * Builds a tracker whose pool size is read from
 * {@link ConfigurationKeys#TASK_STATE_TRACKER_THREAD_POOL_CORE_SIZE_KEY} in the given Hadoop
 * {@link Configuration}, falling back to the configured default.
 */
public AbstractTaskStateTracker(Configuration configuration, Logger logger) {
this(Integer.parseInt(configuration.get(ConfigurationKeys.TASK_STATE_TRACKER_THREAD_POOL_CORE_SIZE_KEY,
Integer.toString(ConfigurationKeys.DEFAULT_TASK_STATE_TRACKER_THREAD_POOL_CORE_SIZE))), logger);
}
@Override
protected void startUp() throws Exception {
this.logger.info("Starting the task state tracker");
}
@Override
protected void shutDown() throws Exception {
this.logger.info("Stopping the task state tracker");
// Shut down the metrics-updater scheduler; pending updater runs are cancelled via ExecutorsUtils.
ExecutorsUtils.shutdownExecutorService(this.taskMetricsUpdaterExecutor, Optional.of(this.logger));
}
/**
 * Schedule a {@link TaskMetricsUpdater}.
 *
 * @param taskMetricsUpdater the {@link TaskMetricsUpdater} to schedule
 * @param task the {@link Task} that the {@link TaskMetricsUpdater} is associated to
 * @return a {@link java.util.concurrent.ScheduledFuture} corresponding to the scheduled {@link TaskMetricsUpdater}
 */
protected ScheduledFuture<?> scheduleTaskMetricsUpdater(Runnable taskMetricsUpdater, Task task) {
// The task's status reporting interval serves as both the initial delay and the period.
return this.taskMetricsUpdaterExecutor.scheduleAtFixedRate(taskMetricsUpdater,
task.getTaskContext().getStatusReportingInterval(), task.getTaskContext().getStatusReportingInterval(),
TimeUnit.MILLISECONDS);
}
/**
 * A base class providing a default implementation for updating task metrics.
 *
 * @deprecated see {@link org.apache.gobblin.instrumented.writer.InstrumentedDataWriterBase}.
 */
@Deprecated
protected class TaskMetricsUpdater implements Runnable {
protected final Task task;
public TaskMetricsUpdater(Task task) {
this.task = task;
}
@Override
public void run() {
// Tag log statements from this run with the task key so they can be correlated per task.
MDC.put(ConfigurationKeys.TASK_KEY_KEY, task.getTaskKey());
updateTaskMetrics();
}
protected void updateTaskMetrics() {
// Only update record/byte metrics when metrics are enabled for this work unit.
if (GobblinMetrics.isEnabled(this.task.getTaskState().getWorkunit())) {
this.task.updateRecordMetrics();
this.task.updateByteMetrics();
}
}
}
}
| 1,340 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/EventMetadataUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime;
import java.util.List;
import org.apache.gobblin.configuration.ConfigurationKeys;
/**
 * Utility methods for generating event metadata.
 */
public class EventMetadataUtils {
  public static final String TASK_FAILURE_MESSAGE_KEY = "task.failure.message";
  public static final String JOB_FAILURE_MESSAGE_KEY = "job.failure.message";

  /** Utility class; prevent instantiation. */
  private EventMetadataUtils() {
  }

  /**
   * Get the number of records written by all the writers.
   *
   * @param taskStates the task states to aggregate over
   * @return sum of the writer records-written count across all tasks (0 when absent)
   */
  public static long getProcessedCount(List<TaskState> taskStates) {
    long value = 0;
    for (TaskState taskState : taskStates) {
      value += taskState.getPropAsLong(ConfigurationKeys.WRITER_RECORDS_WRITTEN, 0);
    }
    return value;
  }

  /**
   * Get job-level failure messages.
   *
   * @param jobState the job state to read failure information from
   * @return the failure message and failure exception from the job state, comma-separated
   *         (empty string when neither is present)
   */
  public static String getJobFailureExceptions(JobState jobState) {
    // StringBuilder suffices: the buffer never escapes this thread.
    StringBuilder sb = new StringBuilder();
    if (jobState.contains(JOB_FAILURE_MESSAGE_KEY)) {
      sb.append(jobState.getProp(JOB_FAILURE_MESSAGE_KEY));
    }
    if (jobState.contains(ConfigurationKeys.JOB_FAILURE_EXCEPTION_KEY)) {
      if (sb.length() != 0) {
        sb.append(",");
      }
      sb.append(jobState.getProp(ConfigurationKeys.JOB_FAILURE_EXCEPTION_KEY));
    }
    return sb.toString();
  }

  /**
   * Get task-level failure messages.
   *
   * @param taskStates the task states to read failure information from
   * @return the concatenated failure messages from all the task states (empty string when none)
   */
  public static String getTaskFailureExceptions(List<TaskState> taskStates) {
    StringBuilder sb = new StringBuilder();
    // Add task failure messages in a group followed by task failure exceptions
    appendTaskStateValues(taskStates, sb, TASK_FAILURE_MESSAGE_KEY);
    appendTaskStateValues(taskStates, sb, ConfigurationKeys.TASK_FAILURE_EXCEPTION_KEY);
    return sb.toString();
  }

  /**
   * Append values for the given key from all {@link TaskState}s, comma-separated.
   *
   * @param taskStates the task states to read from
   * @param sb a {@link StringBuilder} to hold the output
   * @param key the key of the values to retrieve
   */
  private static void appendTaskStateValues(List<TaskState> taskStates, StringBuilder sb, String key) {
    for (TaskState taskState : taskStates) {
      if (taskState.contains(key)) {
        if (sb.length() != 0) {
          sb.append(",");
        }
        sb.append(taskState.getProp(key));
      }
    }
  }
}
| 1,341 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/TaskContext.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import com.google.common.base.Enums;
import com.google.common.base.Splitter;
import com.google.common.collect.Lists;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.Converter;
import org.apache.gobblin.fork.ForkOperator;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.instrumented.converter.InstrumentedConverterDecorator;
import org.apache.gobblin.instrumented.fork.InstrumentedForkOperatorDecorator;
import org.apache.gobblin.publisher.TaskPublisher;
import org.apache.gobblin.publisher.TaskPublisherBuilderFactory;
import org.apache.gobblin.qualitychecker.row.RowLevelPolicyChecker;
import org.apache.gobblin.qualitychecker.row.RowLevelPolicyCheckerBuilderFactory;
import org.apache.gobblin.qualitychecker.task.TaskLevelPolicyCheckResults;
import org.apache.gobblin.qualitychecker.task.TaskLevelPolicyChecker;
import org.apache.gobblin.qualitychecker.task.TaskLevelPolicyCheckerBuilderFactory;
import org.apache.gobblin.records.RecordStreamProcessor;
import org.apache.gobblin.runtime.util.TaskMetrics;
import org.apache.gobblin.source.Source;
import org.apache.gobblin.source.extractor.Extractor;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.util.limiter.DefaultLimiterFactory;
import org.apache.gobblin.util.limiter.Limiter;
import org.apache.gobblin.util.limiter.NonRefillableLimiter;
import org.apache.gobblin.util.ForkOperatorUtils;
import org.apache.gobblin.writer.DataWriterBuilder;
import org.apache.gobblin.writer.Destination;
import org.apache.gobblin.writer.WatermarkStorage;
import org.apache.gobblin.writer.WriterOutputFormat;
/**
* A class containing all necessary information to construct and run a {@link Task}.
*
* @author Yinan Li
*/
@Slf4j
public class TaskContext {
private final TaskState taskState;
private final TaskMetrics taskMetrics;
private Extractor rawSourceExtractor;
public TaskContext(WorkUnitState workUnitState) {
this.taskState = new TaskState(workUnitState);
this.taskMetrics = TaskMetrics.get(this.taskState);
this.taskState.setProp(Instrumented.METRIC_CONTEXT_NAME_KEY, this.taskMetrics.getName());
}
/**
* Get a {@link TaskState} instance for the task.
*
* @return a {@link TaskState} instance
*/
public TaskState getTaskState() {
return this.taskState;
}
/**
* Get a {@link TaskMetrics} instance for the task.
*
* @return a {@link TaskMetrics} instance
*/
public TaskMetrics getTaskMetrics() {
return this.taskMetrics;
}
/**
* Get a {@link Source} instance used to get a list of {@link WorkUnit}s.
*
* @return the {@link Source} used to get the {@link WorkUnit}, <em>null</em>
* if it fails to instantiate a {@link Source} object of the given class.
*/
public Source getSource() {
try {
return Source.class.cast(Class.forName(this.taskState.getProp(ConfigurationKeys.SOURCE_CLASS_KEY)).newInstance());
} catch (ClassNotFoundException cnfe) {
throw new RuntimeException(cnfe);
} catch (InstantiationException ie) {
throw new RuntimeException(ie);
} catch (IllegalAccessException iae) {
throw new RuntimeException(iae);
}
}
/**
* Get a {@link Extractor} instance.
*
* @return a {@link Extractor} instance
*/
public Extractor getExtractor() {
try {
this.rawSourceExtractor = getSource().getExtractor(this.taskState);
boolean throttlingEnabled = this.taskState.getPropAsBoolean(ConfigurationKeys.EXTRACT_LIMIT_ENABLED_KEY,
ConfigurationKeys.DEFAULT_EXTRACT_LIMIT_ENABLED);
if (throttlingEnabled) {
Limiter limiter = DefaultLimiterFactory.newLimiter(this.taskState);
if (!(limiter instanceof NonRefillableLimiter)) {
throw new IllegalArgumentException("The Limiter used with an Extractor should be an instance of "
+ NonRefillableLimiter.class.getSimpleName());
}
return new LimitingExtractorDecorator<>(this.rawSourceExtractor, limiter, this.taskState);
}
return this.rawSourceExtractor;
} catch (IOException ioe) {
throw new RuntimeException(ioe);
}
}
public Extractor getRawSourceExtractor() {
return this.rawSourceExtractor;
}
/**
* Get the interval for status reporting.
*
* @return interval for status reporting
*/
public long getStatusReportingInterval() {
return this.taskState.getPropAsLong(ConfigurationKeys.TASK_STATUS_REPORT_INTERVAL_IN_MS_KEY,
ConfigurationKeys.DEFAULT_TASK_STATUS_REPORT_INTERVAL_IN_MS);
}
/**
* Get the writer {@link Destination.DestinationType}.
*
* @param branches number of forked branches
* @param index branch index
* @return writer {@link Destination.DestinationType}
*/
public Destination.DestinationType getDestinationType(int branches, int index) {
return Destination.DestinationType.valueOf(this.taskState.getProp(
ForkOperatorUtils.getPropertyNameForBranch(ConfigurationKeys.WRITER_DESTINATION_TYPE_KEY, branches, index),
Destination.DestinationType.HDFS.name()));
}
/**
* Get the output format of the writer of type {@link WriterOutputFormat}.
*
* @param branches number of forked branches
* @param index branch index
* @return output format of the writer
*/
public WriterOutputFormat getWriterOutputFormat(int branches, int index) {
String writerOutputFormatValue = this.taskState.getProp(
ForkOperatorUtils.getPropertyNameForBranch(ConfigurationKeys.WRITER_OUTPUT_FORMAT_KEY, branches, index),
WriterOutputFormat.OTHER.name());
log.debug("Found writer output format value = {}", writerOutputFormatValue);
WriterOutputFormat wof = Enums.getIfPresent(WriterOutputFormat.class, writerOutputFormatValue.toUpperCase())
.or(WriterOutputFormat.OTHER);
log.debug("Returning writer output format = {}", wof);
return wof;
}
/**
* Get the list of pre-fork {@link Converter}s.
*
* @return list (possibly empty) of {@link Converter}s
*/
public List<Converter<?, ?, ?, ?>> getConverters() {
return getConverters(-1, this.taskState);
}
/**
* Get the list of post-fork {@link Converter}s for a given branch.
*
* @param index branch index
* @param forkTaskState a {@link TaskState} instance specific to the fork identified by the branch index
* @return list (possibly empty) of {@link Converter}s
*/
@SuppressWarnings("unchecked")
public List<Converter<?, ?, ?, ?>> getConverters(int index, TaskState forkTaskState) {
String converterClassKey =
ForkOperatorUtils.getPropertyNameForBranch(ConfigurationKeys.CONVERTER_CLASSES_KEY, index);
if (!this.taskState.contains(converterClassKey)) {
return Collections.emptyList();
}
if (index >= 0) {
forkTaskState.setProp(ConfigurationKeys.FORK_BRANCH_ID_KEY, index);
}
List<Converter<?, ?, ?, ?>> converters = Lists.newArrayList();
for (String converterClass : Splitter.on(",").omitEmptyStrings().trimResults()
.split(this.taskState.getProp(converterClassKey))) {
try {
Converter<?, ?, ?, ?> converter = Converter.class.cast(Class.forName(converterClass).newInstance());
InstrumentedConverterDecorator instrumentedConverter = new InstrumentedConverterDecorator<>(converter);
instrumentedConverter.init(forkTaskState);
converters.add(instrumentedConverter);
} catch (ClassNotFoundException cnfe) {
throw new RuntimeException(cnfe);
} catch (InstantiationException ie) {
throw new RuntimeException(ie);
} catch (IllegalAccessException iae) {
throw new RuntimeException(iae);
}
}
return converters;
}
/**
* Get the list of pre-fork {@link RecordStreamProcessor}s.
*
* @return list (possibly empty) of {@link RecordStreamProcessor}s
*/
public List<RecordStreamProcessor<?, ?, ?, ?>> getRecordStreamProcessors() {
return getRecordStreamProcessors(-1, this.taskState);
}
/**
* Get the list of post-fork {@link RecordStreamProcessor}s for a given branch.
*
* @param index branch index
* @param forkTaskState a {@link TaskState} instance specific to the fork identified by the branch index
* @return list (possibly empty) of {@link RecordStreamProcessor}s
*/
@SuppressWarnings("unchecked")
public List<RecordStreamProcessor<?, ?, ?, ?>> getRecordStreamProcessors(int index, TaskState forkTaskState) {
String streamProcessorClassKey =
ForkOperatorUtils.getPropertyNameForBranch(ConfigurationKeys.RECORD_STREAM_PROCESSOR_CLASSES_KEY, index);
if (!this.taskState.contains(streamProcessorClassKey)) {
return Collections.emptyList();
}
if (index >= 0) {
forkTaskState.setProp(ConfigurationKeys.FORK_BRANCH_ID_KEY, index);
}
List<RecordStreamProcessor<?, ?, ?, ?>> streamProcessors = Lists.newArrayList();
for (String streamProcessorClass : Splitter.on(",").omitEmptyStrings().trimResults()
.split(this.taskState.getProp(streamProcessorClassKey))) {
try {
RecordStreamProcessor<?, ?, ?, ?> streamProcessor =
RecordStreamProcessor.class.cast(Class.forName(streamProcessorClass).newInstance());
if (streamProcessor instanceof Converter) {
InstrumentedConverterDecorator instrumentedConverter =
new InstrumentedConverterDecorator<>((Converter)streamProcessor);
instrumentedConverter.init(forkTaskState);
streamProcessors.add(instrumentedConverter);
} else {
streamProcessors.add(streamProcessor);
}
} catch (ClassNotFoundException cnfe) {
throw new RuntimeException(cnfe);
} catch (InstantiationException ie) {
throw new RuntimeException(ie);
} catch (IllegalAccessException iae) {
throw new RuntimeException(iae);
}
}
return streamProcessors;
}
/**
* Get the {@link ForkOperator} to be applied to converted input schema and data record.
*
* @return {@link ForkOperator} to be used or <code>null</code> if none is specified
*/
@SuppressWarnings("unchecked")
public ForkOperator getForkOperator() {
try {
ForkOperator fork =
ForkOperator.class.cast(Class.forName(this.taskState.getProp(ConfigurationKeys.FORK_OPERATOR_CLASS_KEY,
ConfigurationKeys.DEFAULT_FORK_OPERATOR_CLASS)).newInstance());
return new InstrumentedForkOperatorDecorator<>(fork);
} catch (ClassNotFoundException cnfe) {
throw new RuntimeException(cnfe);
} catch (InstantiationException ie) {
throw new RuntimeException(ie);
} catch (IllegalAccessException iae) {
throw new RuntimeException(iae);
}
}
/**
* Get a pre-fork {@link RowLevelPolicyChecker} for executing row-level
* {@link org.apache.gobblin.qualitychecker.row.RowLevelPolicy}.
*
* @return a {@link RowLevelPolicyChecker}
*/
public RowLevelPolicyChecker getRowLevelPolicyChecker() throws Exception {
return getRowLevelPolicyChecker(-1);
}
/**
* Get a post-fork {@link RowLevelPolicyChecker} for executing row-level
* {@link org.apache.gobblin.qualitychecker.row.RowLevelPolicy} in the given branch.
*
* @param index branch index
* @return a {@link RowLevelPolicyChecker}
*/
public RowLevelPolicyChecker getRowLevelPolicyChecker(int index) throws Exception {
return RowLevelPolicyCheckerBuilderFactory.newPolicyCheckerBuilder(this.taskState, index).build();
}
/**
* Get a post-fork {@link TaskLevelPolicyChecker} for executing task-level
* {@link org.apache.gobblin.qualitychecker.task.TaskLevelPolicy} in the given branch.
*
* @param taskState {@link TaskState} of a {@link Task}
* @param index branch index
* @return a {@link TaskLevelPolicyChecker}
* @throws Exception
*/
public TaskLevelPolicyChecker getTaskLevelPolicyChecker(TaskState taskState, int index) throws Exception {
return TaskLevelPolicyCheckerBuilderFactory.newPolicyCheckerBuilder(taskState, index).build();
}
/**
* Get a post-fork {@link TaskPublisher} for publishing data in the given branch.
*
* @param taskState {@link TaskState} of a {@link Task}
* @param results Task-level policy checking results
* @return a {@link TaskPublisher}
*/
public TaskPublisher getTaskPublisher(TaskState taskState, TaskLevelPolicyCheckResults results) throws Exception {
return TaskPublisherBuilderFactory.newTaskPublisherBuilder(taskState, results).build();
}
/**
* Get a {@link DataWriterBuilder} for building a {@link org.apache.gobblin.writer.DataWriter}.
*
* @param branches number of forked branches
* @param index branch index
* @return a {@link DataWriterBuilder}
*/
public DataWriterBuilder getDataWriterBuilder(int branches, int index) {
String writerBuilderPropertyName = ForkOperatorUtils
.getPropertyNameForBranch(ConfigurationKeys.WRITER_BUILDER_CLASS, branches, index);
log.debug("Using property {} to get a writer builder for branches:{}, index:{}", writerBuilderPropertyName,
branches, index);
String dataWriterBuilderClassName = this.taskState.getProp(writerBuilderPropertyName, null);
if (dataWriterBuilderClassName == null) {
dataWriterBuilderClassName = ConfigurationKeys.DEFAULT_WRITER_BUILDER_CLASS;
log.info("No configured writer builder found, using {} as the default builder", dataWriterBuilderClassName);
} else {
log.info("Found configured writer builder as {}", dataWriterBuilderClassName);
}
try {
return DataWriterBuilder.class.cast(Class.forName(dataWriterBuilderClassName).newInstance());
} catch (ClassNotFoundException cnfe) {
throw new RuntimeException(cnfe);
} catch (InstantiationException ie) {
throw new RuntimeException(ie);
} catch (IllegalAccessException iae) {
throw new RuntimeException(iae);
}
}
/**
 * @return a {@link WatermarkStorage} backed by the state store, scoped to this task's state
 */
public WatermarkStorage getWatermarkStorage() {
  return new StateStoreBasedWatermarkStorage(this.taskState);
}
}
| 1,342 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/TaskStateTracker.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime;
import com.google.common.util.concurrent.Service;
/**
* An interface for classes that track {@link TaskState}s.
*
* @author Yinan Li
*/
public interface TaskStateTracker extends Service {

  /**
   * Register a new {@link Task}.
   *
   * @param task {@link Task} to register
   */
  void registerNewTask(Task task);

  /**
   * Callback method invoked when the {@link Task} completes running.
   *
   * @param task {@link Task} that has finished running
   */
  void onTaskRunCompletion(Task task);

  /**
   * Callback method invoked when the {@link Task} completes committing.
   *
   * @param task {@link Task} that has finished committing
   */
  void onTaskCommitCompletion(Task task);
}
| 1,343 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/CountBasedLimiter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime;
/**
* {@inheritDoc}
*
* @deprecated This class has been moved to {@link org.apache.gobblin.util.limiter.CountBasedLimiter}.
*/
@Deprecated
public class CountBasedLimiter extends org.apache.gobblin.util.limiter.CountBasedLimiter {

  /**
   * @param countLimit maximum number of permits this limiter will hand out
   */
  public CountBasedLimiter(long countLimit) {
    super(countLimit);
  }
}
| 1,344 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/Limiter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime;
/**
* {@inheritDoc}
*
* @deprecated This class has been moved to {@link org.apache.gobblin.util.limiter.Limiter}.
*/
@Deprecated
public interface Limiter extends org.apache.gobblin.util.limiter.Limiter {
  // Marker interface kept only for backwards compatibility with the old package location.
}
| 1,345 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/CountUpAndDownLatch.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Phaser;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicLong;
import org.jetbrains.annotations.NotNull;
/**
 * A {@link CountDownLatch} that also allows counting up. Backed by a {@link Phaser}.
 *
 * <p>{@link #countUp()} registers a new party with the phaser, while {@link #countDown()} arrives
 * and de-registers one, so {@link #getCount()} reports the number of currently unarrived
 * parties.</p>
 */
class CountUpAndDownLatch extends CountDownLatch {

  private final Phaser phaser;

  // Total number of parties ever registered through countUp(); only used for progress
  // reporting in toString(). Made private final (was package-visible and mutable).
  private final AtomicLong totalParties = new AtomicLong();

  public CountUpAndDownLatch(int count) {
    // The inherited CountDownLatch state is unused; all waiting and counting is delegated
    // to the phaser.
    super(0);
    this.phaser = new Phaser(count) {
      @Override
      protected boolean onAdvance(int phase, int registeredParties) {
        // Need to override onAdvance because a phaser by default terminates whenever the
        // number of registered parties reaches 0.
        return false;
      }
    };
  }

  @Override
  public void await() throws InterruptedException {
    int phase = getPhase();
    // NOTE(review): Phaser.awaitAdvance is not interruptible, unlike CountDownLatch.await;
    // awaitAdvanceInterruptibly would be needed for true interruptible semantics — confirm intent.
    this.phaser.awaitAdvance(phase);
  }

  @Override
  public boolean await(long timeout, @NotNull TimeUnit unit) throws InterruptedException {
    try {
      int phase = getPhase();
      this.phaser.awaitAdvanceInterruptibly(phase, timeout, unit);
      return true;
    } catch (TimeoutException te) {
      // Mirror CountDownLatch semantics: false signals the wait timed out.
      return false;
    }
  }

  // Registers and immediately de-registers a throwaway party to observe the current phase
  // without affecting the outstanding count.
  private int getPhase() {
    int phase = this.phaser.register();
    this.phaser.arriveAndDeregister();
    return phase;
  }

  @Override
  public void countDown() {
    this.phaser.arriveAndDeregister();
  }

  public void countUp() {
    this.phaser.register();
    this.totalParties.incrementAndGet();
  }

  @Override
  public long getCount() {
    return this.phaser.getUnarrivedParties();
  }

  /**
   * Because {@link #countDown()} de-registers a party, this method gives the same result as
   * {@link #getCount()}.
   *
   * @return currently registered parties
   */
  @Deprecated
  public long getRegisteredParties() {
    return this.phaser.getRegisteredParties();
  }

  @Override
  public String toString() {
    return "Unarrived parties: " + this.phaser.getUnarrivedParties() + "/" + this.totalParties;
  }
}
| 1,346 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/NewTaskCompletionEvent.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime;
import java.util.Collection;
/**
* An event triggered upon the completion of one or more {@link Task}s.
*
* <p>
* This event carries the {@link TaskState}(s) of the completed {@link Task}(s). Classes that are
* interested in receiving the events can registered themselves to the
* {@link com.google.common.eventbus.EventBus} in {@link AbstractJobLauncher} to which the events
* are posted.
* </p>
*
* @author Yinan Li
*/
public class NewTaskCompletionEvent {

  // TaskStates of the tasks whose completion triggered this event.
  private final Collection<TaskState> completedTaskStates;

  public NewTaskCompletionEvent(Collection<TaskState> taskStates) {
    this.completedTaskStates = taskStates;
  }

  /**
   * Get the {@link Collection} of {@link TaskState}s of completed {@link Task}s.
   *
   * @return the {@link Collection} of {@link TaskState}s of completed {@link Task}s
   */
  public Collection<TaskState> getTaskStates() {
    return this.completedTaskStates;
  }
}
| 1,347 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/TaskStateCollectorServiceHandler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime;
import java.io.IOException;
import org.apache.gobblin.configuration.WorkUnitState;
import java.io.Closeable;
import java.util.Collection;
/**
* Define basic interface for Handler in TaskStateCollectorService,
* which runs in the gobblin's driver level.
*
*/
public interface TaskStateCollectorServiceHandler extends Closeable {

  /**
   * Factory for creating {@link TaskStateCollectorServiceHandler} instances for a given job.
   */
  interface TaskStateCollectorServiceHandlerFactory {
    TaskStateCollectorServiceHandler createHandler(JobState jobState);
  }

  /**
   * Execute the actions of the handler against the collected task-level states.
   *
   * @param states the {@link WorkUnitState}s collected so far
   * @throws IOException if handling fails
   */
  void handle(Collection<? extends WorkUnitState> states) throws IOException;
}
| 1,348 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/DynamicConfigGeneratorFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime;
import com.typesafe.config.Config;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.DynamicConfigGenerator;
import org.apache.gobblin.util.ClassAliasResolver;
import org.apache.gobblin.util.ConfigUtils;
/**
* For getting an instance of a {@link DynamicConfigGenerator}
*/
public class DynamicConfigGeneratorFactory {

  // Static-only factory: prevent instantiation.
  private DynamicConfigGeneratorFactory() {
  }

  /**
   * Get an instance of a {@link DynamicConfigGenerator}.
   *
   * <p>The implementation class (or alias) is read from
   * {@link ConfigurationKeys#DYNAMIC_CONFIG_GENERATOR_CLASS_KEY}, defaulting to
   * {@link ConfigurationKeys#DEFAULT_DYNAMIC_CONFIG_GENERATOR_CLASS_KEY}.</p>
   *
   * @param config {@link Config} used to look up the generator class name
   * @return an instance of {@link DynamicConfigGenerator}
   * @throws RuntimeException if the generator class cannot be resolved or instantiated
   */
  public static DynamicConfigGenerator createDynamicConfigGenerator(Config config) {
    String dynamicConfigGeneratorClassName =
        ConfigUtils.getString(config, ConfigurationKeys.DYNAMIC_CONFIG_GENERATOR_CLASS_KEY,
            ConfigurationKeys.DEFAULT_DYNAMIC_CONFIG_GENERATOR_CLASS_KEY);
    try {
      ClassAliasResolver<DynamicConfigGenerator> aliasResolver =
          new ClassAliasResolver<>(DynamicConfigGenerator.class);
      return aliasResolver.resolveClass(dynamicConfigGeneratorClassName).newInstance();
    } catch (ReflectiveOperationException e) {
      throw new RuntimeException("Could not construct DynamicConfigGenerator " +
          dynamicConfigGeneratorClassName, e);
    }
  }
}
| 1,349 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/FsDatasetStateStoreFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime;
import com.typesafe.config.Config;
import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.metastore.DatasetStateStore;
@Alias("fs")
public class FsDatasetStateStoreFactory implements DatasetStateStore.Factory {
@Override
public DatasetStateStore<JobState.DatasetState> createStateStore(Config config) {
try {
return FsDatasetStateStore.createStateStore(config, FsDatasetStateStore.class.getName());
} catch (Exception e) {
throw new RuntimeException("Failed to create FsDatasetStateStore with factory", e);
}
}
} | 1,350 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/TaskState.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.io.StringWriter;
import java.lang.reflect.Type;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.io.Text;
import com.codahale.metrics.Counter;
import com.codahale.metrics.Gauge;
import com.codahale.metrics.Meter;
import com.google.common.base.Optional;
import com.google.common.base.Strings;
import com.google.common.base.Throwables;
import com.google.common.collect.Maps;
import com.google.gson.reflect.TypeToken;
import com.google.gson.stream.JsonWriter;
import com.linkedin.data.template.StringMap;
import javax.annotation.Nullable;
import lombok.Getter;
import lombok.Setter;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.metrics.GobblinMetrics;
import org.apache.gobblin.rest.Metric;
import org.apache.gobblin.rest.MetricArray;
import org.apache.gobblin.rest.MetricTypeEnum;
import org.apache.gobblin.rest.Table;
import org.apache.gobblin.rest.TableTypeEnum;
import org.apache.gobblin.rest.TaskExecutionInfo;
import org.apache.gobblin.rest.TaskStateEnum;
import org.apache.gobblin.runtime.job.TaskProgress;
import org.apache.gobblin.runtime.troubleshooter.Issue;
import org.apache.gobblin.runtime.util.GsonUtils;
import org.apache.gobblin.runtime.util.MetricGroup;
import org.apache.gobblin.runtime.util.TaskMetrics;
import org.apache.gobblin.source.workunit.Extract;
import org.apache.gobblin.util.ForkOperatorUtils;
/**
* An extension to {@link WorkUnitState} with run-time task state information.
*
* @author Yinan Li
*/
public class TaskState extends WorkUnitState implements TaskProgress {

  // Built-in metric names

  /**
   * @deprecated see {@link org.apache.gobblin.instrumented.writer.InstrumentedDataWriterBase}.
   */
  private static final String RECORDS = "records";

  /**
   * @deprecated see {@link org.apache.gobblin.instrumented.writer.InstrumentedDataWriterBase}.
   */
  private static final String RECORDS_PER_SECOND = "recordsPerSec";

  /**
   * @deprecated see {@link org.apache.gobblin.instrumented.writer.InstrumentedDataWriterBase}.
   */
  private static final String BYTES = "bytes";

  /**
   * @deprecated see {@link org.apache.gobblin.instrumented.writer.InstrumentedDataWriterBase}.
   */
  private static final String BYTES_PER_SECOND = "bytesPerSec";

  /** ID of the job this {@link TaskState} is for */
  @Getter @Setter
  private String jobId;

  /** ID of the task this {@link TaskState} is for */
  @Getter @Setter
  private String taskId;

  /** sequence number of the task this {@link TaskState} is for */
  @Getter
  private String taskKey;

  // Attempt id of the task execution; absent when the state was deserialized via readFields,
  // which does not persist it.
  @Getter
  private Optional<String> taskAttemptId;

  /** task start time in milliseconds */
  @Getter @Setter
  private long startTime = 0;

  /** task end time in milliseconds */
  @Getter @Setter
  private long endTime = 0;

  /** task duration in milliseconds */
  @Getter @Setter
  private long taskDuration;

  // Needed for serialization/deserialization
  public TaskState() {}

  public TaskState(WorkUnitState workUnitState) {
    // Since getWorkunit() returns an immutable WorkUnit object,
    // the WorkUnit object in this object is also immutable.
    super(workUnitState.getWorkunit(), workUnitState.getJobState(), workUnitState.getTaskBrokerNullable());
    addAll(workUnitState);
    this.jobId = workUnitState.getProp(ConfigurationKeys.JOB_ID_KEY);
    this.taskId = workUnitState.getProp(ConfigurationKeys.TASK_ID_KEY);
    this.taskKey = workUnitState.getProp(ConfigurationKeys.TASK_KEY_KEY, "unknown_task_key");
    this.taskAttemptId = Optional.fromNullable(workUnitState.getProp(ConfigurationKeys.TASK_ATTEMPT_ID_KEY));
    this.setId(this.taskId);
  }

  // Copy constructor. NOTE(review): unlike the WorkUnitState constructor above, this does not
  // copy taskKey, leaving it null on the copy — confirm whether that is intentional.
  public TaskState(TaskState taskState) {
    super(taskState.getWorkunit(), taskState.getJobState(), taskState.getTaskBrokerNullable());
    addAll(taskState);
    this.jobId = taskState.getProp(ConfigurationKeys.JOB_ID_KEY);
    this.taskId = taskState.getProp(ConfigurationKeys.TASK_ID_KEY);
    this.taskAttemptId = taskState.getTaskAttemptId();
    this.setId(this.taskId);
  }

  /**
   * Get the {@link ConfigurationKeys#TASK_FAILURE_EXCEPTION_KEY} if it exists, else return {@link Optional#absent()}.
   */
  public Optional<String> getTaskFailureException() {
    return Optional.fromNullable(this.getProp(ConfigurationKeys.TASK_FAILURE_EXCEPTION_KEY));
  }

  /**
   * If not already present, set the {@link ConfigurationKeys#TASK_FAILURE_EXCEPTION_KEY} to a {@link String}
   * representation of the given {@link Throwable}.
   */
  public void setTaskFailureException(Throwable taskFailureException) {
    // First-write-wins: a later failure never overwrites the originally recorded one.
    if (!this.contains(ConfigurationKeys.TASK_FAILURE_EXCEPTION_KEY)) {
      this.setProp(ConfigurationKeys.TASK_FAILURE_EXCEPTION_KEY,
          Throwables.getStackTraceAsString(taskFailureException));
    }
  }

  /**
   * Returns list of task issues. Can be null, if no issues were set.
   */
  @Nullable
  public List<Issue> getTaskIssues() {
    String serializedIssues = this.getProp(ConfigurationKeys.TASK_ISSUES_KEY, null);
    if (serializedIssues == null) {
      return null;
    }

    // Issues are stored as a JSON-serialized ArrayList<Issue> in the state properties.
    Type issueListTypeToken = new TypeToken<ArrayList<Issue>>() {
    }.getType();

    return GsonUtils.GSON_WITH_DATE_HANDLING.fromJson(serializedIssues, issueListTypeToken);
  }

  /**
   * Saves the list of task issues into the state, so that parent job can aggregate and consume it.
   * Passing {@code null} clears any previously stored issues.
   */
  public void setTaskIssues(List<Issue> issues) {
    if (issues == null) {
      this.removeProp(ConfigurationKeys.TASK_ISSUES_KEY);
    } else {
      String serializedIssues = GsonUtils.GSON_WITH_DATE_HANDLING.toJson(issues);
      this.setProp(ConfigurationKeys.TASK_ISSUES_KEY, serializedIssues);
    }
  }

  /**
   * Return whether the task has completed running or not.
   *
   * @return {@code true} if the task has completed or {@code false} otherwise
   */
  public boolean isCompleted() {
    WorkingState state = getWorkingState();
    return state == WorkingState.SUCCESSFUL || state == WorkingState.COMMITTED || state == WorkingState.FAILED;
  }

  /**
   * Update record-level metrics.
   *
   * <p>{@code recordsWritten} is treated as a cumulative total: only the delta against the
   * current counter value is added, so repeated calls with the same total are idempotent.</p>
   *
   * @param recordsWritten number of records written by the writer
   * @param branchIndex fork branch index
   *
   * @deprecated see {@link org.apache.gobblin.instrumented.writer.InstrumentedDataWriterBase}.
   */
  public synchronized void updateRecordMetrics(long recordsWritten, int branchIndex) {
    TaskMetrics metrics = TaskMetrics.get(this);

    // chopping branch index from metric name
    // String forkBranchId = ForkOperatorUtils.getForkId(this.taskId, branchIndex);
    String forkBranchId = TaskMetrics.taskInstanceRemoved(this.taskId);

    Counter taskRecordCounter = metrics.getCounter(MetricGroup.TASK.name(), forkBranchId, RECORDS);
    long inc = recordsWritten - taskRecordCounter.getCount();
    taskRecordCounter.inc(inc);
    metrics.getMeter(MetricGroup.TASK.name(), forkBranchId, RECORDS_PER_SECOND).mark(inc);
    metrics.getCounter(MetricGroup.JOB.name(), this.jobId, RECORDS).inc(inc);
    metrics.getMeter(MetricGroup.JOB.name(), this.jobId, RECORDS_PER_SECOND).mark(inc);
  }

  /**
   * Collect byte-level metrics.
   *
   * <p>Same delta-based accounting as {@link #updateRecordMetrics(long, int)}.</p>
   *
   * @param bytesWritten number of bytes written by the writer
   * @param branchIndex fork branch index
   *
   * @deprecated see {@link org.apache.gobblin.instrumented.writer.InstrumentedDataWriterBase}.
   */
  public synchronized void updateByteMetrics(long bytesWritten, int branchIndex) {
    TaskMetrics metrics = TaskMetrics.get(this);

    String forkBranchId = TaskMetrics.taskInstanceRemoved(this.taskId);

    Counter taskByteCounter = metrics.getCounter(MetricGroup.TASK.name(), forkBranchId, BYTES);
    long inc = bytesWritten - taskByteCounter.getCount();
    taskByteCounter.inc(inc);
    metrics.getMeter(MetricGroup.TASK.name(), forkBranchId, BYTES_PER_SECOND).mark(inc);
    metrics.getCounter(MetricGroup.JOB.name(), this.jobId, BYTES).inc(inc);
    metrics.getMeter(MetricGroup.JOB.name(), this.jobId, BYTES_PER_SECOND).mark(inc);
  }

  /**
   * Adjust job-level metrics when the task gets retried: subtract this task's per-branch
   * contributions from the job-level counters so the retry does not double-count.
   *
   * @param branches number of forked branches
   */
  public void adjustJobMetricsOnRetry(int branches) {
    TaskMetrics metrics = TaskMetrics.get(this);

    for (int i = 0; i < branches; i++) {
      String forkBranchId = ForkOperatorUtils.getForkId(this.taskId, i);
      long recordsWritten = metrics.getCounter(MetricGroup.TASK.name(), forkBranchId, RECORDS).getCount();
      long bytesWritten = metrics.getCounter(MetricGroup.TASK.name(), forkBranchId, BYTES).getCount();
      metrics.getCounter(MetricGroup.JOB.name(), this.jobId, RECORDS).dec(recordsWritten);
      metrics.getCounter(MetricGroup.JOB.name(), this.jobId, BYTES).dec(bytesWritten);
    }
  }

  // Deserialization counterpart of write(DataOutput): the field order here must mirror it
  // exactly. taskKey and taskAttemptId are not persisted; taskAttemptId is reset to absent.
  @Override
  public void readFields(DataInput in) throws IOException {
    Text text = new Text();
    text.readFields(in);
    this.jobId = text.toString().intern();
    text.readFields(in);
    this.taskId = text.toString().intern();
    this.taskAttemptId = Optional.absent();
    this.setId(this.taskId);
    this.startTime = in.readLong();
    this.endTime = in.readLong();
    this.taskDuration = in.readLong();
    super.readFields(in);
  }

  // Serialization counterpart of readFields(DataInput); keep field order in sync.
  @Override
  public void write(DataOutput out) throws IOException {
    Text text = new Text();
    text.set(this.jobId);
    text.write(out);
    text.set(this.taskId);
    text.write(out);
    out.writeLong(this.startTime);
    out.writeLong(this.endTime);
    out.writeLong(this.taskDuration);
    super.write(out);
  }

  // NOTE(review): assumes jobId and taskId are non-null; an instance created via the no-arg
  // constructor and never populated would NPE here — confirm callers never compare such instances.
  @Override
  public boolean equals(Object object) {
    if (!(object instanceof TaskState)) {
      return false;
    }

    TaskState other = (TaskState) object;
    return super.equals(other) && this.jobId.equals(other.jobId) && this.taskId.equals(other.taskId);
  }

  @Override
  public int hashCode() {
    final int prime = 31;
    int result = super.hashCode();
    result = prime * result + this.jobId.hashCode();
    result = prime * result + this.taskId.hashCode();
    return result;
  }

  /** @return pretty-printed JSON, including all properties */
  public String toJsonString() {
    return toJsonString(true);
  }

  /** @return pretty-printed JSON, optionally including properties */
  public String toJsonString(boolean includeProperties) {
    StringWriter stringWriter = new StringWriter();
    try (JsonWriter jsonWriter = new JsonWriter(stringWriter)) {
      jsonWriter.setIndent("\t");
      this.toJson(jsonWriter, includeProperties);
    } catch (IOException ioe) {
      // Ignored: StringWriter cannot actually fail on I/O.
    }

    return stringWriter.toString();
  }

  /**
   * Convert this {@link TaskState} to a json document.
   *
   * @param jsonWriter a {@link com.google.gson.stream.JsonWriter} used to write the json document
   * @param keepConfig whether to include all state properties in the output
   * @throws IOException if writing to the underlying writer fails
   */
  public void toJson(JsonWriter jsonWriter, boolean keepConfig) throws IOException {
    jsonWriter.beginObject();

    jsonWriter.name("task id").value(this.getTaskId()).name("task state").value(this.getWorkingState().name())
        .name("start time").value(this.getStartTime()).name("end time").value(this.getEndTime()).name("duration")
        .value(this.getTaskDuration()).name("retry count")
        .value(this.getPropAsInt(ConfigurationKeys.TASK_RETRIES_KEY, 0));

    // Also add failure exception information if it exists. This information is useful even in the
    // case that the task finally succeeds so we know what happened in the course of task execution.
    if (getTaskFailureException().isPresent()) {
      jsonWriter.name("exception").value(getTaskFailureException().get());
    }

    if (keepConfig) {
      jsonWriter.name("properties");
      jsonWriter.beginObject();
      for (String key : this.getPropertyNames()) {
        jsonWriter.name(key).value(this.getProp(key));
      }
      jsonWriter.endObject();
    }

    jsonWriter.endObject();
  }

  /**
   * Convert this {@link TaskState} instance to a {@link TaskExecutionInfo} instance.
   *
   * @return a {@link TaskExecutionInfo} instance
   */
  public TaskExecutionInfo toTaskExecutionInfo() {
    TaskExecutionInfo taskExecutionInfo = new TaskExecutionInfo();

    taskExecutionInfo.setJobId(this.jobId);
    taskExecutionInfo.setTaskId(this.taskId);
    // Only set timestamps that have actually been recorded (0 means "not set").
    if (this.startTime > 0) {
      taskExecutionInfo.setStartTime(this.startTime);
    }
    if (this.endTime > 0) {
      taskExecutionInfo.setEndTime(this.endTime);
    }
    taskExecutionInfo.setDuration(this.taskDuration);
    taskExecutionInfo.setState(TaskStateEnum.valueOf(getWorkingState().name()));
    if (this.contains(ConfigurationKeys.TASK_FAILURE_EXCEPTION_KEY)) {
      taskExecutionInfo.setFailureException(this.getProp(ConfigurationKeys.TASK_FAILURE_EXCEPTION_KEY));
    }
    taskExecutionInfo.setHighWatermark(this.getHighWaterMark());

    // Add extract/table information
    Table table = new Table();
    Extract extract = this.getExtract();
    table.setNamespace(extract.getNamespace());
    table.setName(extract.getTable());
    if (extract.hasType()) {
      table.setType(TableTypeEnum.valueOf(extract.getType().name()));
    }
    taskExecutionInfo.setTable(table);

    // Add task metrics: counters, meters and gauges from the task's metric context.
    TaskMetrics taskMetrics = TaskMetrics.get(this);
    MetricArray metricArray = new MetricArray();

    for (Map.Entry<String, ? extends com.codahale.metrics.Metric> entry : taskMetrics.getMetricContext().getCounters()
        .entrySet()) {
      Metric counter = new Metric();
      counter.setGroup(MetricGroup.TASK.name());
      counter.setName(entry.getKey());
      counter.setType(MetricTypeEnum.valueOf(GobblinMetrics.MetricType.COUNTER.name()));
      counter.setValue(Long.toString(((Counter) entry.getValue()).getCount()));
      metricArray.add(counter);
    }

    for (Map.Entry<String, ? extends com.codahale.metrics.Metric> entry : taskMetrics.getMetricContext().getMeters()
        .entrySet()) {
      Metric meter = new Metric();
      meter.setGroup(MetricGroup.TASK.name());
      meter.setName(entry.getKey());
      meter.setType(MetricTypeEnum.valueOf(GobblinMetrics.MetricType.METER.name()));
      meter.setValue(Double.toString(((Meter) entry.getValue()).getMeanRate()));
      metricArray.add(meter);
    }

    for (Map.Entry<String, ? extends com.codahale.metrics.Metric> entry : taskMetrics.getMetricContext().getGauges()
        .entrySet()) {
      Metric gauge = new Metric();
      gauge.setGroup(MetricGroup.TASK.name());
      gauge.setName(entry.getKey());
      gauge.setType(MetricTypeEnum.valueOf(GobblinMetrics.MetricType.GAUGE.name()));
      gauge.setValue(((Gauge<?>) entry.getValue()).getValue().toString());
      metricArray.add(gauge);
    }

    taskExecutionInfo.setMetrics(metricArray);

    // Add task properties, skipping null/empty values.
    Map<String, String> taskProperties = Maps.newHashMap();
    for (String name : this.getPropertyNames()) {
      String value = this.getProp(name);
      if (!Strings.isNullOrEmpty(value))
        taskProperties.put(name, value);
    }
    taskExecutionInfo.setTaskProperties(new StringMap(taskProperties));

    return taskExecutionInfo;
  }
}
| 1,351 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/TaskExecutor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.Properties;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import com.codahale.metrics.Counter;
import com.codahale.metrics.Gauge;
import com.codahale.metrics.Meter;
import com.codahale.metrics.Metric;
import com.codahale.metrics.MetricSet;
import com.codahale.metrics.SlidingTimeWindowReservoir;
import com.codahale.metrics.Timer;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.collect.Maps;
import com.google.common.util.concurrent.AbstractIdleService;
import org.apache.hadoop.conf.Configuration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.metrics.GobblinMetrics;
import org.apache.gobblin.runtime.fork.Fork;
import org.apache.gobblin.util.ExecutorsUtils;
import org.apache.gobblin.util.logs.Log4jConfigurationHelper;
import lombok.Getter;
import static com.codahale.metrics.MetricRegistry.name;
/**
* A class for executing {@link Task}s and retrying failed ones as well as for executing {@link Fork}s.
*
* @author Yinan Li
*/
public class TaskExecutor extends AbstractIdleService {

  private static final Logger LOG = LoggerFactory.getLogger(TaskExecutor.class);

  // Thread pool executor for running tasks
  private final ScheduledExecutorService taskExecutor;

  // A separate thread pool executor for running forks of tasks
  @Getter
  private final ExecutorService forkExecutor;

  // Base task retry interval in seconds; the actual delay grows linearly with the retry count
  @Getter
  private final long retryIntervalInSeconds;

  // The maximum number of items in the queued task time map.
  @Getter
  private final int queuedTaskTimeMaxSize;

  // The maximum age (in milliseconds) of the items in the queued task time map.
  @Getter
  private final long queuedTaskTimeMaxAge;

  // Map of queued task ids to queue times. The key is the task id, the value is the time the task was queued. If the
  // task is being retried, the time may be in the future. Entries with time in the future will not be counted as
  // queued until the time is in the past.
  private final Map<String, Long> queuedTasks = Maps.newConcurrentMap();

  // Set of historical queued task times. The key is the UTC epoch time the task started, the value is the milliseconds
  // the task waited to start. NOTE(review): two tasks starting within the same millisecond collide on the key, and
  // putIfAbsent silently drops the second entry -- confirm that this loss is acceptable for these metrics.
  private final ConcurrentSkipListMap<Long, Long> queuedTaskTimeHistorical = new ConcurrentSkipListMap<>();

  // The timestamp for the last time the metrics were calculated. Only read/written inside the
  // synchronized calculateMetrics().
  private long lastCalculationTime = 0;

  // The total number of tasks currently queued and queued over the historical lookback period.
  @Getter
  private AtomicInteger queuedTaskCount = new AtomicInteger();

  // The total number of tasks currently queued.
  @Getter
  private AtomicInteger currentQueuedTaskCount = new AtomicInteger();

  // The total number of tasks queued over the historical lookback period.
  @Getter
  private AtomicInteger historicalQueuedTaskCount = new AtomicInteger();

  // The total time tasks have currently been in the queue and were in the queue during the historical lookback period.
  @Getter
  private AtomicLong queuedTaskTotalTime = new AtomicLong();

  // The total time tasks have currently been in the queue.
  @Getter
  private AtomicLong currentQueuedTaskTotalTime = new AtomicLong();

  // The total time tasks have been in the queue during the historical lookback period.
  @Getter
  private AtomicLong historicalQueuedTaskTotalTime = new AtomicLong();

  // Count of running tasks.
  @Getter
  private final Counter runningTaskCount = new Counter();

  // Count of successful tasks.
  @Getter
  private final Meter successfulTaskCount = new Meter();

  // Count of failed tasks.
  @Getter
  private final Meter failedTaskCount = new Meter();

  // Tracks the time from work-unit creation to the start of the task's run.
  @Getter
  private final Timer taskCreateAndRunTimer;

  // The metric set exposed from the task executor.
  private final TaskExecutorQueueMetricSet metricSet = new TaskExecutorQueueMetricSet();

  /**
   * Constructor used internally.
   *
   * @param taskExecutorThreadPoolSize size of the fixed thread pool that runs tasks; must be positive
   * @param coreRetryThreadPoolSize retained for signature compatibility; retries are scheduled on the task
   *                                executor, so this value is currently unused
   * @param retryIntervalInSeconds base retry delay in seconds; must be positive
   * @param queuedTaskTimeMaxSize maximum number of historical queue-time entries kept for metrics; must be positive
   * @param queuedTaskTimeMaxAge maximum age in milliseconds of historical queue-time entries; must be positive
   * @param timerWindowSize sliding time window, in minutes, for {@link #taskCreateAndRunTimer}
   */
  private TaskExecutor(int taskExecutorThreadPoolSize, int coreRetryThreadPoolSize, long retryIntervalInSeconds,
      int queuedTaskTimeMaxSize, long queuedTaskTimeMaxAge, int timerWindowSize) {
    Preconditions.checkArgument(taskExecutorThreadPoolSize > 0, "Task executor thread pool size should be positive");
    Preconditions.checkArgument(retryIntervalInSeconds > 0, "Task retry interval should be positive");
    Preconditions.checkArgument(queuedTaskTimeMaxSize > 0, "Queued task time max size should be positive");
    Preconditions.checkArgument(queuedTaskTimeMaxAge > 0, "Queued task time max age should be positive");

    // Currently a fixed-size thread pool is used to execute tasks. We probably need to revisit this later.
    this.taskExecutor = ExecutorsUtils.loggingDecorator(Executors.newScheduledThreadPool(
        taskExecutorThreadPoolSize,
        ExecutorsUtils.newThreadFactory(Optional.of(LOG), Optional.of("TaskExecutor-%d"))));

    this.retryIntervalInSeconds = retryIntervalInSeconds;
    this.queuedTaskTimeMaxSize = queuedTaskTimeMaxSize;
    this.queuedTaskTimeMaxAge = queuedTaskTimeMaxAge;
    this.taskCreateAndRunTimer = new Timer(new SlidingTimeWindowReservoir(timerWindowSize, TimeUnit.MINUTES));

    this.forkExecutor = ExecutorsUtils.loggingDecorator(
        new ThreadPoolExecutor(
            // The core thread pool size is equal to that of the task
            // executor as there's at least one fork per task
            taskExecutorThreadPoolSize,
            // The fork executor thread pool size is essentially unbounded. This is to make sure all forks of
            // a task get a thread to run so all forks of the task are making progress. This is necessary since
            // otherwise the parent task will be blocked if the record queue (bounded) of some fork is full and
            // that fork has not yet started to run because of no available thread. The task cannot proceed in
            // this case because it has to make sure every records go to every forks.
            Integer.MAX_VALUE,
            0L,
            TimeUnit.MILLISECONDS,
            // The work queue is a SynchronousQueue. This essentially forces a new thread to be created for each fork.
            new SynchronousQueue<Runnable>(),
            ExecutorsUtils.newThreadFactory(Optional.of(LOG), Optional.of("ForkExecutor-%d"))));
  }

  /**
   * Constructor to work with {@link java.util.Properties}.
   */
  public TaskExecutor(Properties properties) {
    this(Integer.parseInt(properties.getProperty(ConfigurationKeys.TASK_EXECUTOR_THREADPOOL_SIZE_KEY,
        Integer.toString(ConfigurationKeys.DEFAULT_TASK_EXECUTOR_THREADPOOL_SIZE))),
        Integer.parseInt(properties.getProperty(ConfigurationKeys.TASK_RETRY_THREAD_POOL_CORE_SIZE_KEY,
            Integer.toString(ConfigurationKeys.DEFAULT_TASK_RETRY_THREAD_POOL_CORE_SIZE))),
        Long.parseLong(properties.getProperty(ConfigurationKeys.TASK_RETRY_INTERVAL_IN_SEC_KEY,
            Long.toString(ConfigurationKeys.DEFAULT_TASK_RETRY_INTERVAL_IN_SEC))),
        Integer.parseInt(properties.getProperty(ConfigurationKeys.QUEUED_TASK_TIME_MAX_SIZE,
            Integer.toString(ConfigurationKeys.DEFAULT_QUEUED_TASK_TIME_MAX_SIZE))),
        Long.parseLong(properties.getProperty(ConfigurationKeys.QUEUED_TASK_TIME_MAX_AGE,
            Long.toString(ConfigurationKeys.DEFAULT_QUEUED_TASK_TIME_MAX_AGE))),
        Integer.parseInt(properties.getProperty(ConfigurationKeys.METRIC_TIMER_WINDOW_SIZE_IN_MINUTES,
            Integer.toString(ConfigurationKeys.DEFAULT_METRIC_TIMER_WINDOW_SIZE_IN_MINUTES))));
  }

  /**
   * Constructor to work with Hadoop {@link org.apache.hadoop.conf.Configuration}.
   */
  public TaskExecutor(Configuration conf) {
    this(conf.getInt(ConfigurationKeys.TASK_EXECUTOR_THREADPOOL_SIZE_KEY,
        ConfigurationKeys.DEFAULT_TASK_EXECUTOR_THREADPOOL_SIZE),
        conf.getInt(ConfigurationKeys.TASK_RETRY_THREAD_POOL_CORE_SIZE_KEY,
            ConfigurationKeys.DEFAULT_TASK_RETRY_THREAD_POOL_CORE_SIZE),
        conf.getLong(ConfigurationKeys.TASK_RETRY_INTERVAL_IN_SEC_KEY,
            ConfigurationKeys.DEFAULT_TASK_RETRY_INTERVAL_IN_SEC),
        conf.getInt(ConfigurationKeys.QUEUED_TASK_TIME_MAX_SIZE,
            ConfigurationKeys.DEFAULT_QUEUED_TASK_TIME_MAX_SIZE),
        conf.getLong(ConfigurationKeys.QUEUED_TASK_TIME_MAX_AGE,
            ConfigurationKeys.DEFAULT_QUEUED_TASK_TIME_MAX_AGE),
        conf.getInt(ConfigurationKeys.METRIC_TIMER_WINDOW_SIZE_IN_MINUTES,
            ConfigurationKeys.DEFAULT_METRIC_TIMER_WINDOW_SIZE_IN_MINUTES));
    Log4jConfigurationHelper.setLogLevel(conf.getTrimmedStringCollection(Log4jConfigurationHelper.LOG_LEVEL_OVERRIDE_MAP));
  }

  @Override
  protected void startUp()
      throws Exception {
    LOG.info("Starting the task executor");
    if (this.taskExecutor.isShutdown() || this.taskExecutor.isTerminated()) {
      throw new IllegalStateException("Task thread pool executor is shutdown or terminated");
    }
    if (this.forkExecutor.isShutdown() || this.forkExecutor.isTerminated()) {
      throw new IllegalStateException("Fork thread pool executor is shutdown or terminated");
    }
  }

  @Override
  protected void shutDown()
      throws Exception {
    LOG.info("Stopping the task executor");
    try {
      ExecutorsUtils.shutdownExecutorService(this.taskExecutor, Optional.of(LOG));
    } finally {
      // Always attempt to stop the fork executor even if shutting down the task executor failed
      ExecutorsUtils.shutdownExecutorService(this.forkExecutor, Optional.of(LOG));
    }
  }

  /**
   * Execute a {@link Task}.
   *
   * @param task {@link Task} to be executed
   */
  public void execute(Task task) {
    LOG.info(String.format("Executing task %s", task.getTaskId()));
    this.taskExecutor.execute(new TrackingTask(task));
  }

  /**
   * Submit a {@link Task} to run.
   *
   * @param task {@link Task} to be submitted
   * @return a {@link java.util.concurrent.Future} for the submitted {@link Task}
   */
  public Future<?> submit(Task task) {
    LOG.info(String.format("Submitting task %s", task.getTaskId()));
    return this.taskExecutor.submit(new TrackingTask(task));
  }

  /**
   * Execute a {@link Fork}.
   *
   * @param fork {@link Fork} to be executed
   */
  public void execute(Fork fork) {
    LOG.info(String.format("Executing fork %d of task %s", fork.getIndex(), fork.getTaskId()));
    this.forkExecutor.execute(fork);
  }

  /**
   * Submit a {@link Fork} to run.
   *
   * @param fork {@link Fork} to be submitted
   * @return a {@link java.util.concurrent.Future} for the submitted {@link Fork}
   */
  public Future<?> submit(Fork fork) {
    LOG.info(String.format("Submitting fork %d of task %s", fork.getIndex(), fork.getTaskId()));
    return this.forkExecutor.submit(fork);
  }

  /**
   * Retry a failed {@link Task}.
   *
   * @param task failed {@link Task} to be retried
   */
  public void retry(Task task) {
    if (GobblinMetrics.isEnabled(task.getTaskState().getWorkunit()) &&
        task.getTaskState().contains(ConfigurationKeys.FORK_BRANCHES_KEY)) {
      // Adjust metrics to clean up numbers from the failed task
      task.getTaskState()
          .adjustJobMetricsOnRetry(task.getTaskState().getPropAsInt(ConfigurationKeys.FORK_BRANCHES_KEY));
    }

    // Task retry interval increases linearly with number of retries
    long interval = task.getRetryCount() * this.retryIntervalInSeconds;
    // Schedule the retry of the failed task
    this.taskExecutor.schedule(new TrackingTask(task, interval, TimeUnit.SECONDS), interval, TimeUnit.SECONDS);
    LOG.info(String.format("Scheduled retry of failed task %s to run in %d seconds", task.getTaskId(), interval));
    task.incrementRetryCount();
  }

  /**
   * Returns the {@link MetricSet} exposing queue/running/success/failure metrics for this executor.
   */
  public MetricSet getTaskExecutorQueueMetricSet() {
    return this.metricSet;
  }

  /**
   * Recomputes the queue metrics, at most once every 10 seconds. Synchronized so concurrent gauge
   * reads do not race on the shared counters or {@link #lastCalculationTime}.
   */
  private synchronized void calculateMetrics() {
    long currentTimeMillis = System.currentTimeMillis();

    // Throttle: skip recalculation if the last one happened less than 10 seconds ago
    if (lastCalculationTime < currentTimeMillis - TimeUnit.SECONDS.toMillis(10)) {
      LOG.debug("Starting metric calculation.");

      int currentQueuedTaskCount = 0;
      int futureQueuedTaskCount = 0;
      long currentQueuedTaskTotalTime = 0;
      for (Map.Entry<String, Long> queuedTask : this.queuedTasks.entrySet()) {
        // Entries with a timestamp in the future are scheduled retries that have not become "queued" yet
        if (queuedTask.getValue() <= currentTimeMillis) {
          currentQueuedTaskCount++;
          long currentQueuedTaskTime = currentTimeMillis - queuedTask.getValue();
          currentQueuedTaskTotalTime += currentQueuedTaskTime;
          LOG.debug(String.format("Task %s has been waiting in the queue for %d ms.", queuedTask.getKey(), currentQueuedTaskTime));
        } else {
          futureQueuedTaskCount++;
        }
      }
      if (futureQueuedTaskCount > 0) {
        LOG.debug(String.format("%d tasks were ignored during metric calculations because they are scheduled to run in the future.", futureQueuedTaskCount));
      }
      this.currentQueuedTaskCount.set(currentQueuedTaskCount);
      this.currentQueuedTaskTotalTime.set(currentQueuedTaskTotalTime);
      LOG.debug(String.format("%d current tasks have been waiting for a total of %d ms.", currentQueuedTaskCount, currentQueuedTaskTotalTime));

      int historicalQueuedTaskCount = 0;
      long historicalQueuedTaskTotalTime = 0;
      long cutoff = currentTimeMillis - queuedTaskTimeMaxAge;
      // Iterate newest-first so the max-size cap keeps the most recent entries and evicts the oldest
      Iterator<Map.Entry<Long, Long>> iterator = queuedTaskTimeHistorical.descendingMap().entrySet().iterator();
      while (iterator.hasNext()) {
        try {
          Map.Entry<Long, Long> historicalQueuedTask = iterator.next();
          // Evict entries that are either older than the cutoff or beyond the max-size cap
          if (historicalQueuedTask.getKey() < cutoff || historicalQueuedTaskCount >= queuedTaskTimeMaxSize) {
            LOG.debug(String.format("Task started at %d is being removed because it is before the cutoff of %d or exceeds the max size of %d. Queue time %d will be removed from metric calculations.", historicalQueuedTask.getKey(), cutoff, queuedTaskTimeMaxSize, historicalQueuedTask.getValue()));
            iterator.remove();
          } else {
            historicalQueuedTaskCount++;
            historicalQueuedTaskTotalTime += historicalQueuedTask.getValue();
            LOG.debug(String.format("Task started at %d is after cutoff. Queue time %d will be used in metric calculations.", historicalQueuedTask.getKey(), historicalQueuedTask.getValue()));
          }
        } catch (NoSuchElementException e) {
          // The map can shrink concurrently while being iterated; treat running out of entries as benign
          LOG.warn("Ran out of items in historical task queue time set.");
        }
      }
      this.historicalQueuedTaskCount.set(historicalQueuedTaskCount);
      this.historicalQueuedTaskTotalTime.set(historicalQueuedTaskTotalTime);
      LOG.debug(String.format("%d historical tasks have been waiting for a total of %d ms.", historicalQueuedTaskCount, historicalQueuedTaskTotalTime));

      int totalQueuedTaskCount = currentQueuedTaskCount + historicalQueuedTaskCount;
      long totalQueuedTaskTime = currentQueuedTaskTotalTime + historicalQueuedTaskTotalTime;
      this.queuedTaskCount.set(totalQueuedTaskCount);
      this.queuedTaskTotalTime.set(totalQueuedTaskTime);
      LOG.debug(String.format("%d tasks have been waiting for a total of %d ms.", totalQueuedTaskCount, totalQueuedTaskTime));

      this.lastCalculationTime = currentTimeMillis;
      LOG.debug("Finished metric calculation.");
    } else {
      LOG.debug("Skipped metric calculation because not enough time has elapsed since the last calculation.");
    }
  }

  /**
   * {@link MetricSet} of lazily-calculated gauges over the queue metrics, plus the running/success/failure counters.
   * Each gauge read triggers a (throttled) recalculation via {@link #calculateMetrics()}.
   */
  private class TaskExecutorQueueMetricSet implements MetricSet {
    @Override
    public Map<String, Metric> getMetrics() {
      final Map<String, Metric> metrics = new HashMap<>();
      metrics.put(name("queued", "current", "count"), new Gauge<Integer>() {
        @Override
        public Integer getValue() {
          calculateMetrics();
          return currentQueuedTaskCount.intValue();
        }
      });
      metrics.put(name("queued", "historical", "count"), new Gauge<Integer>() {
        @Override
        public Integer getValue() {
          calculateMetrics();
          return historicalQueuedTaskCount.intValue();
        }
      });
      metrics.put(name("queued", "count"), new Gauge<Integer>() {
        @Override
        public Integer getValue() {
          calculateMetrics();
          return queuedTaskCount.intValue();
        }
      });
      metrics.put(name("queued", "current", "time", "total"), new Gauge<Long>() {
        @Override
        public Long getValue() {
          calculateMetrics();
          return currentQueuedTaskTotalTime.longValue();
        }
      });
      metrics.put(name("queued", "historical", "time", "total"), new Gauge<Long>() {
        @Override
        public Long getValue() {
          calculateMetrics();
          return historicalQueuedTaskTotalTime.longValue();
        }
      });
      metrics.put(name("queued", "time", "total"), new Gauge<Long>() {
        @Override
        public Long getValue() {
          calculateMetrics();
          return queuedTaskTotalTime.longValue();
        }
      });
      metrics.put(name("running", "count"), runningTaskCount);
      metrics.put(name("successful", "count"), successfulTaskCount);
      metrics.put(name("failed", "count"), failedTaskCount);
      return Collections.unmodifiableMap(metrics);
    }
  }

  /**
   * A {@link Runnable} wrapper around a {@link Task} that records queue time and
   * running/success/failure metrics around the task's execution.
   */
  private class TrackingTask implements Runnable {

    private Task underlyingTask;

    public TrackingTask(Task task) {
      this(task, 0, TimeUnit.SECONDS);
    }

    public TrackingTask(Task task, long interval, TimeUnit timeUnit) {
      long now = System.currentTimeMillis();
      long timeToRun = now + timeUnit.toMillis(interval);
      LOG.debug(String.format("Task %s queued to run %s.", task.getTaskId(), timeToRun <= now ? "now" : "at " + timeToRun));
      queuedTasks.putIfAbsent(task.getTaskId(), timeToRun);
      this.underlyingTask = task;
    }

    @Override
    public void run() {
      long startTime = System.currentTimeMillis();
      onStart(startTime);
      try {
        this.underlyingTask.run();
        successfulTaskCount.mark();
      } catch (Exception e) {
        failedTaskCount.mark();
        LOG.error(String.format("Task %s failed", underlyingTask.getTaskId()), e);
        throw e;
      } finally {
        runningTaskCount.dec();
      }
    }

    /**
     * Records queue-time metrics and increments the running-task counter at the start of the run.
     */
    private void onStart(long startTime) {
      Long queueTime = queuedTasks.remove(this.underlyingTask.getTaskId());
      long workUnitCreationTime = this.underlyingTask.getTaskContext().getTaskState().getPropAsLong(ConfigurationKeys.WORK_UNIT_CREATION_TIME_IN_MILLIS, 0);
      // queueTime may be null: queuedTasks.putIfAbsent() keeps only the first entry for a task id, so if the
      // same id was queued twice the first completed run already removed the entry. Guard against the null
      // unboxing NPE and report a queue time of 0 in that case.
      long timeInQueue = queueTime == null ? 0 : startTime - queueTime;
      long timeSinceWorkUnitCreation = startTime - workUnitCreationTime;
      taskCreateAndRunTimer.update(timeSinceWorkUnitCreation, TimeUnit.MILLISECONDS);
      LOG.debug(String.format("Task %s started. Saving queued time of %d ms to history.", underlyingTask.getTaskId(), timeInQueue));
      queuedTaskTimeHistorical.putIfAbsent(System.currentTimeMillis(), timeInQueue);
      runningTaskCount.inc();
    }
  }
}
| 1,352 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/JobExecutionEventSubmitter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime;
import static org.apache.gobblin.metrics.event.JobEvent.JOB_STATE;
import static org.apache.gobblin.metrics.event.JobEvent.METADATA_JOB_ID;
import static org.apache.gobblin.metrics.event.JobEvent.METADATA_JOB_NAME;
import static org.apache.gobblin.metrics.event.JobEvent.METADATA_JOB_START_TIME;
import static org.apache.gobblin.metrics.event.JobEvent.METADATA_JOB_END_TIME;
import static org.apache.gobblin.metrics.event.JobEvent.METADATA_JOB_STATE;
import static org.apache.gobblin.metrics.event.JobEvent.METADATA_JOB_LAUNCHED_TASKS;
import static org.apache.gobblin.metrics.event.JobEvent.METADATA_JOB_COMPLETED_TASKS;
import static org.apache.gobblin.metrics.event.JobEvent.METADATA_JOB_LAUNCHER_TYPE;
import static org.apache.gobblin.metrics.event.JobEvent.METADATA_JOB_TRACKING_URL;
import static org.apache.gobblin.metrics.event.TaskEvent.*;
import java.util.Map;
import com.google.common.collect.ImmutableMap;
import org.apache.gobblin.metrics.event.EventSubmitter;
import lombok.AllArgsConstructor;
import org.apache.gobblin.metrics.event.TimingEvent;
/**
 * Submits metadata about a completed {@link JobState} using the provided {@link EventSubmitter}.
 *
 * <p>NOTE: the doc comment must precede the annotation (annotations are part of the declaration);
 * previously it sat between {@code @AllArgsConstructor} and the class keyword, where the javadoc
 * tool would not pick it up.
 */
@AllArgsConstructor
public class JobExecutionEventSubmitter {

  // Used to emit both the job-level and task-level events
  private final EventSubmitter eventSubmitter;

  // The value of any metadata key that cannot be determined
  private static final String UNKNOWN_VALUE = "UNKNOWN";

  /**
   * Submits metadata about a given {@link JobState} and each of its {@link TaskState}s. This method will submit a
   * single event for the {@link JobState} of type {@link org.apache.gobblin.metrics.event.JobEvent#JOB_STATE}. It will
   * submit an event for each {@link TaskState} of type {@link org.apache.gobblin.metrics.event.TaskEvent#TASK_STATE}.
   *
   * @param jobState is the {@link JobState} to emit events for
   */
  public void submitJobExecutionEvents(JobState jobState) {
    submitJobStateEvent(jobState);
    submitTaskStateEvents(jobState);
  }

  /**
   * Submits an event for the given {@link JobState}, carrying the job's identity, timing,
   * task counts, launcher type, tracking URL, and watermarks as metadata.
   */
  private void submitJobStateEvent(JobState jobState) {
    ImmutableMap.Builder<String, String> jobMetadataBuilder = new ImmutableMap.Builder<>();

    jobMetadataBuilder.put(METADATA_JOB_ID, jobState.getJobId());
    jobMetadataBuilder.put(METADATA_JOB_NAME, jobState.getJobName());
    jobMetadataBuilder.put(METADATA_JOB_START_TIME, Long.toString(jobState.getStartTime()));
    jobMetadataBuilder.put(METADATA_JOB_END_TIME, Long.toString(jobState.getEndTime()));
    jobMetadataBuilder.put(METADATA_JOB_STATE, jobState.getState().toString());
    jobMetadataBuilder.put(METADATA_JOB_LAUNCHED_TASKS, Integer.toString(jobState.getTaskCount()));
    jobMetadataBuilder.put(METADATA_JOB_COMPLETED_TASKS, Integer.toString(jobState.getCompletedTasks()));
    jobMetadataBuilder.put(METADATA_JOB_LAUNCHER_TYPE, jobState.getLauncherType().toString());
    // Tracking URL may be absent (guava Optional); fall back to the UNKNOWN placeholder
    jobMetadataBuilder.put(METADATA_JOB_TRACKING_URL, jobState.getTrackingURL().or(UNKNOWN_VALUE));
    jobMetadataBuilder.put(TimingEvent.FlowEventConstants.HIGH_WATERMARK_FIELD, jobState.getProp(TimingEvent.FlowEventConstants.HIGH_WATERMARK_FIELD, ""));
    jobMetadataBuilder.put(TimingEvent.FlowEventConstants.LOW_WATERMARK_FIELD, jobState.getProp(TimingEvent.FlowEventConstants.LOW_WATERMARK_FIELD, ""));
    jobMetadataBuilder.put(EventSubmitter.EVENT_TYPE, JOB_STATE);

    this.eventSubmitter.submit(JOB_STATE, jobMetadataBuilder.build());
  }

  /**
   * Submits an event for each {@link TaskState} in the given {@link JobState}.
   */
  private void submitTaskStateEvents(JobState jobState) {
    // Build Job Metadata applicable for TaskStates
    ImmutableMap.Builder<String, String> jobMetadataBuilder = new ImmutableMap.Builder<>();
    jobMetadataBuilder.put(METADATA_JOB_ID, jobState.getJobId());
    jobMetadataBuilder.put(METADATA_JOB_NAME, jobState.getJobName());
    jobMetadataBuilder.put(METADATA_JOB_TRACKING_URL, jobState.getTrackingURL().or(UNKNOWN_VALUE));
    Map<String, String> jobMetadata = jobMetadataBuilder.build();

    // Submit event for each TaskState
    for (TaskState taskState : jobState.getTaskStates()) {
      submitTaskStateEvent(taskState, jobMetadata);
    }
  }

  /**
   * Submits an event for a given {@link TaskState}. It will include all metadata specified in the jobMetadata parameter.
   */
  private void submitTaskStateEvent(TaskState taskState, Map<String, String> jobMetadata) {
    ImmutableMap.Builder<String, String> taskMetadataBuilder = new ImmutableMap.Builder<>();

    taskMetadataBuilder.putAll(jobMetadata);
    taskMetadataBuilder.put(METADATA_TASK_ID, taskState.getTaskId());
    taskMetadataBuilder.put(METADATA_TASK_START_TIME, Long.toString(taskState.getStartTime()));
    taskMetadataBuilder.put(METADATA_TASK_END_TIME, Long.toString(taskState.getEndTime()));
    taskMetadataBuilder.put(METADATA_TASK_WORKING_STATE, taskState.getWorkingState().toString());
    // Failure context may be absent (guava Optional); fall back to the UNKNOWN placeholder
    taskMetadataBuilder.put(METADATA_TASK_FAILURE_CONTEXT, taskState.getTaskFailureException().or(UNKNOWN_VALUE));
    taskMetadataBuilder.put(EventSubmitter.EVENT_TYPE, TASK_STATE);

    this.eventSubmitter.submit(TASK_STATE, taskMetadataBuilder.build());
  }
}
| 1,353 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/AbstractJobLauncher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime;
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.net.Authenticator;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.MDC;
import com.github.rholder.retry.RetryException;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.CaseFormat;
import com.google.common.base.Function;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Predicate;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.eventbus.EventBus;
import com.google.common.eventbus.Subscribe;
import com.google.common.io.Closer;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import javax.annotation.Nullable;
import lombok.RequiredArgsConstructor;
import org.apache.gobblin.broker.SharedResourcesBrokerFactory;
import org.apache.gobblin.broker.gobblin_scopes.GobblinScopeTypes;
import org.apache.gobblin.broker.iface.SharedResourcesBroker;
import org.apache.gobblin.commit.CommitSequence;
import org.apache.gobblin.commit.CommitSequenceStore;
import org.apache.gobblin.commit.DeliverySemantics;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.initializer.ConverterInitializerFactory;
import org.apache.gobblin.destination.DestinationDatasetHandlerService;
import org.apache.gobblin.metrics.GobblinMetrics;
import org.apache.gobblin.metrics.GobblinMetricsRegistry;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.Tag;
import org.apache.gobblin.metrics.event.EventName;
import org.apache.gobblin.metrics.event.EventSubmitter;
import org.apache.gobblin.metrics.event.JobEvent;
import org.apache.gobblin.metrics.event.TimingEvent;
import org.apache.gobblin.runtime.api.JobSpec;
import org.apache.gobblin.runtime.api.JobTemplate;
import org.apache.gobblin.runtime.api.MultiEventMetadataGenerator;
import org.apache.gobblin.runtime.api.SpecNotFoundException;
import org.apache.gobblin.runtime.job_spec.JobSpecResolver;
import org.apache.gobblin.runtime.listeners.CloseableJobListener;
import org.apache.gobblin.runtime.listeners.JobExecutionEventSubmitterListener;
import org.apache.gobblin.runtime.listeners.JobListener;
import org.apache.gobblin.runtime.listeners.JobListeners;
import org.apache.gobblin.runtime.locks.JobLock;
import org.apache.gobblin.runtime.locks.JobLockEventListener;
import org.apache.gobblin.runtime.locks.JobLockException;
import org.apache.gobblin.runtime.locks.LegacyJobLockFactoryManager;
import org.apache.gobblin.runtime.metrics.GobblinJobMetricReporter;
import org.apache.gobblin.runtime.troubleshooter.AutomaticTroubleshooter;
import org.apache.gobblin.runtime.troubleshooter.AutomaticTroubleshooterFactory;
import org.apache.gobblin.runtime.troubleshooter.IssueRepository;
import org.apache.gobblin.runtime.util.GsonUtils;
import org.apache.gobblin.runtime.util.JobMetrics;
import org.apache.gobblin.service.ServiceConfigKeys;
import org.apache.gobblin.source.InfiniteSource;
import org.apache.gobblin.source.Source;
import org.apache.gobblin.source.WorkUnitStreamSource;
import org.apache.gobblin.source.extractor.JobCommitPolicy;
import org.apache.gobblin.source.workunit.BasicWorkUnitStream;
import org.apache.gobblin.source.workunit.MultiWorkUnit;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.source.workunit.WorkUnitStream;
import org.apache.gobblin.stream.WorkUnitChangeEvent;
import org.apache.gobblin.util.ClusterNameTags;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.ExecutorsUtils;
import org.apache.gobblin.util.Id;
import org.apache.gobblin.util.JobLauncherUtils;
import org.apache.gobblin.util.ParallelRunner;
import org.apache.gobblin.util.PropertiesUtils;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
import org.apache.gobblin.writer.initializer.WriterInitializerFactory;
/**
* An abstract implementation of {@link JobLauncher} that handles common tasks for launching and running a job.
*
* @author Yinan Li
*/
public abstract class AbstractJobLauncher implements JobLauncher {
static final Logger LOG = LoggerFactory.getLogger(AbstractJobLauncher.class);
public static final String TASK_STATE_STORE_TABLE_SUFFIX = ".tst";
public static final String JOB_STATE_FILE_NAME = "job.state";
public static final String GOBBLIN_JOB_TEMPLATE_KEY = "gobblin.template.uri";
public static final String NUM_WORKUNITS = "numWorkUnits";
/** Making {@link AbstractJobLauncher} capable of loading multiple job templates.
* Keep the original {@link #GOBBLIN_JOB_TEMPLATE_KEY} for backward-compatibility.
* TODO: Expand support to Gobblin-as-a-Service in FlowTemplateCatalog.
* */
public static final String GOBBLIN_JOB_MULTI_TEMPLATE_KEY = "gobblin.template.uris";
// Job configuration properties
protected final Properties jobProps;
// This contains all job context information
protected final JobContext jobContext;
// This (optional) JobLock is used to prevent the next scheduled run
// of the job from starting if the current run has not finished yet
protected Optional<JobLock> jobLockOptional = Optional.absent();
// A conditional variable for which the condition is satisfied if a cancellation is requested
protected final Object cancellationRequest = new Object();
// A flag indicating whether a cancellation has been requested or not
protected volatile boolean cancellationRequested = false;
// A conditional variable for which the condition is satisfied if the cancellation is executed
protected final Object cancellationExecution = new Object();
// A flag indicating whether a cancellation has been executed or not
protected volatile boolean cancellationExecuted = false;
// A single-thread executor for executing job cancellation
protected final ExecutorService cancellationExecutor;
// An MetricContext to track runtime metrics only if metrics are enabled.
protected final Optional<MetricContext> runtimeMetricContext;
// An EventBuilder with basic metadata.
protected final EventSubmitter eventSubmitter;
// This is for dispatching events related to job launching and execution to registered subscribers
protected final EventBus eventBus = new EventBus(AbstractJobLauncher.class.getSimpleName());
// A list of JobListeners that will be injected into the user provided JobListener
private final List<JobListener> mandatoryJobListeners = Lists.newArrayList();
// Used to generate additional metadata to emit in timing events
private final MultiEventMetadataGenerator multiEventMetadataGenerator;
private final AutomaticTroubleshooter troubleshooter;
protected final GobblinJobMetricReporter gobblinJobMetricsReporter;
/**
 * Creates a launcher with no externally supplied instance broker; delegates to the three-argument
 * constructor with a {@code null} broker, which then creates a default one from the job properties.
 *
 * @param jobProps job configuration properties; must contain {@link ConfigurationKeys#JOB_NAME_KEY}
 * @param metadataTags tags attached to emitted metrics/events and to new {@code TaskState}s
 * @throws JobException if a previous instance of the job still holds the job lock
 * @throws Exception if initialization of the job context or its dependencies fails
 */
public AbstractJobLauncher(Properties jobProps, List<? extends Tag<?>> metadataTags)
    throws Exception {
  this(jobProps, metadataTags, null);
}
/**
 * Creates a launcher for the job described by {@code jobProps}.
 *
 * <p>Initialization order matters here: the troubleshooter is started and the job lock acquired
 * before the {@link JobContext} is built, and the lock is released if anything after acquisition
 * throws, so a failed construction does not block the next scheduled run.
 *
 * @param jobProps job configuration properties; must contain {@link ConfigurationKeys#JOB_NAME_KEY}
 * @param metadataTags tags attached to emitted metrics/events and to new {@code TaskState}s
 * @param instanceBroker shared-resources broker to use, or {@code null} to create a default one
 *                       from {@code jobProps}
 * @throws JobException if a previous instance of the job still holds the job lock
 * @throws Exception if initialization of the job context or its dependencies fails
 */
public AbstractJobLauncher(Properties jobProps, List<? extends Tag<?>> metadataTags,
    @Nullable SharedResourcesBroker<GobblinScopeTypes> instanceBroker)
    throws Exception {
  Preconditions.checkArgument(jobProps.containsKey(ConfigurationKeys.JOB_NAME_KEY),
      "A job must have a job name specified by job.name");

  // Add clusterIdentifier tag so that it is added to any new TaskState created
  List<Tag<?>> clusterNameTags = Lists.newArrayList();
  clusterNameTags.addAll(Tag.fromMap(ClusterNameTags.getClusterNameTags()));
  GobblinMetrics.addCustomTagsToProperties(jobProps, clusterNameTags);

  // Start issue collection before anything else so initialization problems are captured too
  troubleshooter = AutomaticTroubleshooterFactory.createForJob(ConfigUtils.propertiesToConfig(jobProps));
  troubleshooter.start();

  // Make a copy for both the system and job configuration properties and resolve the job-template if any.
  this.jobProps = new Properties();
  this.jobProps.putAll(jobProps);

  resolveGobblinJobTemplateIfNecessary(this.jobProps);

  // Acquire the job lock up front; bail out if a previous run of this job is still in flight
  if (!tryLockJob(this.jobProps)) {
    throw new JobException(String.format("Previous instance of job %s is still running, skipping this scheduled run",
        this.jobProps.getProperty(ConfigurationKeys.JOB_NAME_KEY)));
  }

  try {
    setDefaultAuthenticator(this.jobProps);

    if (instanceBroker == null) {
      instanceBroker = createDefaultInstanceBroker(jobProps);
    }

    this.jobContext = new JobContext(this.jobProps, LOG, instanceBroker, troubleshooter.getIssueRepository());
    // Register the context so it receives events (e.g. work unit changes) dispatched on this launcher's bus
    this.eventBus.register(this.jobContext);

    this.cancellationExecutor = Executors.newSingleThreadExecutor(
        ExecutorsUtils.newDaemonThreadFactory(Optional.of(LOG), Optional.of("CancellationExecutor")));

    // Present only when metrics are enabled for this job (guava Optional transform)
    this.runtimeMetricContext =
        this.jobContext.getJobMetricsOptional().transform(new Function<JobMetrics, MetricContext>() {
          @Override
          public MetricContext apply(JobMetrics input) {
            return input.getMetricContext();
          }
        });

    this.eventSubmitter = buildEventSubmitter(metadataTags);

    // Add all custom tags to the JobState so that tags are added to any new TaskState created
    GobblinMetrics.addCustomTagToState(this.jobContext.getJobState(), metadataTags);

    JobExecutionEventSubmitter jobExecutionEventSubmitter = new JobExecutionEventSubmitter(this.eventSubmitter);
    this.mandatoryJobListeners.add(new JobExecutionEventSubmitterListener(jobExecutionEventSubmitter));

    this.multiEventMetadataGenerator = new MultiEventMetadataGenerator(
        PropertiesUtils.getPropAsList(jobProps, ConfigurationKeys.EVENT_METADATA_GENERATOR_CLASS_KEY,
            ConfigurationKeys.DEFAULT_EVENT_METADATA_GENERATOR_CLASS_KEY));

    // Reporter class is configurable; instantiated reflectively via the longest matching constructor
    String jobMetricsReporterClassName = this.jobProps.getProperty(ConfigurationKeys.JOB_METRICS_REPORTER_CLASS_KEY, ConfigurationKeys.DEFAULT_JOB_METRICS_REPORTER_CLASS);
    this.gobblinJobMetricsReporter = (GobblinJobMetricReporter) GobblinConstructorUtils.invokeLongestConstructor(Class.forName(jobMetricsReporterClassName),
        this.runtimeMetricContext);
  } catch (Exception e) {
    // Release the job lock on any initialization failure so the next scheduled run is not blocked
    unlockJob();
    throw e;
  }
}
/**
 * Installs the JVM-wide default {@link Authenticator} configured via
 * {@link ConfigurationKeys#DEFAULT_AUTHENTICATOR_CLASS}, constructing it reflectively with the
 * given {@link Properties}. Does nothing when the property is absent or blank.
 *
 * @param properties job properties, also passed to the authenticator's constructor
 */
public static void setDefaultAuthenticator(Properties properties) {
  String authenticatorClassName = properties.getProperty(ConfigurationKeys.DEFAULT_AUTHENTICATOR_CLASS);
  if (Strings.isNullOrEmpty(authenticatorClassName)) {
    // No authenticator configured; leave the JVM default untouched.
    return;
  }
  Authenticator.setDefault(
      GobblinConstructorUtils.invokeConstructor(Authenticator.class, authenticatorClassName, properties));
}
/**
 * Handles a {@link WorkUnitChangeEvent} posted on the source's event bus: first removes the
 * event's old task ids from the current job, then adds its new work units. Both hooks are no-ops
 * by default; subclasses supporting dynamic work units override them.
 *
 * @throws InvocationTargetException wrapping any failure from the remove/add hooks
 */
@Subscribe
public void handleWorkUnitChangeEvent(WorkUnitChangeEvent workUnitChangeEvent)
    throws InvocationTargetException {
  LOG.info("start to handle workunit change event");
  try {
    removeTasksFromCurrentJob(workUnitChangeEvent.getOldTaskIds());
    addTasksToCurrentJob(workUnitChangeEvent.getNewWorkUnits());
  } catch (Exception cause) {
    //todo: emit some event to indicate there is an error handling this event that may cause starvation
    throw new InvocationTargetException(cause);
  }
}
/**
 * Hook invoked by {@link #handleWorkUnitChangeEvent(WorkUnitChangeEvent)} to remove the given
 * task ids from the running job. No-op by default; override to support dynamic work units.
 */
protected void removeTasksFromCurrentJob(List<String> taskIdsToRemove) throws IOException, ExecutionException,
    RetryException {}
/**
 * Hook invoked by {@link #handleWorkUnitChangeEvent(WorkUnitChangeEvent)} to add new work units
 * to the running job. No-op by default; override to support dynamic work units.
 */
protected void addTasksToCurrentJob(List<WorkUnit> workUnitsToAdd) throws IOException, ExecutionException,
    RetryException {}
/**
 * Resolves a 'gobblin.template.uri' (or comma-separated multi-template) reference in the given
 * job properties and merges the resolved configuration back into {@code jobProps}. Public and
 * static so every JobLauncher implementation (including **AzkabanJobLauncher**) can reuse it.
 * Properties without either template key are left unchanged.
 *
 * @param jobProps Gobblin Job-level properties.
 */
public static void resolveGobblinJobTemplateIfNecessary(Properties jobProps) throws IOException, URISyntaxException,
    SpecNotFoundException,
    JobTemplate.TemplateException {
  Config config = ConfigUtils.propertiesToConfig(jobProps);
  JobSpecResolver resolver = JobSpecResolver.builder(config).build();

  JobSpec spec = null;
  if (jobProps.containsKey(GOBBLIN_JOB_TEMPLATE_KEY)) {
    // Single-template case: the property value is the template URI itself.
    spec = JobSpec.builder()
        .withConfig(config)
        .withTemplate(new URI(jobProps.getProperty(GOBBLIN_JOB_TEMPLATE_KEY)))
        .build();
  } else if (jobProps.containsKey(GOBBLIN_JOB_MULTI_TEMPLATE_KEY)) {
    // Multi-template case: comma-separated list of template URIs, applied in order.
    List<URI> templateUris = new ArrayList<>();
    for (String uri : jobProps.getProperty(GOBBLIN_JOB_MULTI_TEMPLATE_KEY).split(",")) {
      templateUris.add(new URI(uri));
    }
    spec = JobSpec.builder().withConfig(config).withResourceTemplates(templateUris).build();
  }

  if (spec != null) {
    jobProps.putAll(ConfigUtils.configToProperties(resolver.resolveJobSpec(spec).getConfig()));
  }
}
/**
 * Builds a job-scoped {@link SharedResourcesBroker} for launchers that were not handed one;
 * objects registered with it are shared only within this job.
 */
private static SharedResourcesBroker<GobblinScopeTypes> createDefaultInstanceBroker(Properties jobProps) {
  LOG.warn("Creating a job specific {}. Objects will only be shared at the job level.",
      SharedResourcesBroker.class.getSimpleName());
  Config brokerConfig = ConfigFactory.parseProperties(jobProps);
  return SharedResourcesBrokerFactory.createDefaultTopLevelBroker(brokerConfig,
      GobblinScopeTypes.GLOBAL.defaultScopeInstance());
}
/**
 * The JobContext of the particular job.
 *
 * <p>Package-private accessor used by collaborators in this package; the context is created in
 * the constructor and lives for the launcher's lifetime.</p>
 *
 * @return {@link JobContext} of the job
 */
JobContext getJobContext() {
  return this.jobContext;
}
/**
 * A default implementation of {@link JobLauncher#cancelJob(JobListener)}.
 *
 * <p>
 * This implementation relies on two conditional variables: one for the condition that a cancellation
 * is requested, and the other for the condition that the cancellation is executed. Upon entrance, the
 * method notifies the cancellation executor started by {@link #startCancellationExecutor()} on the
 * first conditional variable to indicate that a cancellation has been requested so the executor is
 * unblocked. Then it waits on the second conditional variable for the cancellation to be executed.
 * </p>
 *
 * <p>
 * The actual execution of the cancellation is handled by the cancellation executor started by the
 * method {@link #startCancellationExecutor()} that uses the {@link #executeCancellation()} method
 * to execute the cancellation.
 * </p>
 *
 * {@inheritDoc JobLauncher#cancelJob(JobListener)}
 */
@Override
public void cancelJob(JobListener jobListener)
    throws JobException {
  synchronized (this.cancellationRequest) {
    if (this.cancellationRequested) {
      // Return immediately if a cancellation has already been requested
      return;
    }

    this.cancellationRequested = true;
    // Notify the cancellation executor that a cancellation has been requested
    this.cancellationRequest.notify();
  }

  synchronized (this.cancellationExecution) {
    try {
      while (!this.cancellationExecuted) {
        // Wait for the cancellation to be executed
        this.cancellationExecution.wait();
      }

      try {
        LOG.info("Current job state is: " + this.jobContext.getJobState().getState());
        // Commit work done before the cancellation, but only if the job has not already
        // committed and the commit policy permits committing a subset of tasks.
        if (this.jobContext.getJobState().getState() != JobState.RunningState.COMMITTED && (
            this.jobContext.getJobCommitPolicy() == JobCommitPolicy.COMMIT_SUCCESSFUL_TASKS
                || this.jobContext.getJobCommitPolicy() == JobCommitPolicy.COMMIT_ON_PARTIAL_SUCCESS)) {
          this.jobContext.finalizeJobStateBeforeCommit();
          this.jobContext.commit(true);
        }
        this.jobContext.close();
      } catch (IOException ioe) {
        // Best effort: a failed close must not prevent listener notification below.
        LOG.error("Could not close job context.", ioe);
      }

      notifyListeners(this.jobContext, jobListener, TimingEvent.LauncherTimings.JOB_CANCEL, new JobListenerAction() {
        @Override
        public void apply(JobListener jobListener, JobContext jobContext)
            throws Exception {
          jobListener.onJobCancellation(jobContext);
        }
      });
    } catch (InterruptedException ie) {
      // Re-assert interrupt status; give up waiting for the cancellation to finish.
      Thread.currentThread().interrupt();
    }
  }
}
/**
 * Filters out {@link WorkUnit}s flagged with {@link ConfigurationKeys#WORK_UNIT_SKIP_KEY}.
 * Before a flagged unit is dropped from the stream, it is recorded on the {@link JobState} as a
 * SKIPPED task state. {@link MultiWorkUnit}s may neither be flagged themselves nor contain
 * flagged children.
 */
@RequiredArgsConstructor
private static class SkippedWorkUnitsFilter implements Predicate<WorkUnit> {
  private final JobState jobState;

  @Override
  public boolean apply(WorkUnit workUnit) {
    if (workUnit instanceof MultiWorkUnit) {
      Preconditions.checkArgument(!workUnit.contains(ConfigurationKeys.WORK_UNIT_SKIP_KEY),
          "Error: MultiWorkUnit cannot be skipped");
      for (WorkUnit inner : ((MultiWorkUnit) workUnit).getWorkUnits()) {
        Preconditions.checkArgument(!inner.contains(ConfigurationKeys.WORK_UNIT_SKIP_KEY),
            "Error: MultiWorkUnit cannot contain skipped WorkUnit");
      }
    }

    if (!workUnit.getPropAsBoolean(ConfigurationKeys.WORK_UNIT_SKIP_KEY, false)) {
      return true;
    }

    // Record the skipped unit on the job state before filtering it out of the stream.
    WorkUnitState skippedState = new WorkUnitState(workUnit, this.jobState);
    skippedState.setWorkingState(WorkUnitState.WorkingState.SKIPPED);
    this.jobState.addSkippedTaskState(new TaskState(skippedState));
    return false;
  }
}
/**
 * Drives one complete job execution: work-unit creation, preparation, execution, commit,
 * staging-data cleanup, and listener/event notification. Listener notification and
 * troubleshooter issue reporting always run in the {@code finally} blocks, regardless of outcome.
 *
 * @param jobListener listener notified at each lifecycle stage, combined with mandatory listeners
 * @throws JobException if work units cannot be obtained or the job ends FAILED/CANCELLED
 */
@Override
public void launchJob(JobListener jobListener)
    throws JobException {
  String jobId = this.jobContext.getJobId();
  final JobState jobState = this.jobContext.getJobState();
  // Tracks the "no work units" early-return path so the final notification block can
  // distinguish it from a normal run.
  boolean isWorkUnitsEmpty = false;

  try {
    // Tag every log line from this thread with the job name/key.
    MDC.put(ConfigurationKeys.JOB_NAME_KEY, this.jobContext.getJobName());
    MDC.put(ConfigurationKeys.JOB_KEY_KEY, this.jobContext.getJobKey());
    TimingEvent launchJobTimer = this.eventSubmitter.getTimingEvent(TimingEvent.LauncherTimings.FULL_JOB_EXECUTION);

    try (Closer closer = Closer.create()) {
      closer.register(this.jobContext);
      notifyListeners(this.jobContext, jobListener, TimingEvent.LauncherTimings.JOB_PREPARE, new JobListenerAction() {
        @Override
        public void apply(JobListener jobListener, JobContext jobContext)
            throws Exception {
          jobListener.onJobPrepare(jobContext);
        }
      });

      if (this.jobContext.getSemantics() == DeliverySemantics.EXACTLY_ONCE) {
        // If exactly-once is used, commit sequences of the previous run must be successfully completed
        // before this run can make progress.
        executeUnfinishedCommitSequences(jobState.getJobName());
      }

      Source<?, ?> source = this.jobContext.getSource();
      // Register on the source's event bus (when it exposes one) so this launcher receives
      // WorkUnitChangeEvents published by the source.
      if (source instanceof InfiniteSource) {
        ((InfiniteSource) source).getEventBus().register(this);
      } else if (source instanceof SourceDecorator) {
        if (((SourceDecorator<?, ?>) source).getEventBus() != null) {
          ((SourceDecorator<?, ?>) source).getEventBus().register(this);
        }
      }

      TimingEvent workUnitsCreationTimer =
          this.eventSubmitter.getTimingEvent(TimingEvent.LauncherTimings.WORK_UNITS_CREATION);
      WorkUnitStream workUnitStream;
      // Stream-capable sources provide a WorkUnitStream directly; others are wrapped.
      if (source instanceof WorkUnitStreamSource) {
        workUnitStream = ((WorkUnitStreamSource) source).getWorkunitStream(jobState);
      } else {
        workUnitStream = new BasicWorkUnitStream.Builder(source.getWorkunits(jobState)).build();
      }
      workUnitsCreationTimer.stop(this.multiEventMetadataGenerator.getMetadata(this.jobContext,
          EventName.WORK_UNITS_CREATION));
      this.gobblinJobMetricsReporter.reportWorkUnitCreationTimerMetrics(workUnitsCreationTimer, jobState);

      // The absence means there is something wrong getting the work units
      if (workUnitStream == null || workUnitStream.getWorkUnits() == null) {
        this.eventSubmitter.submit(JobEvent.WORK_UNITS_MISSING);
        jobState.setState(JobState.RunningState.FAILED);
        String errMsg = "Failed to get work units for job " + jobId;
        this.jobContext.getJobState().setJobFailureMessage(errMsg);
        this.jobContext.getJobState().setProp(NUM_WORKUNITS, 0);
        this.gobblinJobMetricsReporter.reportWorkUnitCountMetrics(0, jobState);
        throw new JobException(errMsg);
      }

      // No work unit to run
      if (!workUnitStream.getWorkUnits().hasNext()) {
        this.eventSubmitter.submit(JobEvent.WORK_UNITS_EMPTY);
        LOG.warn("No work units have been created for job " + jobId);
        // An empty run counts as COMMITTED; the finally block still notifies listeners.
        jobState.setState(JobState.RunningState.COMMITTED);
        isWorkUnitsEmpty = true;
        this.jobContext.getJobState().setProp(NUM_WORKUNITS, 0);
        this.gobblinJobMetricsReporter.reportWorkUnitCountMetrics(0, jobState);
        return;
      }

      // calculation of total bytes to copy in a job used to track a job's copy progress
      if (jobState.getPropAsBoolean(ConfigurationKeys.REPORT_JOB_PROGRESS, ConfigurationKeys.DEFAULT_REPORT_JOB_PROGRESS)) {
        LOG.info("Report job progress config is turned on");
        if (workUnitStream.isSafeToMaterialize()) {
          long totalSizeInBytes = sumWorkUnitsSizes(workUnitStream);
          this.jobContext.getJobState().setProp(ServiceConfigKeys.TOTAL_WORK_UNIT_SIZE, totalSizeInBytes);
        } else {
          LOG.warn("Property " + ConfigurationKeys.REPORT_JOB_PROGRESS + " is turned on, but "
              + "progress cannot be reported for infinite work unit streams. Turn off property "
              + ConfigurationKeys.REPORT_JOB_PROGRESS);
        }
      }

      // Perform work needed before writing is done
      Boolean canCleanUp = this.canCleanStagingData(this.jobContext.getJobState());
      workUnitStream = closer.register(new DestinationDatasetHandlerService(jobState, canCleanUp, this.eventSubmitter))
          .executeHandlers(workUnitStream);

      //Initialize writer and converter(s)
      closer.register(WriterInitializerFactory.newInstace(jobState, workUnitStream)).initialize();
      closer.register(ConverterInitializerFactory.newInstance(jobState, workUnitStream)).initialize();

      TimingEvent stagingDataCleanTimer =
          this.eventSubmitter.getTimingEvent(TimingEvent.RunJobTimings.MR_STAGING_DATA_CLEAN);
      // Cleanup left-over staging data possibly from the previous run. This is particularly
      // important if the current batch of WorkUnits include failed WorkUnits from the previous
      // run which may still have left-over staging data not cleaned up yet.
      cleanLeftoverStagingData(workUnitStream, jobState);
      stagingDataCleanTimer.stop(this.multiEventMetadataGenerator.getMetadata(this.jobContext,
          EventName.MR_STAGING_DATA_CLEAN));

      long startTime = System.currentTimeMillis();
      jobState.setStartTime(startTime);
      jobState.setState(JobState.RunningState.RUNNING);

      try {
        LOG.info("Starting job " + jobId);

        notifyListeners(this.jobContext, jobListener, TimingEvent.LauncherTimings.JOB_START, new JobListenerAction() {
          @Override
          public void apply(JobListener jobListener, JobContext jobContext)
              throws Exception {
            jobListener.onJobStart(jobContext);
          }
        });

        TimingEvent workUnitsPreparationTimer =
            this.eventSubmitter.getTimingEvent(TimingEvent.LauncherTimings.WORK_UNITS_PREPARATION);
        // Add task ids
        workUnitStream = prepareWorkUnits(workUnitStream, jobState);
        // Remove skipped workUnits from the list of work units to execute.
        workUnitStream = workUnitStream.filter(new SkippedWorkUnitsFilter(jobState));
        // Add surviving tasks to jobState
        workUnitStream = workUnitStream.transform(new MultiWorkUnitForEach() {
          @Override
          public void forWorkUnit(WorkUnit workUnit) {
            jobState.incrementTaskCount();
            jobState.addTaskState(new TaskState(new WorkUnitState(workUnit, jobState)));
          }
        });

        // If it is a streaming source, workunits cannot be counted
        this.jobContext.getJobState().setProp(NUM_WORKUNITS,
            workUnitStream.isSafeToMaterialize() ? workUnitStream.getMaterializedWorkUnitCollection().size() : 0);
        this.gobblinJobMetricsReporter.reportWorkUnitCountMetrics(this.jobContext.getJobState().getPropAsInt(NUM_WORKUNITS), jobState);

        // dump the work unit if tracking logs are enabled
        if (jobState.getPropAsBoolean(ConfigurationKeys.WORK_UNIT_ENABLE_TRACKING_LOGS)) {
          workUnitStream = workUnitStream.transform(new Function<WorkUnit, WorkUnit>() {
            @Nullable
            @Override
            public WorkUnit apply(@Nullable WorkUnit input) {
              LOG.info("Work unit tracking log: {}", input);
              return input;
            }
          });
        }

        workUnitsPreparationTimer.stop(this.multiEventMetadataGenerator.getMetadata(this.jobContext,
            EventName.WORK_UNITS_PREPARATION));

        // Write job execution info to the job history store before the job starts to run
        this.jobContext.storeJobExecutionInfo();

        TimingEvent jobRunTimer = this.eventSubmitter.getTimingEvent(TimingEvent.LauncherTimings.JOB_RUN);
        // Start the job and wait for it to finish
        runWorkUnitStream(workUnitStream);
        jobRunTimer.stop(this.multiEventMetadataGenerator.getMetadata(this.jobContext,EventName.JOB_RUN));

        this.eventSubmitter
            .submit(CaseFormat.UPPER_UNDERSCORE.to(CaseFormat.UPPER_CAMEL, "JOB_" + jobState.getState()));

        // Check and set final job jobPropsState upon job completion
        if (jobState.getState() == JobState.RunningState.CANCELLED) {
          LOG.info(String.format("Job %s has been cancelled, aborting now", jobId));
          return;
        }

        TimingEvent jobCommitTimer = this.eventSubmitter.getTimingEvent(TimingEvent.LauncherTimings.JOB_COMMIT);
        this.jobContext.finalizeJobStateBeforeCommit();
        this.jobContext.commit();
        postProcessJobState(jobState);
        jobCommitTimer.stop(this.multiEventMetadataGenerator.getMetadata(this.jobContext, EventName.JOB_COMMIT));
      } finally {
        // Record timing even when the run/commit above threw.
        long endTime = System.currentTimeMillis();
        jobState.setEndTime(endTime);
        jobState.setDuration(endTime - jobState.getStartTime());
      }
    } catch (Throwable t) {
      // Catch Throwable so launch failures are recorded on the job state instead of escaping raw.
      jobState.setState(JobState.RunningState.FAILED);
      String errMsg = "Failed to launch and run job " + jobId + " due to " + t.getMessage();
      LOG.error(errMsg + ": " + t, t);
      this.jobContext.getJobState().setJobFailureException(t);
      jobState.setProp(ConfigurationKeys.JOB_FAILURES_KEY,
          Integer.parseInt(jobState.getProp(ConfigurationKeys.JOB_FAILURES_KEY, "0")) + 1);
    } finally {
      try {
        troubleshooter.refineIssues();
        troubleshooter.logIssueSummary();
        troubleshooter.reportJobIssuesAsEvents(eventSubmitter);
      } catch (Exception e) {
        // Issue reporting is best-effort; never mask the job outcome.
        LOG.error("Failed to report issues", e);
      }

      try {
        TimingEvent jobCleanupTimer = this.eventSubmitter.getTimingEvent(TimingEvent.LauncherTimings.JOB_CLEANUP);
        cleanupStagingData(jobState);
        jobCleanupTimer.stop(this.multiEventMetadataGenerator.getMetadata(this.jobContext, EventName.JOB_CLEANUP));

        // Write job execution info to the job history store upon job termination
        this.jobContext.storeJobExecutionInfo();
      } finally {
        launchJobTimer.stop(this.multiEventMetadataGenerator.getMetadata(this.jobContext, EventName.FULL_JOB_EXECUTION));

        if (isWorkUnitsEmpty) {
          //If no WorkUnits are created, first send the JobCompleteTimer event.
          notifyListeners(this.jobContext, jobListener, TimingEvent.LauncherTimings.JOB_COMPLETE, new JobListenerAction() {
            @Override
            public void apply(JobListener jobListener, JobContext jobContext)
                throws Exception {
              jobListener.onJobCompletion(jobContext);
            }
          });
          //Next, send the JobSucceededTimer event.
          // NOTE(review): this JOB_SUCCEEDED notification invokes onJobFailure on the listener —
          // verify against the JobListener contract whether a success callback was intended
          // before changing; existing listeners may depend on the current behavior.
          notifyListeners(this.jobContext, jobListener, TimingEvent.LauncherTimings.JOB_SUCCEEDED, new JobListenerAction() {
            @Override
            public void apply(JobListener jobListener, JobContext jobContext)
                throws Exception {
              jobListener.onJobFailure(jobContext);
            }
          });
        } else {
          for (JobState.DatasetState datasetState : this.jobContext.getDatasetStatesByUrns().values()) {
            // Set the overall job state to FAILED if the job failed to process any dataset
            if (datasetState.getState() == JobState.RunningState.FAILED) {
              jobState.setState(JobState.RunningState.FAILED);
              LOG.warn("At least one dataset state is FAILED. Setting job state to FAILED.");
              break;
            }
          }

          notifyListeners(this.jobContext, jobListener, TimingEvent.LauncherTimings.JOB_COMPLETE, new JobListenerAction() {
            @Override
            public void apply(JobListener jobListener, JobContext jobContext)
                throws Exception {
              jobListener.onJobCompletion(jobContext);
            }
          });

          if (jobState.getState() == JobState.RunningState.FAILED || jobState.getState() == JobState.RunningState.CANCELLED) {
            notifyListeners(this.jobContext, jobListener, TimingEvent.LauncherTimings.JOB_FAILED, new JobListenerAction() {
              @Override
              public void apply(JobListener jobListener, JobContext jobContext)
                  throws Exception {
                jobListener.onJobFailure(jobContext);
              }
            });
            throw new JobException(String.format("Job %s failed", jobId));
          } else {
            // NOTE(review): the success path also invokes onJobFailure under the JOB_SUCCEEDED
            // timing event — confirm against the JobListener contract before changing.
            notifyListeners(this.jobContext, jobListener, TimingEvent.LauncherTimings.JOB_SUCCEEDED, new JobListenerAction() {
              @Override
              public void apply(JobListener jobListener, JobContext jobContext)
                  throws Exception {
                jobListener.onJobFailure(jobContext);
              }
            });
          }
        }
      }
    }
  } finally {
    // Stop metrics reporting
    if (this.jobContext.getJobMetricsOptional().isPresent()) {
      JobMetrics.remove(jobState);
    }
    MDC.remove(ConfigurationKeys.JOB_NAME_KEY);
    MDC.remove(ConfigurationKeys.JOB_KEY_KEY);
  }
}
/**
 * Sums the {@link ServiceConfigKeys#WORK_UNIT_SIZE} property over every flattened work unit in
 * the stream; units missing the property contribute 0. The stream must be materializable.
 */
@VisibleForTesting
public static long sumWorkUnitsSizes (WorkUnitStream workUnitStream) {
  long totalSizeInBytes = 0L;
  for (WorkUnit workUnit : JobLauncherUtils.flattenWorkUnits(workUnitStream.getMaterializedWorkUnitCollection())) {
    totalSizeInBytes += workUnit.getPropAsLong(ServiceConfigKeys.WORK_UNIT_SIZE, 0);
  }
  return totalSizeInBytes;
}
/**
 * Executes and then deletes every commit sequence left over from a previous run of
 * {@code jobName}. Required for EXACTLY_ONCE delivery before the current run may proceed;
 * assumes a commit sequence store is present.
 */
private void executeUnfinishedCommitSequences(String jobName)
    throws IOException {
  Preconditions.checkState(this.jobContext.getCommitSequenceStore().isPresent());
  CommitSequenceStore store = this.jobContext.getCommitSequenceStore().get();

  for (String datasetUrn : store.get(jobName)) {
    Optional<CommitSequence> sequence = store.get(jobName, datasetUrn);
    if (sequence.isPresent()) {
      sequence.get().execute();
    }
    // Delete regardless, so the sequence is not replayed on the next run.
    store.delete(jobName, datasetUrn);
  }
}
/**
 * Subclasses can override this method to do whatever processing on the {@link TaskState}s,
 * e.g., aggregate task-level metrics into job-level metrics.
 *
 * <p>Intentionally a no-op here; kept only so legacy overrides keep working.</p>
 *
 * @deprecated Use {@link #postProcessJobState(JobState)}
 */
@Deprecated
protected void postProcessTaskStates(@SuppressWarnings("unused") List<TaskState> taskStates) {
  // Do nothing
}
/**
 * Subclasses can override this method to do whatever processing on the {@link JobState} and its
 * associated {@link TaskState}s, e.g., aggregate task-level metrics into job-level metrics.
 *
 * <p>This default implementation invokes the deprecated {@link #postProcessTaskStates(List)}
 * hook, then — when metrics are enabled — aggregates per-dataset records/bytes written and
 * emits them on a JOB_SUMMARY timing event.</p>
 */
protected void postProcessJobState(JobState jobState) {
  postProcessTaskStates(jobState.getTaskStates());

  // Dataset summaries are only emitted when metrics are enabled for this job.
  if (!GobblinMetrics.isEnabled(this.jobProps)) {
    return;
  }

  List<DatasetTaskSummary> datasetTaskSummaries = new ArrayList<>();
  Map<String, JobState.DatasetState> datasetStates = this.jobContext.getDatasetStatesByUrns();
  // Only process successful datasets unless configuration to process failed datasets is set
  boolean processFailedTasks = PropertiesUtils.getPropAsBoolean(this.jobProps, ConfigurationKeys.WRITER_COUNT_METRICS_FROM_FAILED_TASKS, "false");
  for (JobState.DatasetState datasetState : datasetStates.values()) {
    if (datasetState.getState() == JobState.RunningState.COMMITTED
        || (datasetState.getState() == JobState.RunningState.FAILED && this.jobContext.getJobCommitPolicy() == JobCommitPolicy.COMMIT_SUCCESSFUL_TASKS)) {
      long totalBytesWritten = 0;
      long totalRecordsWritten = 0;
      for (TaskState taskState : datasetState.getTaskStates()) {
        // Certain writers may omit these metrics e.g. CompactionLauncherWriter
        if ((taskState.getWorkingState() == WorkUnitState.WorkingState.COMMITTED || processFailedTasks)) {
          totalBytesWritten += taskState.getPropAsLong(ConfigurationKeys.WRITER_BYTES_WRITTEN, 0);
          totalRecordsWritten += taskState.getPropAsLong(ConfigurationKeys.WRITER_RECORDS_WRITTEN, 0);
        }
      }
      LOG.info(String.format("DatasetMetrics for '%s' - (records: %d; bytes: %d)", datasetState.getDatasetUrn(), totalRecordsWritten, totalBytesWritten));
      datasetTaskSummaries.add(new DatasetTaskSummary(datasetState.getDatasetUrn(), totalRecordsWritten, totalBytesWritten,
          datasetState.getState() == JobState.RunningState.COMMITTED));
    } else if (datasetState.getState() == JobState.RunningState.FAILED) {
      // Check if config is turned on for submitting writer metrics on failure due to non-atomic write semantics
      if (this.jobContext.getJobCommitPolicy() == JobCommitPolicy.COMMIT_ON_FULL_SUCCESS) {
        LOG.info("Due to task failure, will report that no records or bytes were written for " + datasetState.getDatasetUrn());
        datasetTaskSummaries.add(new DatasetTaskSummary(datasetState.getDatasetUrn(), 0, 0, false));
      }
    }
  }

  // Emit the aggregated summaries as metadata on a JOB_SUMMARY timing event.
  TimingEvent jobSummaryTimer = this.eventSubmitter.getTimingEvent(TimingEvent.LauncherTimings.JOB_SUMMARY);
  jobSummaryTimer.addMetadata(TimingEvent.DATASET_TASK_SUMMARIES, GsonUtils.GSON_WITH_DATE_HANDLING.toJson(datasetTaskSummaries));
  jobSummaryTimer.stop();
}
/**
 * Releases launcher resources: stops the troubleshooter, shuts down the cancellation executor,
 * shuts down the source, removes job metrics, and — always — releases the job lock.
 */
@Override
public void close()
    throws IOException {
  // Stop issue collection first; the troubleshooter accepts no reports after this.
  troubleshooter.stop();

  try {
    this.cancellationExecutor.shutdownNow();
    try {
      this.jobContext.getSource().shutdown(this.jobContext.getJobState());
    } finally {
      // Remove metrics even if source shutdown throws.
      if (GobblinMetrics.isEnabled(this.jobProps)) {
        GobblinMetricsRegistry.getInstance().remove(this.jobContext.getJobId());
      }
    }
  } finally {
    // Always release the job lock so the next run of this job can proceed.
    unlockJob();
  }
}
/**
 * Run the given job.
 *
 * <p>
 * The contract between {@link AbstractJobLauncher#launchJob(JobListener)} and this method is this method
 * is responsible for setting {@link JobState.RunningState} properly and upon returning from this method
 * (either normally or due to exceptions) whatever {@link JobState.RunningState} is set in this method is
 * used to determine if the job has finished.
 * </p>
 *
 * @param workUnits List of {@link WorkUnit}s of the job
 * @throws Exception on any failure running the work units
 */
protected abstract void runWorkUnits(List<WorkUnit> workUnits)
    throws Exception;
/**
 * Run the given job.
 *
 * <p>
 * The contract between {@link AbstractJobLauncher#launchJob(JobListener)} and this method is this method
 * is responsible for setting {@link JobState.RunningState} properly and upon returning from this method
 * (either normally or due to exceptions) whatever {@link JobState.RunningState} is set in this method is
 * used to determine if the job has finished.
 * </p>
 *
 * <p>This default implementation materializes the stream and delegates to
 * {@link #runWorkUnits(List)}; it therefore rejects infinite streams.</p>
 *
 * @param workUnitStream stream of {@link WorkUnit}s of the job
 * @throws Exception on any failure running the work units
 */
protected void runWorkUnitStream(WorkUnitStream workUnitStream) throws Exception {
  runWorkUnits(materializeWorkUnitList(workUnitStream));
}
/**
 * Materialize a {@link WorkUnitStream} into an in-memory list. Note that infinite work unit
 * streams cannot be materialized and cause an {@link UnsupportedOperationException}.
 */
private List<WorkUnit> materializeWorkUnitList(WorkUnitStream workUnitStream) {
  if (workUnitStream.isFiniteStream()) {
    return Lists.newArrayList(workUnitStream.getWorkUnits());
  }
  throw new UnsupportedOperationException("Cannot materialize an infinite work unit stream.");
}
/**
 * Get a {@link JobLock} to be used for the job.
 *
 * <p>Delegates to the legacy job-lock factory manager; subclasses may override to supply a
 * different locking mechanism.</p>
 *
 * @param properties the job properties
 * @param jobLockEventListener the listener for lock events.
 * @return {@link JobLock} to be used for the job
 * @throws JobLockException throw when the {@link JobLock} fails to initialize
 */
protected JobLock getJobLock(Properties properties, JobLockEventListener jobLockEventListener)
    throws JobLockException {
  return LegacyJobLockFactoryManager.getJobLock(properties, jobLockEventListener);
}
/**
 * Execute the job cancellation.
 *
 * <p>Invoked from the cancellation executor thread started by {@link #startCancellationExecutor()}
 * (and from the job-lock loss callback in {@code tryLockJob}).</p>
 *
 * The implementation should not throw any exceptions because that will kill the `Cancellation Executor` thread
 * and will create a deadlock.
 */
protected abstract void executeCancellation();
/**
 * Start the scheduled executor for executing job cancellation.
 *
 * <p>
 * The executor, upon started, waits on the condition variable indicating a cancellation is requested,
 * i.e., it waits for a cancellation request to arrive. If a cancellation is requested, the executor
 * is unblocked and calls {@link #executeCancellation()} to execute the cancellation. Upon completion
 * of the cancellation execution, the executor notifies the caller that requested the cancellation on
 * the conditional variable indicating the cancellation has been executed so the caller is unblocked.
 * Upon successful execution of the cancellation, it sets the job state to
 * {@link JobState.RunningState#CANCELLED}.
 * </p>
 */
protected void startCancellationExecutor() {
  this.cancellationExecutor.execute(new Runnable() {
    @Override
    public void run() {
      synchronized (AbstractJobLauncher.this.cancellationRequest) {
        try {
          while (!AbstractJobLauncher.this.cancellationRequested) {
            // Wait for a cancellation request to arrive
            AbstractJobLauncher.this.cancellationRequest.wait();
          }
          LOG.info("Cancellation has been requested for job " + AbstractJobLauncher.this.jobContext.getJobId());
          executeCancellation();
          LOG.info("Cancellation has been executed for job " + AbstractJobLauncher.this.jobContext.getJobId());
        } catch (InterruptedException ie) {
          // Re-assert interrupt status; the executor thread exits without executing cancellation.
          Thread.currentThread().interrupt();
        }
      }

      synchronized (AbstractJobLauncher.this.cancellationExecution) {
        AbstractJobLauncher.this.cancellationExecuted = true;
        AbstractJobLauncher.this.jobContext.getJobState().setState(JobState.RunningState.CANCELLED);
        // Notify that the cancellation has been executed
        AbstractJobLauncher.this.cancellationExecution.notifyAll();
      }
    }
  });
}
/**
 * Prepare the flattened {@link WorkUnit}s for execution by populating the job and task IDs.
 *
 * <p>Applies a {@link WorkUnitPreparator} over the stream; ids are assigned lazily as the
 * transformed stream is consumed.</p>
 */
private WorkUnitStream prepareWorkUnits(WorkUnitStream workUnits, JobState jobState) {
  return workUnits.transform(new WorkUnitPreparator(this.jobContext.getJobId()));
}
/**
 * Identity {@link Function} over work units that additionally invokes {@link #forWorkUnit} on
 * each flat unit, expanding a {@link MultiWorkUnit} into its children first.
 */
private static abstract class MultiWorkUnitForEach implements Function<WorkUnit, WorkUnit> {

  @Nullable
  @Override
  public WorkUnit apply(WorkUnit input) {
    if (!(input instanceof MultiWorkUnit)) {
      forWorkUnit(input);
      return input;
    }
    for (WorkUnit child : ((MultiWorkUnit) input).getWorkUnits()) {
      forWorkUnit(child);
    }
    return input;
  }

  /** Callback invoked once per flat (non-multi) work unit. */
  protected abstract void forWorkUnit(WorkUnit workUnit);
}
/**
 * Stamps the job id, a freshly generated sequential task id, and the task key onto every flat
 * work unit it visits.
 */
@RequiredArgsConstructor
private static class WorkUnitPreparator extends MultiWorkUnitForEach {
  private final String jobId;
  // Monotonically increasing per-preparator counter used to derive task ids.
  private int taskIdSequence = 0;

  @Override
  protected void forWorkUnit(WorkUnit workUnit) {
    String taskId = JobLauncherUtils.newTaskId(this.jobId, this.taskIdSequence++);
    workUnit.setProp(ConfigurationKeys.JOB_ID_KEY, this.jobId);
    workUnit.setId(taskId);
    workUnit.setProp(ConfigurationKeys.TASK_ID_KEY, taskId);
    workUnit.setProp(ConfigurationKeys.TASK_KEY_KEY, Long.toString(Id.Task.parse(taskId).getSequence()));
  }
}
/**
 * Try acquiring the job lock and return whether the lock is successfully locked.
 *
 * <p>Locking is enabled unless {@link ConfigurationKeys#JOB_LOCK_ENABLED_KEY} is explicitly set
 * to false; when disabled, no lock is created and the method trivially succeeds. If the lock is
 * later lost, the job is cancelled via {@link #executeCancellation()}.</p>
 *
 * @param properties the job properties
 * @return true if no lock is required or the lock was acquired; false on contention or failure
 */
private boolean tryLockJob(Properties properties) {
  try {
    // Boolean.parseBoolean avoids the boxing round-trip of Boolean.valueOf(...) auto-unboxed.
    if (Boolean.parseBoolean(properties.getProperty(ConfigurationKeys.JOB_LOCK_ENABLED_KEY, Boolean.TRUE.toString()))) {
      this.jobLockOptional = Optional.of(getJobLock(properties, new JobLockEventListener() {
        @Override
        public void onLost() {
          // Losing the lock means another instance may now run this job: cancel ours.
          executeCancellation();
        }
      }));
    }
    return !this.jobLockOptional.isPresent() || this.jobLockOptional.get().tryLock();
  } catch (JobLockException e) { // renamed from the misleading 'ioe' (it is not an IOException)
    LOG.error(String.format("Failed to acquire job lock for job %s: %s", this.jobContext.getJobId(), e), e);
    return false;
  }
}
/**
 * Unlock a completed or failed job.
 *
 * <p>Unlocks and then closes the job lock if one is held, clearing {@link #jobLockOptional} in
 * all cases so a second call is a no-op. Failures are logged but never propagated, since this
 * runs on cleanup paths.</p>
 */
private void unlockJob() {
  if (!this.jobLockOptional.isPresent()) {
    return;
  }
  try {
    // Unlock so the next run of the same job can proceed
    this.jobLockOptional.get().unlock();
  } catch (JobLockException e) { // renamed from the misleading 'ioe' (it is not an IOException)
    LOG.error(String.format("Failed to unlock for job %s: %s", this.jobContext.getJobId(), e), e);
  } finally {
    try {
      this.jobLockOptional.get().close();
    } catch (IOException e) {
      LOG.error(String.format("Failed to close job lock for job %s: %s", this.jobContext.getJobId(), e), e);
    } finally {
      // Drop the reference so repeated calls (e.g. ctor failure path plus close()) are no-ops.
      this.jobLockOptional = Optional.absent();
    }
  }
}
/**
 * Combines the specified {@link JobListener} with the {@link #mandatoryJobListeners} for this job. Uses
 * {@link JobListeners#parallelJobListener(List)} to create a {@link CloseableJobListener} that will execute all
 * the {@link JobListener}s in parallel.
 *
 * <p>Additional listener classes named in {@link ConfigurationKeys#JOB_LISTENERS_KEY} are
 * instantiated reflectively via their no-arg constructor; failures are logged and skipped.</p>
 */
private CloseableJobListener getParallelCombinedJobListener(JobState jobState, JobListener jobListener) {
  List<JobListener> jobListeners = Lists.newArrayList(this.mandatoryJobListeners);
  jobListeners.add(jobListener);

  Set<String> jobListenerClassNames = jobState.getPropAsSet(ConfigurationKeys.JOB_LISTENERS_KEY, StringUtils.EMPTY);
  for (String jobListenerClassName : jobListenerClassNames) {
    try {
      @SuppressWarnings("unchecked")
      Class<? extends JobListener> jobListenerClass =
          (Class<? extends JobListener>) Class.forName(jobListenerClassName);
      // Class.newInstance() is deprecated (it rethrows checked constructor exceptions
      // undeclared); invoke the no-arg Constructor explicitly instead.
      jobListeners.add(jobListenerClass.getDeclaredConstructor().newInstance());
    } catch (ReflectiveOperationException e) {
      // Covers ClassNotFound/Instantiation/IllegalAccess plus NoSuchMethod/InvocationTarget
      // raised by the Constructor-based instantiation above.
      LOG.warn(String.format("JobListener could not be created due to %s", jobListenerClassName), e);
    }
  }

  return JobListeners.parallelJobListener(jobListeners);
}
/**
 * Takes a {@link List} of {@link Tag}s and returns a new immutable {@link List} containing the
 * original {@link Tag}s followed by any {@link Tag}s derived from
 * {@link ClusterNameTags#getClusterNameTags()}.
 *
 * @see ClusterNameTags
 */
private static List<Tag<?>> addClusterNameTags(List<? extends Tag<?>> tags) {
  ImmutableList.Builder<Tag<?>> combined = ImmutableList.builder();
  combined.addAll(tags);
  combined.addAll(Tag.fromMap(ClusterNameTags.getClusterNameTags()));
  return combined.build();
}
/**
 * Build the {@link EventSubmitter} for this class, rooted at the runtime metric context and
 * carrying the given tags as event metadata under the "gobblin.runtime" namespace.
 */
private EventSubmitter buildEventSubmitter(List<? extends Tag<?>> tags) {
  EventSubmitter.Builder builder = new EventSubmitter.Builder(this.runtimeMetricContext, "gobblin.runtime");
  builder.addMetadata(Tag.toMap(Tag.tagValuesToString(tags)));
  return builder.build();
}
/**
 * Cleanup the left-over staging data possibly from the previous run of the job that may have failed
 * and not cleaned up its staging data.
 *
 * Property {@link ConfigurationKeys#CLEANUP_STAGING_DATA_PER_TASK} controls whether to cleanup
 * staging data per task, or to cleanup entire job's staging data at once.
 *
 * Staging data will not be cleaned if the job has unfinished {@link CommitSequence}s.
 */
private void cleanLeftoverStagingData(WorkUnitStream workUnits, JobState jobState)
    throws JobException {
  if (jobState.getPropAsBoolean(ConfigurationKeys.CLEANUP_STAGING_DATA_BY_INITIALIZER, false)) {
    //Clean up will be done by initializer.
    return;
  }

  try {
    if (!canCleanStagingData(jobState)) {
      LOG.error("Job " + jobState.getJobName() + " has unfinished commit sequences. Will not clean up staging data.");
      return;
    }
  } catch (IOException e) {
    throw new JobException("Failed to check unfinished commit sequences", e);
  }

  try {
    if (this.jobContext.shouldCleanupStagingDataPerTask()) {
      // Per-task cleanup needs to iterate every work unit, which requires a materializable stream.
      if (workUnits.isSafeToMaterialize()) {
        Closer closer = Closer.create();
        // ParallelRunners are created per file system and closed via the closer below.
        Map<String, ParallelRunner> parallelRunners = Maps.newHashMap();
        try {
          for (WorkUnit workUnit : JobLauncherUtils.flattenWorkUnits(workUnits.getMaterializedWorkUnitCollection())) {
            JobLauncherUtils.cleanTaskStagingData(new WorkUnitState(workUnit, jobState), LOG, closer, parallelRunners);
          }
        } catch (Throwable t) {
          // Closer.rethrow preserves the primary failure while still closing resources.
          throw closer.rethrow(t);
        } finally {
          closer.close();
        }
      } else {
        throw new RuntimeException("Work unit streams do not support cleaning staging data per task.");
      }
    } else {
      if (jobState.getPropAsBoolean(ConfigurationKeys.CLEANUP_OLD_JOBS_DATA, ConfigurationKeys.DEFAULT_CLEANUP_OLD_JOBS_DATA)) {
        JobLauncherUtils.cleanUpOldJobData(jobState, LOG, jobContext.getStagingDirProvided(), jobContext.getOutputDirProvided());
      }
      JobLauncherUtils.cleanJobStagingData(jobState, LOG);
    }
  } catch (Throwable t) {
    // Catch Throwable instead of just IOException to make sure failure of this won't affect the current run
    LOG.error("Failed to clean leftover staging data", t);
  }
}
private static String getJobIdPrefix(String jobId) {
return jobId.substring(0, jobId.lastIndexOf(Id.Job.SEPARATOR) + 1);
}
/**
* Cleanup the job's task staging data. This is not doing anything in case job succeeds
* and data is successfully committed because the staging data has already been moved
* to the job output directory. But in case the job fails and data is not committed,
* we want the staging data to be cleaned up.
*
* Property {@link ConfigurationKeys#CLEANUP_STAGING_DATA_PER_TASK} controls whether to cleanup
* staging data per task, or to cleanup entire job's staging data at once.
*
* Staging data will not be cleaned if the job has unfinished {@link CommitSequence}s.
*/
private void cleanupStagingData(JobState jobState)
throws JobException {
if (jobState.getPropAsBoolean(ConfigurationKeys.CLEANUP_STAGING_DATA_BY_INITIALIZER, false)) {
//Clean up will be done by initializer.
return;
}
try {
if (!canCleanStagingData(jobState)) {
LOG.error("Job " + jobState.getJobName() + " has unfinished commit sequences. Will not clean up staging data.");
return;
}
} catch (IOException e) {
throw new JobException("Failed to check unfinished commit sequences", e);
}
if (this.jobContext.shouldCleanupStagingDataPerTask()) {
cleanupStagingDataPerTask(jobState);
} else {
cleanupStagingDataForEntireJob(jobState);
}
}
  /** Whether the {@link org.apache.gobblin.source.Source} of this job requested an early stop. */
  public boolean isEarlyStopped() {
    return this.jobContext.getSource().isEarlyStopped();
  }
  /** Returns the {@link IssueRepository} backing this launcher's troubleshooter. */
  protected IssueRepository getIssueRepository() {
    return troubleshooter.getIssueRepository();
  }
/**
* Staging data cannot be cleaned if exactly once semantics is used, and the job has unfinished
* commit sequences.
*/
private boolean canCleanStagingData(JobState jobState)
throws IOException {
return this.jobContext.getSemantics() != DeliverySemantics.EXACTLY_ONCE || !this.jobContext.getCommitSequenceStore()
.get().exists(jobState.getJobName());
}
private static void cleanupStagingDataPerTask(JobState jobState) {
Closer closer = Closer.create();
Map<String, ParallelRunner> parallelRunners = Maps.newHashMap();
try {
for (TaskState taskState : jobState.getTaskStates()) {
try {
JobLauncherUtils.cleanTaskStagingData(taskState, LOG, closer, parallelRunners);
} catch (IOException e) {
LOG.error(String.format("Failed to clean staging data for task %s: %s", taskState.getTaskId(), e), e);
}
}
} finally {
try {
closer.close();
} catch (IOException e) {
LOG.error("Failed to clean staging data", e);
}
}
}
  /** Best-effort removal of the job-level staging data; failures are logged, never propagated. */
  private static void cleanupStagingDataForEntireJob(JobState jobState) {
    try {
      JobLauncherUtils.cleanJobStagingData(jobState, LOG);
    } catch (IOException e) {
      LOG.error("Failed to clean staging data for job " + jobState.getJobId(), e);
    }
  }
  /**
   * Applies the given {@link JobListenerAction} to the combined (parallel) set of job listeners,
   * timing the whole notification with a {@link TimingEvent} named {@code timerEventName}.
   *
   * @throws JobException if any listener throws while handling the action
   */
  private void notifyListeners(JobContext jobContext, JobListener jobListener, String timerEventName,
      JobListenerAction action)
      throws JobException {
    TimingEvent timer = this.eventSubmitter.getTimingEvent(timerEventName);
    try (CloseableJobListener parallelJobListener = getParallelCombinedJobListener(this.jobContext.getJobState(),
        jobListener)) {
      action.apply(parallelJobListener, jobContext);
    } catch (Exception e) {
      throw new JobException("Failed to execute all JobListeners", e);
    } finally {
      // The timing event is submitted even when a listener fails, so the run is always recorded.
      LOG.info("Submitting {}", timerEventName);
      timer.stop(this.multiEventMetadataGenerator.getMetadata(this.jobContext,
          EventName.getEnumFromEventId(timerEventName)));
    }
  }
private interface JobListenerAction {
void apply(JobListener jobListener, JobContext jobContext)
throws Exception;
}
}
| 1,354 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/TaskCreationException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime;
import java.io.IOException;
/**
* Exception thrown while creating task for execution within {@link GobblinMultiTaskAttempt}.
*/
public class TaskCreationException extends IOException {
public TaskCreationException(String message) {
super(message);
}
}
| 1,355 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/DefaultLimiterFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime;
/**
* {@inheritDoc}
*
* @deprecated This class has been moved to {@link org.apache.gobblin.util.limiter.DefaultLimiterFactory}.
*/
@Deprecated
public class DefaultLimiterFactory extends org.apache.gobblin.util.limiter.DefaultLimiterFactory {
  // Intentionally empty: this class exists only so the old package name keeps resolving.
}
| 1,356 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/MysqlDatasetStateStore.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime;
import java.io.IOException;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.CharMatcher;
import com.google.common.base.Strings;
import com.google.common.collect.Maps;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.metastore.DatasetStateStore;
import org.apache.gobblin.metastore.MysqlStateStore;
import org.apache.gobblin.metastore.MysqlStateStoreEntryManager;
import org.apache.gobblin.metastore.predicates.StateStorePredicate;
import org.apache.gobblin.runtime.metastore.mysql.MysqlDatasetStateStoreEntryManager;
import javax.sql.DataSource;
/**
* A custom extension to {@link MysqlStateStore} for storing and reading {@link JobState.DatasetState}s.
*
* <p>
* The purpose of having this class is to hide some implementation details that are unnecessarily
* exposed if using the {@link MysqlStateStore} to store and serve job/dataset states between job runs.
* </p>
*
* <p>
* In addition to persisting and reading {@link JobState.DatasetState}s. This class is also able to
* read job state files of existing jobs that store serialized instances of {@link JobState} for
* backward compatibility.
* </p>
*
*/
public class MysqlDatasetStateStore extends MysqlStateStore<JobState.DatasetState>
    implements DatasetStateStore<JobState.DatasetState> {
  private static final Logger LOGGER = LoggerFactory.getLogger(MysqlDatasetStateStore.class);
  public MysqlDatasetStateStore(DataSource dataSource, String stateStoreTableName, boolean compressedValues)
      throws IOException {
    super(dataSource, stateStoreTableName, compressedValues, JobState.DatasetState.class);
  }
  /**
   * Get a {@link Map} from dataset URNs to the latest {@link JobState.DatasetState}s.
   *
   * @param jobName the job name
   * @return a {@link Map} from dataset URNs to the latest {@link JobState.DatasetState}s
   * @throws IOException if there's something wrong reading the {@link JobState.DatasetState}s
   */
  public Map<String, JobState.DatasetState> getLatestDatasetStatesByUrns(String jobName) throws IOException {
    // The leading "%" is a SQL LIKE wildcard: match any table whose name ends with the
    // "current" dataset-state suffix, regardless of its dataset-urn prefix.
    List<JobState.DatasetState> previousDatasetStates =
        getAll(jobName, "%" + CURRENT_DATASET_STATE_FILE_SUFFIX + DATASET_STATE_STORE_TABLE_SUFFIX, JobStateSearchColumns.TABLE_NAME_ONLY);
    Map<String, JobState.DatasetState> datasetStatesByUrns = Maps.newHashMap();
    for (JobState.DatasetState previousDatasetState : previousDatasetStates) {
      datasetStatesByUrns.put(previousDatasetState.getDatasetUrn(), previousDatasetState);
    }
    // The dataset (job) state from the deprecated "current.jst" will be read even though
    // the job has transitioned to the new dataset-based mechanism
    if (datasetStatesByUrns.size() > 1) {
      datasetStatesByUrns.remove(ConfigurationKeys.DEFAULT_DATASET_URN);
    }
    return datasetStatesByUrns;
  }
  /**
   * Get the latest {@link JobState.DatasetState} of a given dataset.
   *
   * @param storeName the name of the dataset state store
   * @param datasetUrn the dataset URN
   * @return the latest {@link JobState.DatasetState} of the dataset or {@link null} if it is not found
   * @throws IOException
   */
  public JobState.DatasetState getLatestDatasetState(String storeName, String datasetUrn) throws IOException {
    // ':' characters in the URN are replaced by '.' to match how persistDatasetState names tables.
    String alias =
        Strings.isNullOrEmpty(datasetUrn) ? CURRENT_DATASET_STATE_FILE_SUFFIX + DATASET_STATE_STORE_TABLE_SUFFIX
            : CharMatcher.is(':').replaceFrom(datasetUrn, '.') + "-" + CURRENT_DATASET_STATE_FILE_SUFFIX + DATASET_STATE_STORE_TABLE_SUFFIX;
    return get(storeName, alias, datasetUrn);
  }
  /**
   * Persist a given {@link JobState.DatasetState}.
   *
   * @param datasetUrn the dataset URN
   * @param datasetState the {@link JobState.DatasetState} to persist
   * @throws IOException if there's something wrong persisting the {@link JobState.DatasetState}
   */
  public void persistDatasetState(String datasetUrn, JobState.DatasetState datasetState) throws IOException {
    String jobName = datasetState.getJobName();
    String jobId = datasetState.getJobId();
    // Sanitize ':' out of the URN so it can be embedded in a table name.
    datasetUrn = CharMatcher.is(':').replaceFrom(datasetUrn, '.');
    String tableName = Strings.isNullOrEmpty(datasetUrn) ? jobId + DATASET_STATE_STORE_TABLE_SUFFIX
        : datasetUrn + "-" + jobId + DATASET_STATE_STORE_TABLE_SUFFIX;
    LOGGER.info("Persisting " + tableName + " to the job state store");
    put(jobName, tableName, datasetState);
    // The alias always points at the most recently persisted state for this dataset.
    createAlias(jobName, tableName, getAliasName(datasetUrn));
  }
  @Override
  public void persistDatasetURNs(String storeName, Collection<String> datasetUrns)
      throws IOException {
    //do nothing for now
  }
  // Builds the "current" alias table name for a (possibly empty) sanitized dataset URN.
  private static String getAliasName(String datasetUrn) {
    return Strings.isNullOrEmpty(datasetUrn) ? CURRENT_DATASET_STATE_FILE_SUFFIX + DATASET_STATE_STORE_TABLE_SUFFIX
        : datasetUrn + "-" + CURRENT_DATASET_STATE_FILE_SUFFIX + DATASET_STATE_STORE_TABLE_SUFFIX;
  }
  @Override
  public List<MysqlDatasetStateStoreEntryManager> getMetadataForTables(StateStorePredicate predicate)
      throws IOException {
    // get the metadata from the parent class and convert
    List<MysqlStateStoreEntryManager> entryManagers =
        (List<MysqlStateStoreEntryManager>) super.getMetadataForTables(predicate);
    return entryManagers.stream().map(entry -> new MysqlDatasetStateStoreEntryManager(entry.getStoreName(),
        entry.getTableName(), entry.getTimestamp(), this)).collect(Collectors.toList());
  }
}
| 1,357 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/ForkThrowableHolder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime;
import java.util.List;
import java.util.Map;
import org.apache.commons.lang.exception.ExceptionUtils;
import com.google.common.base.Optional;
import com.google.common.collect.Maps;
import lombok.extern.slf4j.Slf4j;
/**
* An object which holds all {@link Throwable}s thrown by {@link org.apache.gobblin.runtime.fork.Fork}, so that other
* Gobblin components (like {@link Task}) can have access.
*/
@Slf4j
public class ForkThrowableHolder {

  // NOTE(review): plain HashMap — assumes forks record their throwables from a single thread
  // or with external synchronization; confirm before relying on concurrent writes.
  Map<Integer, Throwable> throwables = Maps.newHashMap();

  /** Records the throwable thrown by the fork with the given index. */
  public void setThrowable(int forkIdx, Throwable e) {
    throwables.put(forkIdx, e);
  }

  /** Returns the throwable recorded for the given fork index, if any. */
  public Optional<Throwable> getThrowable (int forkIdx) {
    return Optional.fromNullable(throwables.get(forkIdx));
  }

  /** Whether no fork has recorded a throwable. */
  public boolean isEmpty() {
    return throwables.isEmpty();
  }

  /**
   * Aggregates the stack traces of all failed forks into a single {@link ForkException}
   * whose message lists every failed branch for the given task.
   */
  public ForkException getAggregatedException (List<Integer> failedForkIds, String taskId) {
    // StringBuilder instead of StringBuffer: this is purely local, single-threaded string assembly,
    // so the synchronized StringBuffer buys nothing.
    StringBuilder messageBuilder = new StringBuilder();
    messageBuilder.append("Fork branches " + failedForkIds + " failed for task " + taskId + "\n");
    for (Integer idx: failedForkIds) {
      messageBuilder.append("<Fork " + idx + ">\n");
      if (this.throwables.containsKey(idx)) {
        messageBuilder.append(ExceptionUtils.getFullStackTrace(this.throwables.get(idx)));
      } else {
        messageBuilder.append("Cannot find throwable entry in ForkThrowableHolder\n");
      }
    }
    return new ForkException(messageBuilder.toString());
  }
}
| 1,358 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/FsDatasetStateStore.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime;
import java.io.IOException;
import java.net.URI;
import java.util.Arrays;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.CharMatcher;
import com.google.common.base.Function;
import com.google.common.base.Optional;
import com.google.common.base.Strings;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Iterators;
import com.google.common.collect.Lists;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigValue;
import org.apache.gobblin.metastore.predicates.StateStorePredicate;
import org.apache.gobblin.metastore.predicates.StoreNamePredicate;
import org.apache.gobblin.metastore.DatasetStateStore;
import org.apache.gobblin.metastore.FsStateStore;
import org.apache.gobblin.metastore.nameParser.DatasetUrnStateStoreNameParser;
import org.apache.gobblin.metastore.nameParser.SimpleDatasetUrnStateStoreNameParser;
import org.apache.gobblin.runtime.metastore.filesystem.FsDatasetStateStoreEntryManager;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.Either;
import org.apache.gobblin.util.ExecutorsUtils;
import org.apache.gobblin.util.WritableShimSerialization;
import org.apache.gobblin.util.executors.IteratorExecutor;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
import org.apache.gobblin.util.filters.HiddenFilter;
import org.apache.gobblin.util.hadoop.GobblinSequenceFileReader;
/**
* A custom extension to {@link FsStateStore} for storing and reading {@link JobState.DatasetState}s.
*
* <p>
* The purpose of having this class is to hide some implementation details that are unnecessarily
* exposed if using the {@link FsStateStore} to store and serve job/dataset states between job runs.
* </p>
*
* <p>
* In addition to persisting and reading {@link JobState.DatasetState}s. This class is also able to
* read job state files of existing jobs that store serialized instances of {@link JobState} for
* backward compatibility.
* </p>
*
* @author Yinan Li
*/
public class FsDatasetStateStore extends FsStateStore<JobState.DatasetState> implements DatasetStateStore<JobState.DatasetState> {
private static final Logger LOGGER = LoggerFactory.getLogger(FsDatasetStateStore.class);
private int threadPoolOfGettingDatasetState;
private static final long CACHE_SIZE = 100;
private LoadingCache<Path, DatasetUrnStateStoreNameParser> stateStoreNameParserLoadingCache;
protected static DatasetStateStore<JobState.DatasetState> createStateStore(Config config, String className) {
// Add all job configuration properties so they are picked up by Hadoop
Configuration conf = new Configuration();
for (Map.Entry<String, ConfigValue> entry : config.entrySet()) {
conf.set(entry.getKey(), entry.getValue().unwrapped().toString());
}
try {
String stateStoreFsUri =
ConfigUtils.getString(config, ConfigurationKeys.STATE_STORE_FS_URI_KEY, ConfigurationKeys.LOCAL_FS_URI);
final FileSystem stateStoreFs = FileSystem.get(URI.create(stateStoreFsUri), conf);
String stateStoreRootDir = config.getString(ConfigurationKeys.STATE_STORE_ROOT_DIR_KEY);
Integer threadPoolOfGettingDatasetState = ConfigUtils
.getInt(config, ConfigurationKeys.THREADPOOL_SIZE_OF_LISTING_FS_DATASET_STATESTORE,
ConfigurationKeys.DEFAULT_THREADPOOL_SIZE_OF_LISTING_FS_DATASET_STATESTORE);
final String datasetUrnStateStoreNameParserClass = ConfigUtils
.getString(config, ConfigurationKeys.DATASETURN_STATESTORE_NAME_PARSER,
SimpleDatasetUrnStateStoreNameParser.class.getName());
LoadingCache<Path, DatasetUrnStateStoreNameParser> stateStoreNameParserLoadingCache =
CacheBuilder.newBuilder().maximumSize(CACHE_SIZE)
.build(new CacheLoader<Path, DatasetUrnStateStoreNameParser>() {
@Override
public DatasetUrnStateStoreNameParser load(Path stateStoreDirWithStoreName)
throws Exception {
return (DatasetUrnStateStoreNameParser) GobblinConstructorUtils
.invokeLongestConstructor(Class.forName(datasetUrnStateStoreNameParserClass), stateStoreFs,
stateStoreDirWithStoreName);
}
});
return (DatasetStateStore<JobState.DatasetState>) GobblinConstructorUtils
.invokeLongestConstructor(Class.forName(className), stateStoreFs, stateStoreRootDir,
threadPoolOfGettingDatasetState, stateStoreNameParserLoadingCache);
} catch (IOException e) {
throw new RuntimeException(e);
} catch (ReflectiveOperationException e) {
throw new RuntimeException("Failed to instantiate " + className, e);
}
}
public FsDatasetStateStore(String fsUri, String storeRootDir)
throws IOException {
super(fsUri, storeRootDir, JobState.DatasetState.class);
this.useTmpFileForPut = false;
this.threadPoolOfGettingDatasetState = ConfigurationKeys.DEFAULT_THREADPOOL_SIZE_OF_LISTING_FS_DATASET_STATESTORE;
}
public FsDatasetStateStore(FileSystem fs, String storeRootDir, Integer threadPoolSize,
LoadingCache<Path, DatasetUrnStateStoreNameParser> stateStoreNameParserLoadingCache) {
super(fs, storeRootDir, JobState.DatasetState.class);
this.useTmpFileForPut = false;
this.threadPoolOfGettingDatasetState = threadPoolSize;
this.stateStoreNameParserLoadingCache = stateStoreNameParserLoadingCache;
}
public FsDatasetStateStore(FileSystem fs, String storeRootDir, Integer threadPoolSize) {
this(fs, storeRootDir, threadPoolSize, null);
}
public FsDatasetStateStore(FileSystem fs, String storeRootDir) {
this(fs, storeRootDir, ConfigurationKeys.DEFAULT_THREADPOOL_SIZE_OF_LISTING_FS_DATASET_STATESTORE);
}
public FsDatasetStateStore(String storeUrl)
throws IOException {
super(storeUrl, JobState.DatasetState.class);
this.useTmpFileForPut = false;
}
@Override
public String sanitizeDatasetStatestoreNameFromDatasetURN(String storeName, String datasetURN)
throws IOException {
if (this.stateStoreNameParserLoadingCache == null) {
return datasetURN;
}
try {
Path statestoreDirWithStoreName = new Path(this.storeRootDir, storeName);
DatasetUrnStateStoreNameParser datasetUrnBasedStateStoreNameParser =
this.stateStoreNameParserLoadingCache.get(statestoreDirWithStoreName);
return datasetUrnBasedStateStoreNameParser.getStateStoreNameFromDatasetUrn(datasetURN);
} catch (ExecutionException e) {
throw new IOException("Failed to load dataset state store name parser: " + e, e);
}
}
@Override
public JobState.DatasetState get(String storeName, String tableName, String stateId)
throws IOException {
return getInternal(storeName, tableName, stateId, false);
}
public JobState.DatasetState getInternal(String storeName, String tableName, String stateId,
boolean sanitizeKeyForComparison)
throws IOException {
Path tablePath = new Path(new Path(this.storeRootDir, storeName), tableName);
if (!this.fs.exists(tablePath)) {
return null;
}
Configuration deserializeConf = new Configuration(this.conf);
WritableShimSerialization.addToHadoopConfiguration(deserializeConf);
try (@SuppressWarnings("deprecation") SequenceFile.Reader reader = new SequenceFile.Reader(this.fs, tablePath,
deserializeConf)) {
// This is necessary for backward compatibility as existing jobs are using the JobState class
Object writable = reader.getValueClass() == JobState.class ? new JobState() : new JobState.DatasetState();
try {
Text key = new Text();
while (reader.next(key)) {
String stringKey =
sanitizeKeyForComparison ? sanitizeDatasetStatestoreNameFromDatasetURN(storeName, key.toString())
: key.toString();
writable = reader.getCurrentValue(writable);
if (stringKey.equals(stateId)) {
if (writable instanceof JobState.DatasetState) {
return (JobState.DatasetState) writable;
}
return ((JobState) writable).newDatasetState(true);
}
}
} catch (Exception e) {
throw new IOException(e);
}
}
return null;
}
@Override
public List<JobState.DatasetState> getAll(String storeName, String tableName)
throws IOException {
List<JobState.DatasetState> states = Lists.newArrayList();
Path tablePath = new Path(new Path(this.storeRootDir, storeName), tableName);
if (!this.fs.exists(tablePath)) {
return states;
}
Configuration deserializeConfig = new Configuration(this.conf);
WritableShimSerialization.addToHadoopConfiguration(deserializeConfig);
try (@SuppressWarnings("deprecation") GobblinSequenceFileReader reader = new GobblinSequenceFileReader(this.fs,
tablePath, deserializeConfig)) {
/**
* Add this change so that all stateful flow will have back compatibility.
* Shim layer of state store is therefore avoided because of this change.
* Keep the implementation of Shim layer temporarily.
*/
String className = reader.getValueClassName();
if (className.startsWith("gobblin")) {
LOGGER.warn("There's old JobState with no apache package name being read while we cast them at runtime");
className = "org.apache." + className;
}
if (!className.equals(JobState.class.getName()) && !className.equals(JobState.DatasetState.class.getName())) {
throw new RuntimeException("There is a mismatch in the Class Type of state in state-store and that in runtime");
}
// This is necessary for backward compatibility as existing jobs are using the JobState class
Object writable = reader.getValueClass() == JobState.class ? new JobState() : new JobState.DatasetState();
try {
Text key = new Text();
while (reader.next(key)) {
writable = reader.getCurrentValue(writable);
if (writable instanceof JobState.DatasetState) {
states.add((JobState.DatasetState) writable);
writable = new JobState.DatasetState();
} else {
states.add(((JobState) writable).newDatasetState(true));
writable = new JobState();
}
}
} catch (Exception e) {
throw new IOException(e);
}
}
return states;
}
@Override
public List<JobState.DatasetState> getAll(String storeName)
throws IOException {
return super.getAll(storeName);
}
/**
* Get a {@link Map} from dataset URNs to the latest {@link JobState.DatasetState}s.
*
* @param jobName the job name
* @return a {@link Map} from dataset URNs to the latest {@link JobState.DatasetState}s
* @throws IOException if there's something wrong reading the {@link JobState.DatasetState}s
*/
public Map<String, JobState.DatasetState> getLatestDatasetStatesByUrns(final String jobName)
throws IOException {
Path stateStorePath = new Path(this.storeRootDir, jobName);
if (!this.fs.exists(stateStorePath)) {
return ImmutableMap.of();
}
FileStatus[] stateStoreFileStatuses = this.fs.listStatus(stateStorePath, new PathFilter() {
@Override
public boolean accept(Path path) {
return path.getName().endsWith(CURRENT_DATASET_STATE_FILE_SUFFIX + DATASET_STATE_STORE_TABLE_SUFFIX);
}
});
if (stateStoreFileStatuses == null || stateStoreFileStatuses.length == 0) {
return ImmutableMap.of();
}
final Map<String, JobState.DatasetState> datasetStatesByUrns = new ConcurrentHashMap<>();
Iterator<Callable<Void>> callableIterator = Iterators
.transform(Arrays.asList(stateStoreFileStatuses).iterator(), new Function<FileStatus, Callable<Void>>() {
@Override
public Callable<Void> apply(final FileStatus stateStoreFileStatus) {
return new Callable<Void>() {
@Override
public Void call()
throws Exception {
Path stateStoreFilePath = stateStoreFileStatus.getPath();
LOGGER.info("Getting dataset states from: {}", stateStoreFilePath);
List<JobState.DatasetState> previousDatasetStates = getAll(jobName, stateStoreFilePath.getName());
if (!previousDatasetStates.isEmpty()) {
// There should be a single dataset state on the list if the list is not empty
JobState.DatasetState previousDatasetState = previousDatasetStates.get(0);
datasetStatesByUrns.put(previousDatasetState.getDatasetUrn(), previousDatasetState);
}
return null;
}
};
}
});
try {
List<Either<Void, ExecutionException>> results =
new IteratorExecutor<>(callableIterator, this.threadPoolOfGettingDatasetState,
ExecutorsUtils.newDaemonThreadFactory(Optional.of(LOGGER), Optional.of("GetFsDatasetStateStore-")))
.executeAndGetResults();
int maxNumberOfErrorLogs = 10;
IteratorExecutor.logAndThrowFailures(results, LOGGER, maxNumberOfErrorLogs);
} catch (InterruptedException e) {
throw new IOException("Failed to get latest dataset states.", e);
}
// The dataset (job) state from the deprecated "current.jst" will be read even though
// the job has transitioned to the new dataset-based mechanism
if (datasetStatesByUrns.size() > 1) {
datasetStatesByUrns.remove(ConfigurationKeys.DEFAULT_DATASET_URN);
}
return datasetStatesByUrns;
}
/**
* Get the latest {@link JobState.DatasetState} of a given dataset.
*
* @param storeName the name of the dataset state store
* @param datasetUrn the dataset URN
* @return the latest {@link JobState.DatasetState} of the dataset or {@link null} if it is not found
* @throws IOException
*/
public JobState.DatasetState getLatestDatasetState(String storeName, String datasetUrn)
throws IOException {
String alias =
Strings.isNullOrEmpty(datasetUrn) ? CURRENT_DATASET_STATE_FILE_SUFFIX + DATASET_STATE_STORE_TABLE_SUFFIX
: sanitizeDatasetStatestoreNameFromDatasetURN(storeName, CharMatcher.is(':').replaceFrom(datasetUrn, '.'))
+ "-" + CURRENT_DATASET_STATE_FILE_SUFFIX + DATASET_STATE_STORE_TABLE_SUFFIX;
return get(storeName, alias, datasetUrn);
}
/**
* Persist a given {@link JobState.DatasetState}.
*
* @param datasetUrn the dataset URN
* @param datasetState the {@link JobState.DatasetState} to persist
* @throws IOException if there's something wrong persisting the {@link JobState.DatasetState}
*/
public void persistDatasetState(String datasetUrn, JobState.DatasetState datasetState)
throws IOException {
String jobName = datasetState.getJobName();
String jobId = datasetState.getJobId();
datasetUrn = CharMatcher.is(':').replaceFrom(datasetUrn, '.');
String datasetStatestoreName = sanitizeDatasetStatestoreNameFromDatasetURN(jobName, datasetUrn);
String tableName = Strings.isNullOrEmpty(datasetUrn) ? sanitizeJobId(jobId) + DATASET_STATE_STORE_TABLE_SUFFIX
: datasetStatestoreName + "-" + sanitizeJobId(jobId) + DATASET_STATE_STORE_TABLE_SUFFIX;
LOGGER.info("Persisting " + tableName + " to the job state store");
put(jobName, tableName, datasetState);
createAlias(jobName, tableName, getAliasName(datasetStatestoreName));
Path originalDatasetUrnPath = new Path(new Path(this.storeRootDir, jobName), getAliasName(datasetUrn));
// This should only happen for the first time.
if (!Strings.isNullOrEmpty(datasetUrn) && !datasetStatestoreName.equals(datasetUrn) && this.fs
.exists(originalDatasetUrnPath)) {
LOGGER.info("Removing previous datasetUrn path: " + originalDatasetUrnPath);
fs.delete(originalDatasetUrnPath, true);
}
}
private String sanitizeJobId(String jobId) {
return jobId.replaceAll("[-/]", "_");
}
  /**
   * Persists the given dataset URNs for the named store by delegating to the parser cached
   * for the store's path. A no-op when no parser loading cache is configured for this store.
   *
   * @param storeName the store (job) name whose dataset URNs are being persisted
   * @param datasetUrns the dataset URNs to persist
   * @throws IOException if looking up the cached parser or persisting the URNs fails
   */
  @Override
  public void persistDatasetURNs(String storeName, Collection<String> datasetUrns)
      throws IOException {
    // No parser cache configured: URN persistence is disabled for this store.
    if (this.stateStoreNameParserLoadingCache == null) {
      return;
    }
    try {
      this.stateStoreNameParserLoadingCache.get(new Path(this.storeRootDir, storeName)).persistDatasetUrns(datasetUrns);
    } catch (ExecutionException e) {
      // Unwrap the cache-loading failure into the IOException contract of this method.
      throw new IOException("Failed to persist datasetUrns.", e);
    }
  }
private static String getAliasName(String datasetStatestoreName) {
return Strings.isNullOrEmpty(datasetStatestoreName) ? CURRENT_DATASET_STATE_FILE_SUFFIX
+ DATASET_STATE_STORE_TABLE_SUFFIX
: datasetStatestoreName + "-" + CURRENT_DATASET_STATE_FILE_SUFFIX + DATASET_STATE_STORE_TABLE_SUFFIX;
}
@Override
public List<FsDatasetStateStoreEntryManager> getMetadataForTables(StateStorePredicate predicate)
throws IOException {
Stream<Path> stores = predicate instanceof StoreNamePredicate ? Stream
.of(new Path(this.storeRootDir, ((StoreNamePredicate) predicate).getStoreName()))
: lsStream(new Path(this.storeRootDir)).map(FileStatus::getPath);
if (stores == null) {
return Lists.newArrayList();
}
Stream<FileStatus> tables = stores.flatMap(this::lsStream);
return tables.map(this::parseMetadataFromPath).filter(predicate::apply).collect(Collectors.toList());
}
private Stream<FileStatus> lsStream(Path path) {
try {
FileStatus[] ls = this.fs.listStatus(path, new HiddenFilter());
return ls == null ? Stream.empty() : Arrays.stream(ls);
} catch (IOException ioe) {
return Stream.empty();
}
}
  /** Wraps a listed table file's {@link FileStatus} in an entry manager backed by this store. */
  private FsDatasetStateStoreEntryManager parseMetadataFromPath(FileStatus status) {
    return new FsDatasetStateStoreEntryManager(status, this);
  }
}
| 1,359 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/MultiConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime;
import java.io.IOException;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.NoSuchElementException;
import com.google.common.base.Throwables;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import org.apache.gobblin.Constructs;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.Converter;
import org.apache.gobblin.converter.DataConversionException;
import org.apache.gobblin.converter.EmptyIterable;
import org.apache.gobblin.converter.IdentityConverter;
import org.apache.gobblin.converter.SchemaConversionException;
import org.apache.gobblin.converter.SingleRecordIterable;
import org.apache.gobblin.state.ConstructState;
import lombok.Getter;
/**
 * An implementation of {@link Converter} that applies a given list of {@link Converter}s in the given order.
 *
 * <p>{@link #convertSchema(Object, WorkUnitState)} must be invoked before
 * {@link #convertRecord(Object, Object, WorkUnitState)}: record conversion relies on the
 * intermediate schemas remembered during schema conversion.
 *
 * @author Yinan Li
 */
@SuppressWarnings("unchecked")
public class MultiConverter extends Converter<Object, Object, Object, Object> {

  // The list of converters to be applied, in order
  @Getter
  private final List<Converter<?, ?, ?, ?>> converters;
  // Remember the mapping between each converter and the output schema it generates
  private final Map<Converter<?, ?, ?, ?>, Object> convertedSchemaMap = Maps.newHashMap();

  public MultiConverter(List<Converter<?, ?, ?, ?>> converters) {
    // Make a copy to guard against changes to the converters from outside
    this.converters = Lists.newArrayList(converters);
  }

  @Override
  public void close() throws IOException {
    // Attempt to close every converter even if an earlier close() fails (the original
    // implementation stopped at the first failure, leaking the rest), then rethrow the
    // first failure so the caller still observes it.
    IOException firstFailure = null;
    for (Converter<?, ?, ?, ?> converter : this.converters) {
      try {
        converter.close();
      } catch (IOException ioe) {
        if (firstFailure == null) {
          firstFailure = ioe;
        }
      }
    }
    if (firstFailure != null) {
      throw firstFailure;
    }
  }

  @Override
  public Object convertSchema(Object inputSchema, WorkUnitState workUnit) throws SchemaConversionException {
    Object schema = inputSchema;
    for (Converter converter : this.converters) {
      // Apply the converter and remember the output schema of this converter
      schema = converter.convertSchema(schema, workUnit);
      this.convertedSchemaMap.put(converter, schema);
    }
    return schema;
  }

  @Override
  public Iterable<Object> convertRecord(Object outputSchema, final Object inputRecord, final WorkUnitState workUnit)
      throws DataConversionException {
    if (this.convertedSchemaMap.size() != this.converters.size()) {
      throw new RuntimeException("convertRecord should be called only after convertSchema is called");
    }

    // Iterable.iterator() cannot throw a checked exception, so any DataConversionException
    // raised while building the iterator chain is wrapped in a RuntimeException.
    return () -> {
      try {
        return new MultiConverterIterator(inputRecord, workUnit);
      } catch (DataConversionException dce) {
        throw new RuntimeException(dce);
      }
    };
  }

  @Override
  public State getFinalState() {
    ConstructState state = new ConstructState(super.getFinalState());
    // Record each converter's final state, keyed by its position in the chain
    for (int i = 0; i < this.converters.size(); i++) {
      state.addConstructState(Constructs.CONVERTER, new ConstructState(this.converters.get(i).getFinalState()),
          Integer.toString(i));
    }
    return state;
  }

  /**
   * A type of {@link java.util.Iterator} to be used with {@link MultiConverter}. The Converter uses the
   * {@link ChainedConverterIterator} to chain iterators together. The first {@link ChainedConverterIterator} created
   * contains an iterator with only the inputRecord and the first converter in the converters list. Each subsequent
   * {@link ChainedConverterIterator} is created using the previous {@link ChainedConverterIterator} along with the next
   * converter in the converters list. By chaining the converters and iterators in this fashion, a reference to the last
   * {@link ChainedConverterIterator} will be sufficient to iterate through all the data.
   */
  private class MultiConverterIterator implements Iterator<Object> {

    private final WorkUnitState workUnitState;
    private Iterator<Object> chainedConverterIterator;

    public MultiConverterIterator(Object inputRecord, WorkUnitState workUnitState) throws DataConversionException {
      this.workUnitState = workUnitState;
      // Seed the chain with the single input record; an empty converter list degenerates
      // to the identity conversion.
      this.chainedConverterIterator = new ChainedConverterIterator(new SingleRecordIterable<>(inputRecord).iterator(),
          MultiConverter.this.converters.isEmpty() ? new IdentityConverter() : MultiConverter.this.converters.get(0));

      for (int i = 1; i < MultiConverter.this.converters.size(); i++) {
        this.chainedConverterIterator =
            new ChainedConverterIterator(this.chainedConverterIterator, MultiConverter.this.converters.get(i));
      }
    }

    @Override
    public boolean hasNext() {
      return this.chainedConverterIterator.hasNext();
    }

    @Override
    public Object next() {
      return this.chainedConverterIterator.next();
    }

    @Override
    public void remove() {
      throw new UnsupportedOperationException();
    }

    /**
     * A helper class that implements {@link Iterator}. It is constructed with a {@link Iterator} and a {@link Converter}.
     * The class iterates through the results of each converted record from prevIterator. It iterates through each
     * element in prevIterator, converts the result, and then stores the result in the currentIterator object. It
     * returns every element in currentIterator until it is empty, and then it gets the next element from prevIterator,
     * converts the object, and stores the result in currentIterator. This pattern continues until there are no more
     * elements left in prevIterator.
     */
    private class ChainedConverterIterator implements Iterator<Object> {

      private final Converter converter;
      private final Iterator<Object> prevIterator;
      private Iterator<Object> currentIterator;

      public ChainedConverterIterator(Iterator<Object> prevIterator, Converter converter)
          throws DataConversionException {
        this.converter = converter;
        this.prevIterator = prevIterator;
        if (this.prevIterator.hasNext()) {
          this.currentIterator = converter.convertRecord(MultiConverter.this.convertedSchemaMap.get(converter),
              this.prevIterator.next(), MultiConverterIterator.this.workUnitState).iterator();
        } else {
          this.currentIterator = new EmptyIterable<>().iterator();
        }
      }

      @Override
      public boolean hasNext() {
        if (this.currentIterator.hasNext()) {
          return true;
        }
        // Pull records from upstream until one converts to a non-empty output, or upstream
        // is exhausted.
        while (this.prevIterator.hasNext()) {
          try {
            this.currentIterator =
                this.converter.convertRecord(MultiConverter.this.convertedSchemaMap.get(this.converter),
                    this.prevIterator.next(), MultiConverterIterator.this.workUnitState).iterator();
          } catch (DataConversionException e) {
            // Iterator.hasNext() cannot throw checked exceptions; rethrow unchecked. This
            // replaces the deprecated Throwables.propagate(e), whose always-throws behavior
            // made the control flow here misleading.
            throw new RuntimeException(e);
          }
          if (this.currentIterator.hasNext()) {
            return true;
          }
        }
        return false;
      }

      @Override
      public Object next() {
        if (this.hasNext()) {
          return this.currentIterator.next();
        }
        throw new NoSuchElementException();
      }

      @Override
      public void remove() {
        throw new UnsupportedOperationException();
      }
    }
  }
}
| 1,360 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/MysqlDatasetStateStoreFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime;
import javax.sql.DataSource;
import com.typesafe.config.Config;
import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.broker.SharedResourcesBrokerFactory;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.metastore.DatasetStateStore;
import org.apache.gobblin.metastore.MysqlDataSourceFactory;
@Alias("mysql")
public class MysqlDatasetStateStoreFactory implements DatasetStateStore.Factory {

  /**
   * Creates a {@link MysqlDatasetStateStore} from the given config, resolving the backing
   * table name and value-compression flag from the config with system defaults as fallback.
   */
  @Override
  public DatasetStateStore<JobState.DatasetState> createStateStore(Config config) {
    String stateStoreTableName = ConfigurationKeys.DEFAULT_STATE_STORE_DB_TABLE;
    if (config.hasPath(ConfigurationKeys.STATE_STORE_DB_TABLE_KEY)) {
      stateStoreTableName = config.getString(ConfigurationKeys.STATE_STORE_DB_TABLE_KEY);
    }

    boolean compressedValues = ConfigurationKeys.DEFAULT_STATE_STORE_COMPRESSED_VALUES;
    if (config.hasPath(ConfigurationKeys.STATE_STORE_COMPRESSED_VALUES_KEY)) {
      compressedValues = config.getBoolean(ConfigurationKeys.STATE_STORE_COMPRESSED_VALUES_KEY);
    }

    try {
      // The data source is shared via the implicit broker so connections can be pooled.
      DataSource dataSource = MysqlDataSourceFactory.get(config,
          SharedResourcesBrokerFactory.getImplicitBroker());
      return new MysqlDatasetStateStore(dataSource, stateStoreTableName, compressedValues);
    } catch (Exception e) {
      throw new RuntimeException("Failed to create MysqlDatasetStateStore with factory", e);
    }
  }
}
| 1,361 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/TaskStateCollectorService.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime;
import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Queue;
import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Optional;
import com.google.common.base.Predicate;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Queues;
import com.google.common.eventbus.EventBus;
import com.google.common.io.Closer;
import com.google.common.util.concurrent.AbstractScheduledService;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.metastore.FsStateStore;
import org.apache.gobblin.metastore.StateStore;
import org.apache.gobblin.runtime.troubleshooter.Issue;
import org.apache.gobblin.runtime.troubleshooter.IssueRepository;
import org.apache.gobblin.runtime.troubleshooter.TroubleshooterException;
import org.apache.gobblin.metrics.event.EventSubmitter;
import org.apache.gobblin.metrics.event.TimingEvent;
import org.apache.gobblin.service.ServiceConfigKeys;
import org.apache.gobblin.util.ClassAliasResolver;
import org.apache.gobblin.util.ParallelRunner;
/**
* An {@link AbstractScheduledService} for collecting output {@link TaskState}s of completed {@link Task}s
* stored in files, which get deleted once the {@link TaskState}s they store are successfully collected.
* For each batch of {@link TaskState}s collected, it posts a {@link NewTaskCompletionEvent} to notify
* parties that are interested in such events.
*
* @author Yinan Li
*/
@Slf4j
public class TaskStateCollectorService extends AbstractScheduledService {
private static final Logger LOGGER = LoggerFactory.getLogger(TaskStateCollectorService.class);
private final JobState jobState;
private final EventBus eventBus;
private final EventSubmitter eventSubmitter;
// Number of ParallelRunner threads to be used for state serialization/deserialization
private final int stateSerDeRunnerThreads;
// Interval in seconds between two runs of the collector of output TaskStates
private final int outputTaskStatesCollectorIntervalSeconds;
private final StateStore<TaskState> taskStateStore;
private final Path outputTaskStateDir;
private double totalSizeToCopy;
private double bytesCopiedSoFar;
private double totalNumWorkUnits;
private double workUnitsCompletedSoFar;
private double lastPercentageReported;
/**
* Add a closeable action to run after each existence-checking of task state file.
* A typical example to plug here is hive registration:
* We do hive registration everytime there are available taskStates deserialized from storage, on the driver level.
*/
@Getter
private final Optional<TaskStateCollectorServiceHandler> optionalTaskCollectorHandler;
private final Closer handlerCloser = Closer.create();
private final boolean isJobProceedOnCollectorServiceFailure;
/**
* By default, whether {@link TaskStateCollectorService} finishes successfully or not won't influence
* job's proceed.
*/
private static final boolean defaultPolicyOnCollectorServiceFailure = true;
private final IssueRepository issueRepository;
private final AtomicBoolean reportedIssueConsumptionWarning = new AtomicBoolean(false);
public TaskStateCollectorService(Properties jobProps, JobState jobState, EventBus eventBus,
EventSubmitter eventSubmitter, StateStore<TaskState> taskStateStore, Path outputTaskStateDir,
IssueRepository issueRepository) {
this.jobState = jobState;
this.eventBus = eventBus;
this.eventSubmitter = eventSubmitter;
this.taskStateStore = taskStateStore;
this.outputTaskStateDir = outputTaskStateDir;
this.issueRepository = issueRepository;
this.stateSerDeRunnerThreads = Integer.parseInt(jobProps.getProperty(ParallelRunner.PARALLEL_RUNNER_THREADS_KEY,
Integer.toString(ParallelRunner.DEFAULT_PARALLEL_RUNNER_THREADS)));
this.outputTaskStatesCollectorIntervalSeconds =
Integer.parseInt(jobProps.getProperty(ConfigurationKeys.TASK_STATE_COLLECTOR_INTERVAL_SECONDS,
Integer.toString(ConfigurationKeys.DEFAULT_TASK_STATE_COLLECTOR_INTERVAL_SECONDS)));
if (!StringUtils.isBlank(jobProps.getProperty(ConfigurationKeys.TASK_STATE_COLLECTOR_HANDLER_CLASS))) {
String handlerTypeName = jobProps.getProperty(ConfigurationKeys.TASK_STATE_COLLECTOR_HANDLER_CLASS);
try {
ClassAliasResolver<TaskStateCollectorServiceHandler.TaskStateCollectorServiceHandlerFactory> aliasResolver =
new ClassAliasResolver<>(TaskStateCollectorServiceHandler.TaskStateCollectorServiceHandlerFactory.class);
TaskStateCollectorServiceHandler.TaskStateCollectorServiceHandlerFactory handlerFactory =
aliasResolver.resolveClass(handlerTypeName).newInstance();
optionalTaskCollectorHandler = Optional.of(handlerCloser.register(handlerFactory.createHandler(this.jobState)));
} catch (ReflectiveOperationException rfe) {
throw new RuntimeException("Could not construct TaskCollectorHandler " + handlerTypeName, rfe);
}
} else {
optionalTaskCollectorHandler = Optional.absent();
}
isJobProceedOnCollectorServiceFailure =
jobState.getPropAsBoolean(ConfigurationKeys.JOB_PROCEED_ON_TASK_STATE_COLLECOTR_SERVICE_FAILURE,
defaultPolicyOnCollectorServiceFailure);
}
@Override
protected void runOneIteration() throws Exception {
collectOutputTaskStates();
}
@Override
protected Scheduler scheduler() {
return Scheduler.newFixedRateSchedule(this.outputTaskStatesCollectorIntervalSeconds,
this.outputTaskStatesCollectorIntervalSeconds, TimeUnit.SECONDS);
}
@Override
protected void startUp() throws Exception {
LOGGER.info("Starting the " + TaskStateCollectorService.class.getSimpleName());
super.startUp();
}
@Override
protected void shutDown() throws Exception {
LOGGER.info("Stopping the " + TaskStateCollectorService.class.getSimpleName());
try {
runOneIteration();
} finally {
super.shutDown();
this.handlerCloser.close();
}
}
/**
* Collect output {@link TaskState}s of tasks of the job launched.
*
* <p>
* This method collects all available output {@link TaskState} files at the time it is called. It
* uses a {@link ParallelRunner} to deserialize the {@link TaskState}s. Each {@link TaskState}
* file gets deleted after the {@link TaskState} it stores is successfully collected.
* </p>
*
* @throws IOException if it fails to collect the output {@link TaskState}s
*/
private void collectOutputTaskStates() throws IOException {
List<String> taskStateNames = taskStateStore.getTableNames(outputTaskStateDir.getName(), new Predicate<String>() {
@Override
public boolean apply(String input) {
return input.endsWith(AbstractJobLauncher.TASK_STATE_STORE_TABLE_SUFFIX)
&& !input.startsWith(FsStateStore.TMP_FILE_PREFIX);
}});
if (taskStateNames == null || taskStateNames.size() == 0) {
LOGGER.debug("No output task state files found in " + this.outputTaskStateDir);
return;
}
final Queue<TaskState> taskStateQueue = Queues.newConcurrentLinkedQueue();
try (ParallelRunner stateSerDeRunner = new ParallelRunner(this.stateSerDeRunnerThreads, null)) {
for (final String taskStateName : taskStateNames) {
LOGGER.debug("Found output task state file " + taskStateName);
// Deserialize the TaskState and delete the file
stateSerDeRunner.submitCallable(new Callable<Void>() {
@Override
public Void call() throws Exception {
TaskState taskState = taskStateStore.getAll(outputTaskStateDir.getName(), taskStateName).get(0);
taskStateQueue.add(taskState);
taskStateStore.delete(outputTaskStateDir.getName(), taskStateName);
return null;
}
}, "Deserialize state for " + taskStateName);
}
} catch (IOException ioe) {
LOGGER.warn("Could not read all task state files.");
}
LOGGER.info(String.format("Collected task state of %d completed tasks", taskStateQueue.size()));
// Add the TaskStates of completed tasks to the JobState so when the control
// returns to the launcher, it sees the TaskStates of all completed tasks.
for (TaskState taskState : taskStateQueue) {
consumeTaskIssues(taskState);
taskState.setJobState(this.jobState);
this.jobState.addTaskState(taskState);
if (this.jobState.getPropAsBoolean(ConfigurationKeys.REPORT_JOB_PROGRESS, ConfigurationKeys.DEFAULT_REPORT_JOB_PROGRESS)) {
reportJobProgress(taskState);
}
}
// Finish any additional steps defined in handler on driver level.
// Currently implemented handler for Hive registration only.
if (optionalTaskCollectorHandler.isPresent()) {
LOGGER.info("Execute Pipelined TaskStateCollectorService Handler for " + taskStateQueue.size() + " tasks");
try {
optionalTaskCollectorHandler.get().handle(taskStateQueue);
} catch (Throwable t) {
if (isJobProceedOnCollectorServiceFailure) {
log.error("Failed to commit dataset while job proceeds", t);
SafeDatasetCommit.setTaskFailureException(taskStateQueue, t);
} else {
throw new RuntimeException("Hive Registration as the TaskStateCollectorServiceHandler failed.", t);
}
}
}
// Notify the listeners for the completion of the tasks
this.eventBus.post(new NewTaskCompletionEvent(ImmutableList.copyOf(taskStateQueue)));
}
/**
* Uses the size of work units to determine a job's progress and reports the progress as a percentage via
* GobblinTrackingEvents
* @param taskState of job launched
*/
private void reportJobProgress(TaskState taskState) {
String stringSize = taskState.getProp(ServiceConfigKeys.WORK_UNIT_SIZE);
if (stringSize == null) {
LOGGER.warn("Expected to report job progress but work unit byte size property null");
return;
}
Long taskByteSize = Long.parseLong(stringSize);
// If progress reporting is enabled, value should be present
if (!this.jobState.contains(ServiceConfigKeys.TOTAL_WORK_UNIT_SIZE)) {
LOGGER.warn("Expected to report job progress but total bytes to copy property null");
return;
}
this.totalSizeToCopy = this.jobState.getPropAsLong(ServiceConfigKeys.TOTAL_WORK_UNIT_SIZE);
// If total size in bytes cannot be calculated, then default to progress reporting in terms of workunits
Double newPercentageCopied;
if (this.totalSizeToCopy == 0) {
this.totalNumWorkUnits = this.jobState.getPropAsLong(AbstractJobLauncher.NUM_WORKUNITS);
this.workUnitsCompletedSoFar += 1;
if (this.totalNumWorkUnits == 0) {
LOGGER.warn("Expected to report job progress but work units are not countable");
return;
}
newPercentageCopied = this.workUnitsCompletedSoFar / this.totalNumWorkUnits;
} else {
this.bytesCopiedSoFar += taskByteSize;
newPercentageCopied = this.bytesCopiedSoFar / this.totalSizeToCopy;
}
// Avoid flooding Kafka message queue by sending GobblinTrackingEvents only when threshold is passed
// Report progress when it reaches 100% regardless of difference from lastPercentageReported
if (newPercentageCopied - this.lastPercentageReported >= ConfigurationKeys.DEFAULT_PROGRESS_REPORTING_THRESHOLD ||
(Math.abs(newPercentageCopied - 1.0) < 0.001)) {
this.lastPercentageReported = newPercentageCopied;
int percentageToReport = (int) Math.round(this.lastPercentageReported * 100);
Map<String, String> progress = new HashMap<>();
progress.put(TimingEvent.JOB_COMPLETION_PERCENTAGE, String.valueOf(percentageToReport));
LOGGER.info("Sending copy progress event with percentage " + percentageToReport + "%");
new TimingEvent(this.eventSubmitter, TimingEvent.JOB_COMPLETION_PERCENTAGE).stop(progress);
}
}
private void consumeTaskIssues(TaskState taskState) {
List<Issue> taskIssues = taskState.getTaskIssues();
/* A single job can spawn tens of thousands of tasks, and in case of wide-spread errors they will all produce
* similar Issues with large exception stack traces. If the process that collects task states keeps all of them in
* job state, it can run out of memory. To avoid that, we're forwarding issues to central repository that has
* size limits, and then remove them from the task state.
* */
if (taskIssues != null) {
try {
issueRepository.put(taskIssues);
} catch (TroubleshooterException e) {
if (reportedIssueConsumptionWarning.compareAndSet(false, true)) {
log.warn("Failed to consume task issues", e);
}
}
taskState.setTaskIssues(null);
}
}
}
| 1,362 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/TimeBasedLimiter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime;
import java.util.concurrent.TimeUnit;
/**
 * {@inheritDoc}
 *
 * @deprecated This class has been moved to {@link org.apache.gobblin.util.limiter.TimeBasedLimiter}.
 *             This subclass exists only so code referencing the old fully-qualified name keeps working;
 *             both constructors simply delegate to the superclass.
 */
@Deprecated
public class TimeBasedLimiter extends org.apache.gobblin.util.limiter.TimeBasedLimiter {

  /** @param timeLimit the time limit, interpreted by the superclass's single-argument constructor */
  public TimeBasedLimiter(long timeLimit) {
    super(timeLimit);
  }

  /**
   * @param timeLimit the time limit
   * @param timeUnit the unit of {@code timeLimit}
   */
  public TimeBasedLimiter(long timeLimit, TimeUnit timeUnit) {
    super(timeLimit, timeUnit);
  }
}
| 1,363 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/TaskStateCollectorServiceHiveRegHandlerFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime;
import org.apache.gobblin.annotation.Alias;
/**
 * Implementation of TaskStateCollectorServiceHandlerFactory that is specific to Hive registration
 * as the action to be taken in {@link TaskStateCollectorService}.
 *
 * <p>NOTE: this Javadoc previously sat between the {@code @Alias} annotation and the class
 * declaration, where it did not attach as Javadoc; it has been moved above the annotation.
 */
@Alias("hivereg")
public class TaskStateCollectorServiceHiveRegHandlerFactory
    implements TaskStateCollectorServiceHandler.TaskStateCollectorServiceHandlerFactory {

  /** Creates a Hive-registration handler bound to the given job's state. */
  @Override
  public TaskStateCollectorServiceHandler createHandler(JobState jobState) {
    return new HiveRegTaskStateCollectorServiceHandlerImpl(jobState);
  }
}
| 1,364 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/DatasetTaskSummary.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime;
import lombok.Data;
import org.apache.gobblin.metrics.DatasetMetric;
/**
 * A class returned by {@link org.apache.gobblin.runtime.SafeDatasetCommit} to provide metrics for the dataset
 * that can be reported as a single event in the commit phase.
 */
@Data
public class DatasetTaskSummary {
  // URN identifying the dataset these metrics describe
  private final String datasetUrn;
  // Number of records written for this dataset
  private final long recordsWritten;
  // Number of bytes written for this dataset
  private final long bytesWritten;
  // Whether the dataset's commit completed successfully
  private final boolean successfullyCommitted;

  /**
   * Convert a {@link DatasetTaskSummary} to a {@link DatasetMetric}.
   *
   * @param datasetTaskSummary the summary to convert
   * @return a {@link DatasetMetric} carrying the same URN, byte/record counts, and commit status
   */
  public static DatasetMetric toDatasetMetric(DatasetTaskSummary datasetTaskSummary) {
    return new DatasetMetric(datasetTaskSummary.getDatasetUrn(), datasetTaskSummary.getBytesWritten(), datasetTaskSummary.getRecordsWritten(), datasetTaskSummary.isSuccessfullyCommitted());
  }
}
| 1,365 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/ForkException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime;
/**
 * An exception that is thrown when anything goes wrong with a fork.
 */
public class ForkException extends Exception {

  private static final long serialVersionUID = -7131035635096992762L;

  /**
   * @param message detail message describing the fork failure
   * @param cause underlying cause of the failure
   */
  public ForkException(String message, Throwable cause) {
    super(message, cause);
  }

  /** @param message detail message describing the fork failure */
  public ForkException(String message) {
    super(message);
  }
}
| 1,366 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/ExecutionModel.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime;
import org.apache.gobblin.annotation.Alpha;
/**
 * An Enum to capture the execution model of a specific Gobblin task.
 */
@Alpha
public enum ExecutionModel {
  BATCH, // Tasks start and stop (bounded execution)
  STREAMING // Tasks run continuously until failure / termination
}
| 1,367 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/ThrowableWithErrorCode.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime;
/**
 * Implemented by throwables that carry a machine-readable error code in addition to their message.
 */
public interface ThrowableWithErrorCode {

  /**
   * Returns an error code that uniquely identifies the problem in the given context.
   *
   * The error code will be used programmatically, to take different recovery actions.
   *
   * Sample error codes: SourceDatasetNotFound, MissingDatasetWritePermissions
   *
   * @return a short, stable identifier for the error condition
   * */
  String getErrorCode();
}
| 1,368 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/SafeDatasetCommit.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime;
import java.io.IOException;
import java.util.Collection;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Callable;
import org.apache.commons.lang.StringUtils;
import com.google.common.base.Optional;
import com.google.common.base.Throwables;
import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.ListMultimap;
import com.google.common.collect.Lists;
import com.google.common.io.Closer;
import lombok.Data;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.commit.CommitSequence;
import org.apache.gobblin.commit.CommitStep;
import org.apache.gobblin.commit.DeliverySemantics;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.event.FailureEventBuilder;
import org.apache.gobblin.metrics.event.lineage.LineageInfo;
import org.apache.gobblin.publisher.CommitSequencePublisher;
import org.apache.gobblin.publisher.DataPublisher;
import org.apache.gobblin.publisher.DataPublisherFactory;
import org.apache.gobblin.publisher.UnpublishedHandling;
import org.apache.gobblin.runtime.commit.DatasetStateCommitStep;
import org.apache.gobblin.runtime.task.TaskFactory;
import org.apache.gobblin.runtime.task.TaskUtils;
import org.apache.gobblin.source.extractor.JobCommitPolicy;
/**
 * {@link Callable} that commits a single dataset. The logic in this class is thread-safe, however, it calls
 * {@link DataPublisher#publish(Collection)}. This class is thread-safe if and only if the implementation of
 * {@link DataPublisher} used is also thread-safe.
 */
@RequiredArgsConstructor
@Slf4j
final class SafeDatasetCommit implements Callable<Void> {

  // Serializes commits across all instances when the publisher is not thread-safe.
  private static final Object GLOBAL_LOCK = new Object();

  // Metadata key and event name used when submitting a failed-dataset event.
  private static final String DATASET_STATE = "datasetState";
  private static final String FAILED_DATASET_EVENT = "failedDataset";

  private final boolean shouldCommitDataInJob;
  private final boolean isJobCancelled;
  private final DeliverySemantics deliverySemantics;
  private final String datasetUrn;
  private final JobState.DatasetState datasetState;
  private final boolean isMultithreaded;
  private final JobContext jobContext;

  // Lazily initialized in call(); used for failure / lineage event submission.
  private MetricContext metricContext;

  /**
   * Commits this dataset: publishes task output (or, for exactly-once delivery, builds and executes a
   * {@link CommitSequence}), finalizes the dataset state, and persists it.
   *
   * @return always {@code null}
   * @throws Exception if the dataset cannot be committed or its state cannot be persisted
   */
  @Override
  public Void call()
      throws Exception {
    if (this.datasetState.getState() == JobState.RunningState.COMMITTED) {
      log.info(this.datasetUrn + " have been committed.");
      return null;
    }
    metricContext = Instrumented.getMetricContext(datasetState, SafeDatasetCommit.class);

    finalizeDatasetStateBeforeCommit(this.datasetState);
    Class<? extends DataPublisher> dataPublisherClass;
    try (Closer closer = Closer.create()) {
      dataPublisherClass = JobContext.getJobDataPublisherClass(this.jobContext.getJobState())
          .or((Class<? extends DataPublisher>) Class.forName(ConfigurationKeys.DEFAULT_DATA_PUBLISHER_TYPE));
      if (!canCommitDataset(datasetState)) {
        log.warn(String
            .format("Not committing dataset %s of job %s with commit policy %s and state %s", this.datasetUrn,
                this.jobContext.getJobId(), this.jobContext.getJobCommitPolicy(), this.datasetState.getState()));
        checkForUnpublishedWUHandling(this.datasetUrn, this.datasetState, dataPublisherClass, closer);
        throw new RuntimeException(String
            .format("Not committing dataset %s of job %s with commit policy %s and state %s", this.datasetUrn,
                this.jobContext.getJobId(), this.jobContext.getJobCommitPolicy(), this.datasetState.getState()));
      }
    } catch (ReflectiveOperationException roe) {
      log.error("Failed to instantiate data publisher for dataset {} of job {}.", this.datasetUrn,
          this.jobContext.getJobId(), roe);
      throw new RuntimeException(roe);
    } finally {
      maySubmitFailureEvent(datasetState);
    }

    if (this.isJobCancelled) {
      log.info("Executing commit steps although job is cancelled due to job commit policy: " + this.jobContext
          .getJobCommitPolicy());
    }

    Optional<CommitSequence.Builder> commitSequenceBuilder = Optional.absent();
    boolean canPersistStates = true;
    try (Closer closer = Closer.create()) {
      if (this.shouldCommitDataInJob) {
        log.info(String.format("Committing dataset %s of job %s with commit policy %s and state %s", this.datasetUrn,
            this.jobContext.getJobId(), this.jobContext.getJobCommitPolicy(), this.datasetState.getState()));

        ListMultimap<TaskFactoryWrapper, TaskState> taskStatesByFactory = groupByTaskFactory(this.datasetState);

        for (Map.Entry<TaskFactoryWrapper, Collection<TaskState>> entry : taskStatesByFactory.asMap().entrySet()) {
          TaskFactory taskFactory = entry.getKey().getTaskFactory();

          if (this.deliverySemantics == DeliverySemantics.EXACTLY_ONCE) {
            if (taskFactory != null) {
              throw new RuntimeException("Custom task factories do not support exactly once delivery semantics.");
            }
            // BUGFIX: capture the returned builder. Previously the result was discarded, which left
            // commitSequenceBuilder absent, so the commit sequence in the finally block below was never
            // built or executed for exactly-once delivery.
            commitSequenceBuilder = generateCommitSequenceBuilder(this.datasetState, entry.getValue());
          } else {
            DataPublisher publisher;

            if (taskFactory == null) {
              publisher = DataPublisherFactory.get(dataPublisherClass.getName(), this.jobContext.getJobState(),
                  this.jobContext.getJobBroker());

              // non-threadsafe publishers are not shareable and are not retained in the broker, so register them with
              // the closer
              if (!DataPublisherFactory.isPublisherCacheable(publisher)) {
                closer.register(publisher);
              }
            } else {
              // NOTE: sharing of publishers is not supported when they are instantiated through the TaskFactory.
              // This should be revisited if sharing is required.
              publisher = taskFactory.createDataPublisher(this.datasetState);
            }

            if (this.isJobCancelled) {
              if (publisher.canBeSkipped()) {
                log.warn(publisher.getClass() + " will be skipped.");
              } else {
                canPersistStates = false;
                throw new RuntimeException(
                    "Cannot persist state upon cancellation because publisher has unfinished work and cannot be skipped.");
              }
            } else if (this.isMultithreaded && !publisher.isThreadSafe()) {
              log.warn(String.format(
                  "Gobblin is set up to parallelize publishing, however the publisher %s is not thread-safe. "
                      + "Falling back to serial publishing.", publisher.getClass().getName()));
              safeCommitDataset(entry.getValue(), publisher);
            } else {
              commitDataset(entry.getValue(), publisher);
            }
          }
        }
        this.datasetState.setState(JobState.RunningState.COMMITTED);
      } else {
        if (this.datasetState.getState() == JobState.RunningState.SUCCESSFUL) {
          this.datasetState.setState(JobState.RunningState.COMMITTED);
        }
      }
    } catch (Throwable throwable) {
      log.error(String.format("Failed to commit dataset state for dataset %s of job %s", this.datasetUrn,
          this.jobContext.getJobId()), throwable);
      throw new RuntimeException(throwable);
    } finally {
      try {
        finalizeDatasetState(datasetState, datasetUrn);
        maySubmitFailureEvent(datasetState);
        maySubmitLineageEvent(datasetState);

        if (commitSequenceBuilder.isPresent()) {
          buildAndExecuteCommitSequence(commitSequenceBuilder.get(), datasetState, datasetUrn);
          datasetState.setState(JobState.RunningState.COMMITTED);
        } else if (canPersistStates) {
          persistDatasetState(datasetUrn, datasetState);
        }
      } catch (IOException | RuntimeException ioe) {
        log.error(String
            .format("Failed to persist dataset state for dataset %s of job %s", datasetUrn, this.jobContext.getJobId()),
            ioe);
        throw new RuntimeException(ioe);
      }
    }
    return null;
  }

  /**
   * Submits a {@value #FAILED_DATASET_EVENT} failure event if the dataset ended up in the FAILED state.
   */
  private void maySubmitFailureEvent(JobState.DatasetState datasetState) {
    if (datasetState.getState() == JobState.RunningState.FAILED) {
      FailureEventBuilder failureEvent = new FailureEventBuilder(FAILED_DATASET_EVENT);
      failureEvent.addMetadata(DATASET_STATE, datasetState.toString());
      failureEvent.submit(metricContext);
    }
  }

  /**
   * Submits lineage events for all committed task states that carry lineage info, then purges lineage
   * info from every task state so it is not persisted with the dataset state.
   */
  private void maySubmitLineageEvent(JobState.DatasetState datasetState) {
    Collection<TaskState> allStates = datasetState.getTaskStates();
    Collection<TaskState> states = Lists.newArrayList();
    // Filter out failed states or states that don't have lineage info
    for (TaskState state : allStates) {
      if (state.getWorkingState() == WorkUnitState.WorkingState.COMMITTED &&
          LineageInfo.hasLineageInfo(state)) {
        states.add(state);
      }
    }
    if (states.size() == 0) {
      log.info("Will not submit lineage events as no state contains lineage info");
      return;
    }
    try {
      if (StringUtils.isEmpty(datasetUrn)) {
        // This dataset may contain different kinds of LineageEvent
        for (Map.Entry<String, Collection<WorkUnitState>> entry : LineageInfo.aggregateByLineageEvent(states).entrySet()) {
          LineageInfo.submitLineageEvent(entry.getKey(), entry.getValue(), metricContext);
        }
      } else {
        LineageInfo.submitLineageEvent(datasetUrn, states, metricContext);
      }
    } finally {
      // Purge lineage info from all states
      for (TaskState taskState : allStates) {
        LineageInfo.purgeLineageInfo(taskState);
      }
    }
  }

  /**
   * Synchronized version of {@link #commitDataset(Collection, DataPublisher)} used when publisher is not
   * thread safe.
   */
  private void safeCommitDataset(Collection<TaskState> taskStates, DataPublisher publisher) {
    synchronized (GLOBAL_LOCK) {
      commitDataset(taskStates, publisher);
    }
  }

  /**
   * Commit the output data of a dataset. A failure is recorded on every task state rather than rethrown,
   * so the dataset state can still be finalized by the caller.
   */
  private void commitDataset(Collection<TaskState> taskStates, DataPublisher publisher) {
    try {
      publisher.publish(taskStates);
    } catch (Throwable t) {
      log.error("Failed to commit dataset", t);
      setTaskFailureException(taskStates, t);
    }
  }

  /**
   * Groups task states by the class of their custom {@link TaskFactory} (or {@code null} for the default),
   * so that each group can be published by the matching publisher.
   */
  private ListMultimap<TaskFactoryWrapper, TaskState> groupByTaskFactory(JobState.DatasetState datasetState) {
    ListMultimap<TaskFactoryWrapper, TaskState> groupsMap = ArrayListMultimap.create();
    for (TaskState taskState : datasetState.getTaskStates()) {
      groupsMap.put(new TaskFactoryWrapper(TaskUtils.getTaskFactory(taskState).orNull()), taskState);
    }
    return groupsMap;
  }

  /**
   * Wraps a (possibly null) {@link TaskFactory} with equality based on the factory's class, so factories
   * of the same type hash into the same multimap group.
   */
  @Data
  private static class TaskFactoryWrapper {
    private final TaskFactory taskFactory;

    public boolean equals(Object other) {
      if (!(other instanceof TaskFactoryWrapper)) {
        return false;
      }
      if (this.taskFactory == null) {
        return ((TaskFactoryWrapper) other).taskFactory == null;
      }
      return ((TaskFactoryWrapper) other).taskFactory != null && this.taskFactory.getClass()
          .equals(((TaskFactoryWrapper) other).taskFactory.getClass());
    }

    public int hashCode() {
      final int PRIME = 59;
      int result = 1;
      final Class<?> klazz = this.taskFactory == null ? null : this.taskFactory.getClass();
      result = result * PRIME + (klazz == null ? 43 : klazz.hashCode());
      return result;
    }
  }

  /**
   * Builds the commit sequence (including the dataset-state commit step), stores it, executes it,
   * and deletes it from the store once it has completed.
   */
  private synchronized void buildAndExecuteCommitSequence(CommitSequence.Builder builder,
      JobState.DatasetState datasetState, String datasetUrn)
      throws IOException {
    CommitSequence commitSequence =
        builder.addStep(buildDatasetStateCommitStep(datasetUrn, datasetState).get()).build();
    this.jobContext.getCommitSequenceStore().get().put(commitSequence.getJobName(), datasetUrn, commitSequence);
    commitSequence.execute();
    this.jobContext.getCommitSequenceStore().get().delete(commitSequence.getJobName(), datasetUrn);
  }

  /**
   * Finalize a given {@link JobState.DatasetState} before committing the dataset.
   *
   * This method is thread-safe.
   */
  private void finalizeDatasetStateBeforeCommit(JobState.DatasetState datasetState) {
    for (TaskState taskState : datasetState.getTaskStates()) {
      if (taskState.getWorkingState() != WorkUnitState.WorkingState.SUCCESSFUL
          && this.jobContext.getJobCommitPolicy() == JobCommitPolicy.COMMIT_ON_FULL_SUCCESS) {
        // The dataset state is set to FAILED if any task failed and COMMIT_ON_FULL_SUCCESS is used
        datasetState.setState(JobState.RunningState.FAILED);
        datasetState.incrementJobFailures();
        Optional<String> taskStateException = taskState.getTaskFailureException();
        log.warn("At least one task did not get committed successfully. Setting dataset state to FAILED. "
            + (taskStateException.isPresent() ? taskStateException.get() : "Exception not set."));
        return;
      }
    }
    datasetState.setState(JobState.RunningState.SUCCESSFUL);
    datasetState.setNoJobFailure();
  }

  /**
   * Check if it is OK to commit the output data of a dataset.
   *
   * <p>
   * A dataset can be committed if and only if any of the following conditions is satisfied:
   *
   * <ul>
   * <li>The {@link JobCommitPolicy#COMMIT_ON_PARTIAL_SUCCESS} policy is used.</li>
   * <li>The {@link JobCommitPolicy#COMMIT_SUCCESSFUL_TASKS} policy is used.</li>
   * <li>The {@link JobCommitPolicy#COMMIT_ON_FULL_SUCCESS} policy is used and all of the tasks succeed.</li>
   * </ul>
   * </p>
   * This method is thread-safe.
   */
  private boolean canCommitDataset(JobState.DatasetState datasetState) {
    // Only commit a dataset if 1) COMMIT_ON_PARTIAL_SUCCESS is used, or 2)
    // COMMIT_ON_FULL_SUCCESS is used and all of the tasks of the dataset have succeeded.
    return this.jobContext.getJobCommitPolicy() == JobCommitPolicy.COMMIT_ON_PARTIAL_SUCCESS
        || this.jobContext.getJobCommitPolicy() == JobCommitPolicy.COMMIT_SUCCESSFUL_TASKS || (
        this.jobContext.getJobCommitPolicy() == JobCommitPolicy.COMMIT_ON_FULL_SUCCESS
            && datasetState.getState() == JobState.RunningState.SUCCESSFUL);
  }

  /**
   * Publishes the given task states through a {@link CommitSequencePublisher} and returns the resulting
   * commit sequence builder, marking every task state failed and propagating on any error.
   */
  @SuppressWarnings("unchecked")
  private Optional<CommitSequence.Builder> generateCommitSequenceBuilder(JobState.DatasetState datasetState,
      Collection<TaskState> taskStates) {
    try (Closer closer = Closer.create()) {
      Class<? extends CommitSequencePublisher> dataPublisherClass = (Class<? extends CommitSequencePublisher>) Class
          .forName(datasetState
              .getProp(ConfigurationKeys.DATA_PUBLISHER_TYPE, ConfigurationKeys.DEFAULT_DATA_PUBLISHER_TYPE));
      CommitSequencePublisher publisher = (CommitSequencePublisher) closer
          .register(DataPublisher.getInstance(dataPublisherClass, this.jobContext.getJobState()));
      publisher.publish(taskStates);
      return publisher.getCommitSequenceBuilder();
    } catch (Throwable t) {
      log.error("Failed to generate commit sequence", t);
      setTaskFailureException(datasetState.getTaskStates(), t);
      throw Throwables.propagate(t);
    }
  }

  /**
   * If the publisher supports {@link UnpublishedHandling}, lets it handle the work units that will not
   * be published (e.g. clean up staged output).
   */
  void checkForUnpublishedWUHandling(String datasetUrn, JobState.DatasetState datasetState,
      Class<? extends DataPublisher> dataPublisherClass, Closer closer)
      throws ReflectiveOperationException, IOException {
    if (UnpublishedHandling.class.isAssignableFrom(dataPublisherClass)) {
      // pass in jobstate to retrieve properties
      DataPublisher publisher =
          closer.register(DataPublisher.getInstance(dataPublisherClass, this.jobContext.getJobState()));
      log.info(String.format("Calling publisher to handle unpublished work units for dataset %s of job %s.", datasetUrn,
          this.jobContext.getJobId()));
      ((UnpublishedHandling) publisher).handleUnpublishedWorkUnits(datasetState.getTaskStatesAsWorkUnitStates());
    }
  }

  /**
   * Finalizes the dataset state after the commit attempt: backs off watermarks of uncommitted tasks,
   * marks the dataset FAILED under COMMIT_ON_FULL_SUCCESS if any task missed commit, and stamps the id.
   */
  private void finalizeDatasetState(JobState.DatasetState datasetState, String datasetUrn) {
    Set<String> taskErrors = new HashSet<>();
    for (TaskState taskState : datasetState.getTaskStates()) {
      // Backoff the actual high watermark to the low watermark for each task that has not been committed
      if (taskState.getWorkingState() != WorkUnitState.WorkingState.COMMITTED) {
        taskState.backoffActualHighWatermark();
        if (this.jobContext.getJobCommitPolicy() == JobCommitPolicy.COMMIT_ON_FULL_SUCCESS) {
          // Determine the final dataset state based on the task states (post commit) and the job commit policy.
          // 1. If COMMIT_ON_FULL_SUCCESS is used, the processing of the dataset is considered failed if any
          //    task for the dataset failed to be committed.
          // 2. Otherwise, the processing of the dataset is considered successful even if some tasks for the
          //    dataset failed to be committed.
          datasetState.setState(JobState.RunningState.FAILED);
          String taskStateException = taskState.getTaskFailureException().isPresent() ? taskState.getTaskFailureException().get() : "Exception not set.";
          // Only print out the unique exceptions to avoid needless logging duplication on large datasets
          if (!taskErrors.contains(taskStateException)) {
            taskErrors.add(taskStateException);
            log.warn("At least one task in {} did not get committed successfully. Setting dataset state to FAILED. {}", datasetUrn,
                taskStateException);
          }
        }
      }
    }
    datasetState.setId(datasetUrn);
  }

  /**
   * Persist dataset state of a given dataset identified by the dataset URN.
   */
  private void persistDatasetState(String datasetUrn, JobState.DatasetState datasetState)
      throws IOException {
    log.info("Persisting dataset state for dataset " + datasetUrn);
    this.jobContext.getDatasetStateStore().persistDatasetState(datasetUrn, datasetState);
  }

  /**
   * Sets the {@link ConfigurationKeys#TASK_FAILURE_EXCEPTION_KEY} for each given {@link TaskState} to the given
   * {@link Throwable}.
   *
   * Make this method public as this exception catching routine can be reusable in other occasions as well.
   */
  public static void setTaskFailureException(Collection<? extends WorkUnitState> taskStates, Throwable t) {
    for (WorkUnitState taskState : taskStates) {
      ((TaskState) taskState).setTaskFailureException(t);
    }
  }

  /**
   * Creates the {@link DatasetStateCommitStep} that persists the dataset state as part of a commit sequence.
   */
  private static Optional<CommitStep> buildDatasetStateCommitStep(String datasetUrn,
      JobState.DatasetState datasetState) {
    log.info("Creating " + DatasetStateCommitStep.class.getSimpleName() + " for dataset " + datasetUrn);
    return Optional.of(new DatasetStateCommitStep.Builder<>().withProps(datasetState).withDatasetUrn(datasetUrn)
        .withDatasetState(datasetState).build());
  }
}
| 1,369 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/JobException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime;
/**
 * A type of {@link java.lang.Exception} thrown when there is anything
 * wrong with scheduling or running a job.
 */
public class JobException extends Exception {

  private static final long serialVersionUID = -7377882385877284612L;

  /**
   * Creates a {@link JobException} with an error message only.
   *
   * @param message a description of the scheduling or execution failure
   */
  public JobException(String message) {
    super(message);
  }

  /**
   * Creates a {@link JobException} with an error message and a root cause.
   *
   * @param message a description of the scheduling or execution failure
   * @param t the underlying {@link Throwable} that triggered this exception
   */
  public JobException(String message, Throwable t) {
    super(message, t);
  }
}
| 1,370 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/StateStoreMigrationCli.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.URISyntaxException;
import java.nio.charset.Charset;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import com.google.common.base.Preconditions;
import com.google.common.base.Predicates;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.metastore.DatasetStateStore;
import org.apache.gobblin.runtime.cli.CliApplication;
import org.apache.gobblin.runtime.cli.CliObjectFactory;
import org.apache.gobblin.runtime.cli.CliObjectOption;
import org.apache.gobblin.runtime.cli.CliObjectSupport;
import org.apache.gobblin.runtime.cli.ConstructorAndPublicMethodsCliObjectFactory;
import org.apache.gobblin.util.ConfigUtils;
import lombok.extern.slf4j.Slf4j;
import static org.apache.gobblin.configuration.ConfigurationKeys.*;
/**
 * A script used for state store migration:
 * In the case that users are willing to change the storage medium of job state due to some reasons.
 *
 * Current implementation doesn't support data awareness on either source or target side.
 * And only migrate a single job state instead of migrating all history versions.
 */
@Slf4j
@Alias(value = "stateMigration", description = "Command line tools for migrating state store")
public class StateStoreMigrationCli implements CliApplication {

  // Keys expected in the migration config file.
  private static final String SOURCE_KEY = "source";
  private static final String DESTINATION_KEY = "destination";
  private static final String JOB_NAME_KEY = "jobName";
  private static final String MIGRATE_ALL_JOBS = "migrateAllJobs";
  private static final String DEFAULT_MIGRATE_ALL_JOBS = "false";

  /**
   * Parses the CLI arguments and the migration config file, then copies the latest dataset states
   * for one job (or all jobs, when {@value #MIGRATE_ALL_JOBS} is true) from the source state store
   * to the destination state store.
   *
   * @param args CLI arguments; {@code args[0]} is the command name, the rest configure {@link Command}
   * @throws Exception if the config cannot be read or the migration fails
   */
  @Override
  public void run(String[] args) throws Exception {
    CliObjectFactory<Command> factory = new ConstructorAndPublicMethodsCliObjectFactory<>(Command.class);
    Command command = factory.buildObject(args, 1, true, args[0]);

    FileSystem fs = FileSystem.get(new Configuration());
    Config config;
    // Close the config input stream once parsed; previously this handle was leaked.
    try (FSDataInputStream inputStream = fs.open(command.path);
        InputStreamReader reader = new InputStreamReader(inputStream, Charset.defaultCharset())) {
      config = ConfigFactory.parseReader(reader);
    }

    Preconditions.checkNotNull(config.getObject(SOURCE_KEY));
    Preconditions.checkNotNull(config.getObject(DESTINATION_KEY));

    DatasetStateStore dstDatasetStateStore =
        DatasetStateStore.buildDatasetStateStore(config.getConfig(DESTINATION_KEY));
    DatasetStateStore srcDatasetStateStore = DatasetStateStore.buildDatasetStateStore(config.getConfig(SOURCE_KEY));

    // if migrating state for all jobs then list the store names (job names) and copy the current jst files
    if (ConfigUtils.getBoolean(config, MIGRATE_ALL_JOBS, Boolean.valueOf(DEFAULT_MIGRATE_ALL_JOBS))) {
      List<String> jobNames = srcDatasetStateStore.getStoreNames(Predicates.alwaysTrue());
      for (String jobName : jobNames) {
        migrateStateForJob(srcDatasetStateStore, dstDatasetStateStore, jobName, command.deleteSourceStateStore);
      }
    } else {
      Preconditions.checkNotNull(config.getString(JOB_NAME_KEY));
      migrateStateForJob(srcDatasetStateStore, dstDatasetStateStore, config.getString(JOB_NAME_KEY),
          command.deleteSourceStateStore);
    }
  }

  /**
   * Copies the latest dataset states of a single job from the source store to the destination store,
   * optionally deleting the job's state from the source store afterwards (best-effort).
   *
   * @param srcDatasetStateStore store to read the latest dataset states from
   * @param dstDatasetStateStore store to persist the dataset states into
   * @param jobName name of the job whose state is migrated
   * @param deleteFromSource whether to delete the job's state from the source store after copying
   * @throws IOException if reading from the source or persisting to the destination fails
   */
  private static void migrateStateForJob(DatasetStateStore srcDatasetStateStore, DatasetStateStore dstDatasetStateStore,
      String jobName, boolean deleteFromSource) throws IOException {
    Map<String, JobState.DatasetState> map = srcDatasetStateStore.getLatestDatasetStatesByUrns(jobName);
    for (Map.Entry<String, JobState.DatasetState> entry : map.entrySet()) {
      dstDatasetStateStore.persistDatasetState(entry.getKey(), entry.getValue());
    }

    if (deleteFromSource) {
      try {
        srcDatasetStateStore.delete(jobName);
      } catch (IOException ioe) {
        log.warn("The source state store has been deleted", ioe);
      }
    }
  }

  /**
   * This class has to been public static for being accessed by
   * {@link ConstructorAndPublicMethodsCliObjectFactory#inferConstructorOptions}
   */
  public static class Command {
    // Path to the migration config file on the default Hadoop FileSystem.
    private final Path path;
    // When set via the CLI flag, the job state is deleted from the source store after migration.
    private boolean deleteSourceStateStore = false;

    @CliObjectSupport(argumentNames = "configPath")
    public Command(String path) throws URISyntaxException, IOException {
      this.path = new Path(path);
    }

    @CliObjectOption
    public void deleteSourceStateStore() {
      this.deleteSourceStateStore = true;
    }
  }
}
| 1,371 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/Task.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime;
import java.io.Closeable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import org.apache.commons.lang3.BooleanUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.MDC;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
import com.google.common.io.Closer;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import lombok.NoArgsConstructor;
import org.apache.gobblin.Constructs;
import org.apache.gobblin.broker.EmptyKey;
import org.apache.gobblin.broker.gobblin_scopes.GobblinScopeTypes;
import org.apache.gobblin.broker.iface.NotConfiguredException;
import org.apache.gobblin.broker.iface.SharedResourcesBroker;
import org.apache.gobblin.commit.SpeculativeAttemptAwareConstruct;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.Converter;
import org.apache.gobblin.converter.DataConversionException;
import org.apache.gobblin.fork.CopyHelper;
import org.apache.gobblin.fork.CopyNotSupportedException;
import org.apache.gobblin.fork.Copyable;
import org.apache.gobblin.fork.ForkOperator;
import org.apache.gobblin.instrumented.extractor.InstrumentedExtractorBase;
import org.apache.gobblin.instrumented.extractor.InstrumentedExtractorDecorator;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.event.EventSubmitter;
import org.apache.gobblin.metrics.event.FailureEventBuilder;
import org.apache.gobblin.metrics.event.TaskEvent;
import org.apache.gobblin.publisher.DataPublisher;
import org.apache.gobblin.publisher.SingleTaskDataPublisher;
import org.apache.gobblin.qualitychecker.row.RowLevelPolicyCheckResults;
import org.apache.gobblin.qualitychecker.row.RowLevelPolicyChecker;
import org.apache.gobblin.records.RecordStreamProcessor;
import org.apache.gobblin.runtime.api.TaskEventMetadataGenerator;
import org.apache.gobblin.runtime.fork.AsynchronousFork;
import org.apache.gobblin.runtime.fork.Fork;
import org.apache.gobblin.runtime.fork.SynchronousFork;
import org.apache.gobblin.runtime.task.TaskIFace;
import org.apache.gobblin.runtime.util.ExceptionCleanupUtils;
import org.apache.gobblin.runtime.util.TaskMetrics;
import org.apache.gobblin.source.extractor.Extractor;
import org.apache.gobblin.source.extractor.JobCommitPolicy;
import org.apache.gobblin.source.extractor.StreamingExtractor;
import org.apache.gobblin.state.ConstructState;
import org.apache.gobblin.stream.RecordEnvelope;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.TaskEventMetadataUtils;
import org.apache.gobblin.writer.AcknowledgableWatermark;
import org.apache.gobblin.writer.DataWriter;
import org.apache.gobblin.writer.FineGrainedWatermarkTracker;
import org.apache.gobblin.writer.TrackerBasedWatermarkManager;
import org.apache.gobblin.writer.WatermarkAwareWriter;
import org.apache.gobblin.writer.WatermarkManager;
import org.apache.gobblin.writer.WatermarkStorage;
/**
* A physical unit of execution for a Gobblin {@link org.apache.gobblin.source.workunit.WorkUnit}.
*
* <p>
* Each task is executed by a single thread in a thread pool managed by the {@link TaskExecutor}
* and each {@link Fork} of the task is executed in a separate thread pool also managed by the
* {@link TaskExecutor}.
*
* Each {@link Task} consists of the following steps:
* <ul>
* <li>Extracting, converting, and forking the source schema.</li>
* <li>Extracting, converting, doing row-level quality checking, and forking each data record.</li>
* <li>Putting each forked record into the record queue managed by each {@link Fork}.</li>
* <li>Committing output data of each {@link Fork} once all {@link Fork}s finish.</li>
* <li>Cleaning up and exiting.</li>
* </ul>
*
* Each {@link Fork} consists of the following steps:
* <ul>
* <li>Getting the next record off the record queue.</li>
* <li>Converting the record and doing row-level quality checking if applicable.</li>
* <li>Writing the record out if it passes the quality checking.</li>
* <li>Cleaning up and exiting once all the records have been processed.</li>
* </ul>
* </p>
*
* @author Yinan Li
*/
@NoArgsConstructor(force = true)
public class Task implements TaskIFace {
  private static final Logger LOG = LoggerFactory.getLogger(Task.class);
  // Metadata key under which the serialized task state is attached to failure events.
  private static final String TASK_STATE = "taskState";
  // Name of the event emitted by failTask() when this task fails.
  private static final String FAILED_TASK_EVENT = "failedTask";
  // Identity of this task and of the job it belongs to (all derived from the TaskState).
  private final String jobId;
  private final String taskId;
  private final String taskKey;
  // When true, exceptions thrown while closing resources in commit() do not fail the task.
  private final boolean isIgnoreCloseFailures;
  private final TaskContext taskContext;
  private final TaskState taskState;
  private final TaskStateTracker taskStateTracker;
  private final TaskExecutor taskExecutor;
  // Optional latch counted down in markTaskCompletion() so callers can await a batch of tasks.
  private final Optional<CountDownLatch> countDownLatch;
  // Fork -> future of its execution, in insertion order. Branches that are not forked are
  // inserted as Optional.absent() (see runSynchronousModel()). NOTE(review): because this is a
  // map, multiple un-forked branches collapse into a single absent key — confirm intended.
  private final Map<Optional<Fork>, Optional<Future<?>>> forks = Maps.newLinkedHashMap();
  // Number of task retries
  private final AtomicInteger retryCount = new AtomicInteger();
  private final Converter converter;
  private final InstrumentedExtractorBase extractor;
  private final RowLevelPolicyChecker rowChecker;
  // BATCH or STREAMING, resolved once from the task state in the constructor.
  private final ExecutionModel taskMode;
  // Streaming-only constructs; all three are absent when running in batch mode.
  private final Optional<WatermarkManager> watermarkManager;
  private final Optional<FineGrainedWatermarkTracker> watermarkTracker;
  private final Optional<WatermarkStorage> watermarkStorage;
  private final List<RecordStreamProcessor<?,?,?,?>> recordStreamProcessors;
  // Owns every closeable resource of this task; closed (reverse registration order) in commit().
  private final Closer closer;
  private final TaskEventMetadataGenerator taskEventMetadataGenerator;
  private long startTime;
  // Millis timestamp of the last record extracted; 0 until the first record is pulled.
  private volatile long lastRecordPulledTimestampMillis;
  private final AtomicLong recordsPulled;
  private final AtomicBoolean shutdownRequested;
  // Whether cancel() should interrupt the thread running the task (Future.cancel(true)).
  private final boolean shouldInterruptTaskOnCancel;
  // Earliest time a shutdown was requested; stays Long.MAX_VALUE until shutdown() is called.
  private volatile long shutdownRequestedTime = Long.MAX_VALUE;
  // Released in run()'s finally block; awaitShutdown() blocks on it.
  private final CountDownLatch shutdownLatch;
  protected Future<?> taskFuture;
  /**
   * Instantiate a new {@link Task}.
   *
   * <p>All closeable constructs (broker, extractor, stream processors, converters, row checker,
   * and the streaming watermark tracker/manager) are registered with {@link #closer} in
   * construction order, so they are closed in reverse order when the task commits.</p>
   *
   * @param context a {@link TaskContext} containing all necessary information to construct and run a {@link Task}
   * @param taskStateTracker a {@link TaskStateTracker} for tracking task state
   * @param taskExecutor a {@link TaskExecutor} for executing the {@link Task} and its {@link Fork}s
   * @param countDownLatch an optional {@link java.util.concurrent.CountDownLatch} used to signal the task completion
   * @throws TaskInstantiationException if both converters and record stream processors are
   *         configured, or if a streaming task's extractor is not a {@link StreamingExtractor}
   * @throws RuntimeException if the row-level policy checker cannot be instantiated
   */
  public Task(TaskContext context, TaskStateTracker taskStateTracker, TaskExecutor taskExecutor,
      Optional<CountDownLatch> countDownLatch) {
    this.taskContext = context;
    this.taskState = context.getTaskState();
    this.jobId = this.taskState.getJobId();
    this.taskId = this.taskState.getTaskId();
    this.taskKey = this.taskState.getTaskKey();
    this.isIgnoreCloseFailures = this.taskState.getJobState().getPropAsBoolean(ConfigurationKeys.TASK_IGNORE_CLOSE_FAILURES, false);
    this.shouldInterruptTaskOnCancel = this.taskState.getJobState().getPropAsBoolean(ConfigurationKeys.TASK_INTERRUPT_ON_CANCEL, true);
    this.taskStateTracker = taskStateTracker;
    this.taskExecutor = taskExecutor;
    this.countDownLatch = countDownLatch;
    this.closer = Closer.create();
    this.closer.register(this.taskState.getTaskBrokerNullable());
    this.extractor =
        closer.register(new InstrumentedExtractorDecorator<>(this.taskState, this.taskContext.getExtractor()));
    this.recordStreamProcessors = this.taskContext.getRecordStreamProcessors();
    // add record stream processors to closer if they are closeable
    for (RecordStreamProcessor r: recordStreamProcessors) {
      if (r instanceof Closeable) {
        this.closer.register((Closeable)r);
      }
    }
    List<Converter<?,?,?,?>> converters = this.taskContext.getConverters();
    this.converter = closer.register(new MultiConverter(converters));
    // can't have both record stream processors and converter lists configured
    try {
      Preconditions.checkState(this.recordStreamProcessors.isEmpty() || converters.isEmpty(),
          "Converters cannot be specified when RecordStreamProcessors are specified");
    } catch (IllegalStateException e) {
      // Release everything registered so far before propagating the configuration error.
      try {
        closer.close();
      } catch (Throwable t) {
        LOG.error("Failed to close all open resources", t);
      }
      throw new TaskInstantiationException("Converters cannot be specified when RecordStreamProcessors are specified");
    }
    try {
      this.rowChecker = closer.register(this.taskContext.getRowLevelPolicyChecker());
    } catch (Exception e) {
      // Same cleanup-then-fail pattern as above: don't leak already-registered resources.
      try {
        closer.close();
      } catch (Throwable t) {
        LOG.error("Failed to close all open resources", t);
      }
      throw new RuntimeException("Failed to instantiate row checker.", e);
    }
    this.taskMode = getExecutionModel(this.taskState);
    this.recordsPulled = new AtomicLong(0);
    this.lastRecordPulledTimestampMillis = 0;
    this.shutdownRequested = new AtomicBoolean(false);
    this.shutdownLatch = new CountDownLatch(1);
    // Setup Streaming constructs
    if (isStreamingTask()) {
      // Streaming requires the raw (undecorated) extractor to be a StreamingExtractor.
      Extractor underlyingExtractor = this.taskContext.getRawSourceExtractor();
      if (!(underlyingExtractor instanceof StreamingExtractor)) {
        LOG.error(
            "Extractor {} is not an instance of StreamingExtractor but the task is configured to run in continuous mode",
            underlyingExtractor.getClass().getName());
        throw new TaskInstantiationException("Extraction " + underlyingExtractor.getClass().getName()
            + " is not an instance of StreamingExtractor but the task is configured to run in continuous mode");
      }
      this.watermarkStorage = Optional.of(taskContext.getWatermarkStorage());
      Config config;
      try {
        config = ConfigUtils.propertiesToConfig(taskState.getProperties());
      } catch (Exception e) {
        // Best-effort: a malformed task state should not prevent streaming startup.
        LOG.warn("Failed to deserialize taskState into Config.. continuing with an empty config", e);
        config = ConfigFactory.empty();
      }
      long commitIntervalMillis = ConfigUtils.getLong(config,
          TaskConfigurationKeys.STREAMING_WATERMARK_COMMIT_INTERVAL_MILLIS,
          TaskConfigurationKeys.DEFAULT_STREAMING_WATERMARK_COMMIT_INTERVAL_MILLIS);
      this.watermarkTracker = Optional.of(this.closer.register(new FineGrainedWatermarkTracker(config)));
      this.watermarkManager = Optional.of((WatermarkManager) this.closer.register(
          new TrackerBasedWatermarkManager(this.watermarkStorage.get(), this.watermarkTracker.get(),
              commitIntervalMillis, Optional.of(this.LOG))));
    } else {
      // Batch mode: no watermark machinery.
      this.watermarkManager = Optional.absent();
      this.watermarkTracker = Optional.absent();
      this.watermarkStorage = Optional.absent();
    }
    this.taskEventMetadataGenerator = TaskEventMetadataUtils.getTaskEventMetadataGenerator(taskState);
  }
  /**
   * Try to get a {@link ForkThrowableHolder} instance from the given {@link SharedResourcesBroker}.
   *
   * @param broker the broker from which to look up the shared holder
   * @return the shared {@link ForkThrowableHolder} for the broker's scope
   * @throws RuntimeException wrapping {@link NotConfiguredException} if the broker cannot supply
   *         the shared resource
   */
  public static ForkThrowableHolder getForkThrowableHolder(SharedResourcesBroker<GobblinScopeTypes> broker) {
    try {
      return broker.getSharedResource(new ForkThrowableHolderFactory(), EmptyKey.INSTANCE);
    } catch (NotConfiguredException e) {
      LOG.error("Fail to get fork throwable holder instance from broker. Will not track fork exception.", e);
      throw new RuntimeException(e);
    }
  }
public static ExecutionModel getExecutionModel(State state) {
String mode = state
.getProp(TaskConfigurationKeys.TASK_EXECUTION_MODE, TaskConfigurationKeys.DEFAULT_TASK_EXECUTION_MODE);
try {
return ExecutionModel.valueOf(mode.toUpperCase());
} catch (Exception e) {
LOG.warn("Could not find an execution model corresponding to {}, returning {}", mode, ExecutionModel.BATCH, e);
return ExecutionModel.BATCH;
}
}
protected boolean areSingleBranchTasksSynchronous(TaskContext taskContext) {
return BooleanUtils.toBoolean(taskContext.getTaskState()
.getProp(TaskConfigurationKeys.TASK_IS_SINGLE_BRANCH_SYNCHRONOUS, TaskConfigurationKeys.DEFAULT_TASK_IS_SINGLE_BRANCH_SYNCHRONOUS));
}
  /** Whether this task runs in continuous (STREAMING) mode. */
  protected boolean isStreamingTask() {
    return this.taskMode.equals(ExecutionModel.STREAMING);
  }
  /**
   * Block until the task's run() method has completed, or the timeout elapses.
   *
   * @param timeoutInMillis maximum time to wait, in milliseconds
   * @return true if the task completed within the timeout, false otherwise
   * @throws InterruptedException if the waiting thread is interrupted
   */
  public boolean awaitShutdown(long timeoutInMillis)
      throws InterruptedException {
    return this.shutdownLatch.await(timeoutInMillis, TimeUnit.MILLISECONDS);
  }
  /** Release waiters in {@link #awaitShutdown(long)}; called from run()'s finally block. */
  private void completeShutdown() {
    this.shutdownLatch.countDown();
  }
private boolean shutdownRequested() {
if (!this.shutdownRequested.get()) {
this.shutdownRequested.set(Thread.currentThread().isInterrupted());
}
return this.shutdownRequested.get();
}
  /**
   * Request a graceful shutdown of this task. The running task observes the flag via
   * {@code shutdownRequested()} and stops extraction. Also records the earliest time a
   * shutdown was requested (idempotent: repeated calls keep the first timestamp).
   */
  public void shutdown() {
    this.shutdownRequested.set(true);
    this.shutdownRequestedTime = Math.min(System.currentTimeMillis(), this.shutdownRequestedTime);
  }
public String getProgress() {
long currentTime = System.currentTimeMillis();
long lastRecordTimeElapsed = currentTime - this.lastRecordPulledTimestampMillis;
if (isStreamingTask()) {
WatermarkManager.CommitStatus commitStatus = this.watermarkManager.get().getCommitStatus();
long lastWatermarkCommitTimeElapsed = currentTime - commitStatus.getLastWatermarkCommitSuccessTimestampMillis();
String progressString = String.format("recordsPulled:%d, lastRecordExtracted: %d ms ago, "
+ "lastWatermarkCommitted: %d ms ago, lastWatermarkCommitted: %s", this.recordsPulled.get(),
lastRecordTimeElapsed, lastWatermarkCommitTimeElapsed, commitStatus.getLastCommittedWatermarks());
return progressString;
} else {
String progressString = String
.format("recordsPulled:%d, lastRecordExtracted: %d ms ago", this.recordsPulled.get(), lastRecordTimeElapsed);
return progressString;
}
}
  /**
   * Run this task: execute either the deprecated synchronous model or the stream model,
   * record extraction stats, surface any fork failures, and finally notify the state tracker
   * and release the shutdown latch.
   */
  @Override
  @SuppressWarnings("unchecked")
  public void run() {
    // Tag all log lines from this thread with the task key.
    MDC.put(ConfigurationKeys.TASK_KEY_KEY, this.taskKey);
    this.startTime = System.currentTimeMillis();
    this.taskState.setStartTime(startTime);
    this.taskState.setWorkingState(WorkUnitState.WorkingState.RUNNING);
    // Clear the map so it starts with a fresh set of forks for each run/retry
    this.forks.clear();
    try {
      if (this.taskState.getPropAsBoolean(ConfigurationKeys.TASK_SYNCHRONOUS_EXECUTION_MODEL_KEY,
          ConfigurationKeys.DEFAULT_TASK_SYNCHRONOUS_EXECUTION_MODEL)) {
        LOG.warn("Synchronous task execution model is deprecated. Please consider using stream model.");
        runSynchronousModel();
      } else {
        new StreamModelTaskRunner(this, this.taskState, this.closer, this.taskContext, this.extractor,
            this.converter, this.recordStreamProcessors, this.rowChecker, this.taskExecutor, this.taskMode, this.shutdownRequested,
            this.watermarkTracker, this.watermarkManager, this.watermarkStorage, this.forks).run();
      }
      LOG.info("Extracted " + this.recordsPulled + " data records");
      LOG.info("Row quality checker finished with results: " + this.rowChecker.getResults().getResults());
      this.taskState.setProp(ConfigurationKeys.EXTRACTOR_ROWS_EXTRACTED, this.recordsPulled);
      this.taskState.setProp(ConfigurationKeys.EXTRACTOR_ROWS_EXPECTED, extractor.getExpectedRecordCount());
      // If any fork did not succeed, collect the failed fork indices, look up their recorded
      // throwables in the shared holder, and rethrow (aggregated when more than one failed).
      // NOTE(review): Optional::get here assumes no absent keys in the forks map at this point
      // — confirm the stream model never inserts Optional.absent().
      if (!this.forks.keySet().stream().map(Optional::get).allMatch(Fork::isSucceeded)) {
        List<Integer> failedForksId = this.forks.keySet().stream().map(Optional::get).
            filter(not(Fork::isSucceeded)).map(x -> x.getIndex()).collect(Collectors.toList());
        ForkThrowableHolder holder = Task.getForkThrowableHolder(this.taskState.getTaskBroker());
        Throwable e = null;
        if (!holder.isEmpty()) {
          if (failedForksId.size() == 1 && holder.getThrowable(failedForksId.get(0)).isPresent()) {
            e = holder.getThrowable(failedForksId.get(0)).get();
          }else{
            e = holder.getAggregatedException(failedForksId, this.taskId);
          }
        }
        throw e == null ? new RuntimeException("Some forks failed") : e;
      }
      //TODO: Move these to explicit shutdown phase
      if (watermarkManager.isPresent()) {
        watermarkManager.get().close();
      }
      if (watermarkTracker.isPresent()) {
        watermarkTracker.get().close();
      }
    } catch (Throwable t) {
      failTask(t);
    } finally {
      // Always notify the tracker, release awaitShutdown() waiters, and drop the future
      // so cancel() becomes a no-op for a completed run.
      synchronized (this) {
        this.taskStateTracker.onTaskRunCompletion(this);
        completeShutdown();
        this.taskFuture = null;
      }
    }
  }
/**
* TODO: Remove this method after Java-11 as JDK offers similar built-in solution.
*/
public static <T> Predicate<T> not(Predicate<T> t) {
return t.negate();
}
  /**
   * Deprecated synchronous execution model: extract, convert, quality-check and fork one record
   * at a time, pushing each record into per-branch {@link Fork}s, then wait for all forks to
   * drain. Streaming tasks additionally track and commit watermarks per record.
   */
  @Deprecated
  private void runSynchronousModel() throws Exception {
    // Get the fork operator. By default IdentityForkOperator is used with a single branch.
    ForkOperator forkOperator = closer.register(this.taskContext.getForkOperator());
    forkOperator.init(this.taskState);
    int branches = forkOperator.getBranches(this.taskState);
    // Set fork.branches explicitly here so the rest task flow can pick it up
    this.taskState.setProp(ConfigurationKeys.FORK_BRANCHES_KEY, branches);
    // Extract, convert, and fork the source schema.
    Object schema = converter.convertSchema(extractor.getSchema(), this.taskState);
    List<Boolean> forkedSchemas = forkOperator.forkSchema(this.taskState, schema);
    if (forkedSchemas.size() != branches) {
      throw new ForkBranchMismatchException(String
          .format("Number of forked schemas [%d] is not equal to number of branches [%d]", forkedSchemas.size(),
              branches));
    }
    // A schema going to multiple branches must be copyable so each branch gets its own copy.
    if (inMultipleBranches(forkedSchemas) && !(CopyHelper.isCopyable(schema))) {
      throw new CopyNotSupportedException(schema + " is not copyable");
    }
    RowLevelPolicyCheckResults rowResults = new RowLevelPolicyCheckResults();
    // Single enabled branch may run synchronously in this thread; otherwise each enabled
    // branch gets an asynchronous fork submitted to the task executor.
    if (!areSingleBranchTasksSynchronous(this.taskContext) || branches > 1) {
      // Create one fork for each forked branch
      for (int i = 0; i < branches; i++) {
        if (forkedSchemas.get(i)) {
          AsynchronousFork fork = closer.register(
              new AsynchronousFork(this.taskContext, schema instanceof Copyable ? ((Copyable) schema).copy() : schema,
                  branches, i, this.taskMode));
          configureStreamingFork(fork);
          // Run the Fork
          this.forks.put(Optional.<Fork>of(fork), Optional.<Future<?>>of(this.taskExecutor.submit(fork)));
        } else {
          // Disabled branch: keep a placeholder so branch indices stay aligned.
          this.forks.put(Optional.<Fork>absent(), Optional.<Future<?>> absent());
        }
      }
    } else {
      SynchronousFork fork = closer.register(
          new SynchronousFork(this.taskContext, schema instanceof Copyable ? ((Copyable) schema).copy() : schema,
              branches, 0, this.taskMode));
      configureStreamingFork(fork);
      this.forks.put(Optional.<Fork>of(fork), Optional.<Future<?>> of(this.taskExecutor.submit(fork)));
    }
    LOG.info("Task mode streaming = " + isStreamingTask());
    if (isStreamingTask()) {
      // Start watermark manager and tracker
      if (this.watermarkTracker.isPresent()) {
        this.watermarkTracker.get().start();
      }
      this.watermarkManager.get().start();
      ((StreamingExtractor) this.taskContext.getRawSourceExtractor()).start(this.watermarkStorage.get());
      RecordEnvelope recordEnvelope;
      // Extract, convert, and fork one source record at a time.
      while ((recordEnvelope = extractor.readRecordEnvelope()) != null) {
        onRecordExtract();
        // Track the record's watermark; it is acked as downstream consumers finish with it.
        AcknowledgableWatermark ackableWatermark = new AcknowledgableWatermark(recordEnvelope.getWatermark());
        if (watermarkTracker.isPresent()) {
          watermarkTracker.get().track(ackableWatermark);
        }
        for (Object convertedRecord : converter.convertRecord(schema, recordEnvelope, this.taskState)) {
          processRecord(convertedRecord, forkOperator, rowChecker, rowResults, branches,
              ackableWatermark.incrementAck());
        }
        // Release the initial ack held by this loop iteration.
        ackableWatermark.ack();
        if (shutdownRequested()) {
          extractor.shutdown();
        }
      }
    } else {
      RecordEnvelope record;
      // Extract, convert, and fork one source record at a time.
      long errRecords = 0;
      while ((record = extractor.readRecordEnvelope()) != null) {
        onRecordExtract();
        try {
          for (Object convertedRecord : converter.convertRecord(schema, record.getRecord(), this.taskState)) {
            processRecord(convertedRecord, forkOperator, rowChecker, rowResults, branches, null);
          }
        } catch (Exception e) {
          // Only DataConversionExceptions count against the skip-error-records budget;
          // anything else fails the task immediately.
          if (!(e instanceof DataConversionException) && !(e.getCause() instanceof DataConversionException)) {
            LOG.error("Processing record incurs an unexpected exception: ", e);
            throw new RuntimeException(e.getCause());
          }
          errRecords++;
          if (errRecords > this.taskState.getPropAsLong(TaskConfigurationKeys.TASK_SKIP_ERROR_RECORDS,
              TaskConfigurationKeys.DEFAULT_TASK_SKIP_ERROR_RECORDS)) {
            throw new RuntimeException(e);
          }
        }
        if (shutdownRequested()) {
          extractor.shutdown();
        }
      }
    }
    LOG.info("Extracted " + this.recordsPulled + " data records");
    LOG.info("Row quality checker finished with results: " + rowResults.getResults());
    this.taskState.setProp(ConfigurationKeys.EXTRACTOR_ROWS_EXTRACTED, this.recordsPulled);
    this.taskState.setProp(ConfigurationKeys.EXTRACTOR_ROWS_EXPECTED, extractor.getExpectedRecordCount());
    for (Optional<Fork> fork : this.forks.keySet()) {
      if (fork.isPresent()) {
        // Tell the fork that the main branch is completed and no new incoming data records should be expected
        fork.get().markParentTaskDone();
      }
    }
    // Wait for each fork to finish draining its record queue.
    for (Optional<Future<?>> forkFuture : this.forks.values()) {
      if (forkFuture.isPresent()) {
        try {
          long forkFutureStartTime = System.nanoTime();
          forkFuture.get().get();
          long forkDuration = System.nanoTime() - forkFutureStartTime;
          LOG.info("Task shutdown: Fork future reaped in {} millis", forkDuration / 1000000);
        } catch (InterruptedException ie) {
          // Preserve the interrupt for the caller; remaining forks are not reaped.
          Thread.currentThread().interrupt();
        }
      }
    }
  }
protected void configureStreamingFork(Fork fork) throws IOException {
if (isStreamingTask()) {
DataWriter forkWriter = fork.getWriter();
boolean isWaterMarkAwareWriter = (forkWriter instanceof WatermarkAwareWriter)
&& ((WatermarkAwareWriter) forkWriter).isWatermarkCapable();
if (!isWaterMarkAwareWriter) {
String errorMessage = String.format("The Task is configured to run in continuous mode, "
+ "but the writer %s is not a WatermarkAwareWriter", forkWriter.getClass().getName());
LOG.error(errorMessage);
throw new RuntimeException(errorMessage);
}
}
}
  /** Record that one source record was extracted: bump the counter and refresh the timestamp. */
  protected void onRecordExtract() {
    this.recordsPulled.incrementAndGet();
    this.lastRecordPulledTimestampMillis = System.currentTimeMillis();
  }
protected void failTask(Throwable t) {
Throwable cleanedException = ExceptionCleanupUtils.removeEmptyWrappers(t);
LOG.error(String.format("Task %s failed", this.taskId), cleanedException);
this.taskState.setWorkingState(WorkUnitState.WorkingState.FAILED);
this.taskState
.setProp(ConfigurationKeys.TASK_FAILURE_EXCEPTION_KEY, Throwables.getStackTraceAsString(cleanedException));
// Send task failure event
FailureEventBuilder failureEvent = new FailureEventBuilder(FAILED_TASK_EVENT);
failureEvent.setRootCause(cleanedException);
failureEvent.addMetadata(TASK_STATE, this.taskState.toString());
failureEvent.addAdditionalMetadata(this.taskEventMetadataGenerator.getMetadata(this.taskState, failureEvent.getName()));
failureEvent.submit(taskContext.getTaskMetrics().getMetricContext());
}
/**
* Whether the task should directly publish its output data to the final publisher output directory.
*
* <p>
* The task should publish its output data directly if {@link ConfigurationKeys#PUBLISH_DATA_AT_JOB_LEVEL}
* is set to false AND any of the following conditions is satisfied:
*
* <ul>
* <li>The {@link JobCommitPolicy#COMMIT_ON_PARTIAL_SUCCESS} policy is used.</li>
* <li>The {@link JobCommitPolicy#COMMIT_SUCCESSFUL_TASKS} policy is used. and all {@link Fork}s of this
* {@link Task} succeeded.</li>
* </ul>
* </p>
*/
private boolean shouldPublishDataInTask() {
boolean publishDataAtJobLevel = this.taskState.getPropAsBoolean(ConfigurationKeys.PUBLISH_DATA_AT_JOB_LEVEL,
ConfigurationKeys.DEFAULT_PUBLISH_DATA_AT_JOB_LEVEL);
if (publishDataAtJobLevel) {
LOG.info(String
.format("%s is true. Will publish data at the job level.", ConfigurationKeys.PUBLISH_DATA_AT_JOB_LEVEL));
return false;
}
JobCommitPolicy jobCommitPolicy = JobCommitPolicy.getCommitPolicy(this.taskState);
if (jobCommitPolicy == JobCommitPolicy.COMMIT_SUCCESSFUL_TASKS) {
return this.taskState.getWorkingState() == WorkUnitState.WorkingState.SUCCESSFUL;
}
if (jobCommitPolicy == JobCommitPolicy.COMMIT_ON_PARTIAL_SUCCESS) {
return true;
}
LOG.info("Will publish data at the job level with job commit policy: " + jobCommitPolicy);
return false;
}
  /**
   * Publish this task's output data using a {@link SingleTaskDataPublisher} resolved from the
   * task state. Any failure is recorded on the task state and rethrown; the publisher is always
   * closed via the local {@link Closer}.
   *
   * @throws IOException if publishing or closing the publisher fails
   */
  private void publishTaskData()
      throws IOException {
    Closer closer = Closer.create();
    try {
      Class<? extends DataPublisher> dataPublisherClass = getTaskPublisherClass();
      SingleTaskDataPublisher publisher =
          closer.register(SingleTaskDataPublisher.getInstance(dataPublisherClass, this.taskState));
      LOG.info("Publishing data from task " + this.taskId);
      publisher.publish(this.taskState);
    } catch (ClassCastException e) {
      // The configured publisher class does not extend SingleTaskDataPublisher.
      LOG.error(String.format("To publish data in task, the publisher class must extend %s",
          SingleTaskDataPublisher.class.getSimpleName()), e);
      this.taskState.setTaskFailureException(e);
      throw closer.rethrow(e);
    } catch (Throwable t) {
      this.taskState.setTaskFailureException(t);
      throw closer.rethrow(t);
    } finally {
      closer.close();
    }
  }
@SuppressWarnings("unchecked")
private Class<? extends DataPublisher> getTaskPublisherClass()
throws ReflectiveOperationException {
if (this.taskState.contains(ConfigurationKeys.TASK_DATA_PUBLISHER_TYPE)) {
return (Class<? extends DataPublisher>) Class
.forName(this.taskState.getProp(ConfigurationKeys.TASK_DATA_PUBLISHER_TYPE));
}
return (Class<? extends DataPublisher>) Class.forName(
this.taskState.getProp(ConfigurationKeys.DATA_PUBLISHER_TYPE, ConfigurationKeys.DEFAULT_DATA_PUBLISHER_TYPE));
}
  /** Get the ID of the job this {@link Task} belongs to.
   *
   * @return ID of the job this {@link Task} belongs to.
   */
  public String getJobId() {
    return this.jobId;
  }
  /**
   * Get the ID of this task.
   *
   * @return ID of this task
   */
  public String getTaskId() {
    return this.taskId;
  }
  /**
   * Get the key of this task.
   *
   * @return Key of this task
   */
  public String getTaskKey() {
    return this.taskKey;
  }
  /**
   * Get the {@link TaskContext} associated with this task.
   *
   * @return {@link TaskContext} associated with this task
   */
  public TaskContext getTaskContext() {
    return this.taskContext;
  }
  /**
   * Get the state of this task.
   *
   * @return state of this task
   */
  public TaskState getTaskState() {
    return this.taskState;
  }
  /** {@inheritDoc} — the task state doubles as the persistent state. */
  @Override
  public State getPersistentState() {
    return getTaskState();
  }
  /** {@inheritDoc} — the task state doubles as the execution metadata. */
  @Override
  public State getExecutionMetadata() {
    return getTaskState();
  }
  /** {@inheritDoc} — delegates to the task state's working state. */
  @Override
  public WorkUnitState.WorkingState getWorkingState() {
    return getTaskState().getWorkingState();
  }
  /**
   * Get the list of {@link Fork}s created by this {@link Task}.
   * Entries may be {@link Optional#absent()} for branches that were not forked.
   *
   * @return the list of {@link Fork}s created by this {@link Task}
   */
  public List<Optional<Fork>> getForks() {
    return ImmutableList.copyOf(this.forks.keySet());
  }
/**
* Update record-level metrics.
*/
public void updateRecordMetrics() {
for (Optional<Fork> fork : this.forks.keySet()) {
if (fork.isPresent()) {
fork.get().updateRecordMetrics();
}
}
}
/**
* Update byte-level metrics.
*
* <p>
* This method is only supposed to be called after the writer commits.
* </p>
*/
public void updateByteMetrics() {
try {
for (Optional<Fork> fork : this.forks.keySet()) {
if (fork.isPresent()) {
fork.get().updateByteMetrics();
}
}
} catch (IOException ioe) {
LOG.error("Failed to update byte-level metrics for task " + this.taskId, ioe);
}
}
  /**
   * Increment the retry count of this task.
   */
  public void incrementRetryCount() {
    this.retryCount.incrementAndGet();
  }
  /**
   * Get the number of times this task has been retried.
   *
   * @return number of times this task has been retried
   */
  public int getRetryCount() {
    return this.retryCount.get();
  }
  /**
   * Mark the completion of this {@link Task}: count down the shared latch (if any) and record
   * the number of retries on the task state.
   */
  public void markTaskCompletion() {
    if (this.countDownLatch.isPresent()) {
      this.countDownLatch.get().countDown();
    }
    this.taskState.setProp(ConfigurationKeys.TASK_RETRIES_KEY, this.retryCount.get());
  }
  /** The task ID serves as the string form of this task. */
  @Override
  public String toString() {
    return this.taskId;
  }
  /**
   * Process a (possibly converted) record: run row-level quality checks, fork the record to the
   * enabled branches, and deliver it to each fork's record queue (retrying until accepted).
   * For streaming tasks a non-null {@code watermark} is threaded through so each fork can ack it.
   */
  @SuppressWarnings("unchecked")
  private void processRecord(Object convertedRecord, ForkOperator forkOperator, RowLevelPolicyChecker rowChecker,
      RowLevelPolicyCheckResults rowResults, int branches, AcknowledgableWatermark watermark)
      throws Exception {
    // Skip the record if quality checking fails
    if (!rowChecker.executePolicies(convertedRecord, rowResults)) {
      // Ack the skipped record's watermark so it does not block watermark commits.
      if (watermark != null) {
        watermark.ack();
      }
      return;
    }
    List<Boolean> forkedRecords = forkOperator.forkDataRecord(this.taskState, convertedRecord);
    if (forkedRecords.size() != branches) {
      throw new ForkBranchMismatchException(String
          .format("Number of forked data records [%d] is not equal to number of branches [%d]", forkedRecords.size(),
              branches));
    }
    boolean needToCopy = inMultipleBranches(forkedRecords);
    // we only have to copy a record if it needs to go into multiple forks
    if (needToCopy && !(CopyHelper.isCopyable(convertedRecord))) {
      throw new CopyNotSupportedException(convertedRecord.getClass().getName() + " is not copyable");
    }
    int branch = 0;
    // NOTE(review): copyInstance is incremented but never read — looks like dead state; confirm
    // before removing.
    int copyInstance = 0;
    for (Optional<Fork> fork : this.forks.keySet()) {
      if (fork.isPresent() && forkedRecords.get(branch)) {
        Object recordForFork = needToCopy ? CopyHelper.copy(convertedRecord) : convertedRecord;
        copyInstance++;
        if (isStreamingTask()) {
          // Send the record, watermark pair down the fork
          ((RecordEnvelope) recordForFork).addCallBack(watermark.incrementAck());
        }
        // Put the record into the record queue of each fork. A put may timeout and return a false, in which
        // case the put is retried until it is successful.
        boolean succeeded = false;
        while (!succeeded) {
          succeeded = fork.get().putRecord(recordForFork);
        }
      }
      branch++;
    }
    // Release the ack taken by the caller for this record.
    if (watermark != null) {
      watermark.ack();
    }
  }
/**
* Check if a schema or data record is being passed to more than one branches.
*/
private static boolean inMultipleBranches(List<Boolean> branches) {
int inBranches = 0;
for (Boolean bool : branches) {
if (bool && ++inBranches > 1) {
break;
}
}
return inBranches > 1;
}
/**
* Get the total number of records written by every {@link Fork}s of this {@link Task}.
*
* @return the number of records written by every {@link Fork}s of this {@link Task}
*/
private long getRecordsWritten() {
long recordsWritten = 0;
for (Optional<Fork> fork : this.forks.keySet()) {
recordsWritten += fork.get().getRecordsWritten();
}
return recordsWritten;
}
/**
* Get the total number of bytes written by every {@link Fork}s of this {@link Task}.
*
* @return the number of bytes written by every {@link Fork}s of this {@link Task}
*/
private long getBytesWritten() {
long bytesWritten = 0;
for (Optional<Fork> fork : this.forks.keySet()) {
bytesWritten += fork.get().getBytesWritten();
}
return bytesWritten;
}
/**
* Get the final state of each construct used by this task and add it to the {@link org.apache.gobblin.runtime.TaskState}.
* @param extractor the {@link org.apache.gobblin.instrumented.extractor.InstrumentedExtractorBase} used by this task.
* @param converter the {@link org.apache.gobblin.converter.Converter} used by this task.
* @param rowChecker the {@link RowLevelPolicyChecker} used by this task.
*/
private void addConstructsFinalStateToTaskState(InstrumentedExtractorBase<?, ?> extractor,
Converter<?, ?, ?, ?> converter, RowLevelPolicyChecker rowChecker) {
ConstructState constructState = new ConstructState();
if (extractor != null) {
constructState.addConstructState(Constructs.EXTRACTOR, new ConstructState(extractor.getFinalState()));
}
if (converter != null) {
constructState.addConstructState(Constructs.CONVERTER, new ConstructState(converter.getFinalState()));
}
if (rowChecker != null) {
constructState.addConstructState(Constructs.ROW_QUALITY_CHECKER, new ConstructState(rowChecker.getFinalState()));
}
int forkIdx = 0;
for (Optional<Fork> fork : this.forks.keySet()) {
constructState.addConstructState(Constructs.FORK_OPERATOR, new ConstructState(fork.get().getFinalState()),
Integer.toString(forkIdx));
forkIdx++;
}
constructState.mergeIntoWorkUnitState(this.taskState);
}
  /**
   * Commit this task by doing the following things:
   * 1. Committing each fork by {@link Fork#commit()}.
   * 2. Update final state of construct in {@link #taskState}.
   * 3. Check whether to publish data in task.
   */
  public void commit() {
    boolean isTaskFailed = false;
    try {
      // Check if all forks succeeded
      List<Integer> failedForkIds = new ArrayList<>();
      for (Optional<Fork> fork : this.forks.keySet()) {
        if (fork.isPresent()) {
          if (fork.get().isSucceeded()) {
            // A fork that ran but fails to commit still counts as failed.
            if (!fork.get().commit()) {
              failedForkIds.add(fork.get().getIndex());
            }
          } else {
            failedForkIds.add(fork.get().getIndex());
          }
        }
      }
      if (failedForkIds.size() == 0) {
        // Set the task state to SUCCESSFUL. The state is not set to COMMITTED
        // as the data publisher will do that upon successful data publishing.
        if (this.taskState.getWorkingState() != WorkUnitState.WorkingState.FAILED) {
          this.taskState.setWorkingState(WorkUnitState.WorkingState.SUCCESSFUL);
        }
      }
      else {
        // Surface the recorded fork throwables (single or aggregated) as the failure cause.
        ForkThrowableHolder holder = Task.getForkThrowableHolder(this.taskState.getTaskBroker());
        LOG.info("Holder for this task {} is {}", this.taskId, holder);
        if (!holder.isEmpty()) {
          if (failedForkIds.size() == 1 && holder.getThrowable(failedForkIds.get(0)).isPresent()) {
            failTask(holder.getThrowable(failedForkIds.get(0)).get());
          } else {
            failTask(holder.getAggregatedException(failedForkIds, this.taskId));
          }
        } else {
          // just in case there are some corner cases where Fork throw an exception but doesn't add into holder
          failTask(new ForkException("Fork branches " + failedForkIds + " failed for task " + this.taskId));
        }
      }
    } catch (Throwable t) {
      failTask(t);
      isTaskFailed = true;
    } finally {
      // Always record construct final state and writer counters, even on failure.
      addConstructsFinalStateToTaskState(extractor, converter, rowChecker);
      this.taskState.setProp(ConfigurationKeys.WRITER_RECORDS_WRITTEN, getRecordsWritten());
      this.taskState.setProp(ConfigurationKeys.WRITER_BYTES_WRITTEN, getBytesWritten());
      this.submitTaskCommittedEvent();
      try {
        closer.close();
      } catch (Throwable t) {
        LOG.error("Failed to close all open resources", t);
        // A close failure fails the task unless close failures are configured to be ignored
        // or the task already failed above.
        if ((!isIgnoreCloseFailures) && (!isTaskFailed)) {
          LOG.error("Setting the task state to failed.");
          failTask(t);
        }
      }
      // Cancel any fork futures that are still outstanding.
      for (Map.Entry<Optional<Fork>, Optional<Future<?>>> forkAndFuture : this.forks.entrySet()) {
        if (forkAndFuture.getKey().isPresent() && forkAndFuture.getValue().isPresent()) {
          try {
            forkAndFuture.getValue().get().cancel(true);
          } catch (Throwable t) {
            LOG.error(String.format("Failed to cancel Fork \"%s\"", forkAndFuture.getKey().get()), t);
          }
        }
      }
      try {
        if (shouldPublishDataInTask()) {
          // If data should be published by the task, publish the data and set the task state to COMMITTED.
          // Task data can only be published after all forks have been closed by closer.close().
          if (this.taskState.getWorkingState() == WorkUnitState.WorkingState.SUCCESSFUL) {
            publishTaskData();
            this.taskState.setWorkingState(WorkUnitState.WorkingState.COMMITTED);
          }
        }
      } catch (IOException ioe) {
        failTask(ioe);
      } finally {
        // Record timing and notify the tracker regardless of the commit outcome.
        long endTime = System.currentTimeMillis();
        this.taskState.setEndTime(endTime);
        this.taskState.setTaskDuration(endTime - startTime);
        this.taskStateTracker.onTaskCommitCompletion(this);
      }
    }
  }
protected void submitTaskCommittedEvent() {
MetricContext taskMetricContext = TaskMetrics.get(this.taskState).getMetricContext();
EventSubmitter eventSubmitter = new EventSubmitter.Builder(taskMetricContext, "gobblin.runtime.task").build();
Map<String, String> metadataMap = Maps.newHashMap();
metadataMap.putAll(this.taskEventMetadataGenerator.getMetadata(this.taskState, TaskEvent.TASK_COMMITTED_EVENT_NAME));
metadataMap.putAll(ImmutableMap
.of(TaskEvent.METADATA_TASK_ID, this.taskId, TaskEvent.METADATA_TASK_ATTEMPT_ID,
this.taskState.getTaskAttemptId().or("")));
eventSubmitter.submit(TaskEvent.TASK_COMMITTED_EVENT_NAME, metadataMap);
}
/**
* @return true if the current {@link Task} is safe to have duplicate attempts; false, otherwise.
*/
public boolean isSpeculativeExecutionSafe() {
if (this.extractor instanceof SpeculativeAttemptAwareConstruct) {
if (!((SpeculativeAttemptAwareConstruct) this.extractor).isSpeculativeAttemptSafe()) {
return false;
}
}
if (this.converter instanceof SpeculativeAttemptAwareConstruct) {
if (!((SpeculativeAttemptAwareConstruct) this.converter).isSpeculativeAttemptSafe()) {
return false;
}
}
for (Optional<Fork> fork : this.forks.keySet()) {
if (fork.isPresent() && !fork.get().isSpeculativeExecutionSafe()) {
return false;
}
}
return true;
}
  /** Record the {@link Future} of this task's execution so {@link #cancel()} can act on it. */
  public synchronized void setTaskFuture(Future<?> taskFuture) {
    this.taskFuture = taskFuture;
  }
  // Test hook: whether a task future is currently set (cleared when run() completes).
  @VisibleForTesting
  boolean hasTaskFuture() {
    return this.taskFuture != null;
  }
/**
* return true if the task is successfully cancelled.
* @return
*/
public synchronized boolean cancel() {
LOG.info("Calling task cancel with interrupt flag: {}", this.shouldInterruptTaskOnCancel);
if (this.taskFuture != null && this.taskFuture.cancel(this.shouldInterruptTaskOnCancel)) {
// Make sure to mark running task as done
this.taskStateTracker.onTaskCommitCompletion(this);
return true;
} else {
return false;
}
}
}
| 1,372 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/ForkThrowableHolderFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime;
import org.apache.gobblin.broker.EmptyKey;
import org.apache.gobblin.broker.ResourceInstance;
import org.apache.gobblin.broker.gobblin_scopes.GobblinScopeTypes;
import org.apache.gobblin.broker.iface.ConfigView;
import org.apache.gobblin.broker.iface.NotConfiguredException;
import org.apache.gobblin.broker.iface.ScopedConfigView;
import org.apache.gobblin.broker.iface.SharedResourceFactory;
import org.apache.gobblin.broker.iface.SharedResourceFactoryResponse;
import org.apache.gobblin.broker.iface.SharedResourcesBroker;
/**
 * A {@link SharedResourceFactory} that provisions a new {@link ForkThrowableHolder}
 * per broker scope.
 */
public class ForkThrowableHolderFactory implements SharedResourceFactory<ForkThrowableHolder, EmptyKey, GobblinScopeTypes> {

  @Override
  public String getName() {
    // The fully-qualified class name identifies this factory to the broker.
    return ForkThrowableHolderFactory.class.getName();
  }

  @Override
  public SharedResourceFactoryResponse<ForkThrowableHolder> createResource(
      SharedResourcesBroker<GobblinScopeTypes> broker, ScopedConfigView<GobblinScopeTypes, EmptyKey> config)
      throws NotConfiguredException {
    ForkThrowableHolder holder = new ForkThrowableHolder();
    return new ResourceInstance<>(holder);
  }

  @Override
  public GobblinScopeTypes getAutoScope(SharedResourcesBroker<GobblinScopeTypes> broker,
      ConfigView<GobblinScopeTypes, EmptyKey> config) {
    // Default to the scope of the broker that requested the resource.
    return broker.selfScope().getType();
  }
}
| 1,373 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/RateBasedLimiter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime;
import java.util.concurrent.TimeUnit;
/**
 * {@inheritDoc}
 *
 * <p>Backwards-compatibility shim: all behavior is delegated to the superclass.</p>
 *
 * @deprecated This class has been moved to {@link org.apache.gobblin.util.limiter.RateBasedLimiter}.
 */
@Deprecated
public class RateBasedLimiter extends org.apache.gobblin.util.limiter.RateBasedLimiter {

  /** @param rateLimit rate limit value, passed through to the superclass */
  public RateBasedLimiter(double rateLimit) {
    super(rateLimit);
  }

  /**
   * @param rateLimit rate limit value, passed through to the superclass
   * @param timeUnit time unit for the rate limit, passed through to the superclass
   */
  public RateBasedLimiter(double rateLimit, TimeUnit timeUnit) {
    super(rateLimit, timeUnit);
  }
}
| 1,374 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/NonRefillableLimiter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime;
/**
 * {@inheritDoc}
 *
 * <p>Empty backwards-compatibility shim kept so references to the old package keep working.</p>
 *
 * @deprecated This class has been moved to {@link org.apache.gobblin.util.limiter.NonRefillableLimiter}.
 */
@Deprecated
public abstract class NonRefillableLimiter extends org.apache.gobblin.util.limiter.NonRefillableLimiter {
}
| 1,375 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/StateStoreBasedWatermarkStorageCli.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime;
import java.io.IOException;
import java.util.Arrays;
import java.util.Comparator;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import com.google.common.base.Throwables;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.runtime.cli.CliApplication;
/**
* A Cli to inspect streaming watermarks.
*/
@Alias(value = "watermarks", description = "Inspect streaming watermarks")
@Slf4j
public class StateStoreBasedWatermarkStorageCli implements CliApplication {
private static final Option HELP = Option.builder("h").longOpt("help").build();
private static final Option ZK = Option.builder("z").longOpt("zk")
.desc("Zk connect string").hasArg().build();
private static final Option JOB_NAME = Option.builder("j").longOpt("jobName")
.desc("The Job name").hasArg().build();
private static final Option ROOT_DIR = Option.builder("r").longOpt("rootDir")
.desc("The State Store Root Directory").hasArg().build();
private static final Option WATCH = Option.builder("w").longOpt("watch")
.desc("Watch the watermarks").build();
@Override
public void run(String[] args) {
Options options = new Options();
options.addOption(HELP);
options.addOption(ZK);
options.addOption(JOB_NAME);
options.addOption(ROOT_DIR);
options.addOption(WATCH);
CommandLine cli;
try {
CommandLineParser parser = new DefaultParser();
cli = parser.parse(options, Arrays.copyOfRange(args, 1, args.length));
} catch (ParseException pe) {
System.out.println( "Command line parse exception: " + pe.getMessage() );
return;
}
if (cli.hasOption(HELP.getOpt())) {
printUsage(options);
return;
}
TaskState taskState = new TaskState();
String jobName;
if (!cli.hasOption(JOB_NAME.getOpt())) {
log.error("Need Job Name to be specified --", JOB_NAME.getLongOpt());
throw new RuntimeException("Need Job Name to be specified");
} else {
jobName = cli.getOptionValue(JOB_NAME.getOpt());
log.info("Using job name: {}", jobName);
}
taskState.setProp(ConfigurationKeys.JOB_NAME_KEY, jobName);
String zkAddress = "locahost:2181";
if (cli.hasOption(ZK.getOpt())) {
zkAddress = cli.getOptionValue(ZK.getOpt());
}
log.info("Using zk address : {}", zkAddress);
taskState.setProp(StateStoreBasedWatermarkStorage.WATERMARK_STORAGE_TYPE_KEY, "zk");
taskState.setProp("state.store.zk.connectString", zkAddress);
if (cli.hasOption(ROOT_DIR.getOpt())) {
String rootDir = cli.getOptionValue(ROOT_DIR.getOpt());
taskState.setProp(StateStoreBasedWatermarkStorage.WATERMARK_STORAGE_CONFIG_PREFIX
+ ConfigurationKeys.STATE_STORE_ROOT_DIR_KEY, rootDir);
log.info("Setting root dir to {}", rootDir);
} else {
log.error("Need root directory specified");
printUsage(options);
return;
}
StateStoreBasedWatermarkStorage stateStoreBasedWatermarkStorage = new StateStoreBasedWatermarkStorage(taskState);
final AtomicBoolean stop = new AtomicBoolean(true);
if (cli.hasOption(WATCH.getOpt())) {
stop.set(false);
}
try {
if (!stop.get()) {
Runtime.getRuntime().addShutdownHook(new Thread() {
public void run() {
stop.set(true);
}
});
}
do {
boolean foundWatermark = false;
try {
for (CheckpointableWatermarkState wmState : stateStoreBasedWatermarkStorage.getAllCommittedWatermarks()) {
foundWatermark = true;
System.out.println(wmState.getProperties());
}
} catch (IOException ie) {
Throwables.propagate(ie);
}
if (!foundWatermark) {
System.out.println("No watermarks found.");
}
if (!stop.get()) {
Thread.sleep(1000);
}
} while (!stop.get());
} catch (Exception e) {
Throwables.propagate(e);
}
}
private void printUsage(Options options) {
HelpFormatter formatter = new HelpFormatter();
formatter.setOptionComparator(new Comparator<Option>() {
@Override
public int compare(Option o1, Option o2) {
if (o1.isRequired() && !o2.isRequired()) {
return -1;
}
if (!o1.isRequired() && o2.isRequired()) {
return 1;
}
return o1.getOpt().compareTo(o2.getOpt());
}
});
String usage = "gobblin watermarks ";
formatter.printHelp(usage, options);
}
}
| 1,376 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/BaseLimiterType.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime;
/**
 * The built-in limiter flavors, each carrying the short name used in configuration.
 *
 * @deprecated This class has been moved to {@link org.apache.gobblin.util.limiter.BaseLimiterType}.
 */
@Deprecated
public enum BaseLimiterType {
  /** For {@link RateBasedLimiter}. */
  RATE_BASED("rate"),
  /** For {@link TimeBasedLimiter}. */
  TIME_BASED("time"),
  /** For {@link CountBasedLimiter}. */
  COUNT_BASED("count"),
  /** For {@link PoolBasedLimiter}. */
  POOL_BASED("pool");

  // Short configuration name, also used as the string form of the constant.
  private final String name;

  BaseLimiterType(String name) {
    this.name = name;
  }

  @Override
  public String toString() {
    return this.name;
  }

  /**
   * Look up the {@link BaseLimiterType} whose short name matches the argument, ignoring case.
   *
   * @param name the short limiter name, e.g. {@code "rate"}
   * @return the matching {@link BaseLimiterType}
   * @throws IllegalArgumentException if no limiter type carries the given name
   */
  public static BaseLimiterType forName(String name) {
    for (BaseLimiterType candidate : values()) {
      if (name.equalsIgnoreCase(candidate.name)) {
        return candidate;
      }
    }
    throw new IllegalArgumentException("No Limiter implementation available for name: " + name);
  }
}
| 1,377 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/SourceDecorator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime;
import com.google.common.eventbus.EventBus;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import org.apache.gobblin.source.InfiniteSource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Throwables;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.source.Source;
import org.apache.gobblin.source.extractor.Extractor;
import org.apache.gobblin.source.workunit.BasicWorkUnitStream;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.util.Decorator;
import org.apache.gobblin.source.WorkUnitStreamSource;
import org.apache.gobblin.source.workunit.WorkUnitStream;
/**
 * A decorator class for {@link org.apache.gobblin.source.Source} that catches any
 * possible exceptions/errors thrown by the {@link org.apache.gobblin.source.Source}.
 *
 * @author Yinan Li
 */
public class SourceDecorator<S, D> implements WorkUnitStreamSource<S, D>, Decorator {
  private static final Logger LOG = LoggerFactory.getLogger(SourceDecorator.class);

  private final Source<S, D> source;
  private final String jobId;
  private final Logger logger;

  public SourceDecorator(Source<S, D> source, String jobId, Logger logger) {
    this.source = source;
    this.jobId = jobId;
    // Fall back to this class's own logger when none is supplied.
    this.logger = (logger == null) ? LOG : logger;
  }

  /**
   * Delegates to the wrapped source; any throwable is logged and turned into a {@code null} return.
   */
  @Override
  public List<WorkUnit> getWorkunits(SourceState state) {
    try {
      List<WorkUnit> workUnits = this.source.getWorkunits(state);
      // Normalize a null result from the source into an empty list.
      return (workUnits == null) ? Collections.<WorkUnit>emptyList() : workUnits;
    } catch (Throwable t) {
      this.logger.error("Failed to get work units for job " + this.jobId, t);
      // Return null in case of errors
      return null;
    }
  }

  /**
   * Returns the {@link EventBus} of the decorated source when it is an {@link InfiniteSource}
   * (possibly through nested decorators); {@code null} otherwise.
   */
  public EventBus getEventBus() {
    Object decorated = getDecoratedObject();
    if (decorated instanceof InfiniteSource) {
      return ((InfiniteSource) decorated).getEventBus();
    }
    if (decorated instanceof SourceDecorator) {
      return ((SourceDecorator) decorated).getEventBus();
    }
    return null;
  }

  @Override
  public WorkUnitStream getWorkunitStream(SourceState state) {
    try {
      if (this.source instanceof WorkUnitStreamSource) {
        // The source natively supports streams; pass the call straight through.
        return ((WorkUnitStreamSource) this.source).getWorkunitStream(state);
      }
      // Otherwise adapt the list-based API into a stream.
      List<WorkUnit> workUnits = this.source.getWorkunits(state);
      if (workUnits == null) {
        workUnits = Collections.emptyList();
      }
      return new BasicWorkUnitStream.Builder(workUnits).build();
    } catch (Throwable t) {
      this.logger.error("Failed to get work units for job " + this.jobId, t);
      // Return null in case of errors
      return null;
    }
  }

  @Override
  public Extractor<S, D> getExtractor(WorkUnitState state) throws IOException {
    try {
      return this.source.getExtractor(state);
    } catch (Throwable t) {
      this.logger.error("Failed to get extractor for job " + this.jobId, t);
      Throwables.propagate(t);
      // Unreachable: propagate above always throws.
      return null;
    }
  }

  @Override
  public void shutdown(SourceState state) {
    try {
      this.source.shutdown(state);
    } catch (Throwable t) {
      this.logger.error("Failed to shutdown source for job " + this.jobId, t);
    }
  }

  @Override
  public Object getDecoratedObject() {
    return this.source;
  }

  @Override
  public boolean isEarlyStopped() {
    return this.source.isEarlyStopped();
  }
}
| 1,378 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/JobLauncherFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime;
import java.util.List;
import java.util.Properties;
import javax.annotation.Nonnull;
import lombok.extern.slf4j.Slf4j;
import com.google.common.base.Enums;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableList;
import org.apache.gobblin.broker.gobblin_scopes.GobblinScopeTypes;
import org.apache.gobblin.broker.iface.SharedResourcesBroker;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.metrics.Tag;
import org.apache.gobblin.runtime.local.LocalJobLauncher;
import org.apache.gobblin.runtime.mapreduce.MRJobLauncher;
import org.apache.gobblin.util.JobConfigurationUtils;
/**
 * A factory class for building {@link JobLauncher} instances.
 *
 * @author Yinan Li
 */
@Slf4j
public class JobLauncherFactory {

  /**
   * Supported types of {@link JobLauncher}.
   */
  public enum JobLauncherType {
    LOCAL,
    MAPREDUCE,
    YARN
  }

  /**
   * Create a new {@link JobLauncher}. This method never returns {@code null}.
   *
   * @param sysProps system configuration properties
   * @param jobProps job configuration properties
   * @return newly created {@link JobLauncher}
   */
  public static @Nonnull JobLauncher newJobLauncher(Properties sysProps, Properties jobProps) throws Exception {
    return newJobLauncher(sysProps, jobProps, null);
  }

  /**
   * Create a new {@link JobLauncher}. This method never returns {@code null}.
   *
   * @param sysProps system configuration properties
   * @param jobProps job configuration properties
   * @param instanceBroker shared-resources broker for the instance scope
   * @return newly created {@link JobLauncher}
   */
  public static @Nonnull JobLauncher newJobLauncher(Properties sysProps, Properties jobProps,
      SharedResourcesBroker<GobblinScopeTypes> instanceBroker) throws Exception {
    return newJobLauncher(sysProps, jobProps, instanceBroker, ImmutableList.of());
  }

  /**
   * Create a new {@link JobLauncher}. This method never returns {@code null}.
   *
   * <p>The launcher type is read from the system property
   * {@link ConfigurationKeys#JOB_LAUNCHER_TYPE_KEY}, defaulting to LOCAL.</p>
   *
   * @param sysProps system configuration properties
   * @param jobProps job configuration properties
   * @param instanceBroker shared-resources broker for the instance scope
   * @param metadataTags additional metadata to be added to timing events
   * @return newly created {@link JobLauncher}
   */
  public static @Nonnull JobLauncher newJobLauncher(Properties sysProps, Properties jobProps,
      SharedResourcesBroker<GobblinScopeTypes> instanceBroker, List<? extends Tag<?>> metadataTags) throws Exception {
    String launcherTypeValue =
        sysProps.getProperty(ConfigurationKeys.JOB_LAUNCHER_TYPE_KEY, JobLauncherType.LOCAL.name());
    return newJobLauncher(sysProps, jobProps, launcherTypeValue, instanceBroker, metadataTags);
  }

  /**
   * Creates a new instance for a JobLauncher with a given type.
   *
   * @param sysProps the system/environment properties
   * @param jobProps the job properties
   * @param launcherTypeValue the type of the launcher; either a {@link JobLauncherType} value or
   *        the name of a class that extends {@link AbstractJobLauncher} and has a constructor
   *        that takes a single Properties parameter
   * @param instanceBroker shared-resources broker for the instance scope
   * @return the JobLauncher instance
   * @throws RuntimeException if the instantiation fails
   */
  public static JobLauncher newJobLauncher(Properties sysProps, Properties jobProps,
      String launcherTypeValue, SharedResourcesBroker<GobblinScopeTypes> instanceBroker) {
    return newJobLauncher(sysProps, jobProps, launcherTypeValue, instanceBroker, ImmutableList.of());
  }

  /**
   * Creates a new instance for a JobLauncher with a given type.
   *
   * @param sysProps the system/environment properties
   * @param jobProps the job properties
   * @param launcherTypeValue the type of the launcher; either a {@link JobLauncherType} value or
   *        the name of a class that extends {@link AbstractJobLauncher} and has a constructor
   *        that takes a single Properties parameter
   * @param instanceBroker shared-resources broker for the instance scope
   * @param metadataTags additional metadata to be added to timing events
   * @return the JobLauncher instance
   * @throws RuntimeException if the instantiation fails
   */
  public static JobLauncher newJobLauncher(Properties sysProps, Properties jobProps,
      String launcherTypeValue, SharedResourcesBroker<GobblinScopeTypes> instanceBroker, List<? extends Tag<?>> metadataTags) {
    Optional<JobLauncherType> launcherType = Enums.getIfPresent(JobLauncherType.class, launcherTypeValue);
    try {
      if (launcherType.isPresent()) {
        JobLauncherType type = launcherType.get();
        if (type == JobLauncherType.LOCAL) {
          return new LocalJobLauncher(merged(sysProps, jobProps), instanceBroker, metadataTags);
        }
        if (type == JobLauncherType.MAPREDUCE) {
          return new MRJobLauncher(merged(sysProps, jobProps), instanceBroker, metadataTags);
        }
        // No in-process launcher exists for the remaining enum values (e.g. YARN). This throw is
        // intentionally inside the try so it surfaces wrapped, matching the original contract.
        throw new RuntimeException("Unsupported job launcher type: " + type.name());
      }
      // Not an enum constant: interpret the value as the fully-qualified name of an
      // AbstractJobLauncher subclass exposing a single-Properties constructor.
      @SuppressWarnings("unchecked")
      Class<? extends AbstractJobLauncher> launcherClass =
          (Class<? extends AbstractJobLauncher>) Class.forName(launcherTypeValue);
      return launcherClass.getDeclaredConstructor(Properties.class).newInstance(merged(sysProps, jobProps));
    } catch (Exception e) {
      throw new RuntimeException("Failed to create job launcher: " + e, e);
    }
  }

  /** Combines system and job properties via {@link JobConfigurationUtils#combineSysAndJobProperties}. */
  private static Properties merged(Properties sysProps, Properties jobProps) {
    return JobConfigurationUtils.combineSysAndJobProperties(sysProps, jobProps);
  }
}
| 1,379 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/TaskInstantiationException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime;
/**
 * An exception class to capture failures in Task instantiation.
 */
public class TaskInstantiationException extends RuntimeException {

  /**
   * @param s a message describing the instantiation failure
   */
  public TaskInstantiationException(String s) {
    super(s);
  }

  /**
   * Variant that preserves the underlying cause of the instantiation failure, so the
   * original stack trace is not lost when the failure is rethrown.
   *
   * @param s a message describing the instantiation failure
   * @param cause the underlying cause
   */
  public TaskInstantiationException(String s, Throwable cause) {
    super(s, cause);
  }
}
| 1,380 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/JobContext.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime;
import java.io.Closeable;
import java.io.IOException;
import java.net.URI;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Function;
import com.google.common.base.MoreObjects;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Iterables;
import com.google.common.collect.Maps;
import com.google.common.eventbus.Subscribe;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.typesafe.config.Config;
import javax.annotation.Nullable;
import lombok.AccessLevel;
import lombok.Getter;
import org.apache.gobblin.broker.gobblin_scopes.GobblinScopeTypes;
import org.apache.gobblin.broker.gobblin_scopes.JobScopeInstance;
import org.apache.gobblin.broker.iface.SharedResourcesBroker;
import org.apache.gobblin.commit.CommitSequenceStore;
import org.apache.gobblin.commit.DeliverySemantics;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.metastore.DatasetStateStore;
import org.apache.gobblin.metastore.JobHistoryStore;
import org.apache.gobblin.metastore.MetaStoreModule;
import org.apache.gobblin.metrics.GobblinMetrics;
import org.apache.gobblin.publisher.DataPublisher;
import org.apache.gobblin.runtime.JobState.DatasetState;
import org.apache.gobblin.runtime.commit.FsCommitSequenceStore;
import org.apache.gobblin.runtime.troubleshooter.IssueRepository;
import org.apache.gobblin.runtime.util.JobMetrics;
import org.apache.gobblin.source.Source;
import org.apache.gobblin.source.extractor.JobCommitPolicy;
import org.apache.gobblin.util.ClassAliasResolver;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.Either;
import org.apache.gobblin.util.ExecutorsUtils;
import org.apache.gobblin.util.HadoopUtils;
import org.apache.gobblin.util.Id;
import org.apache.gobblin.util.executors.IteratorExecutor;
/**
 * A class carrying context information of a Gobblin job.
 *
 * @author Yinan Li
 */
public class JobContext implements Closeable {

  private static final Logger LOG = LoggerFactory.getLogger(JobContext.class);

  private static final String TASK_STAGING_DIR_NAME = "task-staging";
  private static final String TASK_OUTPUT_DIR_NAME = "task-output";

  // Job identity: name from job.name, the job ID, and the numeric sequence parsed from the ID.
  private final String jobName;
  private final String jobId;
  private final String jobSequence;
  private final JobState jobState;
  @Getter(AccessLevel.PACKAGE)
  private final JobCommitPolicy jobCommitPolicy;
  // Present only when Gobblin metrics are enabled for the job.
  private final Optional<JobMetrics> jobMetricsOptional;
  // The job's Source, wrapped in a SourceDecorator (see createSource).
  private final Source<?, ?> source;

  // State store for persisting job states
  @Getter(AccessLevel.PACKAGE)
  private final DatasetStateStore datasetStateStore;

  // Store for runtime job execution information
  private final Optional<JobHistoryStore> jobHistoryStoreOptional;

  // Should commits be done in parallel
  private final boolean parallelizeCommit;
  // Number of commit threads; 1 when parallel commit is disabled.
  private final int parallelCommits;

  // Were WRITER_STAGING_DIR and WRITER_OUTPUT_DIR provided in the job file
  @Getter
  protected final Boolean stagingDirProvided;
  @Getter
  protected final Boolean outputDirProvided;

  @Getter
  private final DeliverySemantics semantics;

  // Present only for EXACTLY_ONCE delivery semantics (see createCommitSequenceStore).
  @Getter
  private final Optional<CommitSequenceStore> commitSequenceStore;

  private final Logger logger;

  // Job-scoped broker derived from the instance broker in the constructor.
  @Getter
  private final SharedResourcesBroker<GobblinScopeTypes> jobBroker;

  // A map from dataset URNs to DatasetStates (optional and maybe absent if not populated)
  private Optional<Map<String, JobState.DatasetState>> datasetStatesByUrns = Optional.absent();

  @Getter
  private IssueRepository issueRepository;
/**
 * Builds the context for a single Gobblin job run: resolves the job name/ID, derives a
 * job-scoped broker, and creates the dataset state store, optional job-history store,
 * metrics, commit-sequence store, and the decorated source.
 *
 * @param jobProps job configuration properties; must contain {@code job.name}
 * @param logger logger for this context (assigned near the end of the constructor; see note below)
 * @param instanceBroker instance-level shared-resources broker to sub-scope for this job
 * @param issueRepository repository collecting troubleshooter issues for this job
 * @throws Exception if any collaborating store or the source cannot be created
 */
public JobContext(Properties jobProps, Logger logger, SharedResourcesBroker<GobblinScopeTypes> instanceBroker,
    IssueRepository issueRepository)
    throws Exception {
  Preconditions.checkArgument(jobProps.containsKey(ConfigurationKeys.JOB_NAME_KEY),
      "A job must have a job name specified by job.name");
  this.jobName = JobState.getJobNameFromProps(jobProps);
  this.jobId = JobState.getJobIdFromProps(jobProps);
  jobProps.setProperty(ConfigurationKeys.JOB_ID_KEY, this.jobId); // in case not yet directly defined as such
  this.jobSequence = Long.toString(Id.Job.parse(this.jobId).getSequence());
  // Narrow the instance broker into a job-scoped broker keyed by this job's name/ID.
  this.jobBroker = instanceBroker.newSubscopedBuilder(new JobScopeInstance(this.jobName, this.jobId))
      .withOverridingConfig(ConfigUtils.propertiesToConfig(jobProps)).build();
  this.jobCommitPolicy = JobCommitPolicy.getCommitPolicy(jobProps);
  this.datasetStateStore = createStateStore(ConfigUtils.propertiesToConfig(jobProps));
  this.jobHistoryStoreOptional = createJobHistoryStore(jobProps);
  this.issueRepository = issueRepository;
  State jobPropsState = new State();
  jobPropsState.addAll(jobProps);
  this.jobState = new JobState(jobPropsState, this.jobName, this.jobId);
  this.jobState.setBroker(this.jobBroker);
  this.jobState.setWorkUnitAndDatasetStateFunctional(new CombinedWorkUnitAndDatasetStateGenerator(this.datasetStateStore, this.jobName));
  // Capture whether the user supplied explicit staging/output dirs before they are rewritten
  // (setTaskStagingAndOutputDirs appends job name/ID to these paths).
  stagingDirProvided = this.jobState.contains(ConfigurationKeys.WRITER_STAGING_DIR);
  outputDirProvided = this.jobState.contains(ConfigurationKeys.WRITER_OUTPUT_DIR);
  setTaskStagingAndOutputDirs();
  if (GobblinMetrics.isEnabled(jobProps)) {
    this.jobMetricsOptional = Optional.of(JobMetrics.get(this.jobState));
    this.jobState.setProp(Instrumented.METRIC_CONTEXT_NAME_KEY, this.jobMetricsOptional.get().getName());
  } else {
    this.jobMetricsOptional = Optional.absent();
  }
  this.semantics = DeliverySemantics.parse(this.jobState);
  this.commitSequenceStore = createCommitSequenceStore();
  // NOTE(review): createSource reads this.logger, which is still null at this point because the
  // assignment below has not happened yet; SourceDecorator then falls back to its default logger.
  this.source = createSource(jobProps);
  this.logger = logger;
  this.parallelizeCommit = this.jobState.getPropAsBoolean(ConfigurationKeys.PARALLELIZE_DATASET_COMMIT,
      ConfigurationKeys.DEFAULT_PARALLELIZE_DATASET_COMMIT);
  // Use a single commit thread unless parallel dataset commit is enabled.
  this.parallelCommits = this.parallelizeCommit ? this.jobState
      .getPropAsInt(ConfigurationKeys.DATASET_COMMIT_THREADS, ConfigurationKeys.DEFAULT_DATASET_COMMIT_THREADS) : 1;
}
/**
 * Creates the {@link DatasetStateStore} for this job.
 *
 * <p>The store is enabled unless {@code state.store.enabled} is explicitly {@code false}, in
 * which case a no-op store type is used. The concrete type comes from the dataset-level key,
 * falling back to the generic key and then the default.</p>
 *
 * @param jobConfig the job configuration
 * @return the resolved {@link DatasetStateStore}
 * @throws IOException if the store factory cannot be resolved or instantiated
 */
protected DatasetStateStore createStateStore(Config jobConfig)
    throws IOException {
  boolean stateStoreEnabled =
      !jobConfig.hasPath(ConfigurationKeys.STATE_STORE_ENABLED)
          || jobConfig.getBoolean(ConfigurationKeys.STATE_STORE_ENABLED);
  String stateStoreType = stateStoreEnabled
      ? ConfigUtils.getString(jobConfig, ConfigurationKeys.DATASET_STATE_STORE_TYPE_KEY,
          ConfigUtils.getString(jobConfig, ConfigurationKeys.STATE_STORE_TYPE_KEY,
              ConfigurationKeys.DEFAULT_STATE_STORE_TYPE))
      : ConfigurationKeys.STATE_STORE_TYPE_NOOP;
  ClassAliasResolver<DatasetStateStore.Factory> resolver =
      new ClassAliasResolver<>(DatasetStateStore.Factory.class);
  try {
    return resolver.resolveClass(stateStoreType).newInstance().createStateStore(jobConfig);
  } catch (RuntimeException e) {
    // Let unchecked failures propagate unchanged; wrap only checked ones below.
    throw e;
  } catch (Exception e) {
    throw new IOException(e);
  }
}
/**
 * Creates the optional {@link JobHistoryStore}, enabled via
 * {@link ConfigurationKeys#JOB_HISTORY_STORE_ENABLED_KEY} (default: disabled).
 *
 * @param jobProps the job configuration properties
 * @return the store wrapped in {@link Optional}, or {@link Optional#absent()} when disabled
 */
protected Optional<JobHistoryStore> createJobHistoryStore(Properties jobProps) {
  boolean jobHistoryStoreEnabled = Boolean.parseBoolean(
      jobProps.getProperty(ConfigurationKeys.JOB_HISTORY_STORE_ENABLED_KEY, Boolean.FALSE.toString()));
  if (!jobHistoryStoreEnabled) {
    return Optional.absent();
  }
  // The store implementation is bound through Guice by MetaStoreModule.
  Injector injector = Guice.createInjector(new MetaStoreModule(jobProps));
  return Optional.of(injector.getInstance(JobHistoryStore.class));
}
/**
 * Creates the {@link CommitSequenceStore} used for exactly-once delivery, or
 * {@link Optional#absent()} when the job does not use EXACTLY_ONCE semantics.
 *
 * @return an optional commit-sequence store
 * @throws IOException if the backing {@link FileSystem} cannot be obtained
 */
protected Optional<CommitSequenceStore> createCommitSequenceStore()
    throws IOException {
  if (this.semantics != DeliverySemantics.EXACTLY_ONCE) {
    return Optional.<CommitSequenceStore>absent();
  }
  // EXACTLY_ONCE requires an explicitly configured commit-sequence store directory.
  Preconditions.checkState(this.jobState.contains(FsCommitSequenceStore.GOBBLIN_RUNTIME_COMMIT_SEQUENCE_STORE_DIR));
  // NOTE(review): the FileSystem is closed by try-with-resources immediately after the store is
  // constructed; presumably FsCommitSequenceStore does not keep using the handle afterwards
  // (or relies on Hadoop's FileSystem cache) -- confirm before changing this.
  try (FileSystem fs = FileSystem.get(URI.create(this.jobState
      .getProp(FsCommitSequenceStore.GOBBLIN_RUNTIME_COMMIT_SEQUENCE_STORE_FS_URI, ConfigurationKeys.LOCAL_FS_URI)),
      HadoopUtils.getConfFromState(this.jobState))) {
    return Optional.<CommitSequenceStore>of(new FsCommitSequenceStore(fs,
        new Path(this.jobState.getProp(FsCommitSequenceStore.GOBBLIN_RUNTIME_COMMIT_SEQUENCE_STORE_DIR))));
  }
}
  /**
   * Instantiate the {@link Source} class named by {@link ConfigurationKeys#SOURCE_CLASS_KEY} and wrap
   * it in a {@link SourceDecorator} tagged with this job's ID and logger.
   *
   * @param jobProps job configuration properties; must contain {@link ConfigurationKeys#SOURCE_CLASS_KEY}
   * @return the decorated source instance
   * @throws ClassNotFoundException if the configured source class cannot be found
   * @throws InstantiationException if the source class cannot be instantiated
   * @throws IllegalAccessException if the source class or its no-arg constructor is inaccessible
   */
  protected Source<?, ?> createSource(Properties jobProps)
      throws ClassNotFoundException, InstantiationException, IllegalAccessException {
    // NOTE(review): Class.newInstance() is deprecated, but replacing it with
    // getDeclaredConstructor().newInstance() would change the declared checked exceptions, so it stays.
    return new SourceDecorator<>(
        Source.class.cast(Class.forName(jobProps.getProperty(ConfigurationKeys.SOURCE_CLASS_KEY)).newInstance()),
        this.jobId, logger);
  }
  /**
   * Get the job name.
   *
   * @return the job name this context was created for
   */
  public String getJobName() {
    return this.jobName;
  }
  /**
   * Get the job ID.
   *
   * @return the unique ID of this job run
   */
  public String getJobId() {
    return this.jobId;
  }
  /**
   * Get the job key.
   *
   * @return the job key (backed by the job sequence identifier)
   */
  public String getJobKey() {
    return this.jobSequence;
  }
  /**
   * Get a {@link JobState} object representing the job state.
   *
   * @return the live {@link JobState} of this job (mutable; not a snapshot)
   */
  public JobState getJobState() {
    return this.jobState;
  }
  /**
   * Get an {@link Optional} of {@link JobMetrics}.
   *
   * @return an {@link Optional} of {@link JobMetrics}; absent when metrics are not enabled for this job
   */
  public Optional<JobMetrics> getJobMetricsOptional() {
    return this.jobMetricsOptional;
  }
  /**
   * Get an instance of the {@link Source} class specified in the job configuration.
   *
   * @return the (decorated) {@link Source} instance created for this job; package-private on purpose
   */
  Source<?, ?> getSource() {
    return this.source;
  }
/**
* Appends two paths
* @param dir1
* @param dir2
* @return appended path
*/
protected static Path getJobDir(String dir1, String dir2) {
return new Path(dir1, dir2);
}
protected void setTaskStagingAndOutputDirs() {
// Add jobId to writer staging dir
if (this.jobState.contains(ConfigurationKeys.WRITER_STAGING_DIR)) {
String writerStagingDirWithJobId =
new Path(getJobDir(this.jobState.getProp(ConfigurationKeys.WRITER_STAGING_DIR), this.getJobName()),
this.jobId).toString();
this.jobState.setProp(ConfigurationKeys.WRITER_STAGING_DIR, writerStagingDirWithJobId);
}
// Add jobId to writer output dir
if (this.jobState.contains(ConfigurationKeys.WRITER_OUTPUT_DIR)) {
String writerOutputDirWithJobId =
new Path(getJobDir(this.jobState.getProp(ConfigurationKeys.WRITER_OUTPUT_DIR), this.getJobName()), this.jobId)
.toString();
this.jobState.setProp(ConfigurationKeys.WRITER_OUTPUT_DIR, writerOutputDirWithJobId);
}
// Add jobId to task data root dir
if (this.jobState.contains(ConfigurationKeys.TASK_DATA_ROOT_DIR_KEY)) {
String taskDataRootDirWithJobId =
new Path(getJobDir(this.jobState.getProp(ConfigurationKeys.TASK_DATA_ROOT_DIR_KEY), this.getJobName()),
this.jobId).toString();
this.jobState.setProp(ConfigurationKeys.TASK_DATA_ROOT_DIR_KEY, taskDataRootDirWithJobId);
setTaskStagingDir();
setTaskOutputDir();
} else {
LOG.warn("Property " + ConfigurationKeys.TASK_DATA_ROOT_DIR_KEY + " is missing.");
}
}
/**
* If {@link ConfigurationKeys#WRITER_STAGING_DIR} (which is deprecated) is specified, use its value.
*
* Otherwise, if {@link ConfigurationKeys#TASK_DATA_ROOT_DIR_KEY} is specified, use its value
* plus {@link #TASK_STAGING_DIR_NAME}.
*/
private void setTaskStagingDir() {
if (this.jobState.contains(ConfigurationKeys.WRITER_STAGING_DIR)) {
LOG.warn(String.format("Property %s is deprecated. No need to use it if %s is specified.",
ConfigurationKeys.WRITER_STAGING_DIR, ConfigurationKeys.TASK_DATA_ROOT_DIR_KEY));
} else {
String workingDir = this.jobState.getProp(ConfigurationKeys.TASK_DATA_ROOT_DIR_KEY);
this.jobState
.setProp(ConfigurationKeys.WRITER_STAGING_DIR, new Path(workingDir, TASK_STAGING_DIR_NAME).toString());
LOG.info(String.format("Writer Staging Directory is set to %s.",
this.jobState.getProp(ConfigurationKeys.WRITER_STAGING_DIR)));
}
}
/**
* If {@link ConfigurationKeys#WRITER_OUTPUT_DIR} (which is deprecated) is specified, use its value.
*
* Otherwise, if {@link ConfigurationKeys#TASK_DATA_ROOT_DIR_KEY} is specified, use its value
* plus {@link #TASK_OUTPUT_DIR_NAME}.
*/
private void setTaskOutputDir() {
if (this.jobState.contains(ConfigurationKeys.WRITER_OUTPUT_DIR)) {
LOG.warn(String.format("Property %s is deprecated. No need to use it if %s is specified.",
ConfigurationKeys.WRITER_OUTPUT_DIR, ConfigurationKeys.TASK_DATA_ROOT_DIR_KEY));
} else {
String workingDir = this.jobState.getProp(ConfigurationKeys.TASK_DATA_ROOT_DIR_KEY);
this.jobState.setProp(ConfigurationKeys.WRITER_OUTPUT_DIR, new Path(workingDir, TASK_OUTPUT_DIR_NAME).toString());
LOG.info(String
.format("Writer Output Directory is set to %s.", this.jobState.getProp(ConfigurationKeys.WRITER_OUTPUT_DIR)));
}
}
  /**
   * Return whether staging data should be cleaned up on a per-task basis.
   *
   * <p>Controlled by {@link ConfigurationKeys#CLEANUP_STAGING_DATA_PER_TASK}, falling back to
   * {@link ConfigurationKeys#DEFAULT_CLEANUP_STAGING_DATA_PER_TASK}.</p>
   *
   * @return {@code true} if staging data should be cleaned up on a per-task basis or {@code false} otherwise
   */
  boolean shouldCleanupStagingDataPerTask() {
    return this.jobState.getPropAsBoolean(ConfigurationKeys.CLEANUP_STAGING_DATA_PER_TASK,
        ConfigurationKeys.DEFAULT_CLEANUP_STAGING_DATA_PER_TASK);
  }
  /**
   * Get a {@link Map} from dataset URNs (as being specified by {@link ConfigurationKeys#DATASET_URN_KEY} to
   * {@link JobState.DatasetState} objects that represent the dataset states and store {@link TaskState}s
   * corresponding to the datasets.
   *
   * @see JobState#createDatasetStatesByUrns().
   *
   * @return an immutable copy of the dataset-state map; an empty map if the states have not been computed yet
   */
  Map<String, JobState.DatasetState> getDatasetStatesByUrns() {
    return ImmutableMap.copyOf(this.datasetStatesByUrns.or(Maps.<String, JobState.DatasetState>newHashMap()));
  }
/**
* Store job execution information into the job history store.
*/
void storeJobExecutionInfo() {
if (this.jobHistoryStoreOptional.isPresent()) {
try {
this.logger.info("Writing job execution information to the job history store");
this.jobHistoryStoreOptional.get().put(this.jobState.toJobExecutionInfo());
} catch (IOException ioe) {
this.logger.error("Failed to write job execution information to the job history store: " + ioe, ioe);
}
}
}
  /**
   * Event-bus handler invoked when a batch of tasks completes; refreshes the job history store.
   *
   * @param newOutputTaskStateEvent event carrying the newly completed {@link TaskState}s
   */
  @Subscribe
  public void handleNewTaskCompletionEvent(NewTaskCompletionEvent newOutputTaskStateEvent) {
    LOG.info("{} more tasks of job {} have completed", newOutputTaskStateEvent.getTaskStates().size(), this.jobId);
    // Update the job execution history store upon new task completion
    storeJobExecutionInfo();
  }
  /**
   * Finalize the {@link JobState} before committing the job: stamps the end time and duration,
   * and propagates the fork-branch count from the task states.
   */
  void finalizeJobStateBeforeCommit() {
    this.jobState.setEndTime(System.currentTimeMillis());
    this.jobState.setDuration(this.jobState.getEndTime() - this.jobState.getStartTime());
    for (TaskState taskState : this.jobState.getTaskStates()) {
      // Set fork.branches explicitly here so the rest job flow can pick it up.
      // Note: each iteration overwrites the same job-level property, so the value from the
      // last task in iteration order wins.
      this.jobState
          .setProp(ConfigurationKeys.FORK_BRANCHES_KEY, taskState.getPropAsInt(ConfigurationKeys.FORK_BRANCHES_KEY, 1));
    }
  }
  /**
   * Commit the job on a per-dataset basis.
   *
   * <p>Convenience overload of {@link #commit(boolean)} with {@code isJobCancelled = false}.</p>
   *
   * @throws IOException if any dataset fails to commit
   */
  void commit()
      throws IOException {
    commit(false);
  }
  /**
   * Commit the job based on whether the job is cancelled.
   *
   * <p>Computes the per-dataset states, persists the dataset URNs, then runs one
   * {@code SafeDatasetCommit} per dataset on a commit thread pool. On any dataset failure the
   * {@link JobState} is marked {@code FAILED} and an {@link IOException} is thrown; otherwise the
   * state is set to {@code COMMITTED}.</p>
   *
   * @param isJobCancelled whether this commit is happening because the job was cancelled
   * @throws IOException if any dataset commit fails, or if the commit executor is interrupted
   */
  void commit(final boolean isJobCancelled)
      throws IOException {
    this.datasetStatesByUrns = Optional.of(computeDatasetStatesByUrns());
    final boolean shouldCommitDataInJob = shouldCommitDataInJob(this.jobState);
    final DeliverySemantics deliverySemantics = DeliverySemantics.parse(this.jobState);
    final int numCommitThreads = numCommitThreads();
    if (!shouldCommitDataInJob) {
      this.logger.info("Job will not commit data since data are committed by tasks.");
    }
    try {
      if (this.datasetStatesByUrns.isPresent()) {
        this.logger.info("Persisting dataset urns.");
        this.datasetStateStore.persistDatasetURNs(this.jobName, this.datasetStatesByUrns.get().keySet());
      }
      // One commit callable per dataset, executed with up to numCommitThreads in parallel.
      List<Either<Void, ExecutionException>> result = new IteratorExecutor<>(Iterables
          .transform(this.datasetStatesByUrns.get().entrySet(),
              new Function<Map.Entry<String, DatasetState>, Callable<Void>>() {
                @Nullable
                @Override
                public Callable<Void> apply(final Map.Entry<String, DatasetState> entry) {
                  return createSafeDatasetCommit(shouldCommitDataInJob, isJobCancelled, deliverySemantics,
                      entry.getKey(), entry.getValue(), numCommitThreads > 1, JobContext.this);
                }
              }).iterator(), numCommitThreads,
          ExecutorsUtils.newThreadFactory(Optional.of(this.logger), Optional.of("Commit-thread-%d")))
          .executeAndGetResults();
      // Log up to 10 failures, then fail the whole job if any dataset commit did not succeed.
      IteratorExecutor.logFailures(result, LOG, 10);
      if (!IteratorExecutor.verifyAllSuccessful(result)) {
        this.jobState.setState(JobState.RunningState.FAILED);
        String errMsg = "Failed to commit dataset state for some dataset(s) of job " + this.jobId;
        this.jobState.setJobFailureMessage(errMsg);
        throw new IOException(errMsg);
      }
    } catch (InterruptedException exc) {
      throw new IOException(exc);
    }
    this.jobState.setState(JobState.RunningState.COMMITTED);
  }
  /**
   * Close this context; releases the job-level shared-resource broker.
   *
   * @throws IOException if closing the broker fails
   */
  @Override
  public void close()
      throws IOException {
    this.jobBroker.close();
  }
  /** Number of threads used for parallel dataset commits; backed by the {@code parallelCommits} field. */
  private int numCommitThreads() {
    return this.parallelCommits;
  }
  /**
   * Create the {@link Callable} that commits a single dataset.
   *
   * The only reason for this methods is so that we can test the parallelization of commits.
   * DO NOT OVERRIDE.
   *
   * @return a {@link SafeDatasetCommit} wrapping the given dataset state
   */
  @VisibleForTesting
  protected Callable<Void> createSafeDatasetCommit(boolean shouldCommitDataInJob, boolean isJobCancelled,
      DeliverySemantics deliverySemantics, String datasetUrn, JobState.DatasetState datasetState,
      boolean isMultithreaded, JobContext jobContext) {
    return new SafeDatasetCommit(shouldCommitDataInJob, isJobCancelled, deliverySemantics, datasetUrn, datasetState,
        isMultithreaded, jobContext);
  }
  /** Compute the per-dataset states from the current job state; see {@link JobState#createDatasetStatesByUrns()}. */
  protected Map<String, JobState.DatasetState> computeDatasetStatesByUrns() {
    return this.jobState.createDatasetStatesByUrns();
  }
@SuppressWarnings("unchecked")
public static Optional<Class<? extends DataPublisher>> getJobDataPublisherClass(State state)
throws ReflectiveOperationException {
if (!Strings.isNullOrEmpty(state.getProp(ConfigurationKeys.JOB_DATA_PUBLISHER_TYPE))) {
return Optional.<Class<? extends DataPublisher>>of(
(Class<? extends DataPublisher>) Class.forName(state.getProp(ConfigurationKeys.JOB_DATA_PUBLISHER_TYPE)));
} else if (!Strings.isNullOrEmpty(state.getProp(ConfigurationKeys.DATA_PUBLISHER_TYPE))) {
return Optional.<Class<? extends DataPublisher>>of(
(Class<? extends DataPublisher>) Class.forName(state.getProp(ConfigurationKeys.DATA_PUBLISHER_TYPE)));
} else {
LOG.info("Property " + ConfigurationKeys.JOB_DATA_PUBLISHER_TYPE + " not specified");
return Optional.<Class<? extends DataPublisher>>absent();
}
}
/**
* Whether data should be committed by the job (as opposed to being commited by the tasks).
* Data should be committed by the job if either {@link ConfigurationKeys#JOB_COMMIT_POLICY_KEY} is set to "full",
* or {@link ConfigurationKeys#PUBLISH_DATA_AT_JOB_LEVEL} is set to true.
*/
private static boolean shouldCommitDataInJob(State state) {
boolean jobCommitPolicyIsFull =
JobCommitPolicy.getCommitPolicy(state.getProperties()) == JobCommitPolicy.COMMIT_ON_FULL_SUCCESS;
boolean publishDataAtJobLevel = state.getPropAsBoolean(ConfigurationKeys.PUBLISH_DATA_AT_JOB_LEVEL,
ConfigurationKeys.DEFAULT_PUBLISH_DATA_AT_JOB_LEVEL);
boolean jobDataPublisherSpecified =
!Strings.isNullOrEmpty(state.getProp(ConfigurationKeys.JOB_DATA_PUBLISHER_TYPE));
return jobCommitPolicyIsFull || publishDataAtJobLevel || jobDataPublisherSpecified;
}
  /** Human-readable summary of this context: job name, job ID, and the current job state. */
  @Override
  public String toString() {
    return MoreObjects.toStringHelper(JobContext.class.getSimpleName()).add("jobName", getJobName())
        .add("jobId", getJobId()).add("jobState", getJobState()).toString();
  }
/**
* Get all of the failures from the datasetStates stored in the jobContext to determine if
* email notification should be sent or not. Previously job context only looked at jobStates, where
* failures from datasetStates were not propagated from
* Failures are tracked using {@link ConfigurationKeys#JOB_FAILURES_KEY}
*/
public int getDatasetStateFailures() {
int totalFailures = 0;
for (Map.Entry<String, JobState.DatasetState> datasetState: this.getDatasetStatesByUrns().entrySet()) {
totalFailures += datasetState.getValue().getJobFailures();
}
return totalFailures;
}
}
| 1,381 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/JobLauncher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime;
import java.io.Closeable;
import javax.annotation.Nullable;
import org.apache.gobblin.runtime.listeners.JobListener;
/**
 * An interface for classes that launch a Gobblin job.
 *
 * <p>
 * A {@link JobLauncher} is not supposed to be reused, i.e., each {@link JobLauncher}
 * should only be used to launch a single job.
 * </p>
 *
 * @author Yinan Li
 */
public interface JobLauncher extends Closeable {
  /**
   * Launch a Gobblin job.
   *
   * <p>
   * This method is synchronous, i.e., the caller will be blocked until the job finishes. The method
   * {@link JobListener#onJobCompletion(JobContext)} of the given {@link JobListener} will be called at
   * the end if no uncaught exceptions are thrown before the method gets called.
   * </p>
   *
   * @param jobListener a {@link JobListener} instance on which {@link JobListener#onJobCompletion(JobContext)}
   *                    is called at the end of this method if it is not {@code null}
   * @throws JobException if there is anything wrong launching and running the job
   */
  public void launchJob(@Nullable JobListener jobListener)
      throws JobException;
  /**
   * Cancel a Gobblin job.
   *
   * <p>
   * This method is synchronous, i.e., the caller will be blocked until the cancellation is executed.
   * The method {@link JobListener#onJobCancellation(JobContext)} of the given {@link JobListener} will
   * be called at the end if the caller is not interrupted while being blocked. If a cancellation has
   * already been requested, however, this method will return immediately.
   * </p>
   *
   * @param jobListener {@link JobListener} instance on which {@link JobListener#onJobCancellation(JobContext)}
   *                    is called at the end of this method if it is not {@code null}
   * @throws JobException if there is anything wrong cancelling the job
   */
  public void cancelJob(@Nullable JobListener jobListener)
      throws JobException;
  /**
   * Whether the job was stopped early (before processing all of its input).
   *
   * @return {@code true} if the job was stopped early; the default implementation always returns {@code false}
   */
  public default boolean isEarlyStopped() {
    return false;
  }
}
| 1,382 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/BoundedBlockingRecordQueue.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;
import com.codahale.metrics.Gauge;
import com.codahale.metrics.Meter;
import com.codahale.metrics.MetricRegistry;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.collect.Queues;
import org.apache.gobblin.configuration.ConfigurationKeys;
/**
 * A class implementing a bounded blocking queue with timeout for buffering records between a producer and a consumer.
 *
 * <p>
 * In addition to the normal queue operations, this class also keeps track of the following statistics:
 *
 * <ul>
 *   <li>Queue size.</li>
 *   <li>Queue fill ratio (queue size/queue capacity).</li>
 *   <li>Put attempt count.</li>
 *   <li>Mean rate of put attempts (puts/sec).</li>
 *   <li>Get attempt count.</li>
 *   <li>Mean rate of get attempts (gets/sec).</li>
 * </ul>
 * </p>
 *
 * @author Yinan Li
 */
public class BoundedBlockingRecordQueue<T> {
  // Maximum number of records the queue can hold; also the denominator of the fill ratio.
  private final int capacity;
  // Timeout applied to both put and get operations, in units of timeoutTimeUnit.
  private final long timeout;
  private final TimeUnit timeoutTimeUnit;
  private final BlockingQueue<T> blockingQueue;
  // Present only when the builder enabled statistics collection.
  private final Optional<QueueStats> queueStats;
  private BoundedBlockingRecordQueue(Builder<T> builder) {
    Preconditions.checkArgument(builder.capacity > 0, "Invalid queue capacity");
    Preconditions.checkArgument(builder.timeout > 0, "Invalid timeout time");
    this.capacity = builder.capacity;
    this.timeout = builder.timeout;
    this.timeoutTimeUnit = builder.timeoutTimeUnit;
    this.blockingQueue = Queues.newArrayBlockingQueue(builder.capacity);
    this.queueStats = builder.ifCollectStats ? Optional.of(new QueueStats()) : Optional.<QueueStats> absent();
  }
  /**
   * Put a record to the tail of the queue, waiting (up to the configured timeout time)
   * for an empty space to become available.
   *
   * <p>Note: the put-attempt meter is marked regardless of whether the offer succeeded,
   * so the statistics count attempts, not successful puts.</p>
   *
   * @param record the record to put to the tail of the queue
   * @return whether the record has been successfully put into the queue
   * @throws InterruptedException if interrupted while waiting
   */
  public boolean put(T record) throws InterruptedException {
    boolean offered = this.blockingQueue.offer(record, this.timeout, this.timeoutTimeUnit);
    if (this.queueStats.isPresent()) {
      this.queueStats.get().putsRateMeter.mark();
    }
    return offered;
  }
  /**
   * Get a record from the head of the queue, waiting (up to the configured timeout time)
   * for a record to become available.
   *
   * <p>Note: the get-attempt meter is marked regardless of whether a record was returned,
   * so the statistics count attempts, not successful gets.</p>
   *
   * @return the record at the head of the queue, or <code>null</code> if no record is available
   * @throws InterruptedException if interrupted while waiting
   */
  public T get() throws InterruptedException {
    T record = this.blockingQueue.poll(this.timeout, this.timeoutTimeUnit);
    if (this.queueStats.isPresent()) {
      this.queueStats.get().getsRateMeter.mark();
    }
    return record;
  }
  /**
   * Get a {@link QueueStats} object representing queue statistics of this {@link BoundedBlockingRecordQueue}.
   *
   * @return a {@link QueueStats} object wrapped in an {@link com.google.common.base.Optional},
   *         which means it may be absent if collecting of queue statistics is not enabled.
   */
  public Optional<QueueStats> stats() {
    return this.queueStats;
  }
  /**
   * Clear the queue.
   */
  public void clear() {
    this.blockingQueue.clear();
  }
  /**
   * Get a new {@link BoundedBlockingRecordQueue.Builder}.
   *
   * @param <T> record type
   * @return a new {@link BoundedBlockingRecordQueue.Builder}
   */
  public static <T> Builder<T> newBuilder() {
    return new Builder<>();
  }
  /**
   * A builder class for {@link BoundedBlockingRecordQueue}.
   *
   * @param <T> record type
   */
  public static class Builder<T> {
    // Defaults come from the fork record-queue configuration keys.
    private int capacity = ConfigurationKeys.DEFAULT_FORK_RECORD_QUEUE_CAPACITY;
    private long timeout = ConfigurationKeys.DEFAULT_FORK_RECORD_QUEUE_TIMEOUT;
    private TimeUnit timeoutTimeUnit = TimeUnit.MILLISECONDS;
    private boolean ifCollectStats = false;
    /**
     * Configure the capacity of the queue.
     *
     * @param capacity the capacity of the queue (must be positive; validated at build time)
     * @return this {@link Builder} instance
     */
    public Builder<T> hasCapacity(int capacity) {
      this.capacity = capacity;
      return this;
    }
    /**
     * Configure the timeout time of queue operations.
     *
     * @param timeout the timeout time (must be positive; validated at build time)
     * @return this {@link Builder} instance
     */
    public Builder<T> useTimeout(long timeout) {
      this.timeout = timeout;
      return this;
    }
    /**
     * Configure the timeout time unit of queue operations.
     *
     * @param timeoutTimeUnit the timeout time unit
     * @return this {@link Builder} instance
     */
    public Builder<T> useTimeoutTimeUnit(TimeUnit timeoutTimeUnit) {
      this.timeoutTimeUnit = timeoutTimeUnit;
      return this;
    }
    /**
     * Configure whether to collect queue statistics.
     *
     * @return this {@link Builder} instance
     */
    public Builder<T> collectStats() {
      this.ifCollectStats = true;
      return this;
    }
    /**
     * Build a new {@link BoundedBlockingRecordQueue}.
     *
     * @return the newly built {@link BoundedBlockingRecordQueue}
     */
    public BoundedBlockingRecordQueue<T> build() {
      return new BoundedBlockingRecordQueue<>(this);
    }
  }
  /**
   * A class for collecting queue statistics.
   *
   * <p>
   * An instance exists only when statistics collection is enabled on the builder; otherwise
   * {@link #stats()} returns absent and no statistics are tracked at all.
   * </p>
   */
  public class QueueStats {
    public static final String QUEUE_SIZE = "queueSize";
    public static final String FILL_RATIO = "fillRatio";
    public static final String PUT_ATTEMPT_RATE = "putAttemptRate";
    public static final String GET_ATTEMPT_RATE = "getAttemptRate";
    public static final String PUT_ATTEMPT_COUNT = "putAttemptCount";
    public static final String GET_ATTEMPT_COUNT = "getAttemptCount";
    private final Gauge<Integer> queueSizeGauge;
    private final Gauge<Double> fillRatioGauge;
    // Each Meter tracks both the mean rate and the total count of attempts.
    private final Meter putsRateMeter;
    private final Meter getsRateMeter;
    public QueueStats() {
      this.queueSizeGauge = new Gauge<Integer>() {
        @Override
        public Integer getValue() {
          return BoundedBlockingRecordQueue.this.blockingQueue.size();
        }
      };
      this.fillRatioGauge = new Gauge<Double>() {
        @Override
        public Double getValue() {
          return (double) BoundedBlockingRecordQueue.this.blockingQueue.size()
              / BoundedBlockingRecordQueue.this.capacity;
        }
      };
      this.putsRateMeter = new Meter();
      this.getsRateMeter = new Meter();
    }
    /**
     * Return the queue size.
     *
     * @return the queue size
     */
    public int queueSize() {
      return this.queueSizeGauge.getValue();
    }
    /**
     * Return the queue fill ratio.
     *
     * @return the queue fill ratio
     */
    public double fillRatio() {
      return this.fillRatioGauge.getValue();
    }
    /**
     * Return the rate of put attempts.
     *
     * @return the rate of put attempts
     */
    public double putAttemptRate() {
      return this.putsRateMeter.getMeanRate();
    }
    /**
     * Return the total count of put attempts.
     *
     * @return the total count of put attempts
     */
    public long putAttemptCount() {
      return this.putsRateMeter.getCount();
    }
    /**
     * Return the rate of get attempts.
     *
     * @return the rate of get attempts
     */
    public double getAttemptRate() {
      return this.getsRateMeter.getMeanRate();
    }
    /**
     * Return the total count of get attempts.
     *
     * @return the total count of get attempts
     */
    public long getAttemptCount() {
      return this.getsRateMeter.getCount();
    }
    /**
     * Register all statistics as {@link com.codahale.metrics.Metric}s with a
     * {@link com.codahale.metrics.MetricRegistry}.
     *
     * <p>Only the two gauges and the two rate meters are registered; the attempt counts are
     * available through the registered meters themselves.</p>
     *
     * @param metricRegistry the {@link com.codahale.metrics.MetricRegistry} to register with
     * @param prefix metric name prefix
     */
    public void registerAll(MetricRegistry metricRegistry, String prefix) {
      metricRegistry.register(MetricRegistry.name(prefix, QUEUE_SIZE), this.queueSizeGauge);
      metricRegistry.register(MetricRegistry.name(prefix, FILL_RATIO), this.fillRatioGauge);
      metricRegistry.register(MetricRegistry.name(prefix, PUT_ATTEMPT_RATE), this.putsRateMeter);
      metricRegistry.register(MetricRegistry.name(prefix, GET_ATTEMPT_RATE), this.getsRateMeter);
    }
    /** Render all six statistics as a bracketed key=value list, for logging. */
    @Override
    public String toString() {
      StringBuilder sb = new StringBuilder("[");
      sb.append(QUEUE_SIZE).append("=").append(queueSize()).append(", ");
      sb.append(FILL_RATIO).append("=").append(fillRatio()).append(", ");
      sb.append(PUT_ATTEMPT_RATE).append("=").append(putAttemptRate()).append(", ");
      sb.append(PUT_ATTEMPT_COUNT).append("=").append(putAttemptCount()).append(", ");
      sb.append(GET_ATTEMPT_RATE).append("=").append(getAttemptRate()).append(", ");
      sb.append(GET_ATTEMPT_COUNT).append("=").append(getAttemptCount()).append("]");
      return sb.toString();
    }
  }
}
| 1,383 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/StreamModelTaskRunner.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
import com.google.common.base.Optional;
import com.google.common.io.Closer;
import com.google.common.util.concurrent.Futures;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.converter.Converter;
import org.apache.gobblin.fork.ForkOperator;
import org.apache.gobblin.fork.Forker;
import org.apache.gobblin.qualitychecker.row.RowLevelPolicyChecker;
import org.apache.gobblin.records.RecordStreamProcessor;
import org.apache.gobblin.records.RecordStreamWithMetadata;
import org.apache.gobblin.runtime.fork.Fork;
import org.apache.gobblin.source.extractor.Extractor;
import org.apache.gobblin.source.extractor.StreamingExtractor;
import org.apache.gobblin.util.ExponentialBackoff;
import org.apache.gobblin.util.LoggingUncaughtExceptionHandler;
import org.apache.gobblin.writer.AcknowledgableWatermark;
import org.apache.gobblin.writer.FineGrainedWatermarkTracker;
import org.apache.gobblin.writer.WatermarkManager;
import org.apache.gobblin.writer.WatermarkStorage;
import io.reactivex.Flowable;
import io.reactivex.flowables.ConnectableFlowable;
import io.reactivex.schedulers.Schedulers;
import lombok.AllArgsConstructor;
/**
* A helper class to run {@link Task} in stream mode. Prevents {@link Task} from loading reactivex classes when not
* needed.
*/
@AllArgsConstructor
public class StreamModelTaskRunner {
  private final Task task;
  private final TaskState taskState;
  private final Closer closer;
  private final TaskContext taskContext;
  private final Extractor extractor;
  private final Converter converter;
  private final List<RecordStreamProcessor<?,?,?,?>> recordStreamProcessors;
  private final RowLevelPolicyChecker rowChecker;
  private final TaskExecutor taskExecutor;
  private final ExecutionModel taskMode;
  private final AtomicBoolean shutdownRequested;
  private final Optional<FineGrainedWatermarkTracker> watermarkTracker;
  private final Optional<WatermarkManager> watermarkManager;
  private final Optional<WatermarkStorage> watermarkStorage;
  private final Map<Optional<Fork>, Optional<Future<?>>> forks;
  /**
   * Run the task as a record-stream pipeline: extractor -> (watermark tracking) ->
   * stream processors / converters -> row-level quality checker -> fork operator -> forks.
   * Blocks until all forks are done or the configured maximum wait elapses.
   *
   * @throws TimeoutException if the forks do not finish within the configured maximum wait
   * @throws Exception if any stage of the pipeline setup fails
   */
  protected void run() throws Exception {
    long maxWaitInMinute = taskState.getPropAsLong(ConfigurationKeys.FORK_MAX_WAIT_MININUTES, ConfigurationKeys.DEFAULT_FORK_MAX_WAIT_MININUTES);
    long initialDelay = taskState.getPropAsLong(ConfigurationKeys.FORK_FINISHED_CHECK_INTERVAL, ConfigurationKeys.DEFAULT_FORK_FINISHED_CHECK_INTERVAL);
    // Get the fork operator. By default IdentityForkOperator is used with a single branch.
    ForkOperator forkOperator = closer.register(this.taskContext.getForkOperator());
    RecordStreamWithMetadata<?, ?> stream = this.extractor.recordStream(this.shutdownRequested);
    // This prevents emitting records until a connect() call is made on the connectable stream
    ConnectableFlowable connectableStream = stream.getRecordStream().publish();
    // The cancel is not propagated to the extractor's record generator when it has been turned into a hot Flowable
    // by publish, and in the case that extractor stuck in reading record when cancel get called,
    // we directly call shutdown to force it instead of setting the shutdownRequested flag on cancel to stop the extractor
    Flowable streamWithShutdownOnCancel = connectableStream.doOnCancel(this.extractor::shutdown);
    stream = stream.withRecordStream(streamWithShutdownOnCancel);
    // Count every extracted record for task-level bookkeeping.
    stream = stream.mapRecords(r -> {
      this.task.onRecordExtract();
      return r;
    });
    if (this.task.isStreamingTask()) {
      // Start watermark manager and tracker
      if (this.watermarkTracker.isPresent()) {
        this.watermarkTracker.get().start();
      }
      this.watermarkManager.get().start();
      ((StreamingExtractor) this.taskContext.getRawSourceExtractor()).start(this.watermarkStorage.get());
      // Attach an acknowledgeable watermark to each record so commits can be tracked downstream.
      stream = stream.mapRecords(r -> {
        AcknowledgableWatermark ackableWatermark = new AcknowledgableWatermark(r.getWatermark());
        if (watermarkTracker.isPresent()) {
          watermarkTracker.get().track(ackableWatermark);
        }
        r.addCallBack(ackableWatermark);
        return r;
      });
    }
    // Use the recordStreamProcessor list if it is configured. This list can contain both all RecordStreamProcessor types
    if (!this.recordStreamProcessors.isEmpty()) {
      for (RecordStreamProcessor streamProcessor : this.recordStreamProcessors) {
        stream = streamProcessor.processStream(stream, this.taskState);
      }
    } else {
      if (this.converter instanceof MultiConverter) {
        // if multiconverter, unpack it
        for (Converter cverter : ((MultiConverter) this.converter).getConverters()) {
          stream = cverter.processStream(stream, this.taskState);
        }
      } else {
        stream = this.converter.processStream(stream, this.taskState);
      }
    }
    stream = this.rowChecker.processStream(stream, this.taskState);
    Forker.ForkedStream<?, ?> forkedStreams = new Forker().forkStream(stream, forkOperator, this.taskState);
    // Forks run asynchronously when there is more than one branch, or when single-branch tasks
    // are configured to be asynchronous.
    boolean isForkAsync = !this.task.areSingleBranchTasksSynchronous(this.taskContext) || forkedStreams.getForkedStreams().size() > 1;
    int bufferSize =
        this.taskState.getPropAsInt(ConfigurationKeys.FORK_RECORD_QUEUE_CAPACITY_KEY, ConfigurationKeys.DEFAULT_FORK_RECORD_QUEUE_CAPACITY);
    for (int fidx = 0; fidx < forkedStreams.getForkedStreams().size(); fidx ++) {
      RecordStreamWithMetadata<?, ?> forkedStream = forkedStreams.getForkedStreams().get(fidx);
      if (forkedStream != null) {
        if (isForkAsync) {
          forkedStream = forkedStream.mapStream(f -> f.observeOn(Schedulers.from(this.taskExecutor.getForkExecutor()), false, bufferSize));
        }
        Fork fork = new Fork(this.taskContext, forkedStream.getGlobalMetadata().getSchema(), forkedStreams.getForkedStreams().size(), fidx, this.taskMode);
        fork.consumeRecordStream(forkedStream);
        this.forks.put(Optional.of(fork), Optional.of(Futures.immediateFuture(null)));
        this.task.configureStreamingFork(fork);
      }
    }
    // connect() starts record emission; run it on its own thread so this method can poll for completion.
    Thread thread = new Thread(() -> connectableStream.connect());
    thread.setName(this.getClass().getSimpleName());
    //Log uncaught exceptions (e.g.OOMEs) to prevent threads from dying silently
    thread.setUncaughtExceptionHandler(new LoggingUncaughtExceptionHandler(Optional.absent()));
    thread.start();
    // Poll all forks for completion, backing off at a fixed interval, up to the configured max wait.
    if (!ExponentialBackoff.awaitCondition().callable(() -> this.forks.keySet().stream().map(Optional::get).allMatch(Fork::isDone)).
        initialDelay(initialDelay).maxDelay(initialDelay).maxWait(TimeUnit.MINUTES.toMillis(maxWaitInMinute)).await()) {
      // Fixed typo in the original message: "withing" -> "within".
      throw new TimeoutException("Forks did not finish within specified timeout.");
    }
  }
}
| 1,384 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/JobState.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.io.StringWriter;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import lombok.Getter;
import lombok.Setter;
import org.apache.gobblin.metastore.DatasetStateStore;
import org.apache.gobblin.runtime.job.JobProgress;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.io.Text;
import com.codahale.metrics.Counter;
import com.codahale.metrics.Gauge;
import com.codahale.metrics.Meter;
import com.google.common.base.Enums;
import com.google.common.base.Optional;
import com.google.common.base.Strings;
import com.google.common.base.Throwables;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
import com.google.gson.stream.JsonWriter;
import com.linkedin.data.template.StringMap;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.metrics.GobblinMetrics;
import org.apache.gobblin.rest.JobExecutionInfo;
import org.apache.gobblin.rest.JobStateEnum;
import org.apache.gobblin.rest.LauncherTypeEnum;
import org.apache.gobblin.rest.Metric;
import org.apache.gobblin.rest.MetricArray;
import org.apache.gobblin.rest.MetricTypeEnum;
import org.apache.gobblin.rest.TaskExecutionInfoArray;
import org.apache.gobblin.runtime.api.MonitoredObject;
import org.apache.gobblin.runtime.util.JobMetrics;
import org.apache.gobblin.runtime.util.MetricGroup;
import org.apache.gobblin.source.extractor.JobCommitPolicy;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.util.ImmutableProperties;
import org.apache.gobblin.util.JobLauncherUtils;
/**
* A class for tracking job state information.
*
* @author Yinan Li
*/
public class JobState extends SourceState implements JobProgress {
/**
* An enumeration of possible job states, which are identical to
* {@link org.apache.gobblin.configuration.WorkUnitState.WorkingState}
* in terms of naming.
*
* <p> Status state diagram:
* <ul>
* <li> null => PENDING
* <li> PENDING => RUNNING
* <li> PENDING => CANCELLED
* <li> RUNNING => CANCELLED
* <li> RUNNING => SUCCESSFUL
* <li> RUNNING => FAILED
* <li> SUCCESSFUL => COMMITTED
* <li> SUCCESSFUL => CANCELLED (cancelled before committing)
* </ul>
*/
public enum RunningState implements MonitoredObject {
/** Pending creation of {@link WorkUnit}s. */
PENDING,
/** Starting the execution of {@link WorkUnit}s. */
RUNNING,
/** All {@link WorkUnit}s have finished successfully or the job commit policy is
* {@link JobCommitPolicy#COMMIT_ON_PARTIAL_SUCCESS} */
SUCCESSFUL,
/** Job state has been committed */
COMMITTED,
/** At least one {@link WorkUnit}s has failed for a job with job commit policy
* {@link JobCommitPolicy#COMMIT_ON_FULL_SUCCESS}. */
FAILED,
/** The execution of the job was cancelled. */
CANCELLED;
public boolean isCancelled() {
return this.equals(CANCELLED);
}
public boolean isDone() {
return this.equals(COMMITTED) || this.equals(FAILED) || this.equals(CANCELLED);
}
public boolean isSuccess() {
return this.equals(COMMITTED);
}
public boolean isFailure() {
return this.equals(FAILED);
}
public boolean isRunningOrDone() {
return isDone() || this.equals(RUNNING);
}
}
  /** Logical name of the job; shared across runs of the same job definition. */
  @Getter @Setter
  private String jobName;
  /** Unique id of this particular job run. */
  @Getter @Setter
  private String jobId;
  /** job start time in milliseconds */
  @Getter @Setter
  private long startTime = 0;
  /** job end time in milliseconds */
  @Getter @Setter
  private long endTime = 0;
  /** job duration in milliseconds */
  @Getter @Setter
  private long duration = 0;
  // Current lifecycle state; read/written only through the synchronized getState()/setState() accessors.
  private RunningState state = RunningState.PENDING;
  /** the number of tasks this job consists of */
  @Getter @Setter
  private int taskCount = 0;
  // Task states keyed by task id; LinkedHashMap preserves insertion order for serialization and JSON output.
  private final Map<String, TaskState> taskStates = Maps.newLinkedHashMap();
  // Skipped task states shouldn't be exposed to publisher, but they need to be in JobState and DatasetState so that they can be written to StateStore.
  private final Map<String, TaskState> skippedTaskStates = Maps.newLinkedHashMap();
  // NOTE(review): not referenced by any method visible in this file — possibly set/used elsewhere; confirm before removing.
  private DatasetStateStore datasetStateStore;
  // Necessary for serialization/deserialization
  public JobState() {
  }

  /** Creates a job state with the given name and id; the id also becomes this state's {@code id}. */
  public JobState(String jobName, String jobId) {
    this.jobName = jobName;
    this.jobId = jobId;
    this.setId(jobId);
  }

  /** Creates a job state initialized from the given configuration properties. */
  public JobState(State properties, String jobName, String jobId) {
    super(properties);
    this.jobName = jobName;
    this.jobId = jobId;
    this.setId(jobId);
  }

  /**
   * Creates a job state that also carries the previous run's dataset states, whose task states
   * are flattened into previous {@link WorkUnitState}s for the {@link SourceState} super class.
   */
  public JobState(State properties, Map<String, JobState.DatasetState> previousDatasetStates, String jobName,
      String jobId) {
    super(properties, previousDatasetStates, workUnitStatesFromDatasetStates(previousDatasetStates.values()));
    this.jobName = jobName;
    this.jobId = jobId;
    this.setId(jobId);
  }
  /** Reads {@link ConfigurationKeys#JOB_NAME_KEY} from the given {@link State}. */
  public static String getJobNameFromState(State state) {
    return state.getProp(ConfigurationKeys.JOB_NAME_KEY);
  }

  /** Reads {@link ConfigurationKeys#JOB_NAME_KEY} from the given {@link Properties}. */
  public static String getJobNameFromProps(Properties props) {
    return props.getProperty(ConfigurationKeys.JOB_NAME_KEY);
  }

  /** Returns the configured job id, or generates a fresh one from the job name when absent. */
  public static String getJobIdFromProps(Properties props) {
    return props.containsKey(ConfigurationKeys.JOB_ID_KEY) ? props.getProperty(ConfigurationKeys.JOB_ID_KEY)
        : JobLauncherUtils.newJobId(JobState.getJobNameFromProps(props));
  }

  /** Reads {@link ConfigurationKeys#JOB_GROUP_KEY} from the given {@link State}. */
  public static String getJobGroupFromState(State state) {
    return state.getProp(ConfigurationKeys.JOB_GROUP_KEY);
  }

  /** Reads {@link ConfigurationKeys#JOB_GROUP_KEY} from the given {@link Properties}. */
  public static String getJobGroupFromProps(Properties props) {
    return props.getProperty(ConfigurationKeys.JOB_GROUP_KEY);
  }

  // NOTE(review): despite the "FromProps" name this overload takes a State; renaming would break callers.
  public static String getJobDescriptionFromProps(State state) {
    return state.getProp(ConfigurationKeys.JOB_DESCRIPTION_KEY);
  }

  /** Reads {@link ConfigurationKeys#JOB_DESCRIPTION_KEY} from the given {@link Properties}. */
  public static String getJobDescriptionFromProps(Properties props) {
    return props.getProperty(ConfigurationKeys.JOB_DESCRIPTION_KEY);
  }
/**
* Get the currently elapsed time for this job.
* @return
*/
public long getElapsedTime() {
if (this.endTime > 0) {
return this.endTime - this.startTime;
}
if (this.startTime > 0) {
return System.currentTimeMillis() - this.startTime;
}
return 0;
}
  /**
   * Get job running state of type {@link RunningState}.
   *
   * @return job running state of type {@link RunningState}
   */
  // synchronized with setState so readers always see the latest value across threads.
  public synchronized RunningState getState() {
    return this.state;
  }

  /**
   * Set job running state of type {@link RunningState}.
   *
   * @param state job running state of type {@link RunningState}
   */
  public synchronized void setState(RunningState state) {
    this.state = state;
  }
/**
* If not already present, set the {@link ConfigurationKeys#JOB_FAILURE_EXCEPTION_KEY} to a {@link String}
* representation of the given {@link Throwable}.
*/
public void setJobFailureException(Throwable jobFailureException) {
String previousExceptions = this.getProp(ConfigurationKeys.JOB_FAILURE_EXCEPTION_KEY);
String currentException = Throwables.getStackTraceAsString(jobFailureException);
String aggregatedExceptions;
if (StringUtils.isEmpty(previousExceptions)) {
aggregatedExceptions = currentException;
} else {
aggregatedExceptions = currentException + "\n\n" + previousExceptions;
}
this.setProp(ConfigurationKeys.JOB_FAILURE_EXCEPTION_KEY, aggregatedExceptions);
}
/**
* If not already present, set the {@link EventMetadataUtils#JOB_FAILURE_MESSAGE_KEY} to the given {@link String}.
*/
public void setJobFailureMessage(String jobFailureMessage) {
String previousMessages = this.getProp(ConfigurationKeys.JOB_FAILURE_EXCEPTION_KEY);
String aggregatedMessages;
if (StringUtils.isEmpty(previousMessages)) {
aggregatedMessages = jobFailureMessage;
} else {
aggregatedMessages = jobFailureMessage + ", " + previousMessages;
}
this.setProp(EventMetadataUtils.JOB_FAILURE_MESSAGE_KEY, aggregatedMessages);
}
  /**
   * Increment the number of tasks by 1.
   */
  public void incrementTaskCount() {
    this.taskCount++;
  }

  /**
   * Add a single {@link TaskState}.
   *
   * <p>Does NOT increment {@link #taskCount}; callers use {@link #incrementTaskCount()} separately.
   *
   * @param taskState {@link TaskState} to add
   */
  public void addTaskState(TaskState taskState) {
    this.taskStates.put(taskState.getTaskId(), taskState);
  }

  /** Records a task state that was skipped; kept separate so publishers never see it. */
  public void addSkippedTaskState(TaskState taskState) {
    this.skippedTaskStates.put(taskState.getTaskId(), taskState);
  }

  /**
   * Remove a {@link TaskState} and decrement the task count.
   *
   * <p>NOTE(review): asymmetric with {@link #addTaskState(TaskState)}, which does not increment
   * the count — this matches existing caller expectations; confirm before changing.
   */
  public void removeTaskState(TaskState taskState) {
    this.taskStates.remove(taskState.getTaskId());
    this.taskCount--;
  }
/**
* Filter the task states corresponding to the skipped work units and add it to the skippedTaskStates
*/
public void filterSkippedTaskStates() {
List<TaskState> skippedTaskStates = new ArrayList<>();
for (TaskState taskState : this.taskStates.values()) {
if (taskState.getWorkingState() == WorkUnitState.WorkingState.SKIPPED) {
skippedTaskStates.add(taskState);
}
}
for (TaskState taskState : skippedTaskStates) {
removeTaskState(taskState);
addSkippedTaskState(taskState);
}
}
  /**
   * Add a collection of {@link TaskState}s.
   *
   * @param taskStates collection of {@link TaskState}s to add
   */
  public void addTaskStates(Collection<TaskState> taskStates) {
    for (TaskState taskState : taskStates) {
      this.taskStates.put(taskState.getTaskId(), taskState);
    }
  }

  /** Add a collection of skipped {@link TaskState}s; see {@link #addSkippedTaskState(TaskState)}. */
  public void addSkippedTaskStates(Collection<TaskState> taskStates) {
    for (TaskState taskState : taskStates) {
      addSkippedTaskState(taskState);
    }
  }
/**
* Get the number of completed tasks.
*
* @return number of completed tasks
*/
public int getCompletedTasks() {
int completedTasks = 0;
for (TaskState taskState : this.taskStates.values()) {
if (taskState.isCompleted()) {
completedTasks++;
}
}
return completedTasks;
}
/**
* Get {@link TaskState}s of {@link Task}s of this job.
*
* @return a list of {@link TaskState}s
*/
public List<TaskState> getTaskStates() {
return ImmutableList.<TaskState>builder().addAll(this.taskStates.values()).build();
}
@Override
public List<TaskState> getTaskProgress() {
return getTaskStates();
}
  /**
   * Create a {@link Map} from dataset URNs (as being specified by {@link ConfigurationKeys#DATASET_URN_KEY} to
   * {@link DatasetState} objects that represent the dataset states and store {@link TaskState}s corresponding
   * to the datasets.
   *
   * <p>
   * {@link TaskState}s that do not have {@link ConfigurationKeys#DATASET_URN_KEY} set will be added to
   * the dataset state belonging to {@link ConfigurationKeys#DEFAULT_DATASET_URN}.
   * </p>
   *
   * @return a {@link Map} from dataset URNs to {@link DatasetState}s representing the dataset states
   */
  public Map<String, DatasetState> createDatasetStatesByUrns() {
    Map<String, DatasetState> datasetStatesByUrns = Maps.newHashMap();
    // Regular task states: grouped by URN, each counted toward its dataset's task count.
    for (TaskState taskState : this.taskStates.values()) {
      String datasetUrn = createDatasetUrn(datasetStatesByUrns, taskState);
      datasetStatesByUrns.get(datasetUrn).incrementTaskCount();
      datasetStatesByUrns.get(datasetUrn).addTaskState(taskState);
    }
    // Skipped task states: grouped by URN but NOT counted toward the task count.
    for (TaskState taskState : this.skippedTaskStates.values()) {
      String datasetUrn = createDatasetUrn(datasetStatesByUrns, taskState);
      datasetStatesByUrns.get(datasetUrn).addSkippedTaskState(taskState);
    }
    return ImmutableMap.copyOf(datasetStatesByUrns);
  }

  /**
   * Resolves the dataset URN of the given task state (falling back to the default URN) and
   * lazily creates an empty {@link DatasetState} for it in the map on first sight.
   *
   * @return the resolved dataset URN, guaranteed to be a key in {@code datasetStatesByUrns}
   */
  private String createDatasetUrn(Map<String, DatasetState> datasetStatesByUrns, TaskState taskState) {
    String datasetUrn = taskState.getProp(ConfigurationKeys.DATASET_URN_KEY, ConfigurationKeys.DEFAULT_DATASET_URN);
    if (!datasetStatesByUrns.containsKey(datasetUrn)) {
      DatasetState datasetState = newDatasetState(false);
      datasetState.setDatasetUrn(datasetUrn);
      datasetStatesByUrns.put(datasetUrn, datasetState);
    }
    return datasetUrn;
  }
  /**
   * Get task states of {@link Task}s of this job as {@link WorkUnitState}s.
   *
   * @return a list of {@link WorkUnitState}s
   */
  public List<WorkUnitState> getTaskStatesAsWorkUnitStates() {
    ImmutableList.Builder<WorkUnitState> builder = ImmutableList.builder();
    for (TaskState taskState : this.taskStates.values()) {
      // Rebuild a WorkUnitState from the task's work unit + job state, then copy all task properties onto it.
      WorkUnitState workUnitState = new WorkUnitState(taskState.getWorkunit(), taskState.getJobState());
      workUnitState.setId(taskState.getId());
      workUnitState.addAll(taskState);
      builder.add(workUnitState);
    }
    return builder.build();
  }

  /**
   * Get the {@link LauncherTypeEnum} for this {@link JobState}.
   * Falls back to {@link LauncherTypeEnum#LOCAL} when the configured value is absent or unrecognized.
   */
  public LauncherTypeEnum getLauncherType() {
    return Enums.getIfPresent(LauncherTypeEnum.class,
        this.getProp(ConfigurationKeys.JOB_LAUNCHER_TYPE_KEY, JobLauncherFactory.JobLauncherType.LOCAL.name()))
        .or(LauncherTypeEnum.LOCAL);
  }

  /**
   * Sets the {@link LauncherTypeEnum} for this {@link JobState}.
   */
  public void setJobLauncherType(LauncherTypeEnum jobLauncherType) {
    this.setProp(ConfigurationKeys.JOB_LAUNCHER_TYPE_KEY, jobLauncherType.name());
  }

  /**
   * Get the tracking URL for this {@link JobState}, if one was configured.
   */
  public Optional<String> getTrackingURL() {
    return Optional.fromNullable(this.getProp(ConfigurationKeys.JOB_TRACKING_URL_KEY));
  }
  // Hadoop Writable deserialization. Field order here defines the wire format and MUST mirror write().
  @Override
  public void readFields(DataInput in)
      throws IOException {
    Text text = new Text();
    text.readFields(in);
    // intern() so the many identical job-name/id strings across states share one instance.
    this.jobName = text.toString().intern();
    text.readFields(in);
    this.jobId = text.toString().intern();
    this.setId(this.jobId);
    this.startTime = in.readLong();
    this.endTime = in.readLong();
    this.duration = in.readLong();
    text.readFields(in);
    this.state = RunningState.valueOf(text.toString());
    this.taskCount = in.readInt();
    int numTaskStates = in.readInt();
    getTaskStateWithCommonAndSpecWuProps(numTaskStates, in);
    super.readFields(in);
  }

  /**
   * Deserializes the given number of {@link TaskState}s and de-duplicates their work-unit
   * properties: the intersection of all work units' properties is stored once as an immutable
   * common set, and each task keeps only its own extra (specific) properties.
   */
  private void getTaskStateWithCommonAndSpecWuProps(int numTaskStates, DataInput in)
      throws IOException {
    Properties commonWuProps = new Properties();
    for (int i = 0; i < numTaskStates; i++) {
      TaskState taskState = new TaskState();
      taskState.readFields(in);
      if (i == 0) {
        // Seed the common set with the first task's properties.
        commonWuProps.putAll(taskState.getWorkunit().getProperties());
      } else {
        // Shrink the common set to the entries shared (key and value) with this task.
        Properties newCommonWuProps = new Properties();
        newCommonWuProps
            .putAll(Maps.difference(commonWuProps, taskState.getWorkunit().getProperties()).entriesInCommon());
        commonWuProps = newCommonWuProps;
      }
      this.taskStates.put(taskState.getTaskId().intern(), taskState);
    }
    ImmutableProperties immutableCommonProperties = new ImmutableProperties(commonWuProps);
    for (TaskState taskState : this.taskStates.values()) {
      // The task-specific set is whatever the task has beyond the common properties.
      Properties newSpecProps = new Properties();
      newSpecProps.putAll(
          Maps.difference(immutableCommonProperties, taskState.getWorkunit().getProperties()).entriesOnlyOnRight());
      taskState.setWuProperties(immutableCommonProperties, newSpecProps);
    }
  }

  @Override
  public void write(DataOutput out)
      throws IOException {
    write(out, true, true);
  }

  /**
   * Hadoop Writable serialization; field order MUST mirror readFields().
   *
   * @param writeTasks whether to serialize the (regular + skipped) task states, or a 0 count
   * @param writePreviousWorkUnitStates forwarded to the super class's write
   */
  public void write(DataOutput out, boolean writeTasks, boolean writePreviousWorkUnitStates)
      throws IOException {
    Text text = new Text();
    text.set(this.jobName);
    text.write(out);
    text.set(this.jobId);
    text.write(out);
    out.writeLong(this.startTime);
    out.writeLong(this.endTime);
    out.writeLong(this.duration);
    text.set(this.state.name());
    text.write(out);
    out.writeInt(this.taskCount);
    if (writeTasks) {
      // Regular and skipped task states are written as one flat sequence.
      out.writeInt(this.taskStates.size() + this.skippedTaskStates.size());
      for (TaskState taskState : this.taskStates.values()) {
        taskState.write(out);
      }
      for (TaskState taskState : this.skippedTaskStates.values()) {
        taskState.write(out);
      }
    } else {
      out.writeInt(0);
    }
    super.write(out, writePreviousWorkUnitStates);
  }
  /**
   * Convert this {@link JobState} to a json document.
   *
   * @param jsonWriter a {@link com.google.gson.stream.JsonWriter}
   *                   used to write the json document
   * @param keepConfig whether to keep all configuration properties
   * @throws IOException
   */
  public void toJson(JsonWriter jsonWriter, boolean keepConfig)
      throws IOException {
    jsonWriter.beginObject();
    writeStateSummary(jsonWriter);
    // Regular and skipped task states are emitted into the same "task states" array.
    jsonWriter.name("task states");
    jsonWriter.beginArray();
    for (TaskState taskState : this.taskStates.values()) {
      taskState.toJson(jsonWriter, keepConfig);
    }
    for (TaskState taskState : this.skippedTaskStates.values()) {
      taskState.toJson(jsonWriter, keepConfig);
    }
    jsonWriter.endArray();
    if (keepConfig) {
      jsonWriter.name("properties");
      propsToJson(jsonWriter);
    }
    jsonWriter.endObject();
  }

  /**
   * Write a summary to the json document
   *
   * @param jsonWriter a {@link com.google.gson.stream.JsonWriter}
   *                   used to write the json document
   */
  protected void writeStateSummary(JsonWriter jsonWriter) throws IOException {
    jsonWriter.name("job name").value(this.getJobName()).name("job id").value(this.getJobId()).name("job state")
        .value(this.getState().name()).name("start time").value(this.getStartTime()).name("end time")
        .value(this.getEndTime()).name("duration").value(this.getDuration()).name("tasks").value(this.getTaskCount())
        .name("completed tasks").value(this.getCompletedTasks());
  }

  /** Writes all configuration properties of this state as one json object. Overridden by DatasetState. */
  protected void propsToJson(JsonWriter jsonWriter)
      throws IOException {
    jsonWriter.beginObject();
    for (String key : this.getPropertyNames()) {
      jsonWriter.name(key).value(this.getProp(key));
    }
    jsonWriter.endObject();
  }
@Override
public boolean equals(Object object) {
if (!(object instanceof JobState)) {
return false;
}
JobState other = (JobState) object;
return super.equals(other) && this.jobName.equals(other.jobName) && this.jobId.equals(other.jobId);
}
@Override
public int hashCode() {
final int prime = 31;
int result = super.hashCode();
result = prime * result + this.jobName.hashCode();
result = prime * result + this.jobId.hashCode();
return result;
}
  /** @return pretty-printed JSON, without including properties */
  @Override
  public String toString() {
    return toJsonString(false);
  }

  /** @return pretty-printed JSON, including all properties */
  public String toJsonString() {
    return toJsonString(true);
  }

  /** @return pretty-printed JSON, optionally including properties */
  public String toJsonString(boolean includeProperties) {
    StringWriter stringWriter = new StringWriter();
    try (JsonWriter jsonWriter = new JsonWriter(stringWriter)) {
      jsonWriter.setIndent("\t");
      this.toJson(jsonWriter, includeProperties);
    } catch (IOException ioe) {
      // Ignored — writing to an in-memory StringWriter is not expected to throw; on the off
      // chance it does, callers get whatever partial JSON was written.
    }
    return stringWriter.toString();
  }
  /**
   * Convert this {@link JobState} instance to a {@link JobExecutionInfo} instance.
   *
   * @return a {@link JobExecutionInfo} instance
   */
  public JobExecutionInfo toJobExecutionInfo() {
    JobExecutionInfo jobExecutionInfo = new JobExecutionInfo();
    jobExecutionInfo.setJobName(this.jobName);
    jobExecutionInfo.setJobId(this.jobId);
    // Start/end time are optional in the rest model; only set when actually recorded.
    if (this.startTime > 0) {
      jobExecutionInfo.setStartTime(this.startTime);
    }
    if (this.endTime > 0) {
      jobExecutionInfo.setEndTime(this.endTime);
    }
    jobExecutionInfo.setDuration(this.duration);
    jobExecutionInfo.setState(JobStateEnum.valueOf(this.state.name()));
    jobExecutionInfo.setLaunchedTasks(this.taskCount);
    jobExecutionInfo.setCompletedTasks(this.getCompletedTasks());
    jobExecutionInfo.setLauncherType(getLauncherType());
    if (getTrackingURL().isPresent()) {
      jobExecutionInfo.setTrackingUrl(getTrackingURL().get());
    }
    // Add task execution information
    TaskExecutionInfoArray taskExecutionInfos = new TaskExecutionInfoArray();
    for (TaskState taskState : this.getTaskStates()) {
      taskExecutionInfos.add(taskState.toTaskExecutionInfo());
    }
    jobExecutionInfo.setTaskExecutions(taskExecutionInfos);
    // Add job metrics: counters, meters and gauges from this job's metric context.
    JobMetrics jobMetrics = JobMetrics.get(this);
    MetricArray metricArray = new MetricArray();
    for (Map.Entry<String, ? extends com.codahale.metrics.Metric> entry : jobMetrics.getMetricContext().getCounters()
        .entrySet()) {
      Metric counter = new Metric();
      counter.setGroup(MetricGroup.JOB.name());
      counter.setName(entry.getKey());
      counter.setType(MetricTypeEnum.valueOf(GobblinMetrics.MetricType.COUNTER.name()));
      counter.setValue(Long.toString(((Counter) entry.getValue()).getCount()));
      metricArray.add(counter);
    }
    for (Map.Entry<String, ? extends com.codahale.metrics.Metric> entry : jobMetrics.getMetricContext().getMeters()
        .entrySet()) {
      Metric meter = new Metric();
      meter.setGroup(MetricGroup.JOB.name());
      meter.setName(entry.getKey());
      meter.setType(MetricTypeEnum.valueOf(GobblinMetrics.MetricType.METER.name()));
      // Meters are reported by their mean rate.
      meter.setValue(Double.toString(((Meter) entry.getValue()).getMeanRate()));
      metricArray.add(meter);
    }
    for (Map.Entry<String, ? extends com.codahale.metrics.Metric> entry : jobMetrics.getMetricContext().getGauges()
        .entrySet()) {
      Metric gauge = new Metric();
      gauge.setGroup(MetricGroup.JOB.name());
      gauge.setName(entry.getKey());
      gauge.setType(MetricTypeEnum.valueOf(GobblinMetrics.MetricType.GAUGE.name()));
      gauge.setValue(((Gauge<?>) entry.getValue()).getValue().toString());
      metricArray.add(gauge);
    }
    jobExecutionInfo.setMetrics(metricArray);
    // Add job properties (empty/null values are dropped).
    Map<String, String> jobProperties = Maps.newHashMap();
    for (String name : this.getPropertyNames()) {
      String value = this.getProp(name);
      if (!Strings.isNullOrEmpty(value)) {
        jobProperties.put(name, value);
      }
    }
    jobExecutionInfo.setJobProperties(new StringMap(jobProperties));
    return jobExecutionInfo;
  }

  /**
   * Create a new {@link JobState.DatasetState} based on this {@link JobState} instance.
   *
   * @param fullCopy whether to also copy the running state, task count and task states
   *                 (a shallow copy of name/id/timing is always made)
   * @return a new {@link JobState.DatasetState} object
   */
  public DatasetState newDatasetState(boolean fullCopy) {
    DatasetState datasetState = new DatasetState(this.jobName, this.jobId);
    datasetState.setStartTime(this.startTime);
    datasetState.setEndTime(this.endTime);
    datasetState.setDuration(this.duration);
    if (fullCopy) {
      datasetState.setState(this.state);
      datasetState.setTaskCount(this.taskCount);
      datasetState.addTaskStates(this.taskStates.values());
      datasetState.addSkippedTaskStates(this.skippedTaskStates.values());
    }
    return datasetState;
  }

  /** Flattens the task states of all given dataset states into one list of {@link WorkUnitState}s. */
  public static List<WorkUnitState> workUnitStatesFromDatasetStates(Iterable<JobState.DatasetState> datasetStates) {
    ImmutableList.Builder<WorkUnitState> taskStateBuilder = ImmutableList.builder();
    for (JobState datasetState : datasetStates) {
      taskStateBuilder.addAll(datasetState.getTaskStatesAsWorkUnitStates());
    }
    return taskStateBuilder.build();
  }
  /**
   * A subclass of {@link JobState} that is used to represent dataset states.
   *
   * <p>
   * A {@code DatasetState} does <em>not</em> contain any properties. Operations such as {@link #getProp(String)}
   * and {@link #setProp(String, Object)} are not supported — the public property accessors are overridden to
   * throw {@link UnsupportedOperationException}; the class's own methods go through the {@code super} versions.
   * </p>
   */
  public static class DatasetState extends JobState {

    // For serialization/deserialization
    public DatasetState() {
      super();
    }

    public DatasetState(String jobName, String jobId) {
      super(jobName, jobId);
    }

    /** Sets the dataset URN this state belongs to. */
    public void setDatasetUrn(String datasetUrn) {
      super.setProp(ConfigurationKeys.DATASET_URN_KEY, datasetUrn);
    }

    /** Returns the dataset URN, or the default URN when none was set. */
    public String getDatasetUrn() {
      return super.getProp(ConfigurationKeys.DATASET_URN_KEY, ConfigurationKeys.DEFAULT_DATASET_URN);
    }

    /** Increments the recorded number of consecutive job failures for this dataset (starting from 0). */
    public void incrementJobFailures() {
      super.setProp(ConfigurationKeys.JOB_FAILURES_KEY,
          Integer.parseInt(super.getProp(ConfigurationKeys.JOB_FAILURES_KEY, "0")) + 1);
    }

    /** Resets the recorded job-failure count to 0. */
    public void setNoJobFailure() {
      super.setProp(ConfigurationKeys.JOB_FAILURES_KEY, 0);
    }

    // NOTE(review): no default supplied here — if JOB_FAILURES_KEY was never set this throws a
    // NumberFormatException (getProp returns null); callers apparently set the key first. Confirm.
    public int getJobFailures() {
      return Integer.parseInt(super.getProp(ConfigurationKeys.JOB_FAILURES_KEY));
    }

    // Only the dataset URN and failure count are emitted as "properties" for a dataset state.
    @Override
    protected void propsToJson(JsonWriter jsonWriter)
        throws IOException {
      jsonWriter.beginObject();
      jsonWriter.name(ConfigurationKeys.DATASET_URN_KEY).value(getDatasetUrn());
      jsonWriter.name(ConfigurationKeys.JOB_FAILURES_KEY).value(getJobFailures());
      jsonWriter.endObject();
    }

    @Override
    public String getProp(String key) {
      throw new UnsupportedOperationException();
    }

    @Override
    public String getProp(String key, String def) {
      throw new UnsupportedOperationException();
    }

    @Override
    public void setProp(String key, Object value) {
      throw new UnsupportedOperationException();
    }

    @Override
    public void addAll(Properties properties) {
      throw new UnsupportedOperationException();
    }

    @Override
    public void addAllIfNotExist(Properties properties) {
      throw new UnsupportedOperationException();
    }

    @Override
    public void overrideWith(Properties properties) {
      throw new UnsupportedOperationException();
    }

    // Extends the base summary with the dataset URN.
    @Override
    protected void writeStateSummary(JsonWriter jsonWriter)
        throws IOException {
      super.writeStateSummary(jsonWriter);
      jsonWriter.name("datasetUrn").value(getDatasetUrn());
    }
  }
}
| 1,385 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/NoopDatasetStateStore.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigValueFactory;
import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.metastore.DatasetStateStore;
import java.io.IOException;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.fs.FileSystem;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
/**
* An extension of {@link FsDatasetStateStore} where all operations are noop. Used to disable the state store.
*/
public class NoopDatasetStateStore extends FsDatasetStateStore {

  /** Factory registered under the "noop" alias; builds the store with a dummy root dir. */
  @Alias("noop")
  public static class Factory implements DatasetStateStore.Factory {
    @Override
    public DatasetStateStore<JobState.DatasetState> createStateStore(Config config) {
      // dummy root dir for noop state store
      Config config2 = config.withValue(ConfigurationKeys.STATE_STORE_ROOT_DIR_KEY, ConfigValueFactory.fromAnyRef(""));
      return FsDatasetStateStore.createStateStore(config2, NoopDatasetStateStore.class.getName());
    }
  }

  public NoopDatasetStateStore(FileSystem fs, String storeRootDir, Integer threadPoolSize) {
    super(fs, storeRootDir, threadPoolSize);
  }

  public NoopDatasetStateStore(FileSystem fs, String storeRootDir) {
    super(fs, storeRootDir);
  }

  // All read operations return empty results.
  @Override
  public List<JobState.DatasetState> getAll(String storeName, String tableName) throws IOException {
    return Lists.newArrayList();
  }

  @Override
  public List<JobState.DatasetState> getAll(String storeName) throws IOException {
    return Lists.newArrayList();
  }

  @Override
  public Map<String, JobState.DatasetState> getLatestDatasetStatesByUrns(String jobName) throws IOException {
    return Maps.newHashMap();
  }

  // All write operations are no-ops.
  @Override
  public void persistDatasetState(String datasetUrn, JobState.DatasetState datasetState) throws IOException {}

  // create() pretends to succeed so callers proceed normally.
  @Override
  public boolean create(String storeName) throws IOException {
    return true;
  }

  @Override
  public boolean create(String storeName, String tableName) throws IOException {
    return true;
  }

  // Nothing is ever stored, so nothing ever exists.
  @Override
  public boolean exists(String storeName, String tableName) throws IOException {
    return false;
  }

  @Override
  public void put(String storeName, String tableName, JobState.DatasetState state) throws IOException {}

  @Override
  public void putAll(String storeName, String tableName, Collection<JobState.DatasetState> states) throws IOException {}

  @Override
  public void createAlias(String storeName, String original, String alias) throws IOException {}

  @Override
  public void delete(String storeName, String tableName) throws IOException {}

  @Override
  public void delete(String storeName) throws IOException {}
}
| 1,386 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/TaskConfigurationKeys.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime;
/**
* A class that contains configuration keys for a {@link Task}
*/
public class TaskConfigurationKeys {

  /**
   * Configuration properties related to continuous / streaming mode
   */
  // Execution mode of a task; known value used as default below is "BATCH".
  public static final String TASK_EXECUTION_MODE = "task.executionMode";
  public static final String DEFAULT_TASK_EXECUTION_MODE = "BATCH";
  /** How often (in milliseconds) streaming watermarks are committed. */
  public static final String STREAMING_WATERMARK_COMMIT_INTERVAL_MILLIS = "streaming.watermark.commitIntervalMillis";
  public static final Long DEFAULT_STREAMING_WATERMARK_COMMIT_INTERVAL_MILLIS = 2000L; // 2 seconds per commit

  /**
   * Configuration properties related to optimizations for single branch tasks
   */
  // Whether a single-branch task runs its record pipeline synchronously; disabled by default.
  public static final String TASK_IS_SINGLE_BRANCH_SYNCHRONOUS = "gobblin.task.is.single.branch.synchronous";
  public static final String DEFAULT_TASK_IS_SINGLE_BRANCH_SYNCHRONOUS = Boolean.toString(false);

  /** Number of error records a task may skip before failing; 0 (skip none) by default. */
  public static final String TASK_SKIP_ERROR_RECORDS = "task.skip.error.records";
  public static final long DEFAULT_TASK_SKIP_ERROR_RECORDS = 0;
}
| 1,387 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/ForkBranchMismatchException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime;
/**
* A type of {@link java.lang.Exception}s thrown when the number of schemas or
* data records returned by a {@link org.apache.gobblin.fork.ForkOperator}
* is not equal to the number of declared branches.
*
* @author Yinan Li
*/
public class ForkBranchMismatchException extends Exception {

  private static final long serialVersionUID = 1L;

  /**
   * @param message description of the branch-count mismatch
   */
  public ForkBranchMismatchException(String message) {
    super(message);
  }

  /**
   * New overload that preserves the underlying cause, so callers wrapping another failure
   * don't lose its stack trace. Backward-compatible addition.
   *
   * @param message description of the branch-count mismatch
   * @param cause the underlying failure, retrievable via {@link #getCause()}
   */
  public ForkBranchMismatchException(String message, Throwable cause) {
    super(message, cause);
  }
}
| 1,388 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/commit/FsCommitSequenceStore.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.commit;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Collection;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import com.google.common.base.Optional;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableList;
import com.google.gson.Gson;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.commit.CommitSequence;
import org.apache.gobblin.commit.CommitSequenceStore;
import org.apache.gobblin.commit.CommitStep;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.util.HadoopUtils;
import org.apache.gobblin.util.filters.HiddenFilter;
import org.apache.gobblin.util.io.GsonInterfaceAdapter;
/**
 * A {@link CommitSequenceStore} implementation that persists commit sequences on a {@link FileSystem}.
 *
 * <p>
 * Each {@link CommitSequence}, together with all of its {@link CommitStep}s, is serialized to a
 * JSON string via {@link Gson}; therefore every {@link CommitStep} used with this store must be
 * round-trippable through {@link Gson}. Sequences are stored one file per dataset under
 * {@code <rootPath>/<jobName>/<sanitizedDatasetUrn>}.
 * </p>
 *
 * @author Ziyang Liu
 */
@Alpha
public class FsCommitSequenceStore implements CommitSequenceStore {

  public static final String GOBBLIN_RUNTIME_COMMIT_SEQUENCE_STORE_FS_URI =
      "gobblin.runtime.commit.sequence.store.fs.uri";
  public static final String GOBBLIN_RUNTIME_COMMIT_SEQUENCE_STORE_DIR = "gobblin.runtime.commit.sequence.store.dir";

  // Stand-in file name used when the caller supplies a null/empty dataset URN.
  private static final String DEFAULT_DATASET_URN = "default_dataset_urn";

  // Gson instance capable of (de)serializing arbitrary CommitStep implementations.
  private static final Gson GSON = GsonInterfaceAdapter.getGson(CommitStep.class);

  private final FileSystem fs;
  private final Path rootPath;

  public FsCommitSequenceStore(FileSystem fs, Path rootPath) {
    this.fs = fs;
    this.rootPath = rootPath;
  }

  @Override
  public boolean exists(String jobName) throws IOException {
    return this.fs.exists(new Path(this.rootPath, jobName));
  }

  @Override
  public boolean exists(String jobName, String datasetUrn) throws IOException {
    return this.fs.exists(datasetPathFor(jobName, datasetUrn));
  }

  @Override
  public void delete(String jobName) throws IOException {
    HadoopUtils.deletePathAndEmptyAncestors(this.fs, new Path(this.rootPath, jobName), true);
  }

  @Override
  public void delete(String jobName, String datasetUrn) throws IOException {
    HadoopUtils.deletePathAndEmptyAncestors(this.fs, datasetPathFor(jobName, datasetUrn), true);
  }

  @Override
  public void put(String jobName, String datasetUrn, CommitSequence commitSequence) throws IOException {
    datasetUrn = sanitizeDatasetUrn(datasetUrn);
    if (exists(jobName, datasetUrn)) {
      throw new IOException(String.format("CommitSequence already exists for job %s, dataset %s", jobName, datasetUrn));
    }

    Path jobPath = new Path(this.rootPath, jobName);
    this.fs.mkdirs(jobPath);

    // Serialize the full sequence (including steps) as JSON into the dataset file.
    try (DataOutputStream dos = this.fs.create(new Path(jobPath, datasetUrn))) {
      dos.writeBytes(GSON.toJson(commitSequence));
    }
  }

  @Override
  public Collection<String> get(String jobName) throws IOException {
    Path jobPath = new Path(this.rootPath, jobName);
    if (!this.fs.exists(jobPath)) {
      return ImmutableList.of();
    }
    ImmutableList.Builder<String> names = ImmutableList.builder();
    for (FileStatus status : this.fs.listStatus(jobPath, new HiddenFilter())) {
      names.add(status.getPath().getName());
    }
    return names.build();
  }

  @Override
  public Optional<CommitSequence> get(String jobName, String datasetUrn) throws IOException {
    if (!exists(jobName, datasetUrn)) {
      return Optional.absent();
    }
    try (InputStream is = this.fs.open(datasetPathFor(jobName, datasetUrn))) {
      String json = IOUtils.toString(is, ConfigurationKeys.DEFAULT_CHARSET_ENCODING);
      return Optional.of(GSON.fromJson(json, CommitSequence.class));
    }
  }

  /** Builds the path of the file holding the sequence for the given job and (sanitized) dataset URN. */
  private Path datasetPathFor(String jobName, String datasetUrn) {
    return new Path(new Path(this.rootPath, jobName), sanitizeDatasetUrn(datasetUrn));
  }

  /**
   * Replace a null or empty dataset URN with {@link #DEFAULT_DATASET_URN}, and replaces illegal HDFS
   * characters with '_'.
   */
  private static String sanitizeDatasetUrn(String datasetUrn) {
    return Strings.isNullOrEmpty(datasetUrn) ? DEFAULT_DATASET_URN : HadoopUtils.sanitizePath(datasetUrn, "_");
  }
}
| 1,389 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/commit/DatasetStateCommitStep.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.commit;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.metastore.DatasetStateStore;
import org.apache.gobblin.util.ClassAliasResolver;
import java.io.IOException;
import com.google.common.base.Preconditions;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.commit.CommitSequence;
import org.apache.gobblin.commit.CommitStep;
import org.apache.gobblin.commit.CommitStepBase;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.runtime.JobState.DatasetState;
import lombok.extern.slf4j.Slf4j;
/**
 * An implementation of {@link CommitStep} for persisting dataset states.
 *
 * @author Ziyang Liu
 */
@Alpha
@Slf4j
public class DatasetStateCommitStep extends CommitStepBase {

  // URN of the dataset whose state this step persists.
  private final String datasetUrn;
  // The dataset state to persist.
  private final DatasetState datasetState;
  // Lazily created in getDatasetStateStore(); transient so the store itself is not
  // serialized along with the step (the step is meant to be (de)serializable).
  private transient DatasetStateStore stateStore;

  private DatasetStateCommitStep(Builder<? extends Builder<?>> builder) {
    super(builder);
    this.datasetUrn = builder.datasetUrn;
    this.datasetState = builder.datasetState;
  }

  /**
   * Builder for {@link DatasetStateCommitStep}. Both a dataset URN and a dataset state
   * must be supplied before calling {@link #build()}.
   */
  public static class Builder<T extends Builder<?>> extends CommitStepBase.Builder<T> {
    private String datasetUrn;
    private DatasetState datasetState;

    public Builder() {
      super();
    }

    public Builder(CommitSequence.Builder commitSequenceBuilder) {
      super(commitSequenceBuilder);
    }

    /** Sets the URN of the dataset whose state will be persisted. Returns this builder for chaining. */
    @SuppressWarnings("unchecked")
    public T withDatasetUrn(String datasetUrn) {
      this.datasetUrn = datasetUrn;
      return (T) this;
    }

    /** Sets the dataset state to persist. Returns this builder for chaining. */
    @SuppressWarnings("unchecked")
    public T withDatasetState(DatasetState datasetState) {
      this.datasetState = datasetState;
      return (T) this;
    }

    @Override
    public CommitStep build() {
      // Fail fast if either required field was not set.
      Preconditions.checkNotNull(this.datasetUrn);
      Preconditions.checkNotNull(this.datasetState);
      return new DatasetStateCommitStep(this);
    }
  }

  /**
   * Returns true when the latest persisted state for this job/dataset already equals the
   * state held by this step, i.e. executing this step would be a no-op.
   */
  @Override
  public boolean isCompleted() throws IOException {
    Preconditions.checkNotNull(this.datasetState);
    return this.datasetState
        .equals(getDatasetStateStore().getLatestDatasetState(this.datasetState.getJobName(), this.datasetUrn));
  }

  /** Persists this step's dataset state into the resolved {@link DatasetStateStore}. */
  @Override
  public void execute() throws IOException {
    log.info("Persisting dataset state for dataset " + this.datasetUrn);
    getDatasetStateStore().persistDatasetState(this.datasetUrn, this.datasetState);
  }

  /**
   * Lazily resolves and caches the {@link DatasetStateStore}. The store type is read from
   * this step's properties (dataset-level key first, falling back to the generic state-store
   * key and its default), resolved through the class-alias resolver, and instantiated via
   * its {@link DatasetStateStore.Factory}.
   *
   * @throws IOException if the store cannot be instantiated (checked failures are wrapped)
   */
  private DatasetStateStore getDatasetStateStore() throws IOException {
    if (this.stateStore == null) {
      ClassAliasResolver<DatasetStateStore.Factory> resolver =
          new ClassAliasResolver<>(DatasetStateStore.Factory.class);
      String stateStoreType = this.props.getProp(ConfigurationKeys.DATASET_STATE_STORE_TYPE_KEY,
          this.props.getProp(ConfigurationKeys.STATE_STORE_TYPE_KEY, ConfigurationKeys.DEFAULT_STATE_STORE_TYPE));
      try {
        DatasetStateStore.Factory stateStoreFactory =
            resolver.resolveClass(stateStoreType).newInstance();
        this.stateStore = stateStoreFactory.createStateStore(ConfigFactory.parseProperties(props.getProperties()));
      } catch (RuntimeException e) {
        // Runtime failures (e.g. an unknown alias) propagate unchanged.
        throw e;
      } catch (Exception e) {
        throw new IOException(e);
      }
    }
    return this.stateStore;
  }
}
| 1,390 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/metrics/ServiceGobblinJobMetricReporter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.metrics;
import com.codahale.metrics.MetricRegistry;
import com.google.common.base.Optional;
import java.util.concurrent.TimeUnit;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.metrics.ContextAwareGauge;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.ServiceMetricNames;
import org.apache.gobblin.metrics.event.JobEvent;
import org.apache.gobblin.metrics.event.TimingEvent;
import org.apache.gobblin.runtime.JobState;
/**
 * A metrics reporter to report job-level metrics to Gobblin-as-a-Service.
 * Metrics should have the name FLOW_GROUP.FLOW_NAME.EDGE_ID.METRIC_NAME.
 * If edge ID does not exist due to a different flowgraph being used, the jobName is used as default.
 */
public class ServiceGobblinJobMetricReporter implements GobblinJobMetricReporter {

  static String FLOW_EDGE_ID_KEY = "flow.edge.id";

  private Optional<MetricContext> metricContext;

  public ServiceGobblinJobMetricReporter(Optional<MetricContext> metricContext) {
    this.metricContext = metricContext;
  }

  /** Reports work unit creation time (in whole seconds) as a service-prefixed gauge. */
  public void reportWorkUnitCreationTimerMetrics(TimingEvent workUnitsCreationTimer, JobState jobState) {
    if (reportingDisabled(jobState)) {
      return;
    }
    String gaugeName = serviceGaugeName(jobState, TimingEvent.LauncherTimings.WORK_UNITS_CREATION);
    long creationTimeSeconds = workUnitsCreationTimer.getDuration() / TimeUnit.SECONDS.toMillis(1);
    registerGauge(gaugeName, (int) creationTimeSeconds);
  }

  /** Reports the number of created work units as a service-prefixed gauge. */
  public void reportWorkUnitCountMetrics(int workUnitCount, JobState jobState) {
    if (reportingDisabled(jobState)) {
      return;
    }
    registerGauge(serviceGaugeName(jobState, JobEvent.WORK_UNITS_CREATED), workUnitCount);
  }

  /** True when no metric context exists or the job opted out of job-level metrics. */
  private boolean reportingDisabled(JobState jobState) {
    return !this.metricContext.isPresent()
        || !jobState.getPropAsBoolean(ConfigurationKeys.GOBBLIN_OUTPUT_JOB_LEVEL_METRICS, true);
  }

  /** Builds GOBBLIN_SERVICE_PREFIX.FLOW_GROUP.FLOW_NAME.EDGE_ID(or jobName).metricName. */
  private String serviceGaugeName(JobState jobState, String metricName) {
    return MetricRegistry.name(ServiceMetricNames.GOBBLIN_SERVICE_PREFIX,
        jobState.getProp(ConfigurationKeys.FLOW_GROUP_KEY),
        jobState.getProp(ConfigurationKeys.FLOW_NAME_KEY),
        jobState.getProp(FLOW_EDGE_ID_KEY, jobState.getJobName()),
        metricName);
  }

  /** Creates and registers a constant-valued integer gauge on the metric context. */
  private void registerGauge(String gaugeName, int value) {
    MetricContext context = this.metricContext.get();
    ContextAwareGauge<Integer> gauge = context.newContextAwareGauge(gaugeName, () -> value);
    context.register(gaugeName, gauge);
  }
}
| 1,391 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/metrics/GobblinJobMetricReporter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.metrics;
import org.apache.gobblin.metrics.event.TimingEvent;
import org.apache.gobblin.runtime.JobState;
/**
 * Reports job-level metrics for a Gobblin job run, covering work unit creation.
 * Implementations decide where and under what naming scheme the metrics are emitted.
 */
public interface GobblinJobMetricReporter {

  /** Reports the elapsed time of work unit creation, taken from the given timing event, for the job. */
  void reportWorkUnitCreationTimerMetrics(TimingEvent workUnitsCreationTimer, JobState jobState);

  /** Reports the number of work units created for the job. */
  void reportWorkUnitCountMetrics(int workUnitCount, JobState jobState);
}
| 1,392 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/metrics/RuntimeMetrics.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.metrics;
import org.apache.gobblin.metrics.ServiceMetricNames;
/**
 * Constants used for naming {@link org.apache.gobblin.metrics.Metric}s and metric metadata in gobblin-runtime.
 *
 * <p>This is a constants-only holder: it is never meant to be instantiated, so the
 * constructor is private (see "utility class" best practice).</p>
 */
public class RuntimeMetrics {

  // Metric names
  public static final String GOBBLIN_KAFKA_HIGH_LEVEL_CONSUMER_MESSAGES_READ =
      "gobblin.kafka.highLevelConsumer.messagesRead";
  public static final String GOBBLIN_KAFKA_HIGH_LEVEL_CONSUMER_QUEUE_SIZE_PREFIX = "gobblin.kafka.highLevelConsumer.queueSize";
  public static final String GOBBLIN_JOB_MONITOR_KAFKA_TOTAL_SPECS = "gobblin.jobMonitor.kafka.totalSpecs";
  public static final String GOBBLIN_JOB_MONITOR_KAFKA_NEW_SPECS = "gobblin.jobMonitor.kafka.newSpecs";
  public static final String GOBBLIN_JOB_MONITOR_KAFKA_UPDATED_SPECS = "gobblin.jobMonitor.kafka.updatedSpecs";
  public static final String GOBBLIN_JOB_MONITOR_KAFKA_REMOVED_SPECS = "gobblin.jobMonitor.kafka.removedSpecs";
  public static final String GOBBLIN_JOB_MONITOR_KAFKA_CANCELLED_SPECS = "gobblin.jobMonitor.kafka.cancelledSpecs";
  public static final String GOBBLIN_JOB_MONITOR_SLAEVENT_REJECTEDEVENTS = "gobblin.jobMonitor.slaevent.rejectedevents";
  public static final String GOBBLIN_JOB_MONITOR_KAFKA_MESSAGE_PARSE_FAILURES =
      "gobblin.jobMonitor.kafka.messageParseFailures";

  public static final String SPEC_STORE_MONITOR_PREFIX = ServiceMetricNames.GOBBLIN_SERVICE_PREFIX_WITH_DELIMITER + "specStoreMonitor.";
  public static final String GOBBLIN_SPEC_STORE_MONITOR_SUCCESSFULLY_ADDED_SPECS = SPEC_STORE_MONITOR_PREFIX + "successful.added.specs";
  public static final String GOBBLIN_SPEC_STORE_MONITOR_FAILED_ADDED_SPECS = SPEC_STORE_MONITOR_PREFIX + "failed.added.specs";
  public static final String GOBBLIN_SPEC_STORE_MONITOR_DELETED_SPECS = SPEC_STORE_MONITOR_PREFIX + "deleted.specs";
  public static final String GOBBLIN_SPEC_STORE_MONITOR_UNEXPECTED_ERRORS = SPEC_STORE_MONITOR_PREFIX + "unexpected.errors";
  public static final String GOBBLIN_SPEC_STORE_MESSAGE_PROCESSED = SPEC_STORE_MONITOR_PREFIX + "message.processed";
  public static final String GOBBLIN_SPEC_STORE_DUPLICATE_MESSAGES = SPEC_STORE_MONITOR_PREFIX + "duplicateMessages";
  public static final String GOBBLIN_SPEC_STORE_HEARTBEAT_MESSAGES = SPEC_STORE_MONITOR_PREFIX + "heartbeatMessages";
  public static final String GOBBLIN_SPEC_STORE_PRODUCE_TO_CONSUME_DELAY_MILLIS = SPEC_STORE_MONITOR_PREFIX + "produce.to.consume.delay";

  public static final String DAG_ACTION_STORE_MONITOR_PREFIX = ServiceMetricNames.GOBBLIN_SERVICE_PREFIX_WITH_DELIMITER + "dagActionStoreMonitor.";
  public static final String GOBBLIN_DAG_ACTION_STORE_MONITOR_KILLS_INVOKED = DAG_ACTION_STORE_MONITOR_PREFIX + "kills.invoked";
  public static final String GOBBLIN_DAG_ACTION_STORE_MONITOR_MESSAGE_PROCESSED = DAG_ACTION_STORE_MONITOR_PREFIX + "message.processed";
  public static final String GOBBLIN_DAG_ACTION_STORE_MONITOR_DUPLICATE_MESSAGES = DAG_ACTION_STORE_MONITOR_PREFIX + "duplicateMessages";
  public static final String GOBBLIN_DAG_ACTION_STORE_MONITOR_HEARTBEAT_MESSAGES = DAG_ACTION_STORE_MONITOR_PREFIX + "heartbeatMessages";
  public static final String GOBBLIN_DAG_ACTION_STORE_MONITOR_NULL_DAG_ACTION_TYPE_MESSAGES = DAG_ACTION_STORE_MONITOR_PREFIX + "nullDagActionTypeMessages";
  public static final String GOBBLIN_DAG_ACTION_STORE_MONITOR_RESUMES_INVOKED = DAG_ACTION_STORE_MONITOR_PREFIX + "resumes.invoked";
  public static final String GOBBLIN_DAG_ACTION_STORE_MONITOR_FLOWS_LAUNCHED = DAG_ACTION_STORE_MONITOR_PREFIX + "flows.launched";
  public static final String GOBBLIN_DAG_ACTION_STORE_FAILED_FLOW_LAUNCHED_SUBMISSIONS = DAG_ACTION_STORE_MONITOR_PREFIX + "failedFlowLaunchSubmissions";
  public static final String GOBBLIN_DAG_ACTION_STORE_MONITOR_UNEXPECTED_ERRORS = DAG_ACTION_STORE_MONITOR_PREFIX + "unexpected.errors";
  public static final String
      GOBBLIN_DAG_ACTION_STORE_PRODUCE_TO_CONSUME_DELAY_MILLIS = DAG_ACTION_STORE_MONITOR_PREFIX + "produce.to.consume.delay";

  public static final String GOBBLIN_MYSQL_QUOTA_MANAGER_UNEXPECTED_ERRORS = ServiceMetricNames.GOBBLIN_SERVICE_PREFIX_WITH_DELIMITER + "gobblin.mysql.quota.manager.unexpected.errors";
  public static final String GOBBLIN_MYSQL_QUOTA_MANAGER_QUOTA_REQUESTS_EXCEEDED = ServiceMetricNames.GOBBLIN_SERVICE_PREFIX_WITH_DELIMITER + "gobblin.mysql.quota.manager.quotaRequests.exceeded";
  public static final String GOBBLIN_MYSQL_QUOTA_MANAGER_TIME_TO_CHECK_QUOTA = ServiceMetricNames.GOBBLIN_SERVICE_PREFIX_WITH_DELIMITER + "gobblin.mysql.quota.manager.time.to.check.quota";

  // The following metrics are used to identify the bottlenecks for initializing the job scheduler
  public static final String
      GOBBLIN_JOB_SCHEDULER_GET_SPECS_DURING_STARTUP_PER_SPEC_RATE_NANOS = ServiceMetricNames.GOBBLIN_SERVICE_PREFIX_WITH_DELIMITER + "jobScheduler.getSpecsDuringStartupPerSpecRateNanos";
  public static final String GOBBLIN_JOB_SCHEDULER_LOAD_SPECS_BATCH_SIZE = ServiceMetricNames.GOBBLIN_SERVICE_PREFIX_WITH_DELIMITER + "jobScheduler.loadSpecBatchSize";
  public static final String
      GOBBLIN_JOB_SCHEDULER_TIME_TO_INITIALIZE_SCHEDULER_NANOS = ServiceMetricNames.GOBBLIN_SERVICE_PREFIX_WITH_DELIMITER + "jobScheduler.timeToInitializeSchedulerNanos";
  public static final String
      GOBBLIN_JOB_SCHEDULER_TIME_TO_OBTAIN_SPEC_URIS_NANOS = ServiceMetricNames.GOBBLIN_SERVICE_PREFIX_WITH_DELIMITER + "jobScheduler.timeToObtainSpecUrisNanos";
  public static final String
      GOBBLIN_JOB_SCHEDULER_INDIVIDUAL_GET_SPEC_SPEED_NANOS = ServiceMetricNames.GOBBLIN_SERVICE_PREFIX_WITH_DELIMITER + "jobScheduler.individualGetSpecSpeedNanos";
  public static final String
      GOBBLIN_JOB_SCHEDULER_EACH_COMPLETE_ADD_SPEC_NANOS = ServiceMetricNames.GOBBLIN_SERVICE_PREFIX_WITH_DELIMITER + "jobScheduler.eachCompleteAddSpecNanos";
  public static final String
      GOBBLIN_JOB_SCHEDULER_EACH_SPEC_COMPILATION_NANOS = ServiceMetricNames.GOBBLIN_SERVICE_PREFIX_WITH_DELIMITER + "jobScheduler.eachSpecCompilationNanos";
  public static final String GOBBLIN_JOB_SCHEDULER_EACH_SCHEDULE_JOB_NANOS = ServiceMetricNames.GOBBLIN_SERVICE_PREFIX_WITH_DELIMITER + "jobScheduler.eachScheduleJobNanos";
  public static final String GOBBLIN_JOB_SCHEDULER_TOTAL_GET_SPEC_TIME_NANOS = ServiceMetricNames.GOBBLIN_SERVICE_PREFIX_WITH_DELIMITER + "jobScheduler.totalGetSpecTimeNanos";
  public static final String GOBBLIN_JOB_SCHEDULER_TOTAL_ADD_SPEC_TIME_NANOS = ServiceMetricNames.GOBBLIN_SERVICE_PREFIX_WITH_DELIMITER + "jobScheduler.totalAddSpecTimeNanos";
  public static final String GOBBLIN_JOB_SCHEDULER_NUM_JOBS_SCHEDULED_DURING_STARTUP = ServiceMetricNames.GOBBLIN_SERVICE_PREFIX_WITH_DELIMITER + "jobScheduler.numJobsScheduledDuringStartup";

  // Metadata keys
  public static final String TOPIC = "topic";
  public static final String GROUP_ID = "groupId";
  public static final String SCHEMA = "schema";

  /** Constants-only holder; not instantiable. */
  private RuntimeMetrics() {
  }
}
| 1,393 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/metrics/DefaultGobblinJobMetricReporter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.metrics;
import com.codahale.metrics.MetricRegistry;
import com.google.common.base.Optional;
import java.util.concurrent.TimeUnit;
import org.apache.gobblin.metrics.ContextAwareGauge;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.ServiceMetricNames;
import org.apache.gobblin.metrics.event.TimingEvent;
import org.apache.gobblin.runtime.JobState;
/**
 * A metrics reporter that reports only the work unit creation timer — which is the current
 * default behavior for all Gobblin jobs not emitted by GaaS.
 * Metrics are emitted with the JobMetrics prefix.
 */
public class DefaultGobblinJobMetricReporter implements GobblinJobMetricReporter {

  private Optional<MetricContext> metricContext;

  public DefaultGobblinJobMetricReporter(Optional<MetricContext> metricContext) {
    this.metricContext = metricContext;
  }

  /** Registers a gauge with the work unit creation time (whole seconds), if a metric context exists. */
  public void reportWorkUnitCreationTimerMetrics(TimingEvent workUnitsCreationTimer, JobState jobState) {
    if (!this.metricContext.isPresent()) {
      return;
    }
    MetricContext context = this.metricContext.get();
    String gaugeName = MetricRegistry.name(ServiceMetricNames.GOBBLIN_JOB_METRICS_PREFIX,
        TimingEvent.LauncherTimings.WORK_UNITS_CREATION, jobState.getJobName());
    long creationTimeSeconds = workUnitsCreationTimer.getDuration() / TimeUnit.SECONDS.toMillis(1);
    ContextAwareGauge<Integer> gauge = context.newContextAwareGauge(gaugeName, () -> (int) creationTimeSeconds);
    context.register(gaugeName, gauge);
  }

  /** Intentionally a no-op: the default reporter does not emit work unit counts. */
  public void reportWorkUnitCountMetrics(int workUnitCount, JobState jobState) {
  }
}
| 1,394 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/locks/JobLockFactoryManager.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.locks;
import org.slf4j.Logger;
import com.google.common.base.Optional;
import com.typesafe.config.Config;
/**
 * A factory class for {@link JobLockFactory} instances. It allows factories to be configured
 * using Gobblin instance configuration.
 *
 * <p>Implementations of this interface must define the default constructor</p>
 *
 * @param <T> the type of {@link JobLock} produced by the managed factories
 * @param <F> the type of {@link JobLockFactory} this manager provides
 */
public interface JobLockFactoryManager<T extends JobLock, F extends JobLockFactory<T>> {

  /** Provides an instance of a job lock factory with the specified config. If an instance with
   * the same configuration (implementation-specific) already exists, the old instance may be
   * returned to avoid race condition. This behavior is implementation-specific.
   *
   * @param sysCfg the Gobblin instance (system) configuration
   * @param log optional logger for the factory; implementations choose a default when absent
   */
  F getJobLockFactory(Config sysCfg, Optional<Logger> log);
}
| 1,395 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/locks/JobLockException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.locks;
/**
 * The exception thrown when a {@link JobLock} cannot be initialized, acquired,
 * released, etc.
 *
 * @author joelbaranick
 */
public class JobLockException extends Exception {

  // Explicit serialVersionUID, consistent with the other serializable exception
  // types in this codebase, so serialized form stays stable across compilers.
  private static final long serialVersionUID = 1L;

  /**
   * Creates an exception wrapping the underlying cause of the lock failure.
   *
   * @param cause the underlying cause
   */
  public JobLockException(Throwable cause) {
    super(cause);
  }

  /**
   * Creates an exception with a detail message and an underlying cause.
   *
   * @param message detail message describing the lock failure
   * @param cause the underlying cause
   */
  public JobLockException(String message, Throwable cause) {
    super(message, cause);
  }

  /**
   * Creates an exception with only a detail message.
   *
   * @param message detail message describing the lock failure
   */
  public JobLockException(String message) {
    super(message);
  }
}
| 1,396 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/locks/FileBasedJobLockFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.locks;
import java.io.IOException;
import java.net.URI;
import java.util.Properties;
import java.util.Random;
import java.util.concurrent.TimeoutException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableMap;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.runtime.api.JobSpec;
import org.apache.gobblin.util.HadoopUtils;
/**
* A factory for file-based job locks. All locks are presented as files under a common directory.
* If the directory does not exist, it will be automatically created and removed on close().
*/
public class FileBasedJobLockFactory implements JobLockFactory<FileBasedJobLock> {
  /** The URI of the file system with the directory for lock files*/
  public static final String FS_URI_CONFIG = "fsURI";
  /** The path to the directory for lock files*/
  public static final String LOCK_DIR_CONFIG = "lockDir";
  /** Prefix for auto-generated lock directory names when none is configured. */
  static final String DEFAULT_LOCK_DIR_PREFIX = "/tmp/gobblin-job-locks-";
  /**
   * Default waiting period (5 minutes).
   * TODO add configuration support
   */
  static final long DEFAULT_WAIT_MS = 300000;

  // File system on which lock files are created and checked.
  private final FileSystem fs;
  // Directory under which one lock file per job is kept.
  private final Path lockFileDir;
  private final Logger log;
  // True when this factory created lockFileDir itself (it did not pre-exist);
  // the name suggests the directory is then removed on close — TODO confirm in close().
  private final boolean deleteLockDirOnClose;
  /**
   * Constructs a new factory that places lock files under {@code lockFileDir} on {@code fs}.
   * If the directory does not already exist it is created with the default permissions
   * (see {@code getDefaultDirPermissions()}), and the factory remembers that it owns it.
   *
   * @param fs the file system to create lock files on
   * @param lockFileDir directory under which lock files are placed
   * @param log optional logger; when absent, a logger named after this class and the lock dir is used
   * @throws IOException if the existence check or directory creation fails
   */
  public FileBasedJobLockFactory(FileSystem fs, String lockFileDir, Optional<Logger> log)
      throws IOException {
    this.fs = fs;
    this.lockFileDir = new Path(lockFileDir);
    // Default logger name embeds the lock dir so concurrent factories are distinguishable.
    this.log = log.or(LoggerFactory.getLogger(getClass().getName() + "-" + lockFileDir));
    // Only flag the directory for removal if this factory created it.
    this.deleteLockDirOnClose = !this.fs.exists(this.lockFileDir);
    if (deleteLockDirOnClose) {
      createLockDir(this.fs, this.lockFileDir);
    }
  }

  /** Convenience constructor that uses the default logger. */
  public FileBasedJobLockFactory(FileSystem fs, String lockFileDir) throws IOException {
    this(fs, lockFileDir, Optional.<Logger>absent());
  }
  /**
   * Create a new instance using the specified factory and hadoop configurations.
   *
   * <p>Reads {@link #FS_URI_CONFIG} (defaulting to the local file system) and
   * {@link #LOCK_DIR_CONFIG} (defaulting to a freshly generated, not-yet-existing
   * directory name under {@link #DEFAULT_LOCK_DIR_PREFIX}).</p>
   *
   * @param factoryConfig factory configuration; both keys are optional
   * @param hadoopConf hadoop configuration used to resolve the file system
   * @param log optional logger passed through to the factory
   * @throws IOException if the file system cannot be obtained or the lock dir cannot be created
   */
  public static FileBasedJobLockFactory create(Config factoryConfig,
                                               Configuration hadoopConf,
                                               Optional<Logger> log)
      throws IOException {
    FileSystem fs = factoryConfig.hasPath(FS_URI_CONFIG) ?
        FileSystem.get(URI.create(factoryConfig.getString(FS_URI_CONFIG)), hadoopConf) :
        getDefaultFileSystem(hadoopConf);
    String lockFilesDir = factoryConfig.hasPath(LOCK_DIR_CONFIG) ?
        factoryConfig.getString(LOCK_DIR_CONFIG) :
        getDefaultLockDir(fs, log);
    return new FileBasedJobLockFactory(fs, lockFilesDir, log);
  }

  /** Returns the local file system resolved from the given hadoop configuration. */
  public static FileSystem getDefaultFileSystem(Configuration hadoopConf) throws IOException {
    return FileSystem.getLocal(hadoopConf);
  }
public static String getDefaultLockDir(FileSystem fs, Optional<Logger> log) {
Random rng = new Random();
Path dirName;
try
{
do {
dirName = new Path(DEFAULT_LOCK_DIR_PREFIX + rng.nextLong());
} while (fs.exists(dirName));
} catch (IllegalArgumentException | IOException e) {
throw new RuntimeException("Unable to create job lock directory: " + e, e);
}
if (log.isPresent()) {
log.get().info("Created default job lock directory: " + dirName);
}
return dirName.toString();
}
  /** Creates the lock directory with the default permissions; fails fast if mkdirs reports failure. */
  protected static void createLockDir(FileSystem fs, Path dirName) throws IOException {
    if (!fs.mkdirs(dirName, getDefaultDirPermissions())) {
      throw new RuntimeException("Unable to create job lock directory: " + dirName);
    }
  }

  /** Default lock-directory permissions: owner rwx, group r-x, other none. */
  protected static FsPermission getDefaultDirPermissions() {
    return new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE, FsAction.NONE);
  }

  /** Returns the lock file path for the given job: the job name plus {@code FileBasedJobLock.LOCK_FILE_EXTENSION} under the lock dir. */
  Path getLockFile(String jobName) {
    return new Path(lockFileDir, jobName + FileBasedJobLock.LOCK_FILE_EXTENSION);
  }
/**
* Acquire the lock.
*
* @throws JobLockException thrown if the {@link JobLock} fails to be acquired
*/
void lock(Path lockFile) throws JobLockException {
log.debug("Creating lock: {}", lockFile);
try {
if (!this.fs.createNewFile(lockFile)) {
throw new JobLockException("Failed to create lock file " + lockFile.getName());
}
} catch (IOException e) {
throw new JobLockException(e);
}
}
/**
 * Release the lock by deleting the lock file. A no-op when the lock file does not exist.
 *
 * @param lockFile the lock file to remove
 * @throws JobLockException thrown if the {@link JobLock} fails to be released
 */
void unlock(Path lockFile) throws JobLockException {
  log.debug("Removing lock: {}", lockFile);
  if (isLocked(lockFile)) {
    try {
      this.fs.delete(lockFile, false);
    } catch (IOException e) {
      throw new JobLockException(e);
    }
  }
}
/**
 * Try locking the lock without failing when it is already held.
 *
 * @param lockFile the lock file to create
 * @return <em>true</em> if the lock is successfully locked,
 *         <em>false</em> if otherwise.
 * @throws JobLockException thrown if the underlying file-system call fails
 */
boolean tryLock(Path lockFile) throws JobLockException {
  log.debug("Attempting lock: {}", lockFile);
  try {
    final boolean acquired = this.fs.createNewFile(lockFile);
    return acquired;
  } catch (IOException e) {
    throw new JobLockException(e);
  }
}
/**
 * Check if the lock is locked, i.e. whether the lock file currently exists.
 *
 * @param lockFile the lock file to check
 * @return if the lock is locked
 * @throws JobLockException thrown if checking the status of the {@link JobLock} fails
 */
boolean isLocked(Path lockFile) throws JobLockException {
  final boolean exists;
  try {
    exists = this.fs.exists(lockFile);
  } catch (IOException e) {
    throw new JobLockException(e);
  }
  return exists;
}
/**
 * Builds the factory {@link Config} from legacy job {@link Properties}.
 *
 * <p>Bug fix: {@code properties.getProperty(FileBasedJobLock.JOB_LOCK_DIR)} returns
 * {@code null} when the property is unset, and {@code ImmutableMap.Builder#put} rejects
 * {@code null} values with a {@link NullPointerException}. The lock-directory entry is now
 * added only when the property is present, so the factory can fall back to its default
 * lock directory instead of crashing.</p>
 *
 * @param properties the job properties to translate
 * @return a {@link Config} containing the FS URI and, when configured, the lock directory
 */
public static Config getConfigForProperties(Properties properties) {
  ImmutableMap.Builder<String, Object> configMap = ImmutableMap.<String, Object>builder()
      .put(FS_URI_CONFIG, properties.getProperty(ConfigurationKeys.FS_URI_KEY, ConfigurationKeys.LOCAL_FS_URI));
  String lockDir = properties.getProperty(FileBasedJobLock.JOB_LOCK_DIR);
  if (lockDir != null) {
    configMap.put(LOCK_DIR_CONFIG, lockDir);
  }
  return ConfigFactory.parseMap(configMap.build());
}
/**
 * Creates a {@link FileBasedJobLockFactory} directly from legacy job {@link Properties}.
 *
 * @param properties the job properties; {@code ConfigurationKeys.FS_URI_KEY} selects the
 *        file system (defaulting to the local FS URI) and
 *        {@code FileBasedJobLock.JOB_LOCK_DIR} selects the lock directory
 * @return the new factory
 * @throws JobLockException if the file system cannot be instantiated
 */
public static FileBasedJobLockFactory createForProperties(Properties properties)
    throws JobLockException {
  String fsUri = properties.getProperty(ConfigurationKeys.FS_URI_KEY, ConfigurationKeys.LOCAL_FS_URI);
  String lockFileDir = properties.getProperty(FileBasedJobLock.JOB_LOCK_DIR);
  try {
    FileSystem fs = FileSystem.get(URI.create(fsUri), HadoopUtils.getConfFromProperties(properties));
    return new FileBasedJobLockFactory(fs, lockFileDir);
  } catch (IOException e) {
    throw new JobLockException(e);
  }
}
/** Creates a {@link FileBasedJobLock} for the given job spec, keyed by its sanitized URI. */
@Override
public FileBasedJobLock getJobLock(JobSpec jobSpec) throws TimeoutException {
  return new FileBasedJobLock(getJobName(jobSpec), this);
}
/** Derives a file-name-safe job name from the job spec URI. */
@VisibleForTesting
static String getJobName(JobSpec jobSpec) {
  String uri = jobSpec.getUri().toString();
  // '/', '.' and ':' are unsafe in lock file names, so collapse each to '_'.
  return uri.replaceAll("[/.:]", "_");
}
/** Returns the {@link FileSystem} backing this factory; exposed for tests. */
@VisibleForTesting
FileSystem getFs() {
return fs;
}
/** Returns the directory holding the lock files; exposed for tests. */
@VisibleForTesting
Path getLockFileDir() {
return lockFileDir;
}
/**
 * Closes the factory. When the lock directory was auto-created (see
 * {@code deleteLockDirOnClose}), it is recursively deleted; a failed delete is only logged.
 */
@Override
public void close() throws IOException {
  if (!this.deleteLockDirOnClose) {
    return;
  }
  this.log.info("Delete auto-created lock directory: {}", getLockFileDir());
  boolean deleted = this.fs.delete(getLockFileDir(), true);
  if (!deleted) {
    this.log.warn("Failed to delete lock directory: {}", getLockFileDir());
  }
}
}
| 1,397 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/locks/ZookeeperBasedJobLock.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.locks;
import java.io.IOException;
import java.nio.file.Paths;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.TimeUnit;
import com.google.common.annotations.VisibleForTesting;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.framework.recipes.locks.InterProcessLock;
import org.apache.curator.framework.recipes.locks.InterProcessSemaphoreMutex;
import org.apache.curator.framework.state.ConnectionState;
import org.apache.curator.framework.state.ConnectionStateListener;
import org.apache.curator.retry.ExponentialBackoffRetry;
import com.google.common.collect.Maps;
import org.apache.gobblin.configuration.ConfigurationKeys;
import lombok.extern.slf4j.Slf4j;
/**
 * An implementation of {@link JobLock} that uses Zookeeper.
 *
 * <p>Locking is delegated to the Curator {@link InterProcessSemaphoreMutex} recipe. A single
 * {@link CuratorFramework} client is lazily created on first use, shared by every lock in this
 * JVM, and closed by a shutdown hook. Connection-state changes are fanned out to the registered
 * {@link JobLockEventListener}s so jobs learn when their lock may have been lost.</p>
 *
 * @author Joel Baranick
 */
@Slf4j
public class ZookeeperBasedJobLock implements ListenableJobLock {
  private static final String LOCKS_ROOT_PATH = "/locks";
  private static final String CONNECTION_STRING_DEFAULT = "localhost:2181";
  private static final int LOCKS_ACQUIRE_TIMEOUT_MILLISECONDS_DEFAULT = 5000;
  private static final int CONNECTION_TIMEOUT_SECONDS_DEFAULT = 30;
  private static final int SESSION_TIMEOUT_SECONDS_DEFAULT = 180;
  private static final int RETRY_BACKOFF_SECONDS_DEFAULT = 1;
  private static final int MAX_RETRY_COUNT_DEFAULT = 10;

  // Process-wide Curator client; initialized/closed under class-level synchronization.
  private static CuratorFramework curatorFramework;
  // Maps lock path -> listener so connection-state events can be delivered to all live locks.
  private static ConcurrentMap<String, JobLockEventListener> lockEventListeners = Maps.newConcurrentMap();
  private static Thread curatorFrameworkShutdownHook;

  public static final String LOCKS_ACQUIRE_TIMEOUT_MILLISECONDS = "gobblin.locks.zookeeper.acquire.timeout_milliseconds";
  public static final String CONNECTION_STRING = "gobblin.locks.zookeeper.connection_string";
  public static final String CONNECTION_TIMEOUT_SECONDS = "gobblin.locks.zookeeper.connection.timeout_seconds";
  public static final String SESSION_TIMEOUT_SECONDS = "gobblin.locks.zookeeper.session.timeout_seconds";
  public static final String RETRY_BACKOFF_SECONDS = "gobblin.locks.zookeeper.retry.backoff_seconds";
  public static final String MAX_RETRY_COUNT = "gobblin.locks.zookeeper.retry.max_count";

  private String lockPath;
  private long lockAcquireTimeoutMilliseconds;
  private InterProcessLock lock;

  public ZookeeperBasedJobLock(Properties properties) {
    this(properties, properties.getProperty(ConfigurationKeys.JOB_NAME_KEY));
  }

  public ZookeeperBasedJobLock(Properties properties, String jobName) {
    this.lockAcquireTimeoutMilliseconds =
        getLong(properties, LOCKS_ACQUIRE_TIMEOUT_MILLISECONDS, LOCKS_ACQUIRE_TIMEOUT_MILLISECONDS_DEFAULT);
    this.lockPath = Paths.get(LOCKS_ROOT_PATH, jobName).toString();
    initializeCuratorFramework(properties);
    lock = new InterProcessSemaphoreMutex(curatorFramework, lockPath);
  }

  /**
   * Sets the job lock listener.
   *
   * @param jobLockEventListener the listener for lock events
   */
  @Override
  public void setEventListener(JobLockEventListener jobLockEventListener) {
    lockEventListeners.putIfAbsent(this.lockPath, jobLockEventListener);
  }

  /**
   * Acquire the lock, blocking until it is available.
   *
   * @throws JobLockException thrown if the lock fails to be acquired
   */
  @Override
  public void lock() throws JobLockException {
    try {
      this.lock.acquire();
    } catch (Exception e) {
      throw new JobLockException("Failed to acquire lock " + this.lockPath, e);
    }
  }

  /**
   * Release the lock. A no-op when this process does not hold the lock.
   *
   * @throws JobLockException thrown if the lock fails to be released
   */
  @Override
  public void unlock() throws JobLockException {
    if (this.lock.isAcquiredInThisProcess()) {
      try {
        this.lock.release();
      } catch (Exception e) {
        throw new JobLockException("Failed to release lock " + this.lockPath, e);
      }
    }
  }

  /**
   * Try locking the lock, waiting at most the configured acquire timeout.
   *
   * @return <em>true</em> if the lock is successfully locked,
   *         <em>false</em> if otherwise.
   * @throws JobLockException thrown if the lock fails to be acquired
   */
  @Override
  public boolean tryLock() throws JobLockException {
    try {
      return this.lock.acquire(lockAcquireTimeoutMilliseconds, TimeUnit.MILLISECONDS);
    } catch (Exception e) {
      throw new JobLockException("Failed to acquire lock " + this.lockPath, e);
    }
  }

  /**
   * Check if the lock is held by this process.
   *
   * @return if the lock is locked
   * @throws JobLockException thrown if checking the status of the lock fails
   */
  @Override
  public boolean isLocked() throws JobLockException {
    return this.lock.isAcquiredInThisProcess();
  }

  /**
   * Closes this stream and releases any system resources associated
   * with it. If the stream is already closed then invoking this
   * method has no effect.
   *
   * @throws IOException if an I/O error occurs
   */
  @Override
  public void close() throws IOException {
    try {
      this.unlock();
    } catch (JobLockException e) {
      throw new IOException(e);
    } finally {
      // Stop delivering connection-state events for this lock path.
      lockEventListeners.remove(this.lockPath);
    }
  }

  /**
   * Lazily creates the shared Curator client, registers a connection-state listener, starts it,
   * and blocks until connected (or times out). Also installs a JVM shutdown hook that closes
   * the client.
   */
  private synchronized static void initializeCuratorFramework(Properties properties) {
    if (curatorFrameworkShutdownHook == null) {
      curatorFrameworkShutdownHook = new CuratorFrameworkShutdownHook();
      Runtime.getRuntime().addShutdownHook(curatorFrameworkShutdownHook);
    }
    if (curatorFramework == null) {
      CuratorFramework newCuratorFramework = CuratorFrameworkFactory.builder()
          .connectString(properties.getProperty(CONNECTION_STRING, CONNECTION_STRING_DEFAULT))
          .connectionTimeoutMs(
              getMilliseconds(properties, CONNECTION_TIMEOUT_SECONDS, CONNECTION_TIMEOUT_SECONDS_DEFAULT))
          .sessionTimeoutMs(
              getMilliseconds(properties, SESSION_TIMEOUT_SECONDS, SESSION_TIMEOUT_SECONDS_DEFAULT))
          .retryPolicy(new ExponentialBackoffRetry(
              getMilliseconds(properties, RETRY_BACKOFF_SECONDS, RETRY_BACKOFF_SECONDS_DEFAULT),
              getInt(properties, MAX_RETRY_COUNT, MAX_RETRY_COUNT_DEFAULT)))
          .build();
      newCuratorFramework.getConnectionStateListenable().addListener(new ConnectionStateListener() {
        @Override
        public void stateChanged(CuratorFramework curatorFramework, ConnectionState connectionState) {
          switch (connectionState) {
            case LOST:
              log.warn("Lost connection with zookeeper");
              for (Map.Entry<String, JobLockEventListener> lockEventListener : lockEventListeners.entrySet()) {
                log.warn("Informing job {} that lock was lost", lockEventListener.getKey());
                lockEventListener.getValue().onLost();
              }
              break;
            case SUSPENDED:
              // NOTE(review): SUSPENDED is treated identically to LOST (listeners get onLost()).
              // This looks like a deliberately conservative choice — confirm before changing.
              log.warn("Suspended connection with zookeeper");
              for (Map.Entry<String, JobLockEventListener> lockEventListener : lockEventListeners.entrySet()) {
                log.warn("Informing job {} that lock was suspended", lockEventListener.getKey());
                lockEventListener.getValue().onLost();
              }
              break;
            case CONNECTED:
              log.info("Connected with zookeeper");
              break;
            case RECONNECTED:
              log.warn("Regained connection with zookeeper");
              break;
            case READ_ONLY:
              log.warn("Zookeeper connection went into read-only mode");
              break;
          }
        }
      });
      newCuratorFramework.start();
      try {
        if (!newCuratorFramework.blockUntilConnected(
            getInt(properties, CONNECTION_TIMEOUT_SECONDS, CONNECTION_TIMEOUT_SECONDS_DEFAULT),
            TimeUnit.SECONDS)) {
          throw new RuntimeException("Time out while waiting to connect to zookeeper");
        }
      } catch (InterruptedException e) {
        // Fix: restore the thread's interrupt status and preserve the cause instead of
        // silently swallowing both (previously the exception was dropped entirely).
        Thread.currentThread().interrupt();
        throw new RuntimeException("Interrupted while waiting to connect to zookeeper", e);
      }
      curatorFramework = newCuratorFramework;
    }
  }

  /** Closes and clears the shared Curator client; safe to call when already shut down. */
  @VisibleForTesting
  static synchronized void shutdownCuratorFramework() {
    if (curatorFramework != null) {
      curatorFramework.close();
      curatorFramework = null;
    }
  }

  /** Reads an int property, falling back to the given default when absent. */
  private static int getInt(Properties properties, String key, int defaultValue) {
    return Integer.parseInt(properties.getProperty(key, Integer.toString(defaultValue)));
  }

  /** Reads a long property, falling back to the given default when absent. */
  private static long getLong(Properties properties, String key, long defaultValue) {
    return Long.parseLong(properties.getProperty(key, Long.toString(defaultValue)));
  }

  /** Reads a property expressed in seconds and converts it to milliseconds. */
  private static int getMilliseconds(Properties properties, String key, int defaultValue) {
    return getInt(properties, key, defaultValue) * 1000;
  }

  /** Shutdown hook that closes the shared Curator client when the JVM exits. */
  private static class CuratorFrameworkShutdownHook extends Thread {
    public void run() {
      log.info("Shutting down curator framework...");
      try {
        shutdownCuratorFramework();
        log.info("Curator framework shut down.");
      } catch (Exception e) {
        log.error("Error while shutting down curator framework.", e);
      }
    }
  }
}
| 1,398 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/locks/JobLock.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.locks;
import java.io.Closeable;
/**
 * A interface for claiming exclusive right to proceed for each scheduled
 * run of a job.
 *
 * <p>
 * By acquiring a {@link JobLock} before a scheduled run of a job
 * can proceed, it is guaranteed that no more than one instance of
 * a job is running at any time.
 * </p>
 *
 * <p>
 * Extends {@link Closeable}; implementations release any underlying resources
 * (files, Zookeeper connections, etc.) in {@code close()}.
 * </p>
 *
 * @author Yinan Li
 */
public interface JobLock extends Closeable {
/**
 * Acquire the lock, blocking until it is available.
 *
 * @throws JobLockException thrown if the {@link JobLock} fails to be acquired
 */
void lock()
throws JobLockException;
/**
 * Release the lock previously acquired by this instance.
 *
 * @throws JobLockException thrown if the {@link JobLock} fails to be released
 */
void unlock()
throws JobLockException;
/**
 * Try locking the lock without blocking indefinitely.
 *
 * @return <em>true</em> if the lock is successfully locked,
 * <em>false</em> if otherwise.
 * @throws JobLockException thrown if the {@link JobLock} fails to be acquired
 */
boolean tryLock()
throws JobLockException;
/**
 * Check if the lock is locked.
 *
 * @return if the lock is locked
 * @throws JobLockException thrown if checking the status of the {@link JobLock} fails
 */
boolean isLocked()
throws JobLockException;
}
| 1,399 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.