index
int64
0
0
repo_id
stringlengths
9
205
file_path
stringlengths
31
246
content
stringlengths
1
12.2M
__index_level_0__
int64
0
10k
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/ContainerMetrics.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.cluster; import java.util.List; import java.util.concurrent.Callable; import com.google.common.collect.ImmutableList; import org.apache.gobblin.annotation.Alpha; import org.apache.gobblin.configuration.State; import org.apache.gobblin.metrics.GobblinMetrics; import org.apache.gobblin.metrics.Tag; /** * Extension of {@link GobblinMetrics} specifically for {@link GobblinTaskRunner}s. */ @Alpha public class ContainerMetrics extends GobblinMetrics { protected ContainerMetrics(State containerState, String applicationName, String taskRunnerId) { super(name(taskRunnerId), null, tagsForContainer(containerState, applicationName, taskRunnerId)); } /** * Get a {@link ContainerMetrics} instance given the {@link State} of a container, the name of the application the * container belongs to, and the workerId of the container. 
* * @param containerState the {@link State} of the container * @param applicationName a {@link String} representing the name of the application the container belongs to * @return a {@link ContainerMetrics} instance */ public static ContainerMetrics get(final State containerState, final String applicationName, final String workerId) { return (ContainerMetrics) GOBBLIN_METRICS_REGISTRY.getOrCreate(name(workerId), new Callable<GobblinMetrics>() { @Override public GobblinMetrics call() throws Exception { return new ContainerMetrics(containerState, applicationName, workerId); } }); } private static String name(String workerId) { return "gobblin.metrics." + workerId; } private static List<Tag<?>> tagsForContainer(State containerState, String applicationName, String taskRunnerId) { ImmutableList.Builder<Tag<?>> tags = new ImmutableList.Builder<>(); tags.add(new Tag<>(GobblinClusterMetricTagNames.APPLICATION_NAME, applicationName)); tags.add(new Tag<>(GobblinClusterMetricTagNames.TASK_RUNNER_ID, taskRunnerId)); tags.addAll(getCustomTagsFromState(containerState)); return tags.build(); } }
2,200
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/TaskAttemptBuilder.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.cluster; import java.util.Iterator; import com.google.common.base.Optional; import org.apache.gobblin.broker.gobblin_scopes.GobblinScopeTypes; import org.apache.gobblin.broker.iface.SharedResourcesBroker; import org.apache.gobblin.metastore.StateStore; import org.apache.gobblin.runtime.GobblinMultiTaskAttempt; import org.apache.gobblin.runtime.JobState; import org.apache.gobblin.runtime.TaskExecutor; import org.apache.gobblin.runtime.TaskState; import org.apache.gobblin.runtime.TaskStateTracker; import org.apache.gobblin.source.workunit.WorkUnit; public class TaskAttemptBuilder { private final TaskStateTracker _taskStateTracker; private final TaskExecutor _taskExecutor; private String _containerId; private StateStore<TaskState> _taskStateStore; public TaskAttemptBuilder(TaskStateTracker taskStateTracker, TaskExecutor taskExecutor) { _taskStateTracker = taskStateTracker; _taskExecutor = taskExecutor; } public TaskAttemptBuilder setContainerId(String containerId) { _containerId = containerId; return this; } public TaskAttemptBuilder setTaskStateStore(StateStore<TaskState> taskStateStore) { _taskStateStore = taskStateStore; return this; } public GobblinMultiTaskAttempt 
build(Iterator<WorkUnit> workUnits, String jobId, JobState jobState, SharedResourcesBroker<GobblinScopeTypes> jobBroker) { GobblinMultiTaskAttempt attemptInstance = new GobblinMultiTaskAttempt(workUnits, jobId, jobState, _taskStateTracker, _taskExecutor, Optional.fromNullable(_containerId), Optional.fromNullable(_taskStateStore), jobBroker); return attemptInstance; } }
2,201
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/InMemoryWuFailedSingleTask.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.cluster; import java.io.IOException; import java.util.List; import org.apache.gobblin.configuration.ConfigurationKeys; import org.apache.gobblin.runtime.JobState; import org.apache.gobblin.runtime.util.StateStores; import org.apache.gobblin.source.workunit.WorkUnit; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import com.google.common.collect.Lists; import com.typesafe.config.Config; /** * Instead of deserializing {@link JobState} and {@link WorkUnit} from filesystem, create them in memory. * This derived class will be failing due to missing the declaration of writerBuilder class thereby failing a Precondition * check in AvroWriterBuilder which is used by default. */ public class InMemoryWuFailedSingleTask extends SingleTask { public InMemoryWuFailedSingleTask(String jobId, Path workUnitFilePath, Path jobStateFilePath, FileSystem fs, TaskAttemptBuilder taskAttemptBuilder, StateStores stateStores, Config dynamicConfig) { super(jobId, workUnitFilePath, jobStateFilePath, fs, taskAttemptBuilder, stateStores, dynamicConfig); } @Override protected List<WorkUnit> getWorkUnits() throws IOException { // Create WorkUnit in memory. 
WorkUnit workUnit = new WorkUnit(); workUnit.setProp(ConfigurationKeys.TASK_ID_KEY, "randomTask"); workUnit.setProp("source.class", "org.apache.gobblin.cluster.DummySource"); return Lists.newArrayList(workUnit); } @Override protected JobState getJobState() throws IOException { JobState jobState = new JobState("randomJobName", "randomJobId"); return jobState; } }
2,202
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/GobblinClusterMetricTagNames.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.cluster;

import org.apache.gobblin.annotation.Alpha;


/**
 * A central place for constants of {@link org.apache.gobblin.metrics.MetricContext} tag names
 * for a Gobblin cluster.
 * Some shared constants have been moved to
 * {@link org.apache.gobblin.metrics.event.TimingEvent.FlowEventConstants}.
 *
 * @author Yinan Li
 */
@Alpha
public class GobblinClusterMetricTagNames {

  /** Tag carrying the name of the Gobblin application. */
  public static final String APPLICATION_NAME = "application.name";

  /** Tag carrying the id of the Gobblin application. */
  public static final String APPLICATION_ID = "application.id";

  /** Tag carrying the Helix instance name of the container. */
  public static final String HELIX_INSTANCE_NAME = "helix.instance.name";

  /** Tag carrying the id of the task runner in the container. */
  public static final String TASK_RUNNER_ID = "task.runner.id";

  // Constants holder: prevent instantiation.
  private GobblinClusterMetricTagNames() {
  }
}
2,203
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/GobblinHelixMetrics.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.cluster; import java.util.concurrent.TimeUnit; import org.apache.gobblin.instrumented.Instrumented; import org.apache.gobblin.instrumented.StandardMetricsBridge; import org.apache.gobblin.metrics.ContextAwareMeter; import org.apache.gobblin.metrics.ContextAwareTimer; import org.apache.gobblin.metrics.MetricContext; public class GobblinHelixMetrics extends StandardMetricsBridge.StandardMetrics { public static final String TIMER_FOR_HELIX_WAIT = "timeForHelixWait"; public static final String TIMER_FOR_HELIX_SUBMIT = "timeForHelixSubmit"; public static final String METER_FOR_HELIX_SUBMIT = "meterForHelixSubmit"; final String metricsName; final ContextAwareTimer timeForHelixWait; final ContextAwareTimer timeForHelixSubmit; final ContextAwareMeter submitMeter; public GobblinHelixMetrics(String metricsName, final MetricContext metricContext, int windowSizeInMin) { this.metricsName = metricsName; this.timeForHelixWait = metricContext.contextAwareTimer(TIMER_FOR_HELIX_WAIT, windowSizeInMin, TimeUnit.MINUTES); this.timeForHelixSubmit = metricContext.contextAwareTimer(TIMER_FOR_HELIX_SUBMIT, windowSizeInMin, TimeUnit.MINUTES); this.submitMeter = 
metricContext.contextAwareMeter(METER_FOR_HELIX_SUBMIT); this.contextAwareMetrics.add(timeForHelixWait); this.contextAwareMetrics.add(timeForHelixSubmit); this.contextAwareMetrics.add(submitMeter); } public void updateTimeForHelixSubmit(long startTime) { Instrumented.updateTimer( com.google.common.base.Optional.of(this.timeForHelixSubmit), System.currentTimeMillis() - startTime, TimeUnit.MILLISECONDS); } public void updateTimeForHelixWait(long startTime) { Instrumented.updateTimer( com.google.common.base.Optional.of(this.timeForHelixWait), System.currentTimeMillis() - startTime, TimeUnit.MILLISECONDS); } @Override public String getName() { return this.metricsName; } }
2,204
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/GobblinHelixJob.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.cluster; import java.util.Properties; import java.util.concurrent.Future; import org.quartz.InterruptableJob; import org.quartz.Job; import org.quartz.JobDataMap; import org.quartz.JobExecutionContext; import org.quartz.JobExecutionException; import org.quartz.UnableToInterruptJobException; import lombok.extern.slf4j.Slf4j; import org.apache.gobblin.annotation.Alpha; import org.apache.gobblin.runtime.listeners.JobListener; import org.apache.gobblin.scheduler.BaseGobblinJob; import org.apache.gobblin.scheduler.JobScheduler; /** * An implementation of a Quartz's {@link Job} that uses a {@link GobblinHelixJobLauncher} * to launch a Gobblin job. 
* * @author Yinan Li */ @Alpha @Slf4j public class GobblinHelixJob extends BaseGobblinJob implements InterruptableJob { private Future<?> cancellable = null; @Override public void executeImpl(JobExecutionContext context) throws JobExecutionException { JobDataMap dataMap = context.getJobDetail().getJobDataMap(); final JobScheduler jobScheduler = (JobScheduler) dataMap.get(JobScheduler.JOB_SCHEDULER_KEY); // the properties may get mutated during job execution and the scheduler reuses it for the next round of scheduling, // so clone it final Properties jobProps = (Properties)((Properties) dataMap.get(JobScheduler.PROPERTIES_KEY)).clone(); final JobListener jobListener = (JobListener) dataMap.get(JobScheduler.JOB_LISTENER_KEY); try { if (Boolean.parseBoolean(jobProps.getProperty(GobblinClusterConfigurationKeys.JOB_EXECUTE_IN_SCHEDULING_THREAD, Boolean.toString(GobblinClusterConfigurationKeys.JOB_EXECUTE_IN_SCHEDULING_THREAD_DEFAULT)))) { jobScheduler.runJob(jobProps, jobListener); } else { cancellable = jobScheduler.scheduleJobImmediately(jobProps, jobListener); } } catch (Throwable t) { throw new JobExecutionException(t); } } @Override public void interrupt() throws UnableToInterruptJobException { if (cancellable != null) { try { if (cancellable.cancel(false)) { return; } } catch (Exception e) { log.error("Failed to gracefully cancel job. Attempting to force cancellation.", e); } try { cancellable.cancel(true); } catch (Exception e) { throw new UnableToInterruptJobException(e); } } } }
2,205
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/GobblinHelixDistributeJobExecutionLauncher.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.cluster; import java.io.Closeable; import java.io.IOException; import java.time.Duration; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Properties; import java.util.concurrent.Callable; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.FutureTask; import java.util.concurrent.TimeoutException; import org.apache.hadoop.fs.Path; import org.apache.helix.HelixException; import org.apache.helix.HelixManager; import org.apache.helix.task.JobConfig; import org.apache.helix.task.TaskConfig; import org.apache.helix.task.TaskDriver; import com.google.common.collect.Lists; import com.google.common.collect.Maps; import com.typesafe.config.Config; import javax.annotation.Nonnull; import javax.annotation.Nullable; import lombok.AllArgsConstructor; import lombok.Getter; import lombok.Setter; import lombok.extern.slf4j.Slf4j; import org.apache.gobblin.annotation.Alpha; import org.apache.gobblin.configuration.State; import org.apache.gobblin.metrics.MetricContext; import org.apache.gobblin.metrics.Tag; import org.apache.gobblin.runtime.api.ExecutionResult; import 
org.apache.gobblin.runtime.api.JobExecutionLauncher; import org.apache.gobblin.runtime.api.JobExecutionMonitor; import org.apache.gobblin.runtime.api.JobSpec; import org.apache.gobblin.runtime.api.MonitoredObject; import org.apache.gobblin.util.ConfigUtils; import org.apache.gobblin.util.PropertiesUtils; /** * To avoid all the task driver logic ({@link GobblinHelixJobLauncher}) runs on the same * instance (manager), this {@link JobExecutionLauncher} will distribute the original job * to one of the task driver instance. The original task driver logic will be launched there. * * <p> * For job submission, the Helix workflow name will be the original job name with prefix * {@link GobblinClusterConfigurationKeys#PLANNING_JOB_NAME_PREFIX}. The Helix job name * will be the auto-generated planning job ID with prefix * {@link GobblinClusterConfigurationKeys#PLANNING_ID_KEY}. * </p> * * <p> * We will associate this job to Helix's {@link org.apache.helix.task.TaskFactory} * by specifying {@link GobblinTaskRunner#GOBBLIN_JOB_FACTORY_NAME} in the {@link JobConfig.Builder}. * This job will only contain a single task, which is the same as planningID. 
 * </p>
 */
@Alpha
@Slf4j
class GobblinHelixDistributeJobExecutionLauncher implements JobExecutionLauncher, Closeable {

  protected HelixManager planningJobHelixManager;
  protected TaskDriver helixTaskDriver;
  protected Properties sysProps;
  protected Properties jobPlanningProps;
  protected HelixJobsMapping jobsMapping;
  protected GobblinHelixPlanningJobLauncherMetrics planningJobLauncherMetrics;
  protected GobblinHelixMetrics helixMetrics;

  // Prefix under which the original job properties are stored in the Helix TaskConfig raw map.
  protected static final String JOB_PROPS_PREFIX = "gobblin.jobProps.";

  private final long workFlowExpiryTimeSeconds;
  private final long helixJobStopTimeoutSeconds;
  private final long helixWorkflowSubmissionTimeoutSeconds;

  private boolean jobSubmitted;

  // A conditional variable for which the condition is satisfied if a cancellation is requested
  private final Object cancellationRequest = new Object();
  // A flag indicating whether a cancellation has been requested or not
  private volatile boolean cancellationRequested = false;
  // A flag indicating whether a cancellation has been executed or not
  private volatile boolean cancellationExecuted = false;
  // A flag indicating whether a planning job should wait for its completion
  private boolean nonBlockingMode = false;

  @Getter
  private DistributeJobMonitor jobMonitor;

  // Job planning props layered over sys props (job props win on conflicts).
  private final Config combinedConfigs;

  public GobblinHelixDistributeJobExecutionLauncher(Builder builder) {
    this.planningJobHelixManager = builder.planningJobHelixManager;
    this.helixTaskDriver = new TaskDriver(this.planningJobHelixManager);
    this.sysProps = builder.sysProps;
    this.jobPlanningProps = builder.jobPlanningProps;
    this.jobSubmitted = false;
    combinedConfigs = ConfigUtils.propertiesToConfig(jobPlanningProps)
        .withFallback(ConfigUtils.propertiesToConfig(sysProps));
    this.workFlowExpiryTimeSeconds = ConfigUtils.getLong(this.combinedConfigs,
        GobblinClusterConfigurationKeys.HELIX_WORKFLOW_EXPIRY_TIME_SECONDS,
        GobblinClusterConfigurationKeys.DEFAULT_HELIX_WORKFLOW_EXPIRY_TIME_SECONDS);
    this.planningJobLauncherMetrics = builder.planningJobLauncherMetrics;
    this.nonBlockingMode = ConfigUtils.getBoolean(this.combinedConfigs,
        GobblinClusterConfigurationKeys.NON_BLOCKING_PLANNING_JOB_ENABLED,
        GobblinClusterConfigurationKeys.DEFAULT_NON_BLOCKING_PLANNING_JOB_ENABLED);
    this.helixMetrics = builder.helixMetrics;
    this.jobsMapping = builder.jobsMapping;
    this.helixJobStopTimeoutSeconds = ConfigUtils.getLong(this.combinedConfigs,
        GobblinClusterConfigurationKeys.HELIX_JOB_STOP_TIMEOUT_SECONDS,
        GobblinClusterConfigurationKeys.DEFAULT_HELIX_JOB_STOP_TIMEOUT_SECONDS);
    this.helixWorkflowSubmissionTimeoutSeconds = ConfigUtils.getLong(this.combinedConfigs,
        GobblinClusterConfigurationKeys.HELIX_WORKFLOW_SUBMISSION_TIMEOUT_SECONDS,
        GobblinClusterConfigurationKeys.DEFAULT_HELIX_WORKFLOW_SUBMISSION_TIMEOUT_SECONDS);
  }

  @Override
  public void close() throws IOException {
  }

  /**
   * Cancels the submitted planning workflow in Helix. A no-op unless a job has been submitted
   * and a cancellation has been requested but not yet executed.
   */
  private void executeCancellation() {
    if (this.jobSubmitted) {
      String planningJobId = getPlanningJobId(this.jobPlanningProps);
      try {
        if (this.cancellationRequested && !this.cancellationExecuted) {
          boolean cancelByDelete = ConfigUtils.getBoolean(this.combinedConfigs,
              GobblinClusterConfigurationKeys.CANCEL_HELIX_JOB_BY_DELETE,
              GobblinClusterConfigurationKeys.DEFAULT_CANCEL_HELIX_JOB_BY_DELETE);
          HelixUtils.cancelWorkflow(planningJobId, this.planningJobHelixManager,
              helixJobStopTimeoutSeconds * 1000, cancelByDelete);
          log.info("Canceled the workflow {}", planningJobId);
        }
      } catch (HelixException e) {
        // Cancellation may throw an exception, but Helix set the job state to STOP/DELETE and it
        // should eventually be cleaned up.
        // We will keep this.cancellationExecuted and this.cancellationRequested to true and not
        // propagate the exception.
        log.error("Failed to cancel workflow {} in Helix", planningJobId, e);
      } catch (InterruptedException e) {
        // Restore the interrupt flag so callers up the stack can observe the interruption.
        log.error("Thread interrupted while trying to cancel the workflow {} in Helix", planningJobId);
        Thread.currentThread().interrupt();
      }
    }
  }

  /** Collects the launcher's collaborators; lombok-generated setters via {@code @Setter}. */
  @Setter
  public static class Builder {
    Properties sysProps;
    Properties jobPlanningProps;
    HelixManager planningJobHelixManager;
    Path appWorkDir;
    GobblinHelixPlanningJobLauncherMetrics planningJobLauncherMetrics;
    GobblinHelixMetrics helixMetrics;
    HelixJobsMapping jobsMapping;

    public GobblinHelixDistributeJobExecutionLauncher build() throws Exception {
      return new GobblinHelixDistributeJobExecutionLauncher(this);
    }
  }

  /**
   * Returns the planning id stored under {@link GobblinClusterConfigurationKeys#PLANNING_ID_KEY};
   * the id is expected to have been generated before this launcher is used.
   *
   * @throws RuntimeException if the property is absent
   */
  protected String getPlanningJobId (Properties jobPlanningProps) {
    if (jobPlanningProps.containsKey(GobblinClusterConfigurationKeys.PLANNING_ID_KEY)) {
      return jobPlanningProps.getProperty(GobblinClusterConfigurationKeys.PLANNING_ID_KEY);
    } else {
      throw new RuntimeException("Cannot find planning id");
    }
  }

  /**
   * Create a job config builder which has a single task that wraps the original jobProps.
   *
   * The planning job (which runs the original {@link GobblinHelixJobLauncher}) will be
   * executed on one of the Helix participants.
   *
   * We rely on the underlying {@link GobblinHelixJobLauncher} to correctly handle the task
   * execution timeout so that the planning job itself is relieved of the timeout constrain.
   *
   * In short, the planning job will run once and requires no timeout.
   */
  private JobConfig.Builder createJobBuilder (Properties jobProps) {
    // Create a single task for job planning
    String planningId = getPlanningJobId(jobProps);
    Map<String, TaskConfig> taskConfigMap = Maps.newHashMap();
    Map<String, String> rawConfigMap = Maps.newHashMap();
    // Stash every original job property under JOB_PROPS_PREFIX so the remote participant can
    // reconstruct the job config.
    for (String key : jobProps.stringPropertyNames()) {
      rawConfigMap.put(JOB_PROPS_PREFIX + key, (String)jobProps.get(key));
    }
    rawConfigMap.put(GobblinClusterConfigurationKeys.TASK_SUCCESS_OPTIONAL_KEY, "true");

    // Create a single Job which only contains a single task
    taskConfigMap.put(planningId, TaskConfig.Builder.from(rawConfigMap));
    JobConfig.Builder jobConfigBuilder = new JobConfig.Builder();

    // We want GobblinHelixJobLauncher only run once.
    jobConfigBuilder.setMaxAttemptsPerTask(1);

    // Planning job never timeout (Helix defaults 1h timeout, set a large number '1 month')
    jobConfigBuilder.setTimeoutPerTask(JobConfig.DEFAULT_TIMEOUT_PER_TASK * 24 * 30);

    // Planning job should have its own tag support
    if (jobProps.containsKey(GobblinClusterConfigurationKeys.HELIX_PLANNING_JOB_TAG_KEY)) {
      String jobPlanningTag = jobProps.getProperty(GobblinClusterConfigurationKeys.HELIX_PLANNING_JOB_TAG_KEY);
      log.info("PlanningJob {} has tags associated : {}", planningId, jobPlanningTag);
      jobConfigBuilder.setInstanceGroupTag(jobPlanningTag);
    }

    // Planning job should have its own type support
    if (jobProps.containsKey(GobblinClusterConfigurationKeys.HELIX_PLANNING_JOB_TYPE_KEY)) {
      String jobType = jobProps.getProperty(GobblinClusterConfigurationKeys.HELIX_PLANNING_JOB_TYPE_KEY);
      log.info("PlanningJob {} has types associated : {}", planningId, jobType);
      jobConfigBuilder.setJobType(jobType);
    }

    jobConfigBuilder.setNumConcurrentTasksPerInstance(PropertiesUtils.getPropAsInt(jobProps,
        GobblinClusterConfigurationKeys.HELIX_CLUSTER_TASK_CONCURRENCY,
        GobblinClusterConfigurationKeys.HELIX_CLUSTER_TASK_CONCURRENCY_DEFAULT));
    jobConfigBuilder.setFailureThreshold(1);
    jobConfigBuilder.addTaskConfigMap(taskConfigMap).setCommand(GobblinTaskRunner.GOBBLIN_JOB_FACTORY_NAME);
    return jobConfigBuilder;
  }

  /**
   * Submit a planning job to helix so that it can be launched from a remote node.
   *
   * @param jobName A planning job name which has prefix {@link GobblinClusterConfigurationKeys#PLANNING_JOB_NAME_PREFIX}.
   * @param jobId A planning job id created by {@link GobblinHelixDistributeJobExecutionLauncher#getPlanningJobId}.
   * @param jobConfigBuilder A job config builder which contains a single task.
   */
  private void submitJobToHelix(String jobName, String jobId, JobConfig.Builder jobConfigBuilder) throws Exception {
    TaskDriver taskDriver = new TaskDriver(this.planningJobHelixManager);
    HelixUtils.submitJobToWorkFlow(jobConfigBuilder, jobName, jobId, taskDriver,
        this.planningJobHelixManager,
        Duration.ofSeconds(this.workFlowExpiryTimeSeconds),
        Duration.ofSeconds(this.helixWorkflowSubmissionTimeoutSeconds));
    // Mark submitted only after a successful Helix submission so cancellation can act on it.
    this.jobSubmitted = true;
  }

  /**
   * Starts the distribute flow asynchronously and returns a monitor for it.
   * NOTE(review): the {@code jobSpec} argument is not consulted — the planning props captured at
   * construction time drive the launch.
   */
  @Override
  public DistributeJobMonitor launchJob(@Nullable JobSpec jobSpec) {
    this.jobMonitor = new DistributeJobMonitor(new DistributeJobCallable(this.jobPlanningProps));
    return this.jobMonitor;
  }

  /**
   * The unit of work run by {@link DistributeJobMonitor}: submits the planning job to Helix and,
   * unless non-blocking mode is on, waits for its completion.
   */
  @AllArgsConstructor
  private class DistributeJobCallable implements Callable<ExecutionResult> {
    Properties jobPlanningProps;

    @Override
    public DistributeJobResult call() throws Exception {
      String planningId = getPlanningJobId(this.jobPlanningProps);
      JobConfig.Builder builder = createJobBuilder(this.jobPlanningProps);
      try {
        long submitStartTime = System.currentTimeMillis();
        GobblinHelixDistributeJobExecutionLauncher.this.helixMetrics.submitMeter.mark();
        submitJobToHelix(planningId, planningId, builder);
        GobblinHelixDistributeJobExecutionLauncher.this.helixMetrics.updateTimeForHelixSubmit(submitStartTime);
        long waitStartTime = System.currentTimeMillis();

        // we should not wait if in non-blocking mode.
        DistributeJobResult rst = new DistributeJobResult();
        if (!GobblinHelixDistributeJobExecutionLauncher.this.nonBlockingMode) {
          rst = waitForJobCompletion(planningId, planningId);
          GobblinHelixDistributeJobExecutionLauncher.this.helixMetrics.updateTimeForHelixWait(waitStartTime);
        }
        return rst;
      } catch (Exception e) {
        // NOTE(review): the failure is logged (without the stack trace) and swallowed; callers
        // only see an empty result.
        log.error(planningId + " is not able to submit.");
        return new DistributeJobResult();
      }
    }
  }

  /**
   * Blocks until the Helix workflow finishes. Timeout behavior is driven by the planning props;
   * on timeout the job is handed to {@link HelixUtils#handleJobTimeout} and an empty result is
   * returned.
   */
  private DistributeJobResult waitForJobCompletion(String workFlowName, String jobName) throws InterruptedException {
    boolean timeoutEnabled = Boolean.parseBoolean(this.jobPlanningProps.getProperty(
        GobblinClusterConfigurationKeys.HELIX_JOB_TIMEOUT_ENABLED_KEY,
        GobblinClusterConfigurationKeys.DEFAULT_HELIX_JOB_TIMEOUT_ENABLED));
    long timeoutInSeconds = Long.parseLong(this.jobPlanningProps.getProperty(
        GobblinClusterConfigurationKeys.HELIX_JOB_TIMEOUT_SECONDS,
        GobblinClusterConfigurationKeys.DEFAULT_HELIX_JOB_TIMEOUT_SECONDS));
    long stoppingStateTimeoutInSeconds = PropertiesUtils
        .getPropAsLong(this.jobPlanningProps,
            GobblinClusterConfigurationKeys.HELIX_JOB_STOPPING_STATE_TIMEOUT_SECONDS,
            GobblinClusterConfigurationKeys.DEFAULT_HELIX_JOB_STOPPING_STATE_TIMEOUT_SECONDS);
    try {
      HelixUtils.waitJobCompletion(
          GobblinHelixDistributeJobExecutionLauncher.this.planningJobHelixManager,
          workFlowName,
          jobName,
          timeoutEnabled ? Optional.of(timeoutInSeconds) : Optional.empty(),
          stoppingStateTimeoutInSeconds);
      return getResultFromUserContent();
    } catch (TimeoutException te) {
      HelixUtils.handleJobTimeout(workFlowName, jobName,
          planningJobHelixManager, this, null);
      return new DistributeJobResult();
    }
  }

  // Hook for subclasses to extract a richer result; the base implementation returns an empty one.
  protected DistributeJobResult getResultFromUserContent() {
    return new DistributeJobResult();
  }

  /**
   * If {@link GobblinClusterConfigurationKeys#NON_BLOCKING_PLANNING_JOB_ENABLED} is enabled
   * this result object contains nothing; otherwise this result object can be used to contain
   * any values returned from other task-driver instances.
   */
  static class DistributeJobResult implements ExecutionResult {
  }

  /**
   * A {@link FutureTask} that immediately runs the supplied callable on its own single-thread
   * executor and exposes the run as a {@link JobExecutionMonitor}.
   */
  private class DistributeJobMonitor extends FutureTask<ExecutionResult> implements JobExecutionMonitor {
    private ExecutorService executor = Executors.newSingleThreadExecutor();

    DistributeJobMonitor (Callable<ExecutionResult> c) {
      super(c);
      this.executor.execute(this);
    }

    @Override
    public MonitoredObject getRunningState() {
      throw new UnsupportedOperationException();
    }

    /**
     * We override Future's cancel method, which means we do not send the interrupt to the underlying thread.
     * Instead of that, we submit a STOP request to handle, and the underlying thread is supposed to gracefully accept
     * the STOPPED state and exit in {@link #waitForJobCompletion} method.
     *
     * @param mayInterruptIfRunning this is ignored.
     * @return true always
     */
    @Override
    public boolean cancel(boolean mayInterruptIfRunning) {
      GobblinHelixDistributeJobExecutionLauncher.this.executeCancellation();
      return true;
    }
  }

  /**
   * This method calls the underlying {@link DistributeJobMonitor}'s cancel method.
   * It uses a conditional variable {@link GobblinHelixDistributeJobExecutionLauncher#cancellationRequest}
   * and a flag {@link GobblinHelixDistributeJobExecutionLauncher#cancellationRequested} to avoid double cancellation.
   */
  public void cancel() {
    DistributeJobMonitor jobMonitor = getJobMonitor();
    if (jobMonitor != null) {
      synchronized (GobblinHelixDistributeJobExecutionLauncher.this.cancellationRequest) {
        if (GobblinHelixDistributeJobExecutionLauncher.this.cancellationRequested) {
          // Return immediately if a cancellation has already been requested
          return;
        }
        GobblinHelixDistributeJobExecutionLauncher.this.cancellationRequested = true;
      }
      jobMonitor.cancel(true);
      GobblinHelixDistributeJobExecutionLauncher.this.cancellationExecuted = true;
    }
  }

  @Override
  public StandardMetrics getMetrics() {
    throw new UnsupportedOperationException();
  }

  @Nonnull
  @Override
  public MetricContext getMetricContext() {
    throw new UnsupportedOperationException();
  }

  @Override
  public boolean isInstrumentationEnabled() {
    return false;
  }

  @Override
  public List<Tag<?>> generateTags(State state) {
    return Lists.newArrayList();
  }

  @Override
  public void switchMetricContext(List<Tag<?>> tags) {
    throw new UnsupportedOperationException();
  }

  @Override
  public void switchMetricContext(MetricContext context) {
    throw new UnsupportedOperationException();
  }
}
2,206
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/GobblinHelixTaskMetrics.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.cluster; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; import org.apache.gobblin.configuration.ConfigurationKeys; import org.apache.gobblin.instrumented.Instrumented; import org.apache.gobblin.instrumented.StandardMetricsBridge; import org.apache.gobblin.metrics.ContextAwareTimer; import org.apache.gobblin.metrics.MetricContext; import org.apache.gobblin.runtime.TaskExecutor; public class GobblinHelixTaskMetrics extends StandardMetricsBridge.StandardMetrics { private TaskExecutor taskExecutor; private static String CURRENT_QUEUED_TASK_COUNT = "currentQueuedTaskCount"; private static String HISTORICAL_QUEUED_TASK_COUNT = "historicalQueuedTaskCount"; private static String QUEUED_TASK_COUNT = "queuedTaskCount"; private static String CURRENT_QUEUED_TASK_TOTAL_TIME = "currentQueuedTaskTotalTime"; private static String HISTORICAL_QUEUED_TASK_TOTAL_TIME = "historicalQueuedTaskTotalTime"; private static String QUEUED_TASK_TOTAL_TIME = "queuedTaskTotalTime"; private static String FAILED_TASK_COUNT = "failedTaskCount"; private static String SUCCESSFUL_TASK_COUNT = "successfulTaskCount"; private static String RUNNING_TASK_COUNT = 
"runningTaskCount"; private static String TIMER_FOR_TASK_EXEC = "timeForTaskExec"; private static String HELIX_TASK_TOTAL_COMPLETED = "helixTaskTotalCompleted"; private static String HELIX_TASK_TOTAL_FAILED = "helixTaskTotalFailed"; private static String HELIX_TASK_TOTAL_CANCELLED = "helixTaskTotalCancelled"; private static String HELIX_TASK_TOTAL_RUNNING = "helixTaskTotalRunning"; private final ContextAwareTimer timeForTaskExecution; AtomicLong helixTaskTotalCompleted; AtomicLong helixTaskTotalCancelled; AtomicLong helixTaskTotalFailed; AtomicLong helixTaskTotalRunning; public GobblinHelixTaskMetrics (TaskExecutor executor, MetricContext context, int windowSizeInMin) { this.taskExecutor = executor; this.helixTaskTotalCompleted = new AtomicLong(0); this.helixTaskTotalFailed = new AtomicLong(0); this.helixTaskTotalRunning = new AtomicLong(0); this.helixTaskTotalCancelled = new AtomicLong(0); this.timeForTaskExecution = context.contextAwareTimer(TIMER_FOR_TASK_EXEC, windowSizeInMin, TimeUnit.MINUTES); this.contextAwareMetrics.add(context.newContextAwareGauge(CURRENT_QUEUED_TASK_COUNT, ()->this.taskExecutor.getCurrentQueuedTaskCount().longValue())); this.contextAwareMetrics.add(context.newContextAwareGauge(CURRENT_QUEUED_TASK_TOTAL_TIME, ()->this.taskExecutor.getCurrentQueuedTaskTotalTime().longValue())); this.contextAwareMetrics.add(context.newContextAwareGauge(HISTORICAL_QUEUED_TASK_COUNT, ()->this.taskExecutor.getHistoricalQueuedTaskCount().longValue())); this.contextAwareMetrics.add(context.newContextAwareGauge(HISTORICAL_QUEUED_TASK_TOTAL_TIME, ()->this.taskExecutor.getHistoricalQueuedTaskTotalTime().longValue())); this.contextAwareMetrics.add(context.newContextAwareGauge(QUEUED_TASK_COUNT, ()->this.taskExecutor.getQueuedTaskCount().longValue())); this.contextAwareMetrics.add(context.newContextAwareGauge(QUEUED_TASK_TOTAL_TIME, ()->this.taskExecutor.getQueuedTaskTotalTime().longValue())); 
this.contextAwareMetrics.add(context.newContextAwareGauge(FAILED_TASK_COUNT, ()->this.taskExecutor.getFailedTaskCount().getCount())); this.contextAwareMetrics.add(context.newContextAwareGauge(SUCCESSFUL_TASK_COUNT, ()->this.taskExecutor.getSuccessfulTaskCount().getCount())); this.contextAwareMetrics.add(context.newContextAwareGauge(RUNNING_TASK_COUNT, ()->this.taskExecutor.getRunningTaskCount().getCount())); this.contextAwareMetrics.add(context.newContextAwareGauge(HELIX_TASK_TOTAL_COMPLETED, ()->this.helixTaskTotalCompleted.get())); this.contextAwareMetrics.add(context.newContextAwareGauge(HELIX_TASK_TOTAL_FAILED, ()->this.helixTaskTotalFailed.get())); this.contextAwareMetrics.add(context.newContextAwareGauge(HELIX_TASK_TOTAL_CANCELLED, ()->this.helixTaskTotalCancelled.get())); this.contextAwareMetrics.add(context.newContextAwareGauge(HELIX_TASK_TOTAL_RUNNING, ()->this.helixTaskTotalRunning.get())); this.contextAwareMetrics.add(this.timeForTaskExecution); this.rawMetrics.put(ConfigurationKeys.WORK_UNIT_CREATION_AND_RUN_INTERVAL, this.taskExecutor.getTaskCreateAndRunTimer()); } public void updateTimeForTaskExecution(long startTime) { Instrumented.updateTimer( com.google.common.base.Optional.of(this.timeForTaskExecution), System.currentTimeMillis() - startTime, TimeUnit.MILLISECONDS); } @Override public String getName() { return GobblinHelixTaskMetrics.class.getName(); } }
2,207
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/HelixTaskEventMetadataGenerator.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.cluster; import java.util.Map; import com.google.common.collect.ImmutableMap; import org.apache.gobblin.annotation.Alias; import org.apache.gobblin.configuration.State; import org.apache.gobblin.runtime.api.TaskEventMetadataGenerator; @Alias("helixtask") public class HelixTaskEventMetadataGenerator implements TaskEventMetadataGenerator { public static final String HELIX_INSTANCE_KEY = "helixInstance"; public static final String HOST_NAME_KEY = "containerNode"; public static final String HELIX_JOB_ID_KEY = "helixJobId"; public static final String HELIX_TASK_ID_KEY = "helixTaskId"; public static final String CONTAINER_ID_KEY = "containerId"; /** * Generate a map of additional metadata for the specified event name. For tasks running in Gobblin cluster * we add container info such as containerId, host name where the task is running to each event. 
* * @param taskState * @param eventName the event name used to determine which additional metadata should be emitted * @return {@link Map} with the additional metadata */ @Override public Map<String, String> getMetadata(State taskState, String eventName) { String helixInstanceName = taskState.getProp(GobblinClusterConfigurationKeys.HELIX_INSTANCE_NAME_KEY, ""); String helixJobId = taskState.getProp(GobblinClusterConfigurationKeys.HELIX_JOB_ID_KEY, ""); String helixTaskId = taskState.getProp(GobblinClusterConfigurationKeys.HELIX_TASK_ID_KEY, ""); String hostName = taskState.getProp(GobblinClusterConfigurationKeys.TASK_RUNNER_HOST_NAME_KEY, ""); String containerId = taskState.getProp(GobblinClusterConfigurationKeys.CONTAINER_ID_KEY, ""); return ImmutableMap.of(HELIX_INSTANCE_KEY, helixInstanceName, HOST_NAME_KEY, hostName, HELIX_JOB_ID_KEY, helixJobId, HELIX_TASK_ID_KEY, helixTaskId, CONTAINER_ID_KEY, containerId); } }
2,208
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/JobConfigurationManager.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.cluster; import java.io.File; import java.io.IOException; import java.util.List; import java.util.Properties; import java.util.regex.Matcher; import java.util.regex.Pattern; import java.util.regex.PatternSyntaxException; import org.apache.gobblin.cluster.event.CancelJobConfigArrivalEvent; import org.apache.gobblin.runtime.job_spec.JobSpecResolver; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Optional; import com.google.common.base.Strings; import com.google.common.eventbus.EventBus; import com.google.common.util.concurrent.AbstractIdleService; import com.typesafe.config.Config; import javax.annotation.Nullable; import org.apache.gobblin.annotation.Alpha; import org.apache.gobblin.cluster.event.DeleteJobConfigArrivalEvent; import org.apache.gobblin.cluster.event.NewJobConfigArrivalEvent; import org.apache.gobblin.cluster.event.UpdateJobConfigArrivalEvent; import org.apache.gobblin.configuration.ConfigurationKeys; import org.apache.gobblin.instrumented.StandardMetricsBridge; import org.apache.gobblin.util.ConfigUtils; import org.apache.gobblin.util.SchedulerUtils; /** * A class 
for managing Gobblin job configurations. * * <p> * Currently this class reads all-at-once at startup all the job configuration files found * in the directory uncompressed from the job configuration file package and have them all * scheduled by the {@link GobblinHelixJobScheduler} by posting a * {@link NewJobConfigArrivalEvent} for each job configuration file. * </p> * * <p> * In the future, we may add the ability to accept new job configuration files or updates * to existing configuration files at runtime to this class. * </p> * * @author Yinan Li */ @Alpha public class JobConfigurationManager extends AbstractIdleService implements StandardMetricsBridge { private static final Logger LOGGER = LoggerFactory.getLogger(JobConfigurationManager.class); private Optional<Pattern> jobsToRun; protected final EventBus eventBus; protected final Config config; protected Optional<String> jobConfDirPath; protected final JobSpecResolver jobSpecResolver; public JobConfigurationManager(EventBus eventBus, Config config) { this.eventBus = eventBus; this.config = config; this.jobConfDirPath = config.hasPath(GobblinClusterConfigurationKeys.JOB_CONF_PATH_KEY) ? Optional .of(config.getString(GobblinClusterConfigurationKeys.JOB_CONF_PATH_KEY)) : Optional.<String>absent(); String jobsToRunRegex = ConfigUtils.getString(config, GobblinClusterConfigurationKeys.JOBS_TO_RUN, ""); try { this.jobsToRun = !Strings.isNullOrEmpty(jobsToRunRegex) ? 
Optional.of(Pattern.compile(config.getString(GobblinClusterConfigurationKeys.JOBS_TO_RUN))) : Optional.absent(); } catch (PatternSyntaxException e) { LOGGER.error("Invalid regex pattern: {}, Exception: {}", jobsToRunRegex, e); this.jobsToRun = Optional.absent(); } try { this.jobSpecResolver = JobSpecResolver.builder(config).build(); } catch (IOException ioe) { throw new RuntimeException(ioe); } } @Override protected void startUp() throws Exception { if (this.jobConfDirPath.isPresent()) { File path = new File(this.jobConfDirPath.get()); File jobConfigDir = path; // Backward compatibility: Previous impl was forcing users to look for jobConf within ${user.dir} // .. so if jobConfigDir does not exists, try to resolve config path via legacy route for backward // .. compatibility if (!path.exists()) { String pwd = System.getProperty("user.dir"); jobConfigDir = new File(pwd, path.getName() + GobblinClusterConfigurationKeys.TAR_GZ_FILE_SUFFIX); } if (jobConfigDir.exists()) { LOGGER.info("Loading job configurations from " + jobConfigDir); Properties properties = ConfigUtils.configToProperties(this.config); properties.setProperty(ConfigurationKeys.JOB_CONFIG_FILE_GENERAL_PATH_KEY, "file://" + jobConfigDir.getAbsolutePath()); List<Properties> jobConfigs = SchedulerUtils.loadGenericJobConfigs(properties, this.jobSpecResolver); LOGGER.info("Loaded " + jobConfigs.size() + " job configuration(s)"); for (Properties config : jobConfigs) { if (!jobsToRun.isPresent() || shouldRun(jobsToRun.get(), config)) { postNewJobConfigArrival(config.getProperty(ConfigurationKeys.JOB_NAME_KEY), config); } else { LOGGER.warn("Job {} has been filtered and will not be run in the cluster.", config.getProperty(ConfigurationKeys.JOB_NAME_KEY)); } } } else { LOGGER.warn("Job configuration directory " + jobConfigDir + " not found"); } } } @VisibleForTesting /** * A helper method to determine if a given job should be submitted to cluster for execution based on the * regex defining the jobs to run. 
*/ protected static boolean shouldRun(Pattern jobsToRun, Properties jobConfig) { Matcher matcher = jobsToRun.matcher(jobConfig.getProperty(ConfigurationKeys.JOB_NAME_KEY)); return matcher.matches(); } @Override protected void shutDown() throws Exception { // Nothing to do } protected void postNewJobConfigArrival(String jobName, Properties jobConfig) { LOGGER.info(String.format("Posting new JobConfig with name: %s and config: %s", jobName, jobConfig)); this.eventBus.post(new NewJobConfigArrivalEvent(jobName, jobConfig)); } protected void postUpdateJobConfigArrival(String jobName, Properties jobConfig) { LOGGER.info(String.format("Posting update JobConfig with name: %s and config: %s", jobName, jobConfig)); this.eventBus.post(new UpdateJobConfigArrivalEvent(jobName, jobConfig)); } protected void postDeleteJobConfigArrival(String jobName, @Nullable Properties jobConfig) { LOGGER.info(String.format("Posting delete JobConfig with name: %s and config: %s", jobName, jobConfig)); this.eventBus.post(new DeleteJobConfigArrivalEvent(jobName, jobConfig)); } protected void postCancelJobConfigArrival(String jobUri) { LOGGER.info(String.format("Posting cancel JobConfig with name: %s", jobUri)); this.eventBus.post(new CancelJobConfigArrivalEvent(jobUri)); } }
2,209
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/HelixJobsMapping.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.cluster; import java.io.IOException; import java.net.URI; import java.util.List; import java.util.Optional; import java.util.Properties; import com.typesafe.config.Config; import com.typesafe.config.ConfigValueFactory; import javax.annotation.Nullable; import org.apache.gobblin.configuration.ConfigurationKeys; import org.apache.gobblin.configuration.State; import org.apache.gobblin.metastore.FsStateStore; import org.apache.gobblin.metastore.StateStore; import org.apache.gobblin.runtime.JobState; import org.apache.gobblin.util.ClassAliasResolver; import org.apache.gobblin.util.ConfigUtils; import org.apache.gobblin.util.JobLauncherUtils; /** * <p> Any job that submitted to Helix will have a unique id. * We need some mapping between the job name and the job id, * in order to perform: * * 1) cancel a running job * 2) delete a running job * 3) block any incoming job with same name. * * <p> More complexity comes into the picture in the distributed * ask driver mode, where we will have a job name, which maps to a * planning job id and further maps to a real job id. * * <p> We will leverage the state store functionality. We will save * job name as a storeName, and tableName. 
The planning job id and * real job id will be saved in the state object. */ public class HelixJobsMapping { public static final String JOBS_MAPPING_DB_TABLE_KEY = "jobs.mapping.db.table.key"; public static final String DEFAULT_JOBS_MAPPING_DB_TABLE_KEY_NAME = "JobsMapping"; public static final String DISTRIBUTED_STATE_STORE_NAME_KEY = "jobs.mapping.distributed.state.store.name"; public static final String DEFAULT_DISTRIBUTED_STATE_STORE_NAME = "distributedState"; private StateStore<State> stateStore; private String distributedStateStoreName; public HelixJobsMapping(Config sysConfig, URI fsUri, String rootDir) { String stateStoreType = ConfigUtils.getString(sysConfig, ConfigurationKeys.INTERMEDIATE_STATE_STORE_TYPE_KEY, ConfigUtils.getString(sysConfig, ConfigurationKeys.STATE_STORE_TYPE_KEY, ConfigurationKeys.DEFAULT_STATE_STORE_TYPE)); ClassAliasResolver<StateStore.Factory> resolver = new ClassAliasResolver<>(StateStore.Factory.class); StateStore.Factory stateStoreFactory; try { stateStoreFactory = resolver.resolveClass(stateStoreType).newInstance(); } catch (ClassNotFoundException | InstantiationException | IllegalAccessException e) { throw new RuntimeException(e); } String dbTableKey = ConfigUtils.getString(sysConfig, JOBS_MAPPING_DB_TABLE_KEY, DEFAULT_JOBS_MAPPING_DB_TABLE_KEY_NAME); this.distributedStateStoreName = ConfigUtils.getString(sysConfig, DISTRIBUTED_STATE_STORE_NAME_KEY, DEFAULT_DISTRIBUTED_STATE_STORE_NAME); Config stateStoreJobConfig = sysConfig .withValue(ConfigurationKeys.STATE_STORE_FS_URI_KEY, ConfigValueFactory.fromAnyRef(fsUri.toString())) .withValue(ConfigurationKeys.STATE_STORE_ROOT_DIR_KEY, ConfigValueFactory.fromAnyRef(rootDir)) .withValue(ConfigurationKeys.STATE_STORE_DB_TABLE_KEY, ConfigValueFactory.fromAnyRef(dbTableKey)); this.stateStore = stateStoreFactory.createStateStore(stateStoreJobConfig, State.class); } public static String createPlanningJobId (Properties jobPlanningProps) { String flowExecIdSuffix = 
jobPlanningProps.containsKey(ConfigurationKeys.FLOW_EXECUTION_ID_KEY) ? "_" + jobPlanningProps.getProperty(ConfigurationKeys.FLOW_EXECUTION_ID_KEY) : ""; return JobLauncherUtils.newJobId(GobblinClusterConfigurationKeys.PLANNING_JOB_NAME_PREFIX + JobState.getJobNameFromProps(jobPlanningProps) + flowExecIdSuffix); } public static String createActualJobId (Properties jobProps) { String flowExecIdSuffix = jobProps.containsKey(ConfigurationKeys.FLOW_EXECUTION_ID_KEY) ? "_" + jobProps.getProperty(ConfigurationKeys.FLOW_EXECUTION_ID_KEY) : ""; return JobLauncherUtils.newJobId(GobblinClusterConfigurationKeys.ACTUAL_JOB_NAME_PREFIX + JobState.getJobNameFromProps(jobProps) + flowExecIdSuffix); } @Nullable private State getOrCreate (String storeName, String jobName) throws IOException { if (this.stateStore.exists(storeName, jobName)) { return this.stateStore.get(storeName, jobName, jobName); } return new State(); } public void deleteMapping (String jobName) throws IOException { this.stateStore.delete(this.distributedStateStoreName, jobName); } public void setPlanningJobId (String jobUri, String planningJobId) throws IOException { State state = getOrCreate(distributedStateStoreName, jobUri); state.setId(jobUri); state.setProp(GobblinClusterConfigurationKeys.PLANNING_ID_KEY, planningJobId); writeToStateStore(jobUri, state); } public void setActualJobId (String jobUri, String actualJobId) throws IOException { setActualJobId(jobUri, null, actualJobId); } public void setActualJobId (String jobUri, String planningJobId, String actualJobId) throws IOException { State state = getOrCreate(distributedStateStoreName, jobUri); state.setId(jobUri); if (null != planningJobId) { state.setProp(GobblinClusterConfigurationKeys.PLANNING_ID_KEY, planningJobId); } state.setProp(ConfigurationKeys.JOB_ID_KEY, actualJobId); writeToStateStore(jobUri, state); } public void setDistributedJobMode(String jobUri, boolean distributedJobMode) throws IOException { State state = 
getOrCreate(distributedStateStoreName, jobUri); state.setId(jobUri); state.setProp(GobblinClusterConfigurationKeys.DISTRIBUTED_JOB_LAUNCHER_ENABLED, distributedJobMode); writeToStateStore(jobUri, state); } private void writeToStateStore(String jobUri, State state) throws IOException { // fs state store use hdfs rename, which assumes the target file doesn't exist. if (this.stateStore instanceof FsStateStore) { this.deleteMapping(jobUri); } this.stateStore.put(distributedStateStoreName, jobUri, state); } private Optional<String> getId (String jobUri, String idName) throws IOException { State state = this.stateStore.get(distributedStateStoreName, jobUri, jobUri); if (state == null) { return Optional.empty(); } String id = state.getProp(idName); return id == null ? Optional.empty() : Optional.of(id); } public List<State> getAllStates() throws IOException { return this.stateStore.getAll(distributedStateStoreName); } public Optional<String> getActualJobId (String jobUri) throws IOException { return getId(jobUri, ConfigurationKeys.JOB_ID_KEY); } public Optional<String> getPlanningJobId (String jobUri) throws IOException { return getId(jobUri, GobblinClusterConfigurationKeys.PLANNING_ID_KEY); } public Optional<String> getDistributedJobMode(String jobUri) throws IOException { State state = this.stateStore.get(distributedStateStoreName, jobUri, jobUri); if (state == null) { return Optional.empty(); } String id = state.getProp(GobblinClusterConfigurationKeys.DISTRIBUTED_JOB_LAUNCHER_ENABLED); return id == null ? Optional.empty() : Optional.of(id); } }
2,210
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/HelixRetriggeringJobCallable.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.cluster; import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; import java.time.Duration; import java.util.HashMap; import java.util.Optional; import java.util.Properties; import java.util.concurrent.Callable; import java.util.concurrent.locks.Lock; import org.apache.hadoop.fs.Path; import org.apache.helix.HelixException; import org.apache.helix.HelixManager; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.collect.Lists; import com.google.common.io.Closer; import com.google.common.util.concurrent.Striped; import com.typesafe.config.Config; import lombok.extern.slf4j.Slf4j; import org.apache.gobblin.annotation.Alpha; import org.apache.gobblin.configuration.ConfigurationKeys; import org.apache.gobblin.metrics.MetricContext; import org.apache.gobblin.metrics.Tag; import org.apache.gobblin.metrics.event.EventSubmitter; import org.apache.gobblin.metrics.event.TimingEvent; import org.apache.gobblin.runtime.JobException; import org.apache.gobblin.runtime.api.JobExecutionMonitor; import org.apache.gobblin.runtime.api.MutableJobCatalog; import 
org.apache.gobblin.runtime.listeners.JobListener; import org.apache.gobblin.util.ClassAliasResolver; import org.apache.gobblin.util.ConfigUtils; import org.apache.gobblin.util.PropertiesUtils; import org.apache.gobblin.util.reflection.GobblinConstructorUtils; /** * A {@link Callable} that can run a given job multiple times iff: * 1) Re-triggering is enabled and * 2) Job stops early. * * Based on the job properties, a job can be processed immediately (non-distribution mode) or forwarded to a remote * node (distribution mode). Details are as follows: * * <p> Non-Distribution Mode: * If {@link GobblinClusterConfigurationKeys#DISTRIBUTED_JOB_LAUNCHER_ENABLED} is false, the job will be handled * by {@link HelixRetriggeringJobCallable#runJobLauncherLoop()}, which simply launches {@link GobblinHelixJobLauncher} * and submit the work units to Helix. Helix will dispatch the work units to different worker nodes. The worker node will * handle the work units via launching {@link GobblinHelixTask}. * * See {@link GobblinHelixJobLauncher} for job launcher details. * See {@link GobblinHelixTask} for work unit handling details. * </p> * * <p> Distribution Mode: * If {@link GobblinClusterConfigurationKeys#DISTRIBUTED_JOB_LAUNCHER_ENABLED} is true, the job will be handled * by {@link HelixRetriggeringJobCallable#runJobExecutionLauncher()}, which simply launches * {@link GobblinHelixDistributeJobExecutionLauncher} and submit a planning job to Helix. Helix will dispatch this * planning job to a task-driver node. The task-driver node will handle this planning job via launching * {@link GobblinHelixJobTask}. * * The {@link GobblinHelixJobTask} will again launch {@link GobblinHelixJobLauncher} to submit the actual job * to Helix. Helix will dispatch the work units to other worker nodes. Similar to Non-Distribution Node, * some worker nodes will handle those work units by launching {@link GobblinHelixTask}. 
* * See {@link GobblinHelixDistributeJobExecutionLauncher} for planning job launcher details. * See {@link GobblinHelixJobTask} for planning job handling details. * See {@link GobblinHelixJobLauncher} for job launcher details. * See {@link GobblinHelixTask} for work unit handling details. * </p> */ @Slf4j @Alpha class HelixRetriggeringJobCallable implements Callable { private final GobblinHelixJobScheduler jobScheduler; private final MutableJobCatalog jobCatalog; private final Properties sysProps; private final Properties jobProps; private final JobListener jobListener; private final GobblinHelixPlanningJobLauncherMetrics planningJobLauncherMetrics; private final GobblinHelixMetrics helixMetrics; private final Path appWorkDir; private final HelixManager jobHelixManager; private final Optional<HelixManager> taskDriverHelixManager; protected HelixJobsMapping jobsMapping; private GobblinHelixJobLauncher currentJobLauncher = null; private JobExecutionMonitor currentJobMonitor = null; private boolean isDistributeJobEnabled = false; private final String jobUri; private boolean jobDeleteAttempted = false; private final Striped<Lock> locks; private final MetricContext metricContext; private final EventSubmitter eventSubmitter; public HelixRetriggeringJobCallable( GobblinHelixJobScheduler jobScheduler, MutableJobCatalog jobCatalog, Properties sysProps, Properties jobProps, JobListener jobListener, GobblinHelixPlanningJobLauncherMetrics planningJobLauncherMetrics, GobblinHelixMetrics helixMetrics, Path appWorkDir, HelixManager jobHelixManager, Optional<HelixManager> taskDriverHelixManager, HelixJobsMapping jobsMapping, Striped<Lock> locks, MetricContext metricContext) { this.jobScheduler = jobScheduler; this.jobCatalog = jobCatalog; this.sysProps = sysProps; this.jobProps = jobProps; this.jobListener = jobListener; this.planningJobLauncherMetrics = planningJobLauncherMetrics; this.helixMetrics = helixMetrics; this.appWorkDir = appWorkDir; this.jobHelixManager = 
jobHelixManager; this.taskDriverHelixManager = taskDriverHelixManager; this.isDistributeJobEnabled = isDistributeJobEnabled(); this.jobUri = jobProps.getProperty(GobblinClusterConfigurationKeys.JOB_SPEC_URI); this.jobsMapping = jobsMapping; this.locks = locks; this.metricContext = metricContext; eventSubmitter = new EventSubmitter.Builder(this.metricContext, "gobblin.runtime").build(); } private boolean isRetriggeringEnabled() { return PropertiesUtils.getPropAsBoolean(jobProps, ConfigurationKeys.JOB_RETRIGGERING_ENABLED, ConfigurationKeys.DEFAULT_JOB_RETRIGGERING_ENABLED); } private boolean isDistributeJobEnabled() { Properties combinedProps = new Properties(); combinedProps.putAll(sysProps); combinedProps.putAll(jobProps); return (PropertiesUtils.getPropAsBoolean(combinedProps, GobblinClusterConfigurationKeys.DISTRIBUTED_JOB_LAUNCHER_ENABLED, Boolean.toString(GobblinClusterConfigurationKeys.DEFAULT_DISTRIBUTED_JOB_LAUNCHER_ENABLED))); } @Override public Void call() throws JobException { boolean deleteJobWhenException = PropertiesUtils.getPropAsBoolean(this.jobProps, GobblinClusterConfigurationKeys.JOB_ALWAYS_DELETE, "false"); try { this.jobsMapping.setDistributedJobMode(this.jobUri, this.isDistributeJobEnabled); } catch (IOException e) { throw new JobException("Could not update jobsMapping", e); } try { if (this.isDistributeJobEnabled) { runJobExecutionLauncher(); } else { runJobLauncherLoop(); } deleteJobSpec(); } catch (Exception e) { // delete job spec when exception occurred if (deleteJobWhenException) { deleteJobSpec(); } throw e; } return null; } private void deleteJobSpec() throws JobException { boolean runOnce = Boolean.valueOf(jobProps.getProperty(ConfigurationKeys.JOB_RUN_ONCE_KEY, "false")); boolean hasSchedule = jobProps.containsKey(ConfigurationKeys.JOB_SCHEDULE_KEY); if (runOnce || !hasSchedule) { if (this.jobCatalog != null) { try { if (!this.jobDeleteAttempted) { log.info("Deleting job spec on {}", this.jobUri); 
this.jobScheduler.unscheduleJob(this.jobUri); this.jobCatalog.remove(new URI(jobUri)); this.jobDeleteAttempted = true; } } catch (URISyntaxException e) { log.error("Failed to remove job with bad uri " + jobUri, e); } } } } @VisibleForTesting static GobblinHelixJobLauncher buildJobLauncherForCentralizedMode(GobblinHelixJobScheduler jobScheduler, Properties jobProps) throws Exception { //In centralized job launcher mode, the JOB_ID_KEY should be null or should not contain the //"ActualJob" substring, which is intended for the distributed job launcher mode. //This ensures that workflows in centralized mode are cleaned up properly when cluster is restarted. String jobId = jobProps.getProperty(ConfigurationKeys.JOB_ID_KEY); if (jobId != null) { Preconditions.checkArgument(!jobId.contains(GobblinClusterConfigurationKeys.ACTUAL_JOB_NAME_PREFIX), "Job Id should not contain ActualJob in centralized mode."); } return jobScheduler.buildJobLauncher(jobProps); } /** * A method to run a Gobblin job with ability to re-trigger the job if neeeded. This method instantiates a * {@link GobblinHelixJobLauncher} and submits the underlying Gobblin job to a {link GobblinHelixJobScheduler}. * The method will re-submit the job if it has been terminated "early" e.g. before all data has been pulled. * This method should be called only when distributed job launcher mode is disabled. 
   */
  private void runJobLauncherLoop() throws JobException {
    try {
      this.jobHelixManager.connect();
      while (true) {
        currentJobLauncher = buildJobLauncherForCentralizedMode(jobScheduler, jobProps);
        // in "run once" case, job scheduler will remove current job from the scheduler
        boolean isEarlyStopped = this.jobScheduler.runJob(jobProps, jobListener, currentJobLauncher);
        boolean isRetriggerEnabled = this.isRetriggeringEnabled();
        // Loop again (re-trigger) only when the job stopped early AND re-triggering is enabled.
        if (isEarlyStopped && isRetriggerEnabled) {
          log.info("Job {} will be re-triggered.", jobProps.getProperty(ConfigurationKeys.JOB_NAME_KEY));
        } else {
          break;
        }
      }
    } catch (Exception e) {
      log.error("Failed to run job {}", jobProps.getProperty(ConfigurationKeys.JOB_NAME_KEY), e);
      throw new JobException("Failed to run job " + jobProps.getProperty(ConfigurationKeys.JOB_NAME_KEY), e);
    } finally {
      // Always release the Helix connection and clear the launcher reference,
      // even when the run throws.
      this.jobHelixManager.disconnect();
      currentJobLauncher = null;
    }
  }

  /**
   * <p> Launch a planning job. The actual job will be launched
   * on task driver instance, which will handle the early-stop case
   * by a single while-loop.
   *
   * @see {@link GobblinHelixJobTask#run()} for the task driver logic.
   */
  private void runJobExecutionLauncher() throws JobException {
    long startTime = 0;
    String newPlanningId;
    Closer closer = Closer.create();

    try {
      // Prefer the dedicated task-driver Helix manager if one was provided.
      HelixManager planningJobHelixManager = this.taskDriverHelixManager.orElse(this.jobHelixManager);
      planningJobHelixManager.connect();

      String builderStr = jobProps.getProperty(GobblinClusterConfigurationKeys.DISTRIBUTED_JOB_LAUNCHER_BUILDER,
          GobblinHelixDistributeJobExecutionLauncher.Builder.class.getName());

      // Check if any existing planning job is running
      Optional<String> planningJobIdFromStore = jobsMapping.getPlanningJobId(this.jobUri);
      boolean nonblocking = false;

      // start of critical section to check if a job with same job name is running
      Lock jobLock = locks.get(this.jobUri);
      jobLock.lock();
      try {
        if (planningJobIdFromStore.isPresent() && !canRun(planningJobIdFromStore.get(), planningJobHelixManager)) {
          // A previous planning job is still active and may not be killed: skip this run.
          TimingEvent timer = new TimingEvent(eventSubmitter, TimingEvent.JOB_SKIPPED_TIME);
          HashMap<String, String> metadata = new HashMap<>(Tag.toMap(Tag.tagValuesToString(
              HelixUtils.initBaseEventTags(jobProps, Lists.newArrayList()))));
          timer.stop(metadata);
          planningJobLauncherMetrics.skippedPlanningJobs.mark();
          return;
        }

        log.info("Planning job for {} does not exist. First time run.", this.jobUri);

        GobblinHelixDistributeJobExecutionLauncher.Builder builder =
            GobblinConstructorUtils.<GobblinHelixDistributeJobExecutionLauncher.Builder>invokeLongestConstructor(
                new ClassAliasResolver(GobblinHelixDistributeJobExecutionLauncher.Builder.class).resolveClass(builderStr));

        // Make a separate copy because we could update some of attributes in job properties (like adding planning id).
        Properties jobPlanningProps = new Properties();
        jobPlanningProps.putAll(this.jobProps);

        // Inject planning id and start time
        newPlanningId = HelixJobsMapping.createPlanningJobId(jobPlanningProps);
        jobPlanningProps.setProperty(GobblinClusterConfigurationKeys.PLANNING_ID_KEY, newPlanningId);
        jobPlanningProps.setProperty(GobblinClusterConfigurationKeys.PLANNING_JOB_CREATE_TIME,
            String.valueOf(System.currentTimeMillis()));

        builder.setSysProps(this.sysProps);
        builder.setJobPlanningProps(jobPlanningProps);
        builder.setPlanningJobHelixManager(planningJobHelixManager);
        builder.setAppWorkDir(this.appWorkDir);
        builder.setJobsMapping(this.jobsMapping);
        builder.setPlanningJobLauncherMetrics(this.planningJobLauncherMetrics);
        builder.setHelixMetrics(this.helixMetrics);

        // if the distributed job launcher should wait for planning job completion
        Config combined = ConfigUtils.propertiesToConfig(jobPlanningProps)
            .withFallback(ConfigUtils.propertiesToConfig(sysProps));
        nonblocking = ConfigUtils.getBoolean(combined,
            GobblinClusterConfigurationKeys.NON_BLOCKING_PLANNING_JOB_ENABLED,
            GobblinClusterConfigurationKeys.DEFAULT_NON_BLOCKING_PLANNING_JOB_ENABLED);

        log.info("Planning job {} started.", newPlanningId);
        GobblinHelixDistributeJobExecutionLauncher launcher = builder.build();
        closer.register(launcher);

        // Persist the new planning id before launching so other threads can see it.
        this.jobsMapping.setPlanningJobId(this.jobUri, newPlanningId);
        startTime = System.currentTimeMillis();
        this.currentJobMonitor = launcher.launchJob(null);

        // make sure the planning job is initialized (or visible) to other parallel running threads,
        // so that the same critical section check (querying Helix for job completeness)
        // can be applied.
        Duration submissionTimeout = Duration.ofSeconds(PropertiesUtils.getPropAsLong(sysProps,
            GobblinClusterConfigurationKeys.HELIX_WORKFLOW_SUBMISSION_TIMEOUT_SECONDS,
            GobblinClusterConfigurationKeys.DEFAULT_HELIX_WORKFLOW_SUBMISSION_TIMEOUT_SECONDS));
        HelixUtils.waitJobInitialization(planningJobHelixManager, newPlanningId, newPlanningId, submissionTimeout);
      } finally {
        planningJobHelixManager.disconnect();
        // end of the critical section to check if a job with same job name is running
        jobLock.unlock();
      }

      // we can remove the job spec from the catalog because Helix will drive this job to the end.
      this.deleteJobSpec();

      // If we are using non-blocking mode, this get() only guarantees the planning job is submitted.
      // It doesn't guarantee the job will finish because internally we won't wait for Helix completion.
      this.currentJobMonitor.get();
      this.currentJobMonitor = null;
      if (nonblocking) {
        log.info("Planning job {} submitted successfully.", newPlanningId);
      } else {
        log.info("Planning job {} finished.", newPlanningId);
        this.planningJobLauncherMetrics.updateTimeForCompletedPlanningJobs(startTime);
      }
    } catch (Exception e) {
      // startTime == 0 means the failure happened before the planning job was launched.
      if (startTime != 0) {
        this.planningJobLauncherMetrics.updateTimeForFailedPlanningJobs(startTime);
      }
      log.error("Failed to run planning job for {}", this.jobUri, e);
      throw new JobException("Failed to run planning job for " + this.jobUri, e);
    } finally {
      try {
        closer.close();
      } catch (IOException e) {
        throw new JobException("Cannot properly close planning job for " + this.jobUri, e);
      }
    }
  }

  /**
   * This method checks if a job can be submitted to helix for execution.
   * A job can run, 1) if the previous job with the same job id is finished,
   * 2) if the previous job is running but can be killed specified by property
   * {@link GobblinClusterConfigurationKeys#KILL_DUPLICATE_PLANNING_JOB}, default being true
   * @param previousJobId job id from the previous execution
   * @param helixManager helix manager
   * @return true if the job can run on Helix
   * @throws JobException if previous job deletion fails
   * @throws InterruptedException if interrupted while querying or deleting the previous workflow
   */
  private boolean canRun(String previousJobId, HelixManager helixManager) throws JobException, InterruptedException {
    if (HelixUtils.isJobFinished(previousJobId, previousJobId, helixManager)) {
      log.info("Previous planning job {} has reached to the final state. Start a new one.", previousJobId);
    } else {
      boolean killDuplicateJob = PropertiesUtils.getPropAsBoolean(this.jobProps,
          GobblinClusterConfigurationKeys.KILL_DUPLICATE_PLANNING_JOB,
          String.valueOf(GobblinClusterConfigurationKeys.DEFAULT_KILL_DUPLICATE_PLANNING_JOB));
      if (!killDuplicateJob) {
        log.info("Previous planning job {} has not finished yet. Skip this job.", previousJobId);
        return false;
      } else {
        log.info("Previous planning job {} has not finished yet. Kill it.", previousJobId);
        // Timeout config is in seconds; deleteWorkflow takes milliseconds.
        long timeOut = PropertiesUtils.getPropAsLong(sysProps,
            GobblinClusterConfigurationKeys.HELIX_WORKFLOW_DELETE_TIMEOUT_SECONDS,
            GobblinClusterConfigurationKeys.DEFAULT_HELIX_WORKFLOW_DELETE_TIMEOUT_SECONDS) * 1000;
        try {
          HelixUtils.deleteWorkflow(previousJobId, helixManager, timeOut);
        } catch (HelixException e) {
          log.info("Helix cannot delete previous planning job id {} within {} seconds.", previousJobId,
              timeOut / 1000);
          throw new JobException("Helix cannot delete previous planning job id " + previousJobId, e);
        }
      }
    }
    return true;
  }

  /**
   * Cancels the in-flight run: the planning-job monitor in distributed mode, or the
   * current job launcher in centralized mode. Cancellation start/complete counters are
   * always incremented, even when there is nothing to cancel.
   */
  void cancel() throws JobException {
    this.jobScheduler.jobSchedulerMetrics.numCancellationStart.incrementAndGet();
    if (isDistributeJobEnabled) {
      if (currentJobMonitor != null) {
        currentJobMonitor.cancel(false);
      }
    } else {
      if (currentJobLauncher != null) {
        currentJobLauncher.cancelJob(this.jobListener);
      }
    }
    this.jobScheduler.jobSchedulerMetrics.numCancellationComplete.incrementAndGet();
  }
}
2,211
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/ContainerHealthCheckException.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.cluster; /** * Signals that the container has failed one or more health checks. In other words, the container has been detected * itself to be in an unhealthy state. The application may want to catch this exception to take an appropriate * action e.g. exiting with an appropriate exit code. */ public class ContainerHealthCheckException extends RuntimeException { public ContainerHealthCheckException() { super(); } public ContainerHealthCheckException(String message) { super(message); } public ContainerHealthCheckException(String message, Throwable cause) { super(message, cause); } }
2,212
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/SingleHelixTask.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.cluster; import java.io.IOException; import java.nio.file.Path; import java.nio.file.Paths; import java.util.Map; import org.apache.helix.task.Task; import org.apache.helix.task.TaskResult; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.google.common.base.Throwables; import org.apache.gobblin.configuration.ConfigurationKeys; public class SingleHelixTask implements Task { private static final Logger logger = LoggerFactory.getLogger(SingleHelixTask.class); private final String jobId; private final String jobName; private final Process taskProcess; SingleHelixTask(final SingleTaskLauncher launcher, final Map<String, String> configMap) throws IOException { this.jobName = configMap.get(ConfigurationKeys.JOB_NAME_KEY); this.jobId = configMap.get(ConfigurationKeys.JOB_ID_KEY); final Path workUnitFilePath = Paths.get(configMap.get(GobblinClusterConfigurationKeys.WORK_UNIT_FILE_PATH)); logger.info(String .format("Launching a single task process. job name: %s. 
job id: %s", this.jobName, this.jobId)); this.taskProcess = launcher.launch(this.jobId, workUnitFilePath); } @Override public TaskResult run() { try { logger.info(String .format("Waiting for a single task process to finish. job name: %s. job id: %s", this.jobName, this.jobId)); int exitCode = this.taskProcess.waitFor(); if (exitCode == 0) { logger.info("Task process finished. job name: {}. job id: {}", this.jobName, this.jobId); return new TaskResult(TaskResult.Status.COMPLETED, ""); } else { logger.warn("Task process failed with exitcode ({}). job name: {}. job id: {}", exitCode, this.jobName, this.jobId); return new TaskResult(TaskResult.Status.FATAL_FAILED, "Exit code: " + exitCode); } } catch (final Throwable t) { logger.error("SingleHelixTask failed due to " + t.getMessage(), t); return new TaskResult(TaskResult.Status.FAILED, Throwables.getStackTraceAsString(t)); } } @Override public void cancel() { logger.info(String .format("Canceling a single task process. job name: %s. job id: %s", this.jobName, this.jobId)); this.taskProcess.destroyForcibly(); } }
2,213
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/GobblinThrottlingHelixJobLauncherListener.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.cluster; import java.time.Instant; import java.util.concurrent.ConcurrentHashMap; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import lombok.extern.slf4j.Slf4j; import org.apache.gobblin.runtime.JobContext; import org.apache.gobblin.runtime.JobState; /** * A job listener used when {@link GobblinHelixJobLauncher} launches a job. * In {@link GobblinHelixJobScheduler}, when throttling is enabled, this * listener would record jobName to next schedulable time to decide whether * the replanning should be executed or skipped. 
*/ @Slf4j public class GobblinThrottlingHelixJobLauncherListener extends GobblinHelixJobLauncherListener { public final static Logger LOG = LoggerFactory.getLogger(GobblinThrottlingHelixJobLauncherListener.class); private ConcurrentHashMap<String, Instant> jobNameToNextSchedulableTime; public GobblinThrottlingHelixJobLauncherListener(GobblinHelixJobLauncherMetrics jobLauncherMetrics, ConcurrentHashMap<String, Instant> jobNameToNextSchedulableTime) { super(jobLauncherMetrics); this.jobNameToNextSchedulableTime = jobNameToNextSchedulableTime; } @Override public void onJobCompletion(JobContext jobContext) throws Exception { super.onJobCompletion(jobContext); if (jobContext.getJobState().getState() == JobState.RunningState.FAILED) { jobNameToNextSchedulableTime.put(jobContext.getJobName(), Instant.EPOCH); LOG.info("{} failed. The next schedulable time is {} so that any future schedule attempts will be allowed.", jobContext.getJobName(), Instant.EPOCH); } } @Override public void onJobCancellation(JobContext jobContext) throws Exception { super.onJobCancellation(jobContext); jobNameToNextSchedulableTime.put(jobContext.getJobName(), Instant.EPOCH); LOG.info("{} is cancelled. The next schedulable time is {} so that any future schedule attempts will be allowed.", jobContext.getJobName(), Instant.EPOCH); } }
2,214
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/GobblinHelixJobLauncher.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.cluster; import java.io.IOException; import java.net.URI; import java.time.Duration; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Properties; import java.util.concurrent.Callable; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeoutException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.helix.HelixManager; import org.apache.helix.task.JobConfig; import org.apache.helix.task.JobQueue; import org.apache.helix.task.TaskConfig; import org.apache.helix.task.TaskDriver; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.github.rholder.retry.RetryException; import com.github.rholder.retry.Retryer; import com.github.rholder.retry.RetryerBuilder; import com.github.rholder.retry.StopStrategies; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.Maps; import com.typesafe.config.Config; import com.typesafe.config.ConfigValueFactory; import javax.annotation.Nullable; import 
lombok.extern.slf4j.Slf4j; import org.apache.gobblin.annotation.Alpha; import org.apache.gobblin.configuration.ConfigurationKeys; import org.apache.gobblin.metastore.StateStore; import org.apache.gobblin.metrics.Tag; import org.apache.gobblin.metrics.event.CountEventBuilder; import org.apache.gobblin.metrics.event.JobEvent; import org.apache.gobblin.metrics.event.TimingEvent; import org.apache.gobblin.rest.LauncherTypeEnum; import org.apache.gobblin.runtime.AbstractJobLauncher; import org.apache.gobblin.runtime.ExecutionModel; import org.apache.gobblin.runtime.JobException; import org.apache.gobblin.runtime.JobLauncher; import org.apache.gobblin.runtime.JobState; import org.apache.gobblin.runtime.Task; import org.apache.gobblin.runtime.TaskState; import org.apache.gobblin.runtime.TaskStateCollectorService; import org.apache.gobblin.runtime.listeners.JobListener; import org.apache.gobblin.runtime.util.StateStores; import org.apache.gobblin.source.workunit.WorkUnit; import org.apache.gobblin.util.ConfigUtils; import org.apache.gobblin.util.JobLauncherUtils; import org.apache.gobblin.util.ParallelRunner; import org.apache.gobblin.util.PropertiesUtils; import org.apache.gobblin.util.SerializationUtils; /** * An implementation of {@link JobLauncher} that launches a Gobblin job using the Helix task framework. * * <p> * This class uses the Helix task execution framework to run tasks of Gobblin jobs. It creates one Helix * {@link JobQueue} per job and submits every scheduled runs of a job to its {@link JobQueue}, where Helix * picks up them and submit them for execution. After submitting the job run to its {@link JobQueue}, it * waits for the job to complete and collects the output {@link TaskState}(s) upon completion. 
 * </p>
 *
 * <p>
 * Each {@link WorkUnit} of the job is persisted to the {@link FileSystem} of choice and the path to the file
 * storing the serialized {@link WorkUnit} is passed to the Helix task running the {@link WorkUnit} as a
 * user-defined property {@link GobblinClusterConfigurationKeys#WORK_UNIT_FILE_PATH}. Upon startup, the Helix
 * task reads the property for the file path and de-serializes the {@link WorkUnit} from the file.
 * </p>
 *
 * <p>
 * This class is instantiated by the {@link GobblinHelixJobScheduler} on every job submission to launch the Gobblin job.
 * The actual task execution happens in the {@link GobblinTaskRunner}, usually in a different process.
 * </p>
 *
 * @author Yinan Li
 */
@Alpha
@Slf4j
public class GobblinHelixJobLauncher extends AbstractJobLauncher {

  private static final Logger LOGGER = LoggerFactory.getLogger(GobblinHelixJobLauncher.class);

  private final HelixManager helixManager;
  private final TaskDriver helixTaskDriver;
  // Workflow name in Helix; equal to the Gobblin job id.
  private final String helixWorkFlowName;
  private JobListener jobListener;

  private final FileSystem fs;
  private final Path appWorkDir;
  private final Path inputWorkUnitDir;
  private final Path outputTaskStateDir;

  // Number of ParallelRunner threads to be used for state serialization/deserialization
  private final int stateSerDeRunnerThreads;

  private final TaskStateCollectorService taskStateCollectorService;
  private final Optional<GobblinHelixMetrics> helixMetrics;
  // Set once the Helix job has been submitted; guards cancellation.
  private volatile boolean jobSubmitted = false;

  // Shared job-name -> running flag map used to serialize runs of the same job.
  private final ConcurrentHashMap<String, Boolean> runningMap;
  private final StateStores stateStores;
  private final Config jobConfig;
  private final long workFlowExpiryTimeSeconds;
  private final long helixJobStopTimeoutSeconds;
  private final long helixWorkflowSubmissionTimeoutSeconds;
  private Map<String, TaskConfig> workUnitToHelixConfig;
  private Retryer<Boolean> taskRetryer;

  /**
   * Builds a launcher for one Gobblin job run: wires up state stores under the application work
   * directory, the task-state collector, and Helix task-driver handles.
   *
   * @throws Exception if the state-store URI, filesystem, or collector service cannot be created
   */
  public GobblinHelixJobLauncher(Properties jobProps, final HelixManager helixManager, Path appWorkDir,
      List<? extends Tag<?>> metadataTags, ConcurrentHashMap<String, Boolean> runningMap,
      Optional<GobblinHelixMetrics> helixMetrics) throws Exception {
    super(jobProps, HelixUtils.initBaseEventTags(jobProps, metadataTags));
    LOGGER.debug("GobblinHelixJobLauncher: jobProps {}, appWorkDir {}", jobProps, appWorkDir);
    this.helixManager = helixManager;
    this.helixTaskDriver = new TaskDriver(this.helixManager);
    this.runningMap = runningMap;
    this.appWorkDir = appWorkDir;
    this.inputWorkUnitDir = new Path(appWorkDir, GobblinClusterConfigurationKeys.INPUT_WORK_UNIT_DIR_NAME);
    this.outputTaskStateDir = new Path(this.appWorkDir,
        GobblinClusterConfigurationKeys.OUTPUT_TASK_STATE_DIR_NAME + Path.SEPARATOR + this.jobContext.getJobId());

    this.helixWorkFlowName = this.jobContext.getJobId();
    this.jobContext.getJobState().setJobLauncherType(LauncherTypeEnum.CLUSTER);

    this.stateSerDeRunnerThreads = Integer.parseInt(jobProps.getProperty(ParallelRunner.PARALLEL_RUNNER_THREADS_KEY,
        Integer.toString(ParallelRunner.DEFAULT_PARALLEL_RUNNER_THREADS)));

    jobConfig = ConfigUtils.propertiesToConfig(jobProps);

    this.workFlowExpiryTimeSeconds = ConfigUtils.getLong(jobConfig,
        GobblinClusterConfigurationKeys.HELIX_WORKFLOW_EXPIRY_TIME_SECONDS,
        GobblinClusterConfigurationKeys.DEFAULT_HELIX_WORKFLOW_EXPIRY_TIME_SECONDS);

    this.helixJobStopTimeoutSeconds = ConfigUtils.getLong(jobConfig,
        GobblinClusterConfigurationKeys.HELIX_JOB_STOP_TIMEOUT_SECONDS,
        GobblinClusterConfigurationKeys.DEFAULT_HELIX_JOB_STOP_TIMEOUT_SECONDS);

    this.helixWorkflowSubmissionTimeoutSeconds = ConfigUtils.getLong(jobConfig,
        GobblinClusterConfigurationKeys.HELIX_WORKFLOW_SUBMISSION_TIMEOUT_SECONDS,
        GobblinClusterConfigurationKeys.DEFAULT_HELIX_WORKFLOW_SUBMISSION_TIMEOUT_SECONDS);

    // Point the state-store FS URI at the root of the app work dir's filesystem.
    Config stateStoreJobConfig = ConfigUtils.propertiesToConfig(jobProps)
        .withValue(ConfigurationKeys.STATE_STORE_FS_URI_KEY, ConfigValueFactory.fromAnyRef(
            new URI(appWorkDir.toUri().getScheme(), null, appWorkDir.toUri().getHost(),
                appWorkDir.toUri().getPort(), "/", null, null).toString()));

    this.stateStores = new StateStores(stateStoreJobConfig, appWorkDir,
        GobblinClusterConfigurationKeys.OUTPUT_TASK_STATE_DIR_NAME, appWorkDir,
        GobblinClusterConfigurationKeys.INPUT_WORK_UNIT_DIR_NAME, appWorkDir,
        GobblinClusterConfigurationKeys.JOB_STATE_DIR_NAME);

    URI fsUri = URI.create(jobProps.getProperty(ConfigurationKeys.FS_URI_KEY, ConfigurationKeys.LOCAL_FS_URI));
    this.fs = FileSystem.get(fsUri, new Configuration());

    this.taskStateCollectorService = new TaskStateCollectorService(jobProps, this.jobContext.getJobState(),
        this.eventBus, this.eventSubmitter, this.stateStores.getTaskStateStore(), this.outputTaskStateDir,
        this.getIssueRepository());
    this.helixMetrics = helixMetrics;
    this.workUnitToHelixConfig = new HashMap<>();
    this.taskRetryer = RetryerBuilder.<Boolean>newBuilder()
        .retryIfException()
        .withStopStrategy(StopStrategies.stopAfterAttempt(3)).build();
    startCancellationExecutor();
  }

  /** Cancels any in-flight Helix workflow before delegating to the parent close. */
  @Override
  public void close() throws IOException {
    try {
      executeCancellation();
    } finally {
      super.close();
    }
  }

  /** @return the Gobblin job id for this run (also the Helix workflow name). */
  public String getJobId() {
    return this.jobContext.getJobId();
  }

  /**
   * Submits the given work units as one Helix job and blocks until completion.
   * The task-state collector service runs for the duration of the job to harvest output task states.
   */
  @Override
  protected void runWorkUnits(List<WorkUnit> workUnits) throws Exception {
    try {
      CountEventBuilder countEventBuilder = new CountEventBuilder(JobEvent.WORK_UNITS_CREATED, workUnits.size());
      this.eventSubmitter.submit(countEventBuilder);
      LOGGER.info("Emitting WorkUnitsCreated Count: " + countEventBuilder.getCount());

      long workUnitStartTime = System.currentTimeMillis();
      workUnits.forEach((k) -> k.setProp(ConfigurationKeys.WORK_UNIT_CREATION_TIME_IN_MILLIS, workUnitStartTime));

      // Start the output TaskState collector service
      this.taskStateCollectorService.startAsync().awaitRunning();

      TimingEvent jobSubmissionTimer =
          this.eventSubmitter.getTimingEvent(TimingEvent.RunJobTimings.HELIX_JOB_SUBMISSION);

      // Synchronize with the cancellation path so we never submit after a cancel request.
      synchronized (this.cancellationRequest) {
        if (!this.cancellationRequested) {
          long submitStart = System.currentTimeMillis();
          if (helixMetrics.isPresent()) {
            helixMetrics.get().submitMeter.mark();
          }
          submitJobToHelix(createHelixJob(workUnits));
          if (helixMetrics.isPresent()) {
            this.helixMetrics.get().updateTimeForHelixSubmit(submitStart);
          }
          jobSubmissionTimer.stop();
          LOGGER.info(String.format("Submitted job %s to Helix", this.jobContext.getJobId()));
          this.jobSubmitted = true;
        } else {
          LOGGER.warn("Job {} not submitted to Helix as it was requested to be cancelled.",
              this.jobContext.getJobId());
        }
      }

      TimingEvent jobRunTimer = this.eventSubmitter.getTimingEvent(TimingEvent.RunJobTimings.HELIX_JOB_RUN);
      long waitStart = System.currentTimeMillis();
      waitForJobCompletion();
      if (helixMetrics.isPresent()) {
        this.helixMetrics.get().updateTimeForHelixWait(waitStart);
      }
      jobRunTimer.stop();
      LOGGER.info(String.format("Job %s completed", this.jobContext.getJobId()));
    } finally {
      // The last iteration of output TaskState collecting will run when the collector service gets stopped
      this.taskStateCollectorService.stopAsync().awaitTerminated();
      cleanupWorkingDirectory();
    }
  }

  /**
   * Cancels (stop or delete, per config) the submitted Helix workflow. No-op if the job was
   * never submitted or a cancellation already executed.
   */
  @Override
  protected void executeCancellation() {
    if (this.jobSubmitted) {
      try {
        if (this.cancellationRequested && !this.cancellationExecuted) {
          boolean cancelByDelete = ConfigUtils.getBoolean(jobConfig,
              GobblinClusterConfigurationKeys.CANCEL_HELIX_JOB_BY_DELETE,
              GobblinClusterConfigurationKeys.DEFAULT_CANCEL_HELIX_JOB_BY_DELETE);
          HelixUtils.cancelWorkflow(this.helixWorkFlowName, this.helixManager,
              helixJobStopTimeoutSeconds * 1000, cancelByDelete);
          log.info("Canceled the workflow {}", this.helixWorkFlowName);
        }
      } catch (RuntimeException e) {
        // Cancellation may throw an exception, but Helix set the job state to STOP/DELETE and it should eventually be cleaned up
        // We will keep this.cancellationExecuted and this.cancellationRequested to true and not propagate the exception
        log.error("Failed to cancel workflow {} in Helix", helixWorkFlowName, e);
      } catch (InterruptedException e) {
        log.error("Thread interrupted while trying to cancel the workflow {} in Helix", helixWorkFlowName);
        Thread.currentThread().interrupt();
      }
    }
  }

  /**
   * Removes the given work-unit ids from the running Helix job (with retries) and deletes their
   * serialized state from the state store.
   */
  protected void removeTasksFromCurrentJob(List<String> workUnitIdsToRemove)
      throws IOException, ExecutionException, RetryException {
    String jobName = this.jobContext.getJobId();
    try (ParallelRunner stateSerDeRunner = new ParallelRunner(this.stateSerDeRunnerThreads, this.fs)) {
      for (String workUnitId : workUnitIdsToRemove) {
        taskRetryer.call(new Callable<Boolean>() {
          @Override
          public Boolean call() throws Exception {
            String taskId = workUnitToHelixConfig.get(workUnitId).getId();
            boolean remove =
                HelixUtils.deleteTaskFromHelixJob(helixWorkFlowName, jobName, taskId, helixTaskDriver);
            if (remove) {
              log.info(String.format("Removed helix task %s with gobblin task id %s from helix job %s:%s ",
                  taskId, workUnitId, helixWorkFlowName, jobName));
            } else {
              // Throwing makes the retryer attempt the deletion again.
              throw new IOException(
                  String.format("Cannot remove task %s from helix job %s:%s", workUnitId, helixWorkFlowName,
                      jobName));
            }
            return true;
          }
        });
        deleteWorkUnitFromStateStore(workUnitId, stateSerDeRunner);
        log.info(String.format("remove task state for %s in state store", workUnitId));
        this.workUnitToHelixConfig.remove(workUnitId);
      }
    }
  }

  /**
   * Adds the given work units as new tasks to the running Helix job, retrying each addition
   * up to the retryer's attempt limit.
   */
  protected void addTasksToCurrentJob(List<WorkUnit> workUnitsToAdd)
      throws IOException, ExecutionException, RetryException {
    String jobName = this.jobContext.getJobId();
    try (ParallelRunner stateSerDeRunner = new ParallelRunner(this.stateSerDeRunnerThreads, this.fs)) {
      for (WorkUnit workunit : workUnitsToAdd) {
        TaskConfig taskConfig = getTaskConfig(workunit, stateSerDeRunner);
        this.taskRetryer.call(new Callable<Boolean>() {
          @Override
          public Boolean call() throws Exception {
            boolean added = HelixUtils.addTaskToHelixJob(helixWorkFlowName, jobName, taskConfig, helixTaskDriver);
            if (added) {
              log.info(
                  String.format("Added task %s to helix job %s:%s ", workunit.getId(), helixWorkFlowName, jobName));
            } else {
              log.error(
                  String.format("Failed to add task %s to helix job %s:%s ", workunit.getId(), helixWorkFlowName,
                      jobName));
              throw new IOException(
                  String.format("Cannot add task %s to helix job %s:%s", workunit.getId(), helixWorkFlowName,
                      jobName));
            }
            return true;
          }
        });
      }
    }
  }

  /**
   * Create a job from a given batch of {@link WorkUnit}s.
   */
  JobConfig.Builder createHelixJob(List<WorkUnit> workUnits) throws IOException {
    Map<String, TaskConfig> taskConfigMap = Maps.newHashMap();
    try (ParallelRunner stateSerDeRunner = new ParallelRunner(this.stateSerDeRunnerThreads, this.fs)) {
      int multiTaskIdSequence = 0;
      for (WorkUnit workUnit : workUnits) {
        if (workUnit.isMultiWorkUnit()) {
          workUnit.setId(JobLauncherUtils.newMultiTaskId(this.jobContext.getJobId(), multiTaskIdSequence++));
        }
        addWorkUnit(workUnit, stateSerDeRunner, taskConfigMap);
      }

      Path jobStateFilePath;
      // write the job.state using the state store if present, otherwise serialize directly to the file
      if (this.stateStores.haveJobStateStore()) {
        jobStateFilePath = GobblinClusterUtils.getJobStateFilePath(true, this.appWorkDir, this.jobContext.getJobId());
        this.stateStores.getJobStateStore()
            .put(jobStateFilePath.getParent().getName(), jobStateFilePath.getName(), this.jobContext.getJobState());
      } else {
        jobStateFilePath = GobblinClusterUtils.getJobStateFilePath(false, this.appWorkDir, this.jobContext.getJobId());
        SerializationUtils.serializeState(this.fs, jobStateFilePath, this.jobContext.getJobState());
      }

      // Block on persistence of all workunits to be finished.
      // It is necessary when underlying storage being slow and Helix activate task-execution before the workunit being persisted.
      stateSerDeRunner.waitForTasks(Long.MAX_VALUE);

      LOGGER.debug("GobblinHelixJobLauncher.createHelixJob: jobStateFilePath {}, jobState {} jobProperties {}",
          jobStateFilePath, this.jobContext.getJobState().toString(), this.jobContext.getJobState().getProperties());

      return translateGobblinJobConfigToHelixJobConfig(this.jobContext.getJobState(), workUnits, taskConfigMap);
    }
  }

  /**
   * Populate {@link JobConfig.Builder} with relevant gobblin job-configurations.
   */
  JobConfig.Builder translateGobblinJobConfigToHelixJobConfig(JobState gobblinJobState, List<WorkUnit> workUnits,
      Map<String, TaskConfig> taskConfigMap) {
    JobConfig.Builder jobConfigBuilder = new JobConfig.Builder();

    // Helix task attempts = retries + 1 (fallback to general task retry for backward compatibility)
    jobConfigBuilder.setMaxAttemptsPerTask(
        gobblinJobState.getPropAsInt(GobblinClusterConfigurationKeys.HELIX_TASK_MAX_ATTEMPTS_KEY,
            gobblinJobState.getPropAsInt(ConfigurationKeys.MAX_TASK_RETRIES_KEY,
                ConfigurationKeys.DEFAULT_MAX_TASK_RETRIES)) + 1);

    // Helix task timeout (fallback to general task timeout for backward compatibility);
    // config is in seconds, Helix expects milliseconds.
    jobConfigBuilder.setTimeoutPerTask(
        gobblinJobState.getPropAsLong(GobblinClusterConfigurationKeys.HELIX_TASK_TIMEOUT_SECONDS,
            gobblinJobState.getPropAsLong(ConfigurationKeys.TASK_TIMEOUT_SECONDS,
                ConfigurationKeys.DEFAULT_TASK_TIMEOUT_SECONDS)) * 1000);

    // Failure threshold equal to the task count: the job is not failed by Helix before all tasks report.
    jobConfigBuilder.setFailureThreshold(workUnits.size());
    jobConfigBuilder.addTaskConfigMap(taskConfigMap).setCommand(GobblinTaskRunner.GOBBLIN_TASK_FACTORY_NAME);
    jobConfigBuilder.setNumConcurrentTasksPerInstance(
        ConfigUtils.getInt(jobConfig, GobblinClusterConfigurationKeys.HELIX_CLUSTER_TASK_CONCURRENCY,
            GobblinClusterConfigurationKeys.HELIX_CLUSTER_TASK_CONCURRENCY_DEFAULT));

    if (this.jobConfig.hasPath(GobblinClusterConfigurationKeys.HELIX_JOB_TAG_KEY)) {
      String jobTag = this.jobConfig.getString(GobblinClusterConfigurationKeys.HELIX_JOB_TAG_KEY);
      log.info("Job {} has tags associated : {}", this.jobContext.getJobId(), jobTag);
      jobConfigBuilder.setInstanceGroupTag(jobTag);
    }

    if (this.jobConfig.hasPath(GobblinClusterConfigurationKeys.HELIX_JOB_TYPE_KEY)) {
      String jobType = this.jobConfig.getString(GobblinClusterConfigurationKeys.HELIX_JOB_TYPE_KEY);
      log.info("Job {} has types associated : {}", this.jobContext.getJobId(), jobType);
      jobConfigBuilder.setJobType(jobType);
    }

    // Streaming jobs keep running; allow Helix to rebalance their running tasks.
    if (Task.getExecutionModel(ConfigUtils.configToState(jobConfig)).equals(ExecutionModel.STREAMING)) {
      jobConfigBuilder.setRebalanceRunningTask(true);
    }

    jobConfigBuilder.setExpiry(
        gobblinJobState.getPropAsLong(GobblinClusterConfigurationKeys.HELIX_WORKFLOW_EXPIRY_TIME_SECONDS,
            GobblinClusterConfigurationKeys.DEFAULT_HELIX_WORKFLOW_EXPIRY_TIME_SECONDS));

    // Pass container resource requirements (memory / vcores), when configured, via the
    // job command config map so downstream components can read them.
    Map<String, String> jobConfigMap = new HashMap<>();
    if (this.jobConfig.hasPath(GobblinClusterConfigurationKeys.HELIX_JOB_CONTAINER_MEMORY_MBS)) {
      jobConfigMap.put(GobblinClusterConfigurationKeys.HELIX_JOB_CONTAINER_MEMORY_MBS,
          jobConfig.getString(GobblinClusterConfigurationKeys.HELIX_JOB_CONTAINER_MEMORY_MBS));
      log.info("Job {} has specific memory requirement:{}, add this config to command config map",
          this.jobContext.getJobId(),
          jobConfig.getString(GobblinClusterConfigurationKeys.HELIX_JOB_CONTAINER_MEMORY_MBS));
    }
    if (this.jobConfig.hasPath(GobblinClusterConfigurationKeys.HELIX_JOB_CONTAINER_CORES)) {
      jobConfigMap.put(GobblinClusterConfigurationKeys.HELIX_JOB_CONTAINER_CORES,
          jobConfig.getString(GobblinClusterConfigurationKeys.HELIX_JOB_CONTAINER_CORES));
      log.info("Job {} has specific Vcore requirement:{}, add this config to command config map",
          this.jobContext.getJobId(),
          jobConfig.getString(GobblinClusterConfigurationKeys.HELIX_JOB_CONTAINER_CORES));
    }
    jobConfigBuilder.setJobCommandConfigMap(jobConfigMap);
    return jobConfigBuilder;
  }

  /**
   * Submit a job to run.
*/ private void submitJobToHelix(JobConfig.Builder jobConfigBuilder) throws Exception { HelixUtils.submitJobToWorkFlow(jobConfigBuilder, this.helixWorkFlowName, this.jobContext.getJobId(), this.helixTaskDriver, this.helixManager, Duration.ofSeconds(this.workFlowExpiryTimeSeconds), Duration.ofSeconds(this.helixWorkflowSubmissionTimeoutSeconds)); } public void launchJob(@Nullable JobListener jobListener) throws JobException { this.jobListener = jobListener; boolean isLaunched = false; this.runningMap.putIfAbsent(this.jobContext.getJobName(), false); Throwable errorInJobLaunching = null; try { if (this.runningMap.replace(this.jobContext.getJobName(), false, true)) { LOGGER.info("Job {} will be executed, add into running map.", this.jobContext.getJobId()); isLaunched = true; launchJobImpl(jobListener); } else { LOGGER.warn("Job {} will not be executed because other jobs are still running.", this.jobContext.getJobId()); } // TODO: Better error handling. The current impl swallows exceptions for jobs that were started by this method call. // One potential way to improve the error handling is to make this error swallowing configurable } catch (Throwable t) { errorInJobLaunching = t; if (isLaunched) { // Attempts to cancel the helix workflow if an error occurs during launch cancelJob(jobListener); } } finally { if (isLaunched) { if (this.runningMap.replace(this.jobContext.getJobName(), true, false)) { LOGGER.info("Job {} is done, remove from running map.", this.jobContext.getJobId()); } else { throw errorInJobLaunching == null ? new IllegalStateException( "A launched job should have running state equal to true in the running map.") : new RuntimeException("Failure in launching job:", errorInJobLaunching); } } } } /** * This method looks silly at first glance but exists for a reason. 
* * The method {@link GobblinHelixJobLauncher#launchJob(JobListener)} contains boiler plate for handling exceptions and * mutating the runningMap to communicate state back to the {@link GobblinHelixJobScheduler}. The boiler plate swallows * exceptions when launching the job because many use cases require that 1 job failure should not affect other jobs by causing the * entire process to fail through an uncaught exception. * * This method is useful for unit testing edge cases where we expect {@link JobException}s during the underlying launch operation. * It would be nice to not swallow exceptions, but the implications of doing that will require careful refactoring since * the class {@link GobblinHelixJobLauncher} and {@link GobblinHelixJobScheduler} are shared for 2 quite different cases * between GaaS and streaming. GaaS typically requiring many short lifetime workflows (where a failure is tolerated) and * streaming requiring a small number of long running workflows (where failure to submit is unexpected and is not * tolerated) * * @throws JobException */ @VisibleForTesting void launchJobImpl(@Nullable JobListener jobListener) throws JobException { super.launchJob(jobListener); } private TaskConfig getTaskConfig(WorkUnit workUnit, ParallelRunner stateSerDeRunner) throws IOException { String workUnitFilePath = persistWorkUnit(new Path(this.inputWorkUnitDir, this.jobContext.getJobId()), workUnit, stateSerDeRunner); Map<String, String> rawConfigMap = Maps.newHashMap(); rawConfigMap.put(GobblinClusterConfigurationKeys.WORK_UNIT_FILE_PATH, workUnitFilePath); rawConfigMap.put(ConfigurationKeys.JOB_NAME_KEY, this.jobContext.getJobName()); rawConfigMap.put(ConfigurationKeys.JOB_ID_KEY, this.jobContext.getJobId()); rawConfigMap.put(ConfigurationKeys.TASK_ID_KEY, workUnit.getId()); rawConfigMap.put(GobblinClusterConfigurationKeys.TASK_SUCCESS_OPTIONAL_KEY, "true"); TaskConfig taskConfig = TaskConfig.Builder.from(rawConfigMap); workUnitToHelixConfig.put(workUnit.getId(), 
taskConfig); return taskConfig; } /** * Add a single {@link WorkUnit} (flattened) to persistent storage so that worker can fetch that based on information * fetched in Helix task. */ private void addWorkUnit(WorkUnit workUnit, ParallelRunner stateSerDeRunner, Map<String, TaskConfig> taskConfigMap) throws IOException { taskConfigMap.put(workUnit.getId(), getTaskConfig(workUnit, stateSerDeRunner)); } /** * Delete a single {@link WorkUnit} (flattened) from state store. */ private void deleteWorkUnitFromStateStore(String workUnitId, ParallelRunner stateSerDeRunner) { String workUnitFilePath = workUnitToHelixConfig.get(workUnitId).getConfigMap().get(GobblinClusterConfigurationKeys.WORK_UNIT_FILE_PATH); Path workUnitFile = new Path(workUnitFilePath); final String fileName = workUnitFile.getName(); final String storeName = workUnitFile.getParent().getName(); final StateStore stateStore = JobLauncherUtils.hasMultiWorkUnitExtension(workUnitFile) ? stateStores.getMwuStateStore() : stateStores.getWuStateStore(); stateSerDeRunner.submitCallable(new Callable<Void>() { @Override public Void call() throws Exception { stateStore.delete(storeName, fileName); return null; } }, "Delete state " + fileName + " from store " + storeName); } /** * Persist a single {@link WorkUnit} (flattened) to a file. 
*/ private String persistWorkUnit(final Path workUnitFileDir, final WorkUnit workUnit, ParallelRunner stateSerDeRunner) throws IOException { final StateStore stateStore; String workUnitFileName = workUnit.getId(); if (workUnit.isMultiWorkUnit()) { workUnitFileName += JobLauncherUtils.MULTI_WORK_UNIT_FILE_EXTENSION; stateStore = stateStores.getMwuStateStore(); } else { workUnitFileName += JobLauncherUtils.WORK_UNIT_FILE_EXTENSION; stateStore = stateStores.getWuStateStore(); } Path workUnitFile = new Path(workUnitFileDir, workUnitFileName); final String fileName = workUnitFile.getName(); final String storeName = workUnitFile.getParent().getName(); stateSerDeRunner.submitCallable(new Callable<Void>() { @Override public Void call() throws Exception { stateStore.put(storeName, fileName, workUnit); return null; } }, "Serialize state to store " + storeName + " file " + fileName); return workUnitFile.toString(); } private void waitForJobCompletion() throws InterruptedException { boolean timeoutEnabled = Boolean.parseBoolean( this.jobProps.getProperty(GobblinClusterConfigurationKeys.HELIX_JOB_TIMEOUT_ENABLED_KEY, GobblinClusterConfigurationKeys.DEFAULT_HELIX_JOB_TIMEOUT_ENABLED)); long timeoutInSeconds = Long.parseLong( this.jobProps.getProperty(GobblinClusterConfigurationKeys.HELIX_JOB_TIMEOUT_SECONDS, GobblinClusterConfigurationKeys.DEFAULT_HELIX_JOB_TIMEOUT_SECONDS)); long stoppingStateTimeoutInSeconds = PropertiesUtils.getPropAsLong(this.jobProps, GobblinClusterConfigurationKeys.HELIX_JOB_STOPPING_STATE_TIMEOUT_SECONDS, GobblinClusterConfigurationKeys.DEFAULT_HELIX_JOB_STOPPING_STATE_TIMEOUT_SECONDS); try { HelixUtils.waitJobCompletion(this.helixManager, this.helixWorkFlowName, this.jobContext.getJobId(), timeoutEnabled ? 
Optional.of(timeoutInSeconds) : Optional.empty(), stoppingStateTimeoutInSeconds); } catch (TimeoutException te) { HelixUtils.handleJobTimeout(helixWorkFlowName, jobContext.getJobId(), helixManager, this, this.jobListener); } } /** * Delete persisted {@link WorkUnit}s and {@link JobState} upon job completion. */ private void cleanupWorkingDirectory() throws IOException { LOGGER.info("Deleting persisted work units for job " + this.jobContext.getJobId()); stateStores.getWuStateStore().delete(this.jobContext.getJobId()); // delete the directory that stores the task state files stateStores.getTaskStateStore().delete(outputTaskStateDir.getName()); LOGGER.info("Deleting job state file for job " + this.jobContext.getJobId()); if (this.stateStores.haveJobStateStore()) { this.stateStores.getJobStateStore().delete(this.jobContext.getJobId()); } else { Path jobStateFilePath = GobblinClusterUtils.getJobStateFilePath(false, this.appWorkDir, this.jobContext.getJobId()); this.fs.delete(jobStateFilePath, false); } } }
2,215
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/HelixUtils.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.cluster; import java.time.Duration; import java.util.Collection; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Properties; import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.stream.Collectors; import org.apache.helix.HelixAdmin; import org.apache.helix.HelixDataAccessor; import org.apache.helix.HelixException; import org.apache.helix.HelixManager; import org.apache.helix.HelixProperty; import org.apache.helix.PropertyKey; import org.apache.helix.manager.zk.ZKHelixManager; import org.apache.helix.model.HelixConfigScope; import org.apache.helix.model.InstanceConfig; import org.apache.helix.task.JobConfig; import org.apache.helix.task.JobContext; import org.apache.helix.task.TargetState; import org.apache.helix.task.TaskConfig; import org.apache.helix.task.TaskDriver; import org.apache.helix.task.TaskState; import org.apache.helix.task.TaskUtil; import org.apache.helix.task.Workflow; import org.apache.helix.task.WorkflowConfig; import org.apache.helix.task.WorkflowContext; import 
org.apache.helix.tools.ClusterSetup; import com.google.common.collect.Lists; import lombok.extern.slf4j.Slf4j; import org.apache.gobblin.configuration.ConfigurationKeys; import org.apache.gobblin.metrics.Tag; import org.apache.gobblin.metrics.event.TimingEvent; import org.apache.gobblin.runtime.JobException; import org.apache.gobblin.runtime.JobState; import org.apache.gobblin.runtime.listeners.JobListener; import org.apache.gobblin.util.Id; import org.apache.gobblin.util.JobLauncherUtils; import org.apache.gobblin.util.PropertiesUtils; import static org.apache.helix.task.TaskState.STOPPED; /** * A utility class for working with Gobblin on Helix. * * @author Yinan Li */ @Slf4j public class HelixUtils { /** * Create a Helix cluster for the Gobblin Cluster application. * * @param zkConnectionString the ZooKeeper connection string * @param clusterName the Helix cluster name */ public static void createGobblinHelixCluster( String zkConnectionString, String clusterName) { createGobblinHelixCluster(zkConnectionString, clusterName, true); } /** * Create a Helix cluster for the Gobblin Cluster application. * * @param zkConnectionString the ZooKeeper connection string * @param clusterName the Helix cluster name * @param overwrite true to overwrite exiting cluster, false to reuse existing cluster */ public static void createGobblinHelixCluster( String zkConnectionString, String clusterName, boolean overwrite) { ClusterSetup clusterSetup = new ClusterSetup(zkConnectionString); // Create the cluster and overwrite if it already exists clusterSetup.addCluster(clusterName, overwrite); // Helix 0.6.x requires a configuration property to have the form key=value. String autoJoinConfig = ZKHelixManager.ALLOW_PARTICIPANT_AUTO_JOIN + "=true"; clusterSetup.setConfig(HelixConfigScope.ConfigScopeProperty.CLUSTER, clusterName, autoJoinConfig); } /** * Get a Helix instance name. 
* * @param namePrefix a prefix of Helix instance names * @param instanceId an integer instance ID * @return a Helix instance name that is a concatenation of the given prefix and instance ID */ public static String getHelixInstanceName( String namePrefix, int instanceId) { return namePrefix + "_" + instanceId; } static void waitJobInitialization( HelixManager helixManager, String workflowName, String jobName, Duration timeout) throws Exception { WorkflowContext workflowContext = TaskDriver.getWorkflowContext(helixManager, workflowName); // If the helix job is deleted from some other thread or a completely external process, // method waitJobCompletion() needs to differentiate between the cases where // 1) workflowContext did not get initialized ever, in which case we need to keep waiting, or // 2) it did get initialized but deleted soon after, in which case we should stop waiting // To overcome this issue, we wait here till workflowContext gets initialized long start = System.currentTimeMillis(); while (workflowContext == null || workflowContext.getJobState(TaskUtil.getNamespacedJobName(workflowName, jobName)) == null) { if (System.currentTimeMillis() - start > timeout.toMillis()) { String errorDescription = String.format("Job cannot be initialized within %s milliseconds, considered as an error. " + "workflowName=%s, jobName=%s, timeSubmittedEpoch=%s", timeout.toMillis(), workflowName, jobName, start); log.error(errorDescription); throw new JobException(errorDescription); } workflowContext = TaskDriver.getWorkflowContext(helixManager, workflowName); Thread.sleep(TimeUnit.SECONDS.toMillis(1L)); log.info("Waiting for workflow initialization. workflowName={}, jobName={}, timeSubmittedEpoch={}, timeoutSeconds={}", workflowName, jobName, start, timeout.getSeconds()); } log.info("Workflow {} initialized. 
timeToInitMs={}", workflowName, System.currentTimeMillis() - start); } /** * Inject in some additional properties * @param jobProps job properties * @param inputTags list of metadata tags * @return */ public static List<? extends Tag<?>> initBaseEventTags(Properties jobProps, List<? extends Tag<?>> inputTags) { List<Tag<?>> metadataTags = Lists.newArrayList(inputTags); String jobId; // generate job id if not already set if (jobProps.containsKey(ConfigurationKeys.JOB_ID_KEY)) { jobId = jobProps.getProperty(ConfigurationKeys.JOB_ID_KEY); } else { jobId = JobLauncherUtils.newJobId(JobState.getJobNameFromProps(jobProps), PropertiesUtils.getPropAsLong(jobProps, ConfigurationKeys.FLOW_EXECUTION_ID_KEY, System.currentTimeMillis())); jobProps.put(ConfigurationKeys.JOB_ID_KEY, jobId); } String jobExecutionId = Long.toString(Id.Job.parse(jobId).getSequence()); // only inject flow tags if a flow name is defined if (jobProps.containsKey(ConfigurationKeys.FLOW_NAME_KEY)) { metadataTags.add(new Tag<>(TimingEvent.FlowEventConstants.FLOW_GROUP_FIELD, jobProps.getProperty(ConfigurationKeys.FLOW_GROUP_KEY, ""))); metadataTags.add(new Tag<>(TimingEvent.FlowEventConstants.FLOW_NAME_FIELD, jobProps.getProperty(ConfigurationKeys.FLOW_NAME_KEY))); // use job execution id if flow execution id is not present metadataTags.add(new Tag<>(TimingEvent.FlowEventConstants.FLOW_EXECUTION_ID_FIELD, jobProps.getProperty(ConfigurationKeys.FLOW_EXECUTION_ID_KEY, jobExecutionId))); } if (jobProps.containsKey(ConfigurationKeys.JOB_CURRENT_ATTEMPTS)) { metadataTags.add(new Tag<>(TimingEvent.FlowEventConstants.CURRENT_ATTEMPTS_FIELD, jobProps.getProperty(ConfigurationKeys.JOB_CURRENT_ATTEMPTS, "1"))); metadataTags.add(new Tag<>(TimingEvent.FlowEventConstants.CURRENT_GENERATION_FIELD, jobProps.getProperty(ConfigurationKeys.JOB_CURRENT_GENERATION, "1"))); metadataTags.add(new Tag<>(TimingEvent.FlowEventConstants.SHOULD_RETRY_FIELD, "false")); } metadataTags.add(new 
Tag<>(TimingEvent.FlowEventConstants.JOB_GROUP_FIELD, jobProps.getProperty(ConfigurationKeys.JOB_GROUP_KEY, ""))); metadataTags.add(new Tag<>(TimingEvent.FlowEventConstants.JOB_NAME_FIELD, jobProps.getProperty(ConfigurationKeys.JOB_NAME_KEY, ""))); metadataTags.add(new Tag<>(TimingEvent.FlowEventConstants.JOB_EXECUTION_ID_FIELD, jobExecutionId)); log.debug("HelixUtils.addAdditionalMetadataTags: metadataTags {}", metadataTags); return metadataTags; } protected static boolean deleteTaskFromHelixJob(String workFlowName, String jobName, String taskID, TaskDriver helixTaskDriver) { try { log.info(String.format("try to delete task %s from workflow %s, job %s", taskID, workFlowName, jobName)); helixTaskDriver.deleteTask(workFlowName, jobName, taskID); } catch (Exception e) { e.printStackTrace(); return !helixTaskDriver.getJobConfig(TaskUtil.getNamespacedJobName(workFlowName, jobName)).getMapConfigs().containsKey(taskID); } return true; } protected static boolean addTaskToHelixJob(String workFlowName, String jobName, TaskConfig taskConfig, TaskDriver helixTaskDriver) { String taskId = taskConfig.getId(); try { log.info(String.format("try to add task %s to workflow %s, job %s", taskId, workFlowName, jobName)); helixTaskDriver.addTask(workFlowName, jobName, taskConfig); } catch (Exception e) { e.printStackTrace(); JobContext jobContext = helixTaskDriver.getJobContext(TaskUtil.getNamespacedJobName(workFlowName, jobName)); return jobContext.getTaskIdPartitionMap().containsKey(taskId); } return true; } public static void submitJobToWorkFlow(JobConfig.Builder jobConfigBuilder, String workFlowName, String jobName, TaskDriver helixTaskDriver, HelixManager helixManager, Duration workFlowExpiryTime, Duration submissionTimeout) throws Exception { WorkflowConfig workFlowConfig = new WorkflowConfig.Builder().setExpiry(workFlowExpiryTime.getSeconds(), TimeUnit.SECONDS).build(); // Create a workflow for each Gobblin job using the Gobblin job name as the workflow name Workflow workFlow = 
new Workflow.Builder(workFlowName).setWorkflowConfig(workFlowConfig).addJob(jobName, jobConfigBuilder).build(); // start the workflow helixTaskDriver.start(workFlow); log.info("Created a workflow {}", workFlowName); waitJobInitialization(helixManager, workFlowName, jobName, submissionTimeout); } static void waitJobCompletion(HelixManager helixManager, String workFlowName, String jobName, Optional<Long> timeoutInSeconds, Long stoppingStateTimeoutInSeconds) throws InterruptedException, TimeoutException { log.info("Waiting for job {} to complete...", jobName); long endTime = 0; long jobStartTimeMillis = System.currentTimeMillis(); if (timeoutInSeconds.isPresent()) { endTime = jobStartTimeMillis + timeoutInSeconds.get() * 1000; } Long stoppingStateEndTime = null; while (!timeoutInSeconds.isPresent() || System.currentTimeMillis() <= endTime) { WorkflowContext workflowContext = TaskDriver.getWorkflowContext(helixManager, workFlowName); if (workflowContext != null) { TaskState jobState = workflowContext.getJobState(TaskUtil.getNamespacedJobName(workFlowName, jobName)); switch (jobState) { case STOPPED: // user requested cancellation, which is executed by executeCancellation() log.info("Job {} is cancelled, it will be deleted now.", jobName); HelixUtils.deleteStoppedHelixJob(helixManager, workFlowName, jobName); return; case FAILED: case COMPLETED: return; case STOPPING: log.info("Waiting for job {} to complete... State - {}", jobName, jobState); Thread.sleep(TimeUnit.SECONDS.toMillis(1L)); if (stoppingStateEndTime == null) { stoppingStateEndTime = System.currentTimeMillis() + stoppingStateTimeoutInSeconds * 1000; } // Workaround for a Helix bug where a job may be stuck in the STOPPING state due to an unresponsive task. 
if (System.currentTimeMillis() > stoppingStateEndTime) { log.info("Deleting workflow {} since it stuck in STOPPING state for more than {} seconds", workFlowName, stoppingStateTimeoutInSeconds); new TaskDriver(helixManager).delete(workFlowName); log.info("Deleted workflow {}", workFlowName); return; } default: log.info("Waiting for job {} to complete... State - {}", jobName, jobState); Thread.sleep(TimeUnit.SECONDS.toMillis(10L)); } } else { // We have waited for WorkflowContext to get initialized, // so it is found null here, it must have been deleted in job cancellation process. log.info("WorkflowContext not found. Job is probably cancelled."); return; } } throw new TimeoutException("task driver wait time [" + timeoutInSeconds + " sec] is expired."); } static boolean isJobFinished(String workflowName, String jobName, HelixManager helixManager) { WorkflowContext workflowContext = TaskDriver.getWorkflowContext(helixManager, workflowName); if (workflowContext == null) { // this workflow context doesn't exist, considered as finished. 
return true; } TaskState jobState = workflowContext.getJobState(TaskUtil.getNamespacedJobName(workflowName, jobName)); switch (jobState) { case STOPPED: case FAILED: case COMPLETED: case ABORTED: case TIMED_OUT: return true; default: return false; } } // Cancel the job by calling either Delete or Stop Helix API public static void cancelWorkflow(String workflowName, HelixManager helixManager, long timeOut, boolean cancelByDelete) throws InterruptedException { TaskDriver taskDriver = new TaskDriver(helixManager); if (cancelByDelete) { taskDriver.deleteAndWaitForCompletion(workflowName, timeOut); log.info("Canceling Helix workflow: {} through delete API", workflowName); } else { taskDriver.waitToStop(workflowName, timeOut); log.info("Canceling Helix workflow: {} through stop API", workflowName); } } static void deleteWorkflow (String workflowName, HelixManager helixManager, long timeOut) throws InterruptedException { TaskDriver taskDriver = new TaskDriver(helixManager); taskDriver.deleteAndWaitForCompletion(workflowName, timeOut); } static void handleJobTimeout(String workFlowName, String jobName, HelixManager helixManager, Object jobLauncher, JobListener jobListener) throws InterruptedException { try { log.warn("Timeout occurred for job launcher {} with job {}", jobLauncher.getClass(), jobName); if (jobLauncher instanceof GobblinHelixJobLauncher) { ((GobblinHelixJobLauncher) jobLauncher).cancelJob(jobListener); } else if (jobLauncher instanceof GobblinHelixDistributeJobExecutionLauncher) { ((GobblinHelixDistributeJobExecutionLauncher) jobLauncher).cancel(); } } catch (JobException e) { throw new RuntimeException("Unable to cancel job " + jobName + ": ", e); } // Make sure the job is fully cleaned up HelixUtils.deleteStoppedHelixJob(helixManager, workFlowName, jobName); log.info("Stopped and deleted the workflow {}", workFlowName); } /** * Deletes the stopped Helix Workflow. * Caller should stop the Workflow before calling this method. 
* @param helixManager helix manager * @param workFlowName workflow needed to be deleted * @param jobName helix job name * @throws InterruptedException */ private static void deleteStoppedHelixJob(HelixManager helixManager, String workFlowName, String jobName) throws InterruptedException { long deleteTimeout = 10000L; WorkflowContext workflowContext = TaskDriver.getWorkflowContext(helixManager, workFlowName); while (workflowContext != null && workflowContext.getJobState(TaskUtil.getNamespacedJobName(workFlowName, jobName)) != STOPPED) { log.info("Waiting for job {} to stop...", jobName); workflowContext = TaskDriver.getWorkflowContext(helixManager, workFlowName); Thread.sleep(1000); } if (workflowContext != null) { // deleting the entire workflow, as one workflow contains only one job new TaskDriver(helixManager).deleteAndWaitForCompletion(workFlowName, deleteTimeout); } log.info("Workflow deleted."); } /** * Returns the currently running Helix Workflow Ids given an {@link Iterable} of Gobblin job names. The method returns a * {@link java.util.Map} from Gobblin job name to the corresponding Helix Workflow Id. This method iterates * over all Helix workflows, and obtains the jobs of each workflow from its jobDag. * * NOTE: This call is expensive as it results in listing of znodes and subsequently, multiple ZK calls to get the job * configuration for each HelixJob. Ideally, this method should be called infrequently e.g. when a job is deleted/cancelled. * * @param jobNames a list of Gobblin job names. * @return a map from jobNames to their Helix Workflow Ids. * @throws GobblinHelixUnexpectedStateException when there is inconsistent helix state. 
This implies that we should retry the call * to avoid acting on stale data */ public static Map<String, String> getWorkflowIdsFromJobNames(HelixManager helixManager, Collection<String> jobNames) throws GobblinHelixUnexpectedStateException { TaskDriver taskDriver = new TaskDriver(helixManager); return getWorkflowIdsFromJobNames(taskDriver, jobNames); } public static Map<String, String> getWorkflowIdsFromJobNames(TaskDriver taskDriver, Collection<String> jobNames) throws GobblinHelixUnexpectedStateException { Map<String, String> jobNameToWorkflowId = new HashMap<>(); Map<String, WorkflowConfig> workflowConfigMap = taskDriver.getWorkflows(); for (Map.Entry<String, WorkflowConfig> entry : workflowConfigMap.entrySet()) { String workflow = entry.getKey(); WorkflowConfig workflowConfig = entry.getValue(); if (workflowConfig == null) { // As of Helix 1.0.2 implementation, this in theory shouldn't happen. But this null check is here in case implementation changes // because the API doesn't technically prohibit null configs, maps allowing null values is implementation based, and we want to fail loudly with a clear root cause. // the caller of this API should retry this API call throw new GobblinHelixUnexpectedStateException("Received null workflow config from Helix. We should not see any null configs when reading all workflows. workflowId=%s", workflow); } //Filter out any stale Helix workflows which are not running. if (workflowConfig.getTargetState() != TargetState.START) { continue; } Set<String> helixJobs = workflowConfig.getJobDag().getAllNodes(); for (String helixJob : helixJobs) { JobConfig jobConfig = taskDriver.getJobConfig(helixJob); if (jobConfig == null) { throw new GobblinHelixUnexpectedStateException("Received null jobConfig from Helix. We should not see any null configs when reading all helixJobs. 
helixJob=%s", helixJob); } Iterator<TaskConfig> taskConfigIterator = jobConfig.getTaskConfigMap().values().iterator(); if (taskConfigIterator.hasNext()) { TaskConfig taskConfig = taskConfigIterator.next(); String jobName = taskConfig.getConfigMap().get(ConfigurationKeys.JOB_NAME_KEY); if (jobNames.contains(jobName)) { if (!jobNameToWorkflowId.containsKey(jobName)) { jobNameToWorkflowId.put(jobName, workflow); } else { log.warn("JobName {} previously found to have WorkflowId {}; found " + " a different WorkflowId {} for the job; " + "Skipping this entry", jobName, jobNameToWorkflowId.get(jobName), workflow); } break; } } } } return jobNameToWorkflowId; } /** * A utility method that returns all current live instances in a given Helix cluster. This method assumes that * the passed {@link HelixManager} instance is already connected. * @param helixManager * @return all live instances in the Helix cluster. */ public static List<String> getLiveInstances(HelixManager helixManager) { HelixDataAccessor accessor = helixManager.getHelixDataAccessor(); PropertyKey liveInstancesKey = accessor.keyBuilder().liveInstances(); return accessor.getChildNames(liveInstancesKey); } /** * Getting all instances (Helix Participants) in cluster at this moment. * Note that the raw result could contain AppMaster node and replanner node. * @param filterString Helix instances whose name containing fitlerString will pass filtering. 
*/ public static Set<String> getParticipants(HelixDataAccessor helixDataAccessor, String filterString) { PropertyKey.Builder keyBuilder = helixDataAccessor.keyBuilder(); PropertyKey liveInstance = keyBuilder.liveInstances(); Map<String, HelixProperty> childValuesMap = helixDataAccessor.getChildValuesMap(liveInstance); return childValuesMap.keySet().stream().filter(x -> filterString.isEmpty() || x.contains(filterString)).collect(Collectors.toSet()); } public static boolean isInstanceLive(HelixManager helixManager, String instanceName) { HelixDataAccessor accessor = helixManager.getHelixDataAccessor(); PropertyKey liveInstanceKey = accessor.keyBuilder().liveInstance(instanceName); return accessor.getProperty(liveInstanceKey) != null; } public static void dropInstanceIfExists(HelixAdmin admin, String clusterName, String helixInstanceName) { try { admin.dropInstance(clusterName, new InstanceConfig(helixInstanceName)); } catch (HelixException e) { log.error("Could not drop instance: {} due to: {}", helixInstanceName, e); } } }
2,216
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/NoopReplyHandler.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.cluster; import org.apache.helix.messaging.AsyncCallback; import org.apache.helix.model.Message; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Class that handles Helix messaging response via no-op. * * @author Abhishek Tiwari */ public class NoopReplyHandler extends AsyncCallback { private static final Logger LOGGER = LoggerFactory.getLogger(NoopReplyHandler.class); private String bootstrapUrl; private String bootstrapTime; public NoopReplyHandler() { } public void onTimeOut() { LOGGER.error("Timed out"); } public void onReplyMessage(Message message) { LOGGER.info("Received reply: " + message); } }
2,217
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/GobblinHelixJobScheduler.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.cluster;

import java.io.IOException;
import java.time.Clock;
import java.time.Duration;
import java.time.Instant;
import java.time.temporal.ChronoUnit;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Properties;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.locks.Lock;

import org.apache.hadoop.fs.Path;
import org.apache.helix.HelixManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.github.rholder.retry.AttemptTimeLimiters;
import com.github.rholder.retry.RetryException;
import com.github.rholder.retry.Retryer;
import com.github.rholder.retry.RetryerBuilder;
import com.github.rholder.retry.StopStrategies;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Maps;
import com.google.common.eventbus.EventBus;
import com.google.common.eventbus.Subscribe;
import com.google.common.util.concurrent.Striped;
import com.typesafe.config.Config;

import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.cluster.event.CancelJobConfigArrivalEvent;
import org.apache.gobblin.cluster.event.DeleteJobConfigArrivalEvent;
import org.apache.gobblin.cluster.event.NewJobConfigArrivalEvent;
import org.apache.gobblin.cluster.event.UpdateJobConfigArrivalEvent;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.instrumented.StandardMetricsBridge;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.Tag;
import org.apache.gobblin.runtime.JobException;
import org.apache.gobblin.runtime.api.MutableJobCatalog;
import org.apache.gobblin.runtime.listeners.JobListener;
import org.apache.gobblin.scheduler.JobScheduler;
import org.apache.gobblin.scheduler.SchedulerService;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.PathUtils;
import org.apache.gobblin.util.PropertiesUtils;

/**
 * An extension to {@link JobScheduler} that schedules and runs
 * Gobblin jobs on Helix.
 *
 * <p> The actual job running logic is handled by
 * {@link HelixRetriggeringJobCallable}. This callable will first
 * determine if this job should be launched from the same node
 * where the scheduler is running, or from a remote node.
 *
 * <p> If the job should be launched from the scheduler node,
 * {@link GobblinHelixJobLauncher} is invoked. Else the
 * {@link GobblinHelixDistributeJobExecutionLauncher} is invoked.
 *
 * <p> More details can be found at {@link HelixRetriggeringJobCallable}.
 */
@Alpha
public class GobblinHelixJobScheduler extends JobScheduler implements StandardMetricsBridge {

  private static final Logger LOGGER = LoggerFactory.getLogger(GobblinHelixJobScheduler.class);
  private static final String COMMON_JOB_PROPS = "gobblin.common.job.props";

  // Properties merged into every arriving job config before scheduling/running.
  private final Properties commonJobProperties;
  // Helix manager used to submit/cancel regular job workflows.
  private final HelixManager jobHelixManager;
  // Optional separate Helix manager for "planning" (task-driver) workflows.
  private final Optional<HelixManager> taskDriverHelixManager;
  private final EventBus eventBus;
  private final Path appWorkDir;
  private final List<? extends Tag<?>> metadataTags;
  // jobName -> true while the job is running; used to wait for cancellation to finish.
  private final ConcurrentHashMap<String, Boolean> jobRunningMap;
  private final MutableJobCatalog jobCatalog;
  private final MetricContext metricContext;

  final GobblinHelixMetrics helixMetrics;
  final GobblinHelixJobSchedulerMetrics jobSchedulerMetrics;
  final GobblinHelixJobLauncherMetrics launcherMetrics;
  final GobblinHelixPlanningJobLauncherMetrics planningJobLauncherMetrics;
  final HelixJobsMapping jobsMapping;
  // Striped per-job locks shared with HelixRetriggeringJobCallable to serialize work per job.
  final Striped<Lock> locks = Striped.lazyWeakLock(256);

  private final long helixWorkflowListingTimeoutMillis;
  // Guards against scheduling before startUp() has completed.
  private boolean startServicesCompleted;
  private final long helixJobStopTimeoutMillis;

  /**
   * The throttling timeout prevents helix workflows with the same job name / URI from being submitted
   * more than once within the timeout period. This timeout is not reset by deletes / cancels, meaning that
   * if you delete a workflow within the timeout period, you cannot reschedule until the timeout period is complete.
   * However, if there is an error when launching the job, you can immediately reschedule the flow. <br><br>
   *
   * NOTE: This throttle timeout period starts when the job launcher thread picks up the runnable. Meaning that the
   * time it takes to submit to Helix and start running the flow is also included as part of the timeout period
   */
  private final Duration jobSchedulingThrottleTimeout;
  // jobName -> earliest instant at which the job may be (re)scheduled when throttling is on.
  private ConcurrentHashMap<String, Instant> jobNameToNextSchedulableTime;
  private boolean isThrottleEnabled;
  // Injectable clock so throttling decisions are testable.
  private Clock clock;

  /**
   * Full constructor with an explicit {@link Clock}, used directly by tests; production code
   * uses the overload below which supplies {@code Clock.systemUTC()}.
   */
  public GobblinHelixJobScheduler(Config sysConfig,
                                  HelixManager jobHelixManager,
                                  Optional<HelixManager> taskDriverHelixManager,
                                  EventBus eventBus,
                                  Path appWorkDir, List<? extends Tag<?>> metadataTags,
                                  SchedulerService schedulerService,
                                  MutableJobCatalog jobCatalog,
                                  Clock clock) throws Exception {
    super(ConfigUtils.configToProperties(sysConfig), schedulerService);
    this.commonJobProperties = ConfigUtils.configToProperties(ConfigUtils.getConfigOrEmpty(sysConfig, COMMON_JOB_PROPS));
    this.jobHelixManager = jobHelixManager;
    this.taskDriverHelixManager = taskDriverHelixManager;
    this.eventBus = eventBus;
    this.jobRunningMap = new ConcurrentHashMap<>();
    this.appWorkDir = appWorkDir;
    this.metadataTags = metadataTags;
    this.jobCatalog = jobCatalog;
    this.metricContext = Instrumented.getMetricContext(new org.apache.gobblin.configuration.State(properties), this.getClass());

    int metricsWindowSizeInMin = ConfigUtils.getInt(sysConfig,
        ConfigurationKeys.METRIC_TIMER_WINDOW_SIZE_IN_MINUTES,
        ConfigurationKeys.DEFAULT_METRIC_TIMER_WINDOW_SIZE_IN_MINUTES);

    this.launcherMetrics = new GobblinHelixJobLauncherMetrics("launcherInScheduler",
        this.metricContext,
        metricsWindowSizeInMin);

    this.jobSchedulerMetrics = new GobblinHelixJobSchedulerMetrics(this.jobExecutor,
        this.metricContext,
        metricsWindowSizeInMin);

    this.jobsMapping = new HelixJobsMapping(ConfigUtils.propertiesToConfig(properties),
        PathUtils.getRootPath(appWorkDir).toUri(),
        appWorkDir.toString());

    this.planningJobLauncherMetrics = new GobblinHelixPlanningJobLauncherMetrics("planningLauncherInScheduler",
        this.metricContext,
        metricsWindowSizeInMin, this.jobsMapping);

    this.helixMetrics = new GobblinHelixMetrics("helixMetricsInJobScheduler",
        this.metricContext,
        metricsWindowSizeInMin);

    this.startServicesCompleted = false;

    // Config values are in seconds; converted to millis here.
    this.helixJobStopTimeoutMillis = ConfigUtils.getLong(sysConfig,
        GobblinClusterConfigurationKeys.HELIX_JOB_STOP_TIMEOUT_SECONDS,
        GobblinClusterConfigurationKeys.DEFAULT_HELIX_JOB_STOP_TIMEOUT_SECONDS) * 1000;

    this.helixWorkflowListingTimeoutMillis = ConfigUtils.getLong(sysConfig,
        GobblinClusterConfigurationKeys.HELIX_WORKFLOW_LISTING_TIMEOUT_SECONDS,
        GobblinClusterConfigurationKeys.DEFAULT_HELIX_WORKFLOW_LISTING_TIMEOUT_SECONDS) * 1000;

    this.jobSchedulingThrottleTimeout = Duration.of(ConfigUtils.getLong(sysConfig,
        GobblinClusterConfigurationKeys.HELIX_JOB_SCHEDULING_THROTTLE_TIMEOUT_SECONDS_KEY,
        GobblinClusterConfigurationKeys.DEFAULT_HELIX_JOB_SCHEDULING_THROTTLE_TIMEOUT_SECONDS_KEY), ChronoUnit.SECONDS);

    this.jobNameToNextSchedulableTime = new ConcurrentHashMap<>();

    this.isThrottleEnabled = ConfigUtils.getBoolean(sysConfig,
        GobblinClusterConfigurationKeys.HELIX_JOB_SCHEDULING_THROTTLE_ENABLED_KEY,
        GobblinClusterConfigurationKeys.DEFAULT_HELIX_JOB_SCHEDULING_THROTTLE_ENABLED_KEY);

    this.clock = clock;
  }

  /** Production constructor: delegates to the full constructor with the system UTC clock. */
  public GobblinHelixJobScheduler(Config sysConfig,
                                  HelixManager jobHelixManager,
                                  Optional<HelixManager> taskDriverHelixManager,
                                  EventBus eventBus,
                                  Path appWorkDir, List<? extends Tag<?>> metadataTags,
                                  SchedulerService schedulerService,
                                  MutableJobCatalog jobCatalog) throws Exception {
    this(sysConfig, jobHelixManager, taskDriverHelixManager, eventBus, appWorkDir, metadataTags, schedulerService,
        jobCatalog, Clock.systemUTC());
  }

  @Override
  public Collection<StandardMetrics> getStandardMetricsCollection() {
    return ImmutableList.of(this.launcherMetrics,
        this.jobSchedulerMetrics,
        this.planningJobLauncherMetrics,
        this.helixMetrics);
  }

  @Override
  protected void startUp() throws Exception {
    // Register for New/Update/Delete/Cancel JobConfigArrivalEvents before the base scheduler starts.
    this.eventBus.register(this);
    super.startUp();
    this.startServicesCompleted = true;
  }

  /**
   * Schedules a job, blocking (polling once a second) until {@link #startUp()} has completed
   * so that events arriving during startup are not lost.
   */
  @Override
  public void scheduleJob(Properties jobProps, JobListener jobListener) throws JobException {
    try {
      while (!startServicesCompleted) {
        LOGGER.info("{} service is not fully up, waiting here...", this.getClass().getName());
        Thread.sleep(1000);
      }

      scheduleJob(jobProps, jobListener, Maps.newHashMap(), GobblinHelixJob.class);

    } catch (Exception e) {
      throw new JobException("Failed to schedule job " + jobProps.getProperty(ConfigurationKeys.JOB_NAME_KEY), e);
    }
  }

  @Override
  protected void startServices() throws Exception {
    // Optionally wipe all persisted job-name -> workflow mappings on startup
    // (used to start from a clean slate for distributed jobs).
    boolean cleanAllDistJobs = PropertiesUtils.getPropAsBoolean(this.properties,
        GobblinClusterConfigurationKeys.CLEAN_ALL_DIST_JOBS,
        String.valueOf(GobblinClusterConfigurationKeys.DEFAULT_CLEAN_ALL_DIST_JOBS));

    if (cleanAllDistJobs) {
      for (org.apache.gobblin.configuration.State state : this.jobsMapping.getAllStates()) {
        String jobName = state.getId();
        LOGGER.info("Delete mapping for job " + jobName);
        this.jobsMapping.deleteMapping(jobName);
      }
    }
  }

  /** Runs the job synchronously on the calling thread via {@link HelixRetriggeringJobCallable}. */
  @Override
  public void runJob(Properties jobProps, JobListener jobListener) throws JobException {
    new HelixRetriggeringJobCallable(this,
        this.jobCatalog,
        this.properties,
        jobProps,
        jobListener,
        this.planningJobLauncherMetrics,
        this.helixMetrics,
        this.appWorkDir,
        this.jobHelixManager,
        this.taskDriverHelixManager,
        this.jobsMapping,
        this.locks,
        this.metricContext).call();
  }

  /** Builds a launcher with system properties overlaid by the per-job properties. */
  @Override
  public GobblinHelixJobLauncher buildJobLauncher(Properties jobProps) throws Exception {
    Properties combinedProps = new Properties();
    combinedProps.putAll(properties);
    combinedProps.putAll(jobProps);

    return new GobblinHelixJobLauncher(combinedProps,
        this.jobHelixManager,
        this.appWorkDir,
        this.metadataTags,
        this.jobRunningMap,
        Optional.of(this.helixMetrics));
  }

  /**
   * Submits the job to the executor immediately (bypassing any cron schedule) and returns a
   * {@link Future} whose {@code cancel} also cancels the underlying Helix workflow
   * (only honored when the scheduler itself has a cancel request pending).
   */
  public Future<?> scheduleJobImmediately(Properties jobProps, JobListener jobListener) {
    HelixRetriggeringJobCallable retriggeringJob = new HelixRetriggeringJobCallable(this,
        this.jobCatalog,
        this.properties,
        jobProps,
        jobListener,
        this.planningJobLauncherMetrics,
        this.helixMetrics,
        this.appWorkDir,
        this.jobHelixManager,
        this.taskDriverHelixManager,
        this.jobsMapping,
        this.locks,
        this.metricContext);

    final Future<?> future = this.jobExecutor.submit(retriggeringJob);
    return new Future() {
      @Override
      public boolean cancel(boolean mayInterruptIfRunning) {
        if (!GobblinHelixJobScheduler.this.isCancelRequested()) {
          return false;
        }
        boolean result = true;
        try {
          retriggeringJob.cancel();
        } catch (JobException e) {
          LOGGER.error("Failed to cancel job " + jobProps.getProperty(ConfigurationKeys.JOB_NAME_KEY), e);
          result = false;
        }
        if (mayInterruptIfRunning) {
          result &= future.cancel(true);
        }
        return result;
      }

      @Override
      public boolean isCancelled() {
        return future.isCancelled();
      }

      @Override
      public boolean isDone() {
        return future.isDone();
      }

      @Override
      public Object get() throws InterruptedException, ExecutionException {
        return future.get();
      }

      @Override
      public Object get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException {
        return future.get(timeout, unit);
      }
    };
  }

  /**
   * Handles a newly arrived job config: if throttling is enabled and the job's throttle window
   * has not elapsed, the event is dropped; otherwise the job is scheduled (when it carries a
   * cron schedule) or executed once. On launch failure the next-schedulable time is reset to
   * EPOCH so the job can be retried immediately.
   */
  @Subscribe
  public synchronized void handleNewJobConfigArrival(NewJobConfigArrivalEvent newJobArrival) {
    String jobName = newJobArrival.getJobName();
    LOGGER.info("Received new job configuration of job " + jobName);
    Instant nextSchedulableTime = jobNameToNextSchedulableTime.getOrDefault(jobName, Instant.EPOCH);
    if (this.isThrottleEnabled && clock.instant().isBefore(nextSchedulableTime)) {
      LOGGER.info("Adding new job is skipped for job {}. Current time is {} and the next schedulable time would be {}",
          jobName,
          clock.instant(),
          nextSchedulableTime
      );
      return;
    }

    if (isThrottleEnabled) {
      nextSchedulableTime = clock.instant().plus(jobSchedulingThrottleTimeout);
      jobNameToNextSchedulableTime.put(jobName, nextSchedulableTime);
    }

    try {
      Properties jobProps = new Properties();
      jobProps.putAll(this.commonJobProperties);
      jobProps.putAll(newJobArrival.getJobConfig());

      // set uri so that we can delete this job later
      jobProps.setProperty(GobblinClusterConfigurationKeys.JOB_SPEC_URI, jobName);

      this.jobSchedulerMetrics.updateTimeBeforeJobScheduling(jobProps);

      // The throttling listener additionally updates jobNameToNextSchedulableTime on job completion/failure.
      GobblinHelixJobLauncherListener listener = isThrottleEnabled ?
          new GobblinThrottlingHelixJobLauncherListener(this.launcherMetrics, jobNameToNextSchedulableTime)
          : new GobblinHelixJobLauncherListener(this.launcherMetrics);

      if (jobProps.containsKey(ConfigurationKeys.JOB_SCHEDULE_KEY)) {
        LOGGER.info("Scheduling job " + jobName);
        scheduleJob(jobProps, listener);
      } else {
        LOGGER.info("No job schedule found, so running job " + jobName);
        this.jobExecutor.execute(new NonScheduledJobRunner(jobProps, listener));
      }
    } catch (JobException je) {
      LOGGER.error("Failed to schedule or run job {} . Reset the next scheduable time to {}", jobName, Instant.EPOCH, je);
      if (isThrottleEnabled) {
        // Allow an immediate retry after a launch error.
        jobNameToNextSchedulableTime.put(jobName, Instant.EPOCH);
      }
    }
  }

  /**
   * Handles an updated job config by deleting and re-adding the job. Skipped entirely when
   * throttled, so that the delete/add pair does not defeat the throttle window.
   */
  @Subscribe
  public synchronized void handleUpdateJobConfigArrival(UpdateJobConfigArrivalEvent updateJobArrival) {
    LOGGER.info("Received update for job configuration of job " + updateJobArrival.getJobName());
    String jobName = updateJobArrival.getJobName();
    Instant nextSchedulableTime = jobNameToNextSchedulableTime.getOrDefault(jobName, Instant.EPOCH);
    if (this.isThrottleEnabled && clock.instant().isBefore(nextSchedulableTime)) {
      LOGGER.info("Replanning is skipped for job {}. Current time is {} and the next schedulable time would be {}",
          jobName,
          clock.instant(),
          nextSchedulableTime
      );
      return;
    }

    try {
      handleDeleteJobConfigArrival(new DeleteJobConfigArrivalEvent(updateJobArrival.getJobName(),
          updateJobArrival.getJobConfig()));
    } catch (Exception je) {
      LOGGER.error("Failed to update job " + updateJobArrival.getJobName(), je);
    }

    try {
      handleNewJobConfigArrival(new NewJobConfigArrivalEvent(updateJobArrival.getJobName(),
          updateJobArrival.getJobConfig()));
    } catch (Exception je) {
      LOGGER.error("Failed to update job " + updateJobArrival.getJobName(), je);
    }
  }

  // Polls jobRunningMap once a second until the named job is no longer marked running.
  private void waitForJobCompletion(String jobName) {
    while (this.jobRunningMap.getOrDefault(jobName, false)) {
      LOGGER.info("Waiting for job {} to stop...", jobName);
      try {
        Thread.sleep(1000);
      } catch (InterruptedException e) {
        LOGGER.warn("Interrupted exception encountered: ", e);
      }
    }
  }

  /***
   * Deleting a workflow with throttling enabled means that the next
   * schedulable time for the workflow will remain unchanged.
   * Note: In such case, it is required to wait until the throttle
   * timeout period elapses before the workflow can be rescheduled.
   *
   * @param deleteJobArrival
   * @throws InterruptedException
   */
  @Subscribe
  public synchronized void handleDeleteJobConfigArrival(DeleteJobConfigArrivalEvent deleteJobArrival)
      throws InterruptedException {
    LOGGER.info("Received delete for job configuration of job " + deleteJobArrival.getJobName());
    try {
      unscheduleJob(deleteJobArrival.getJobName());
      cancelJobIfRequired(deleteJobArrival);
    } catch (JobException je) {
      LOGGER.error("Failed to unschedule job " + deleteJobArrival.getJobName());
    }
  }

  /**
   * Cancels the running Helix workflow for a job. The workflow id is looked up from the
   * persisted jobsMapping: planning jobs are cancelled via the task-driver Helix manager,
   * regular jobs via the job Helix manager.
   */
  @Subscribe
  public void handleCancelJobConfigArrival(CancelJobConfigArrivalEvent cancelJobArrival)
      throws InterruptedException {
    String jobName = cancelJobArrival.getJoburi();
    LOGGER.info("Received cancel for job configuration of job " + jobName);
    Optional<String> distributedJobMode;
    Optional<String> planningJob = Optional.empty();
    Optional<String> actualJob = Optional.empty();
    boolean cancelByDelete = PropertiesUtils.getPropAsBoolean(this.commonJobProperties,
        GobblinClusterConfigurationKeys.CANCEL_HELIX_JOB_BY_DELETE,
        String.valueOf(GobblinClusterConfigurationKeys.DEFAULT_CANCEL_HELIX_JOB_BY_DELETE));

    this.jobSchedulerMetrics.numCancellationStart.incrementAndGet();

    try {
      distributedJobMode = this.jobsMapping.getDistributedJobMode(jobName);
      if (distributedJobMode.isPresent() && Boolean.parseBoolean(distributedJobMode.get())) {
        planningJob = this.jobsMapping.getPlanningJobId(jobName);
      } else {
        actualJob = this.jobsMapping.getActualJobId(jobName);
      }
    } catch (IOException e) {
      LOGGER.warn("jobsMapping could not be retrieved for job {}", jobName);
      return;
    }

    if (planningJob.isPresent()) {
      LOGGER.info("Cancelling planning job helix workflow: {}", planningJob.get());
      HelixUtils.cancelWorkflow(planningJob.get(), this.taskDriverHelixManager.get(), this.helixJobStopTimeoutMillis,
          cancelByDelete);
    }

    if (actualJob.isPresent()) {
      LOGGER.info("Cancelling actual job helix workflow: {}", actualJob.get());
      HelixUtils.cancelWorkflow(actualJob.get(), this.jobHelixManager, this.helixJobStopTimeoutMillis, cancelByDelete);
    }

    this.jobSchedulerMetrics.numCancellationStart.decrementAndGet();
  }

  /**
   * On delete, optionally cancels the job's running Helix workflow (when
   * CANCEL_RUNNING_JOB_ON_DELETE is set) and blocks until it has actually stopped.
   */
  private void cancelJobIfRequired(DeleteJobConfigArrivalEvent deleteJobArrival) throws InterruptedException {
    Properties jobConfig = deleteJobArrival.getJobConfig();
    if (PropertiesUtils.getPropAsBoolean(jobConfig, GobblinClusterConfigurationKeys.CANCEL_RUNNING_JOB_ON_DELETE,
        GobblinClusterConfigurationKeys.DEFAULT_CANCEL_RUNNING_JOB_ON_DELETE)) {
      LOGGER.info("Cancelling workflow: {}", deleteJobArrival.getJobName());
      //Workaround for preventing indefinite hangs observed in TaskDriver.getWorkflows() call.
      Callable<Map<String, String>> workflowsCallable = () -> HelixUtils.getWorkflowIdsFromJobNames(this.jobHelixManager,
          Collections.singletonList(deleteJobArrival.getJobName()));
      Retryer<Map<String, String>> retryer = RetryerBuilder.<Map<String, String>>newBuilder()
          .retryIfException()
          .withStopStrategy(StopStrategies.stopAfterAttempt(5))
          .withAttemptTimeLimiter(AttemptTimeLimiters.fixedTimeLimit(this.helixWorkflowListingTimeoutMillis, TimeUnit.MILLISECONDS)).build();
      Map<String, String> jobNameToWorkflowIdMap;
      try {
        jobNameToWorkflowIdMap = retryer.call(workflowsCallable);
      } catch (ExecutionException | RetryException e) {
        // Best-effort: if Helix/Zk cannot be listed after retries, skip the cancel rather than fail the delete.
        LOGGER.error("Exception encountered when getting workflows from Helix; likely a Helix/Zk issue.", e);
        return;
      }

      if (jobNameToWorkflowIdMap.containsKey(deleteJobArrival.getJobName())) {
        String workflowId = jobNameToWorkflowIdMap.get(deleteJobArrival.getJobName());
        boolean cancelByDelete = PropertiesUtils.getPropAsBoolean(jobConfig, GobblinClusterConfigurationKeys.CANCEL_HELIX_JOB_BY_DELETE,
            String.valueOf(GobblinClusterConfigurationKeys.DEFAULT_CANCEL_HELIX_JOB_BY_DELETE));
        HelixUtils.cancelWorkflow(workflowId, this.jobHelixManager, helixJobStopTimeoutMillis, cancelByDelete);
        LOGGER.info("Cancelled workflow: {}", deleteJobArrival.getJobName());
        //Wait until the cancelled job is complete.
        waitForJobCompletion(deleteJobArrival.getJobName());
      } else {
        LOGGER.warn("Could not find Helix Workflow Id for job: {}", deleteJobArrival.getJobName());
      }
    }
  }

  /**
   * This class is responsible for running non-scheduled jobs.
   */
  class NonScheduledJobRunner implements Runnable {

    private final Properties jobProps;
    private final GobblinHelixJobLauncherListener jobListener;
    // Wall-clock time at which this runnable was created; used for queueing-delay metrics.
    private final Long creationTimeInMillis;

    public NonScheduledJobRunner(Properties jobProps,
                                 GobblinHelixJobLauncherListener jobListener) {
      this.jobProps = jobProps;
      this.jobListener = jobListener;
      this.creationTimeInMillis = System.currentTimeMillis();
    }

    @Override
    public void run() {
      try {
        GobblinHelixJobScheduler.this.jobSchedulerMetrics.updateTimeBeforeJobLaunching(this.jobProps);
        GobblinHelixJobScheduler.this.jobSchedulerMetrics.updateTimeBetweenJobSchedulingAndJobLaunching(this.creationTimeInMillis, System.currentTimeMillis());
        GobblinHelixJobScheduler.this.runJob(this.jobProps, this.jobListener);
      } catch (JobException je) {
        LOGGER.error("Failed to schedule or run job " + this.jobProps.getProperty(ConfigurationKeys.JOB_NAME_KEY), je);
      }
    }
  }
}
2,218
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/GobblinClusterException.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.cluster; public class GobblinClusterException extends RuntimeException { public GobblinClusterException(final String message, final Throwable cause) { super(message, cause); } }
2,219
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/GobblinHelixPlanningJobLauncherMetrics.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.cluster;

import java.util.concurrent.TimeUnit;

import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.instrumented.StandardMetricsBridge;
import org.apache.gobblin.metrics.ContextAwareMeter;
import org.apache.gobblin.metrics.ContextAwareTimer;
import org.apache.gobblin.metrics.MetricContext;

/**
 * {@link StandardMetricsBridge.StandardMetrics} implementation tracking planning-job outcomes:
 * timers for completed and failed planning jobs and a meter for skipped planning jobs.
 */
public class GobblinHelixPlanningJobLauncherMetrics extends StandardMetricsBridge.StandardMetrics {
  private final String metricsName;

  public static final String TIMER_FOR_COMPLETED_PLANNING_JOBS = "timeForCompletedPlanningJobs";
  public static final String TIMER_FOR_FAILED_PLANNING_JOBS = "timeForFailedPlanningJobs";
  public static final String METER_FOR_SKIPPED_PLANNING_JOBS = "skippedPlanningJobs";

  final ContextAwareTimer timeForCompletedPlanningJobs;
  final ContextAwareTimer timeForFailedPlanningJobs;
  final ContextAwareMeter skippedPlanningJobs;

  /**
   * @param metricsName     name reported by {@link #getName()}
   * @param metricContext   context in which the timers/meter are created
   * @param windowSizeInMin sliding-window size, in minutes, for the timers
   * @param jobsMapping     currently unused here — NOTE(review): kept for signature
   *                        compatibility with existing callers; confirm before removing
   */
  public GobblinHelixPlanningJobLauncherMetrics(String metricsName,
      final MetricContext metricContext,
      int windowSizeInMin,
      HelixJobsMapping jobsMapping) {
    this.metricsName = metricsName;
    this.timeForCompletedPlanningJobs = metricContext.contextAwareTimer(TIMER_FOR_COMPLETED_PLANNING_JOBS, windowSizeInMin, TimeUnit.MINUTES);
    this.timeForFailedPlanningJobs = metricContext.contextAwareTimer(TIMER_FOR_FAILED_PLANNING_JOBS, windowSizeInMin, TimeUnit.MINUTES);
    this.skippedPlanningJobs = metricContext.contextAwareMeter(METER_FOR_SKIPPED_PLANNING_JOBS);

    this.contextAwareMetrics.add(timeForCompletedPlanningJobs);
    this.contextAwareMetrics.add(timeForFailedPlanningJobs);
    // FIX: the skipped-planning-jobs meter was created but never registered in
    // contextAwareMetrics (unlike the two timers above), so it was not exposed
    // through the StandardMetrics collection.
    this.contextAwareMetrics.add(skippedPlanningJobs);
  }

  /** Records the elapsed time of a successfully completed planning job. */
  public void updateTimeForCompletedPlanningJobs(long startTime) {
    Instrumented.updateTimer(
        com.google.common.base.Optional.of(this.timeForCompletedPlanningJobs),
        System.currentTimeMillis() - startTime,
        TimeUnit.MILLISECONDS);
  }

  /** Records the elapsed time of a failed planning job. */
  public void updateTimeForFailedPlanningJobs(long startTime) {
    Instrumented.updateTimer(
        com.google.common.base.Optional.of(this.timeForFailedPlanningJobs),
        System.currentTimeMillis() - startTime,
        TimeUnit.MILLISECONDS);
  }

  @Override
  public String getName() {
    return this.metricsName;
  }
}
2,220
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/GobblinHelixJobFactory.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.cluster;

import java.net.URI;

import org.apache.hadoop.fs.Path;
import org.apache.helix.task.Task;
import org.apache.helix.task.TaskCallbackContext;
import org.apache.helix.task.TaskFactory;

import com.typesafe.config.Config;

import lombok.Getter;
import lombok.extern.slf4j.Slf4j;

import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.PathUtils;

/**
 * <p> A {@link TaskFactory} that creates {@link GobblinHelixJobTask}
 * to run task driver logic.
 */
@Slf4j
class GobblinHelixJobFactory implements TaskFactory {

  // Persistent job-name -> workflow-id mapping shared by every task created here.
  protected HelixJobsMapping jobsMapping;
  protected TaskRunnerSuiteBase.Builder builder;

  @Getter
  protected GobblinHelixJobLauncherMetrics launcherMetrics;

  @Getter
  protected GobblinHelixJobTask.GobblinHelixJobTaskMetrics jobTaskMetrics;

  @Getter
  protected GobblinHelixMetrics helixMetrics;

  /**
   * Captures the suite builder, constructs the jobs mapping from its work directory,
   * and wires up launcher / job-task / helix metrics (all planning-job related).
   */
  public GobblinHelixJobFactory(TaskRunnerSuiteBase.Builder builder, MetricContext metricContext) {
    this.builder = builder;

    // Build the jobs mapping from the suite's system config and application work path.
    Config sysConfig = this.builder.getConfig();
    Path workPath = this.builder.getAppWorkPath();
    URI rootUri = PathUtils.getRootPath(workPath).toUri();
    this.jobsMapping = new HelixJobsMapping(sysConfig, rootUri, workPath.toString());

    // All metric timers share one configured sliding-window size.
    int windowSizeInMin = ConfigUtils.getInt(
        sysConfig,
        ConfigurationKeys.METRIC_TIMER_WINDOW_SIZE_IN_MINUTES,
        ConfigurationKeys.DEFAULT_METRIC_TIMER_WINDOW_SIZE_IN_MINUTES);

    this.launcherMetrics = new GobblinHelixJobLauncherMetrics("launcherInJobFactory", metricContext, windowSizeInMin);
    this.jobTaskMetrics = new GobblinHelixJobTask.GobblinHelixJobTaskMetrics(metricContext, windowSizeInMin);
    this.helixMetrics = new GobblinHelixMetrics("helixMetricsInJobFactory", metricContext, windowSizeInMin);
  }

  /** Hands each new Helix task the shared mapping, builder, and metric holders. */
  @Override
  public Task createNewTask(TaskCallbackContext context) {
    return new GobblinHelixJobTask(
        context,
        this.jobsMapping,
        this.builder,
        this.launcherMetrics,
        this.jobTaskMetrics,
        this.helixMetrics);
  }
}
2,221
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/GobblinHelixJobLauncherListener.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.cluster;

import java.util.concurrent.TimeUnit;

import com.google.common.base.Optional;

import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.runtime.JobContext;
import org.apache.gobblin.runtime.JobState;
import org.apache.gobblin.runtime.listeners.AbstractJobListener;
import org.apache.gobblin.runtime.listeners.JobListener;

/**
 * A job listener used when {@link GobblinHelixJobLauncher} launches a job.
 * The {@link GobblinHelixJobLauncherMetrics} will always be passed in because
 * it will be be updated accordingly.
 */
class GobblinHelixJobLauncherListener extends AbstractJobListener {

  // Job-state property under which the launch timestamp (System.nanoTime) is stashed
  // at prepare time and read back at completion time.
  private static final String JOB_START_TIME = "jobStartTime";

  private final GobblinHelixJobLauncherMetrics metrics;

  GobblinHelixJobLauncherListener(GobblinHelixJobLauncherMetrics jobLauncherMetrics) {
    this.metrics = jobLauncherMetrics;
  }

  /** Records the launch timestamp into the job state and marks the launched meter. */
  @Override
  public void onJobPrepare(JobContext jobContext)
      throws Exception {
    super.onJobPrepare(jobContext);
    jobContext.getJobState().setProp(JOB_START_TIME, Long.toString(System.nanoTime()));
    this.metrics.numJobsLaunched.mark();
  }

  /**
   * From {@link org.apache.gobblin.runtime.AbstractJobLauncher#launchJob(JobListener)}, the final
   * job state should only be FAILED or COMMITTED. This means the completed jobs metrics covers
   * both failed jobs and committed jobs.
   */
  @Override
  public void onJobCompletion(JobContext jobContext)
      throws Exception {
    super.onJobCompletion(jobContext);

    JobState jobState = jobContext.getJobState();
    long launchStartNanos = jobState.getPropAsLong(JOB_START_TIME);

    // Every terminal job counts as "completed"; the branch below then attributes
    // it to either the failed or the committed bucket.
    this.metrics.numJobsCompleted.mark();
    Instrumented.updateTimer(Optional.of(this.metrics.timeForCompletedJobs),
        System.nanoTime() - launchStartNanos,
        TimeUnit.NANOSECONDS);

    if (jobState.getState() == JobState.RunningState.FAILED) {
      this.metrics.numJobsFailed.mark();
      Instrumented.updateTimer(Optional.of(this.metrics.timeForFailedJobs),
          System.nanoTime() - launchStartNanos,
          TimeUnit.NANOSECONDS);
    } else {
      this.metrics.numJobsCommitted.mark();
      Instrumented.updateTimer(Optional.of(this.metrics.timeForCommittedJobs),
          System.nanoTime() - launchStartNanos,
          TimeUnit.NANOSECONDS);
    }
  }

  /** Marks the cancelled meter; no timer is updated for cancellations. */
  @Override
  public void onJobCancellation(JobContext jobContext)
      throws Exception {
    super.onJobCancellation(jobContext);
    this.metrics.numJobsCancelled.mark();
  }
}
2,222
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/FsJobConfigurationManager.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.cluster;

import java.io.IOException;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.fs.FileSystem;

import com.google.common.base.Optional;
import com.google.common.eventbus.EventBus;
import com.typesafe.config.Config;

import lombok.extern.slf4j.Slf4j;

import org.apache.gobblin.runtime.api.FsSpecConsumer;
import org.apache.gobblin.runtime.api.JobSpec;
import org.apache.gobblin.runtime.api.MutableJobCatalog;
import org.apache.gobblin.runtime.api.SpecConsumer;
import org.apache.gobblin.runtime.api.SpecExecutor;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.ExecutorsUtils;

/**
 * A {@link JobConfigurationManager} that reads {@link JobSpec}s from a source path on a
 * {@link org.apache.hadoop.fs.FileSystem} and posts them to an {@link EventBus}.
 * The {@link FsJobConfigurationManager} has an underlying {@link FsSpecConsumer} that periodically reads the
 * {@link JobSpec}s from the filesystem and posts an appropriate JobConfigArrivalEvent with the job configuration to
 * the EventBus for consumption by the listeners.
 */
@Slf4j
public class FsJobConfigurationManager extends JobConfigurationManager {
  // Default polling interval, in seconds, when JOB_SPEC_REFRESH_INTERVAL is not configured.
  private static final long DEFAULT_JOB_SPEC_REFRESH_INTERVAL = 60;

  private final long refreshIntervalInSeconds;

  // Single-threaded executor that periodically invokes fetchJobSpecs().
  private final ScheduledExecutorService fetchJobSpecExecutor;

  // Optional catalog mirrored with every ADD/UPDATE/DELETE consumed from the filesystem.
  private final Optional<MutableJobCatalog> jobCatalogOptional;

  private final SpecConsumer specConsumer;

  /** Convenience constructor without a job catalog (no catalog mirroring is performed). */
  public FsJobConfigurationManager(EventBus eventBus, Config config, FileSystem fs) {
    this(eventBus, config, null, fs);
  }

  /**
   * @param eventBus   bus on which JobConfigArrivalEvents are posted
   * @param config     system config; read for the refresh interval and FsSpecConsumer settings
   * @param jobCatalog nullable catalog to keep in sync with consumed specs
   * @param fs         filesystem the {@link FsSpecConsumer} reads specs from
   */
  public FsJobConfigurationManager(EventBus eventBus, Config config, MutableJobCatalog jobCatalog, FileSystem fs) {
    super(eventBus, config);
    this.jobCatalogOptional = jobCatalog != null ? Optional.of(jobCatalog) : Optional.absent();
    this.refreshIntervalInSeconds = ConfigUtils.getLong(config, GobblinClusterConfigurationKeys.JOB_SPEC_REFRESH_INTERVAL,
        DEFAULT_JOB_SPEC_REFRESH_INTERVAL);

    this.fetchJobSpecExecutor = Executors.newSingleThreadScheduledExecutor(
        ExecutorsUtils.newThreadFactory(Optional.of(log), Optional.of("FetchJobSpecExecutor")));

    this.specConsumer = new FsSpecConsumer(fs, config);
  }

  // Starts the periodic spec-fetching task (first run immediately, then every refresh interval).
  protected void startUp() throws Exception {
    super.startUp();
    // Schedule the job config fetch task
    this.fetchJobSpecExecutor.scheduleAtFixedRate(new Runnable() {
      @Override
      public void run() {
        try {
          fetchJobSpecs();
        } catch (Exception e) {
          //Log error and swallow exception to allow executor service to continue scheduling the thread
          log.error("Failed to fetch job specs due to: ", e);
        }
      }
    }, 0, this.refreshIntervalInSeconds, TimeUnit.SECONDS);
  }

  @Override
  protected void shutDown() throws Exception {
    ExecutorsUtils.shutdownExecutorService(this.fetchJobSpecExecutor, Optional.of(log));
    super.shutDown();
  }

  /**
   * Pulls the latest changed specs from the {@link SpecConsumer}, mirrors each into the
   * job catalog (when present), posts the matching arrival event to the EventBus, and
   * finally acks the spec back to the consumer so it can be removed from the filesystem.
   */
  void fetchJobSpecs() throws ExecutionException, InterruptedException {
    List<Pair<SpecExecutor.Verb, JobSpec>> jobSpecs =
        (List<Pair<SpecExecutor.Verb, JobSpec>>) this.specConsumer.changedSpecs().get();
    log.info("Fetched {} job specs", jobSpecs.size());

    for (Pair<SpecExecutor.Verb, JobSpec> entry : jobSpecs) {
      JobSpec jobSpec = entry.getValue();
      SpecExecutor.Verb verb = entry.getKey();
      if (verb.equals(SpecExecutor.Verb.ADD)) {
        // Handle addition
        if (this.jobCatalogOptional.isPresent()) {
          this.jobCatalogOptional.get().put(jobSpec);
        }
        postNewJobConfigArrival(jobSpec.getUri().toString(), jobSpec.getConfigAsProperties());
      } else if (verb.equals(SpecExecutor.Verb.UPDATE)) {
        //Handle update. Note that an UPDATE also uses catalog put(), same as ADD.
        if (this.jobCatalogOptional.isPresent()) {
          this.jobCatalogOptional.get().put(jobSpec);
        }
        postUpdateJobConfigArrival(jobSpec.getUri().toString(), jobSpec.getConfigAsProperties());
      } else if (verb.equals(SpecExecutor.Verb.DELETE)) {
        // Handle delete
        if (this.jobCatalogOptional.isPresent()) {
          this.jobCatalogOptional.get().remove(jobSpec.getUri());
        }
        postDeleteJobConfigArrival(jobSpec.getUri().toString(), jobSpec.getConfigAsProperties());
      } else if (verb.equals(SpecExecutor.Verb.CANCEL)) {
        // Handle cancel; only the URI is posted, no config. Unknown verbs fall through silently.
        postCancelJobConfigArrival(jobSpec.getUri().toString());
      }
      try {
        //Acknowledge the successful consumption of the JobSpec back to the SpecConsumer, so that the
        //SpecConsumer can delete the JobSpec.
        this.specConsumer.commit(jobSpec);
      } catch (IOException e) {
        log.error("Error when committing to FsSpecConsumer: ", e);
      }
    }
  }
}
2,223
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/InMemoryWuSingleTask.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.cluster;

import java.io.IOException;
import java.util.List;

import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.runtime.JobState;
import org.apache.gobblin.runtime.util.StateStores;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.writer.DataWriter;
import org.apache.gobblin.writer.DataWriterBuilder;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import com.google.common.collect.Lists;
import com.typesafe.config.Config;


/**
 * Instead of deserializing {@link JobState} and {@link WorkUnit} from filesystem, create them in memory.
 * Uses {@link DummyDataWriter} so that the execution of a Task goes through.
 *
 * This extension class added a declared dummyWriterBuilder so that the task execution will go through.
 * This class is primarily designed for testing purpose.
 */
public class InMemoryWuSingleTask extends SingleTask {

  public InMemoryWuSingleTask(String jobId, Path workUnitFilePath, Path jobStateFilePath, FileSystem fs,
      TaskAttemptBuilder taskAttemptBuilder, StateStores stateStores, Config dynamicConfig) {
    super(jobId, workUnitFilePath, jobStateFilePath, fs, taskAttemptBuilder, stateStores, dynamicConfig);
  }

  /**
   * Returns a single in-memory {@link WorkUnit} instead of reading one from {@code workUnitFilePath}.
   * The work unit is wired to {@link DummySource} and {@link DummyDataWriterBuilder} so the task
   * pipeline can run end-to-end without any real I/O.
   */
  @Override
  protected List<WorkUnit> getWorkUnits() throws IOException {
    WorkUnit workUnit = new WorkUnit();
    workUnit.setProp(ConfigurationKeys.TASK_ID_KEY, "randomTask");
    workUnit.setProp("source.class", "org.apache.gobblin.cluster.DummySource");

    // Missing this line leads to failure in precondition check of avro writer.
    workUnit.setProp(ConfigurationKeys.WRITER_BUILDER_CLASS, DummyDataWriterBuilder.class.getName());
    return Lists.newArrayList(workUnit);
  }

  /**
   * Returns a fresh in-memory {@link JobState} instead of deserializing one from {@code jobStateFilePath}.
   */
  @Override
  protected JobState getJobState() throws IOException {
    JobState jobState = new JobState("randomJobName", "randomJobId");
    return jobState;
  }

  /**
   * Builder that produces the no-op {@link DummyDataWriter}, satisfying the task's writer requirement.
   */
  public static class DummyDataWriterBuilder extends DataWriterBuilder<String, Integer> {
    @Override
    public DataWriter<Integer> build() throws IOException {
      return new DummyDataWriter();
    }
  }

  /**
   * A {@link DataWriter} whose every operation is a no-op; records/bytes written always report 0.
   */
  private static class DummyDataWriter implements DataWriter<Integer> {
    @Override
    public void write(Integer record) throws IOException {
      // Nothing to do
    }

    @Override
    public void commit() throws IOException {
      // Nothing to do
    }

    @Override
    public void cleanup() throws IOException {
      // Nothing to do
    }

    @Override
    public long recordsWritten() {
      return 0;
    }

    @Override
    public long bytesWritten() throws IOException {
      return 0;
    }

    @Override
    public void close() throws IOException {
      // Nothing to do
    }
  }
}
2,224
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/GobblinClusterUtils.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.cluster;

import java.io.IOException;
import java.net.InetAddress;
import java.net.URI;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.api.ApplicationConstants;

import com.google.common.base.Splitter;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;

import lombok.extern.slf4j.Slf4j;

import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.DynamicConfigGenerator;
import org.apache.gobblin.runtime.AbstractJobLauncher;
import org.apache.gobblin.runtime.DynamicConfigGeneratorFactory;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.JobConfigurationUtils;
import org.apache.gobblin.util.PathUtils;

@Alpha
@Slf4j
public class GobblinClusterUtils {
  static final String JAVA_TMP_DIR_KEY = "java.io.tmpdir";

  /**
   * This template will be resolved by replacing "VALUE" as the value that gobblin recognized.
   * For more details, check {@link GobblinClusterUtils#setSystemProperties(Config)}
   */
  private static final String GOBBLIN_CLUSTER_SYSTEM_PROPERTY_LIST_TEMPLATE =
      GobblinClusterConfigurationKeys.GOBBLIN_CLUSTER_PREFIX + "systemPropertiesList.$VALUE";

  /**
   * This enum is used for specifying JVM options that Gobblin-Cluster will set whose value will need to be obtained
   * in JVM runtime.
   * e.g. YARN_CACHE will be used by Gobblin-on-YARN (an extension of Gobblin-Cluster) and resolved to an YARN-specific
   * temporary location internal to the application.
   *
   * Note that we could specify a couple of keys associated with the value, meaning the value should only be resolved
   * to associated keys but nothing else to avoid abusive usage. Users could also set resolved
   * {@link #GOBBLIN_CLUSTER_SYSTEM_PROPERTY_LIST_TEMPLATE} to expand default associated-key list.
   *
   * e.g. setting `gobblin.cluster.systemPropertiesList.YARN_CACHE` = [a,b] expands the associated-key list to
   * [java.io.tmpdir, a, b]. Only when a key is found in the associated-key list, then when you set
   * {@link GobblinClusterConfigurationKeys#GOBBLIN_CLUSTER_SYSTEM_PROPERTY_PREFIX}.${keyName}=YARN_CACHE, will the
   * resolution for the -D${KeyName} = resolvedValue(YARN_CACHE) happen.
   */
  public enum JVM_ARG_VALUE_RESOLVER {
    YARN_CACHE {
      @Override
      public List<String> getAssociatedKeys() {
        return yarnCacheAssociatedKeys;
      }

      @Override
      public String getResolution() {
        //When keys like java.io.tmpdir is configured to "YARN_CACHE", it sets the tmp dir to the Yarn container's cache location.
        // This setting will only be useful when the cluster is deployed in Yarn mode.
        return System.getenv(ApplicationConstants.Environment.PWD.key());
      }
    };

    // Kept for backward-compatibility
    private static List<String> yarnCacheAssociatedKeys = ImmutableList.of(JAVA_TMP_DIR_KEY);

    // default associated key with the value.
    public abstract List<String> getAssociatedKeys();

    public abstract String getResolution();

    /**
     * Case-insensitive lookup of the resolver member whose name matches {@code value}.
     *
     * @param value the configured value (e.g. "YARN_CACHE", in any case)
     * @return the matching member, or {@code null} if no member matches
     */
    static JVM_ARG_VALUE_RESOLVER fromString(String value) {
      for (JVM_ARG_VALUE_RESOLVER v : JVM_ARG_VALUE_RESOLVER.values()) {
        if (v.name().equalsIgnoreCase(value)) {
          return v;
        }
      }
      return null;
    }

    /**
     * @return true iff some member's name matches {@code value}, ignoring case.
     */
    public static boolean contains(String value) {
      return fromString(value) != null;
    }
  }

  /**
   * Get the name of the current host.
   *
   * @return the name of the current host
   * @throws UnknownHostException if the host name is unknown
   */
  public static String getHostname() throws UnknownHostException {
    return InetAddress.getLocalHost().getHostName();
  }

  /**
   * Get the application working directory {@link Path}.
   *
   * @param config the cluster configuration; {@link GobblinClusterConfigurationKeys#CLUSTER_WORK_DIR}, when present,
   *               overrides the default of the filesystem's home directory
   * @param fs a {@link FileSystem} instance on which {@link FileSystem#getHomeDirectory()} is called
   *           to get the home directory of the {@link FileSystem} of the application working directory
   * @param applicationName the application name
   * @param applicationId the application ID in string form
   * @return the cluster application working directory {@link Path}
   */
  public static Path getAppWorkDirPathFromConfig(Config config, FileSystem fs,
      String applicationName, String applicationId) {
    if (config.hasPath(GobblinClusterConfigurationKeys.CLUSTER_WORK_DIR)) {
      return new Path(new Path(fs.getUri()),
          PathUtils.combinePaths(config.getString(GobblinClusterConfigurationKeys.CLUSTER_WORK_DIR),
              getAppWorkDirPath(applicationName, applicationId)));
    }
    return new Path(fs.getHomeDirectory(), getAppWorkDirPath(applicationName, applicationId));
  }

  /**
   * Get the application working directory {@link String}.
   *
   * @param applicationName the application name
   * @param applicationId the application ID in string form
   * @return the cluster application working directory {@link String}
   */
  public static String getAppWorkDirPath(String applicationName, String applicationId) {
    return applicationName + Path.SEPARATOR + applicationId;
  }

  /**
   * Generate the path to the job.state file
   * @param usingStateStore is a state store being used to store the job.state content
   * @param appWorkPath work directory
   * @param jobId job id
   * @return a {@link Path} referring to the job.state
   */
  public static Path getJobStateFilePath(boolean usingStateStore, Path appWorkPath, String jobId) {
    final Path jobStateFilePath;

    // the state store uses a path of the form workdir/_jobstate/job_id/job_id.job.state while old method stores the file
    // in the app work dir.
    if (usingStateStore) {
      jobStateFilePath = new Path(appWorkPath, GobblinClusterConfigurationKeys.JOB_STATE_DIR_NAME
          + Path.SEPARATOR + jobId + Path.SEPARATOR + jobId + "."
          + AbstractJobLauncher.JOB_STATE_FILE_NAME);
    } else {
      jobStateFilePath = new Path(appWorkPath, jobId + "." + AbstractJobLauncher.JOB_STATE_FILE_NAME);
    }

    log.info("job state file path: " + jobStateFilePath);

    return jobStateFilePath;
  }

  /**
   * Set the system properties from the input {@link Config} instance.
   *
   * Values that name a {@link JVM_ARG_VALUE_RESOLVER} member (matched case-insensitively) are resolved at
   * runtime, provided the property key is in the resolver's allowed-key list; all other entries are copied
   * verbatim into the JVM system properties.
   *
   * @param config the cluster configuration holding the
   *               {@link GobblinClusterConfigurationKeys#GOBBLIN_CLUSTER_SYSTEM_PROPERTY_PREFIX}-scoped entries
   */
  public static void setSystemProperties(Config config) {
    Properties properties = ConfigUtils.configToProperties(ConfigUtils.getConfig(config,
        GobblinClusterConfigurationKeys.GOBBLIN_CLUSTER_SYSTEM_PROPERTY_PREFIX, ConfigFactory.empty()));
    for (Map.Entry<Object, Object> entry : properties.entrySet()) {
      // Look the resolver up case-insensitively. The previous contains()/valueOf() pair disagreed on case
      // handling: contains() matched ignoring case while Enum.valueOf() is case-sensitive, so a value such
      // as "yarn_cache" passed the guard and then threw IllegalArgumentException.
      JVM_ARG_VALUE_RESOLVER enumMember = JVM_ARG_VALUE_RESOLVER.fromString(entry.getValue().toString());
      if (enumMember != null) {
        List<String> allowedKeys = new ArrayList<>(enumMember.getAssociatedKeys());
        allowedKeys.addAll(getAdditionalKeys(entry.getValue().toString(), config));
        if (allowedKeys.contains(entry.getKey().toString())) {
          log.info("Setting system property {} to dynamically resolved value: {}", entry.getKey(),
              enumMember.getResolution());
          System.setProperty(entry.getKey().toString(), enumMember.getResolution());
          continue;
        } else {
          log.warn("String {} not being registered for dynamic JVM-arg resolution, "
              + "considering add it by setting extension key", entry.getKey());
        }
      }
      // Default: copy the configured value through unchanged.
      System.setProperty(entry.getKey().toString(), entry.getValue().toString());
    }
  }

  /**
   * Reads the user-supplied extension of a resolver's allowed-key list, configured under
   * {@link #GOBBLIN_CLUSTER_SYSTEM_PROPERTY_LIST_TEMPLATE} with {@code $VALUE} replaced by the resolver name.
   *
   * @param value the resolver name as configured
   * @param config the cluster configuration
   * @return the additional keys, or an empty collection when none are configured
   */
  private static Collection<String> getAdditionalKeys(String value, Config config) {
    String resolvedKey = GOBBLIN_CLUSTER_SYSTEM_PROPERTY_LIST_TEMPLATE.replace("$VALUE", value);
    if (config.hasPath(resolvedKey)) {
      return StreamSupport.stream(
          Splitter.on(",").trimResults().omitEmptyStrings().split(config.getString(resolvedKey)).spliterator(), false
      ).collect(Collectors.toList());
    } else {
      return Lists.newArrayList();
    }
  }

  /**
   * Get the dynamic config from a {@link DynamicConfigGenerator}
   * @param config input config
   * @return the dynamic config
   */
  public static Config getDynamicConfig(Config config) {
    // load dynamic configuration and add them to the job properties
    DynamicConfigGenerator dynamicConfigGenerator = DynamicConfigGeneratorFactory.createDynamicConfigGenerator(config);
    Config dynamicConfig = dynamicConfigGenerator.generateDynamicConfig(config);
    return dynamicConfig;
  }

  /**
   * Add dynamic config with higher precedence to the input config
   * @param config input config
   * @return a config combining the input config with the dynamic config
   */
  public static Config addDynamicConfig(Config config) {
    return getDynamicConfig(config).withFallback(config);
  }

  /**
   * A utility method to construct a {@link FileSystem} object with the configured Hadoop overrides provided as part of
   * the cluster configuration.
   * @param config the cluster configuration
   * @param conf the Hadoop {@link Configuration} the overrides are merged into
   * @return a {@link FileSystem} object that is instantiated with the appropriated Hadoop config overrides.
   * @throws IOException if the filesystem cannot be created
   */
  public static FileSystem buildFileSystem(Config config, Configuration conf) throws IOException {
    Config hadoopOverrides = ConfigUtils.getConfigOrEmpty(config,
        GobblinClusterConfigurationKeys.HADOOP_CONFIG_OVERRIDES_PREFIX);
    //Add any Hadoop-specific overrides into the Configuration object
    JobConfigurationUtils.putPropertiesIntoConfiguration(ConfigUtils.configToProperties(hadoopOverrides), conf);
    return config.hasPath(ConfigurationKeys.FS_URI_KEY)
        ? FileSystem.get(URI.create(config.getString(ConfigurationKeys.FS_URI_KEY)), conf)
        : FileSystem.get(conf);
  }
}
2,225
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/GobblinHelixJobLauncherMetrics.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.cluster;

import java.util.concurrent.TimeUnit;

import org.apache.gobblin.instrumented.StandardMetricsBridge;
import org.apache.gobblin.metrics.ContextAwareMeter;
import org.apache.gobblin.metrics.ContextAwareTimer;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.runtime.api.JobExecutionLauncher;


/**
 * Metrics that relates to jobs launched by {@link GobblinHelixJobLauncher}.
 */
class GobblinHelixJobLauncherMetrics extends StandardMetricsBridge.StandardMetrics {
  // Name reported via getName(); distinguishes multiple launcher metric sets in the same registry.
  private final String metricsName;

  final ContextAwareMeter numJobsLaunched;
  final ContextAwareMeter numJobsCompleted;
  final ContextAwareMeter numJobsCommitted;
  final ContextAwareMeter numJobsFailed;
  final ContextAwareMeter numJobsCancelled;
  final ContextAwareTimer timeForCompletedJobs;
  final ContextAwareTimer timeForFailedJobs;
  final ContextAwareTimer timeForCommittedJobs;

  /**
   * Creates and registers all launcher meters, timers, and the derived "jobs running" gauge.
   *
   * @param metricsName name this metric set reports via {@link #getName()}
   * @param metricContext context the metrics are created in
   * @param windowSizeInMin sliding-window size (minutes) for the job timers
   */
  public GobblinHelixJobLauncherMetrics(String metricsName, final MetricContext metricContext, int windowSizeInMin) {
    this.metricsName = metricsName;

    this.numJobsLaunched = metricContext.contextAwareMeter(JobExecutionLauncher.StandardMetrics.NUM_JOBS_LAUNCHED);
    this.contextAwareMetrics.add(this.numJobsLaunched);
    this.numJobsCompleted = metricContext.contextAwareMeter(JobExecutionLauncher.StandardMetrics.NUM_JOBS_COMPLETED);
    this.contextAwareMetrics.add(this.numJobsCompleted);
    this.numJobsCommitted = metricContext.contextAwareMeter(JobExecutionLauncher.StandardMetrics.NUM_JOBS_COMMITTED);
    this.contextAwareMetrics.add(this.numJobsCommitted);
    this.numJobsFailed = metricContext.contextAwareMeter(JobExecutionLauncher.StandardMetrics.NUM_JOBS_FAILED);
    this.contextAwareMetrics.add(this.numJobsFailed);
    this.numJobsCancelled = metricContext.contextAwareMeter(JobExecutionLauncher.StandardMetrics.NUM_JOBS_CANCELLED);
    this.contextAwareMetrics.add(this.numJobsCancelled);

    // "Running" is derived rather than tracked directly: launched minus completed at read time.
    this.contextAwareMetrics.add(metricContext.newContextAwareGauge(JobExecutionLauncher.StandardMetrics.NUM_JOBS_RUNNING,
        () -> (int) (GobblinHelixJobLauncherMetrics.this.numJobsLaunched.getCount()
            - GobblinHelixJobLauncherMetrics.this.numJobsCompleted.getCount())));

    this.timeForCompletedJobs = metricContext.contextAwareTimer(JobExecutionLauncher.StandardMetrics.TIMER_FOR_COMPLETED_JOBS,
        windowSizeInMin, TimeUnit.MINUTES);
    this.timeForFailedJobs = metricContext.contextAwareTimer(JobExecutionLauncher.StandardMetrics.TIMER_FOR_FAILED_JOBS,
        windowSizeInMin, TimeUnit.MINUTES);
    this.timeForCommittedJobs = metricContext.contextAwareTimer(JobExecutionLauncher.StandardMetrics.TIMER_FOR_COMMITTED_JOBS,
        windowSizeInMin, TimeUnit.MINUTES);
    this.contextAwareMetrics.add(timeForCommittedJobs);
    this.contextAwareMetrics.add(timeForCompletedJobs);
    this.contextAwareMetrics.add(timeForFailedJobs);
  }

  @Override
  public String getName() {
    return this.metricsName;
  }
}
2,226
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/SingleTaskRunner.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.cluster;

import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.Lists;
import com.google.common.util.concurrent.Service;
import com.google.common.util.concurrent.ServiceManager;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;

import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.runtime.TaskExecutor;
import org.apache.gobblin.runtime.util.StateStores;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.HadoopUtils;


/**
 * Runs a single {@link SingleTask} end-to-end in the current JVM: starts the required
 * task-execution services, builds the task from a serialized work-unit file, runs it,
 * and shuts the services down again.
 */
class SingleTaskRunner {
  private static final Logger logger = LoggerFactory.getLogger(SingleTaskRunner.class);

  protected final String jobId;
  // Path to the serialized work unit this runner executes.
  protected final String workUnitFilePath;
  protected final Config clusterConfig;
  private final Path appWorkPath;
  @VisibleForTesting
  SingleTask task;
  private TaskExecutor taskExecutor;
  private GobblinHelixTaskStateTracker taskStateTracker;
  private ServiceManager serviceManager;

  /**
   * @param clusterConfigFilePath path to a file parseable by Typesafe Config; must contain
   *        {@link GobblinTaskRunner#CLUSTER_APP_WORK_DIR}
   * @param jobId the job this task belongs to
   * @param workUnitFilePath path to the serialized work unit to execute
   */
  SingleTaskRunner(final String clusterConfigFilePath, final String jobId, final String workUnitFilePath) {
    this.jobId = jobId;
    this.workUnitFilePath = workUnitFilePath;
    this.clusterConfig = ConfigFactory.parseFile(new File(clusterConfigFilePath));
    final String workDir = this.clusterConfig.getString(GobblinTaskRunner.CLUSTER_APP_WORK_DIR);
    this.appWorkPath = new Path(workDir);
  }

  void run() throws IOException, InterruptedException {
    this.run(false);
  }

  /**
   *
   * @param fail set to false in normal cases, when set to true, the underlying task will fail.
   */
  void run(boolean fail) throws IOException, InterruptedException {
    logger.info("SingleTaskRunner running.");
    // Services must be healthy before the task is created, and are always torn down afterwards.
    startServices();
    runTask(fail);
    shutdownServices();
  }

  /**
   * Initializes and starts the task-execution services, waiting up to 10 seconds for them
   * to become healthy.
   *
   * @throws GobblinClusterException if the services do not start within the timeout
   */
  @VisibleForTesting
  void startServices() {
    logger.info("SingleTaskRunner start services.");
    initServices();
    this.serviceManager.startAsync();
    try {
      this.serviceManager.awaitHealthy(10, TimeUnit.SECONDS);
    } catch (final TimeoutException e) {
      throw new GobblinClusterException("Timeout waiting for services to start.", e);
    }
  }

  // Stops the services; a shutdown timeout is logged rather than propagated since the task already ran.
  private void shutdownServices() {
    logger.info("SingleTaskRunner shutting down services.");
    this.serviceManager.stopAsync();
    try {
      this.serviceManager.awaitStopped(1, TimeUnit.MINUTES);
    } catch (final TimeoutException e) {
      logger.error("Timeout waiting for services to shutdown.", e);
    }
  }

  private void runTask(boolean fail) throws IOException, InterruptedException {
    logger.info("SingleTaskRunner running task.");
    initClusterSingleTask(fail);
    this.task.run();
  }

  /**
   * Builds the {@link SingleTask} instance: resolves the state stores and job-state file path
   * under the app work directory, then delegates construction to {@link #createSingleTaskHelper}.
   *
   * @param fail forwarded to the helper; when true the created task is expected to fail (test hook)
   */
  void initClusterSingleTask(boolean fail) throws IOException {
    final FileSystem fs = getFileSystem();
    final StateStores stateStores = new StateStores(this.clusterConfig, this.appWorkPath,
        GobblinClusterConfigurationKeys.OUTPUT_TASK_STATE_DIR_NAME, this.appWorkPath,
        GobblinClusterConfigurationKeys.INPUT_WORK_UNIT_DIR_NAME, this.appWorkPath,
        GobblinClusterConfigurationKeys.JOB_STATE_DIR_NAME);
    final Path jobStateFilePath =
        GobblinClusterUtils.getJobStateFilePath(stateStores.haveJobStateStore(), this.appWorkPath, this.jobId);
    final TaskAttemptBuilder taskAttemptBuilder = getTaskAttemptBuilder(stateStores);

    this.task = createSingleTaskHelper(taskAttemptBuilder, fs, stateStores, jobStateFilePath, fail);
  }

  /**
   * Factory hook for the task instance; subclasses can override to supply a different
   * {@link SingleTask} implementation. Note: the {@code fail} flag is not used by this base implementation.
   */
  protected SingleTask createSingleTaskHelper(TaskAttemptBuilder taskAttemptBuilder, FileSystem fs,
      StateStores stateStores, Path jobStateFilePath, boolean fail) throws IOException {
    return new SingleTask(this.jobId, new Path(this.workUnitFilePath), jobStateFilePath, fs, taskAttemptBuilder,
        stateStores, GobblinClusterUtils.getDynamicConfig(this.clusterConfig));
  }

  private TaskAttemptBuilder getTaskAttemptBuilder(final StateStores stateStores) {
    final TaskAttemptBuilder taskAttemptBuilder =
        new TaskAttemptBuilder(this.taskStateTracker, this.taskExecutor);
    // No container id is set. Use the default.
    taskAttemptBuilder.setTaskStateStore(stateStores.getTaskStateStore());
    return taskAttemptBuilder;
  }

  // Creates the executor/state-tracker services and the ServiceManager that owns their lifecycle.
  private void initServices() {
    final Properties properties = ConfigUtils.configToProperties(this.clusterConfig);
    this.taskExecutor = new TaskExecutor(properties);
    this.taskStateTracker = new GobblinHelixTaskStateTracker(properties);
    final List<Service> services = Lists.newArrayList(this.taskExecutor, this.taskStateTracker);
    this.serviceManager = new ServiceManager(services);
  }

  // Uses fs.uri from the cluster config when present; otherwise the default filesystem.
  private FileSystem getFileSystem() throws IOException {
    final Configuration conf = HadoopUtils.newConfiguration();
    final FileSystem fs = this.clusterConfig.hasPath(ConfigurationKeys.FS_URI_KEY)
        ? FileSystem.get(URI.create(this.clusterConfig.getString(ConfigurationKeys.FS_URI_KEY)), conf)
        : FileSystem.get(conf);
    return fs;
  }
}
2,227
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/LeadershipChangeAwareComponent.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.cluster; public interface LeadershipChangeAwareComponent { void becomeActive(); void becomeStandby(); }
2,228
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/GobblinHelixTaskFactory.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.cluster;

import org.apache.hadoop.fs.Path;
import org.apache.helix.HelixManager;
import org.apache.helix.task.Task;
import org.apache.helix.task.TaskCallbackContext;
import org.apache.helix.task.TaskDriver;
import org.apache.helix.task.TaskFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.codahale.metrics.Counter;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.typesafe.config.Config;

import lombok.Getter;

import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.runtime.TaskExecutor;
import org.apache.gobblin.runtime.TaskStateTracker;
import org.apache.gobblin.runtime.util.StateStores;
import org.apache.gobblin.util.ConfigUtils;


/**
 * An implementation of Helix's {@link TaskFactory} for {@link GobblinHelixTask}s.
 *
 * @author Yinan Li
 */
@Alpha
public class GobblinHelixTaskFactory implements TaskFactory {

  private static final Logger LOGGER = LoggerFactory.getLogger(GobblinHelixTaskFactory.class);

  private static final String GOBBLIN_CLUSTER_NEW_HELIX_TASK_COUNTER = "gobblin.cluster.new.helix.task";

  private final Optional<ContainerMetrics> containerMetrics;
  private final HelixManager helixManager;
  // Lazily initialized from the first TaskCallbackContext when not injected via the test constructor.
  private Optional<TaskDriver> taskDriver;
  private TaskRunnerSuiteBase.Builder builder;

  /**
   * A {@link Counter} to count the number of new {@link GobblinHelixTask}s that are created.
   */
  private final Optional<Counter> newTasksCounter;

  @Getter
  private final TaskExecutor taskExecutor;
  @Getter
  private final GobblinHelixTaskMetrics taskMetrics;

  private final TaskStateTracker taskStateTracker;
  private final Path appWorkDir;
  private final StateStores stateStores;
  private final TaskAttemptBuilder taskAttemptBuilder;

  public GobblinHelixTaskFactory(TaskRunnerSuiteBase.Builder builder,
      MetricContext metricContext,
      TaskStateTracker taskStateTracker,
      Config stateStoreConfig) {
    this(builder, metricContext, taskStateTracker, stateStoreConfig, Optional.absent());
  }

  /**
   * Constructor that allows passing in a {@link TaskDriver} instance. This constructor is exposed purely for
   * testing purposes to allow passing in a mock {@link TaskDriver} (e.g. see GobblinHelixTaskTest). For other cases, use
   * the constructor {@link #GobblinHelixTaskFactory(TaskRunnerSuiteBase.Builder, MetricContext, TaskStateTracker, Config)}.
   */
  @VisibleForTesting
  public GobblinHelixTaskFactory(TaskRunnerSuiteBase.Builder builder,
      MetricContext metricContext,
      TaskStateTracker taskStateTracker,
      Config stateStoreConfig,
      Optional<TaskDriver> taskDriver) {
    // initialize task related metrics
    int windowSizeInMin = ConfigUtils.getInt(builder.getConfig(),
        ConfigurationKeys.METRIC_TIMER_WINDOW_SIZE_IN_MINUTES,
        ConfigurationKeys.DEFAULT_METRIC_TIMER_WINDOW_SIZE_IN_MINUTES);
    this.taskExecutor = new TaskExecutor(ConfigUtils.configToProperties(builder.getConfig()));
    this.taskMetrics = new GobblinHelixTaskMetrics(taskExecutor, metricContext, windowSizeInMin);
    this.builder = builder;
    this.containerMetrics = builder.getContainerMetrics();
    this.helixManager = builder.getJobHelixManager();

    // The new-task counter is only available when container metrics are enabled.
    if (this.containerMetrics.isPresent()) {
      this.newTasksCounter = Optional.of(this.containerMetrics.get().getCounter(GOBBLIN_CLUSTER_NEW_HELIX_TASK_COUNTER));
    } else {
      this.newTasksCounter = Optional.absent();
    }

    this.taskStateTracker = taskStateTracker;
    this.appWorkDir = builder.getAppWorkPath();
    // Three state stores rooted at the app work dir: task output states, input work units, and job state.
    this.stateStores = new StateStores(stateStoreConfig, appWorkDir,
        GobblinClusterConfigurationKeys.OUTPUT_TASK_STATE_DIR_NAME, appWorkDir,
        GobblinClusterConfigurationKeys.INPUT_WORK_UNIT_DIR_NAME, appWorkDir,
        GobblinClusterConfigurationKeys.JOB_STATE_DIR_NAME);
    this.taskAttemptBuilder = createTaskAttemptBuilder();
    this.taskDriver = taskDriver;
  }

  // Builds the TaskAttemptBuilder with this instance's name as container id and the task state store.
  private TaskAttemptBuilder createTaskAttemptBuilder() {
    TaskAttemptBuilder builder = new TaskAttemptBuilder(this.taskStateTracker, this.taskExecutor);
    builder.setContainerId(this.helixManager.getInstanceName());
    builder.setTaskStateStore(this.stateStores.getTaskStateStore());
    return builder;
  }

  /**
   * Creates a new {@link GobblinHelixTask} for the given Helix callback context, incrementing the
   * new-task counter when metrics are enabled and lazily creating the {@link TaskDriver} from the
   * context's manager on first use.
   */
  @Override
  public Task createNewTask(TaskCallbackContext context) {
    if (this.newTasksCounter.isPresent()) {
      this.newTasksCounter.get().inc();
    }
    if (!this.taskDriver.isPresent()) {
      this.taskDriver = Optional.of(new TaskDriver(context.getManager()));
    }
    return new GobblinHelixTask(builder, context, this.taskAttemptBuilder, this.stateStores, this.taskMetrics,
        this.taskDriver.get());
  }
}
2,229
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/HelixTaskFactory.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.cluster;

import java.io.IOException;
import java.nio.file.Path;
import java.util.Map;

import org.apache.helix.task.Task;
import org.apache.helix.task.TaskCallbackContext;
import org.apache.helix.task.TaskFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.codahale.metrics.Counter;
import com.google.common.base.Optional;
import com.typesafe.config.Config;

import org.apache.gobblin.util.GobblinProcessBuilder;
import org.apache.gobblin.util.SystemPropertiesWrapper;


/**
 * A Helix {@link TaskFactory} that produces {@link SingleHelixTask}s, each of which executes
 * its work through a shared {@link SingleTaskLauncher}. Optionally counts every created task
 * via {@link ContainerMetrics}.
 */
public class HelixTaskFactory implements TaskFactory {

  private static final Logger logger = LoggerFactory.getLogger(HelixTaskFactory.class);

  private static final String GOBBLIN_CLUSTER_NEW_HELIX_TASK_COUNTER = "gobblin.cluster.new.helix.task";

  private final Optional<ContainerMetrics> containerMetrics;

  /**
   * A {@link Counter} to count the number of new {@link GobblinHelixTask}s that are created.
   */
  private final Optional<Counter> newTasksCounter;
  private final SingleTaskLauncher launcher;

  /**
   * @param containerMetrics metrics holder; when present, a task-creation counter is registered
   * @param clusterConfPath path to the serialized cluster configuration handed to each launched task
   * @param sysConfig system-level configuration for the task launcher
   */
  public HelixTaskFactory(Optional<ContainerMetrics> containerMetrics, Path clusterConfPath, Config sysConfig) {
    this.containerMetrics = containerMetrics;

    // Register the counter only when metrics are available; otherwise leave it absent.
    Optional<Counter> counter = Optional.absent();
    if (containerMetrics.isPresent()) {
      counter = Optional.of(containerMetrics.get().getCounter(GOBBLIN_CLUSTER_NEW_HELIX_TASK_COUNTER));
    }
    this.newTasksCounter = counter;

    this.launcher = new SingleTaskLauncher(
        new GobblinProcessBuilder(), new SystemPropertiesWrapper(), clusterConfPath, sysConfig);
  }

  /**
   * Creates a {@link SingleHelixTask} from the Helix callback context's task config,
   * incrementing the new-task counter when present.
   *
   * @throws GobblinClusterException wrapping any {@link IOException} raised during task creation
   */
  @Override
  public Task createNewTask(TaskCallbackContext context) {
    try {
      if (this.newTasksCounter.isPresent()) {
        this.newTasksCounter.get().inc();
      }
      final Map<String, String> taskConfigMap = context.getTaskConfig().getConfigMap();
      return new SingleHelixTask(this.launcher, taskConfigMap);
    } catch (IOException ioe) {
      final String msg = "Failed to create a new SingleHelixTask";
      logger.error(msg, ioe);
      throw new GobblinClusterException(msg, ioe);
    }
  }
}
2,230
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/SingleTask.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.cluster;

import java.io.IOException;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.Lists;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;

import org.apache.gobblin.broker.SharedResourcesBrokerFactory;
import org.apache.gobblin.broker.gobblin_scopes.GobblinScopeTypes;
import org.apache.gobblin.broker.gobblin_scopes.JobScopeInstance;
import org.apache.gobblin.broker.iface.SharedResourcesBroker;
import org.apache.gobblin.runtime.GobblinMultiTaskAttempt;
import org.apache.gobblin.runtime.JobState;
import org.apache.gobblin.runtime.util.StateStores;
import org.apache.gobblin.source.workunit.MultiWorkUnit;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.JobLauncherUtils;
import org.apache.gobblin.util.SerializationUtils;

/**
 * A standalone unit to initialize and execute {@link GobblinMultiTaskAttempt} through deserialized {@link WorkUnit}
 */
public class SingleTask {

  private static final Logger _logger = LoggerFactory.getLogger(SingleTask.class);

  // How many 5-second await rounds cancel() tolerates while waiting for run() to build the task attempt.
  public static final String MAX_RETRY_WAITING_FOR_INIT_KEY = "maxRetryBlockedOnTaskAttemptInit";
  public static final int DEFAULT_MAX_RETRY_WAITING_FOR_INIT = 2;

  @VisibleForTesting
  GobblinMultiTaskAttempt _taskAttempt;
  private String _jobId;
  private Path _workUnitFilePath;
  private Path _jobStateFilePath;
  private FileSystem _fs;
  private TaskAttemptBuilder _taskAttemptBuilder;
  private StateStores _stateStores;
  private Config _dynamicConfig;
  private JobState _jobState;

  // Preventing Helix calling cancel before taskAttempt is created
  // Checking if taskAttempt is empty is not enough, since canceller runs in different thread as runner, the case to
  // to avoid here is taskAttempt being created and start to run after cancel has been called.
  private Condition _taskAttemptBuilt;
  private Lock _lock;

  SingleTask(String jobId, Path workUnitFilePath, Path jobStateFilePath, FileSystem fs,
      TaskAttemptBuilder taskAttemptBuilder, StateStores stateStores, Config dynamicConfig) {
    this(jobId, workUnitFilePath, jobStateFilePath, fs, taskAttemptBuilder, stateStores, dynamicConfig, false);
  }

  /**
   * Do all heavy-lifting of initialization in constructor which could be retried if failed,
   * see the example in {@link GobblinHelixTask}.
   *
   * @param skipGetJobState when true, {@code _jobState} is left null and must be populated before {@link #run()}
   */
  SingleTask(String jobId, Path workUnitFilePath, Path jobStateFilePath, FileSystem fs,
      TaskAttemptBuilder taskAttemptBuilder, StateStores stateStores, Config dynamicConfig, boolean skipGetJobState) {
    _jobId = jobId;
    _workUnitFilePath = workUnitFilePath;
    _jobStateFilePath = jobStateFilePath;
    _fs = fs;
    _taskAttemptBuilder = taskAttemptBuilder;
    _stateStores = stateStores;
    _dynamicConfig = dynamicConfig;
    _lock = new ReentrantLock();
    _taskAttemptBuilt = _lock.newCondition();

    if (!skipGetJobState) {
      try {
        _jobState = getJobState();
      } catch (IOException ioe) {
        throw new RuntimeException("Failing in deserializing jobState...", ioe);
      }
    } else {
      this._jobState = null;
    }
  }

  /**
   * Builds and runs a {@link GobblinMultiTaskAttempt} for the deserialized work units, signalling
   * {@link #cancel()} once the attempt object exists.
   *
   * @throws IOException          on state-store or work-unit deserialization failures
   * @throws InterruptedException if the attempt is interrupted while running
   */
  public void run() throws IOException, InterruptedException {
    if (_jobState == null) {
      throw new RuntimeException("jobState is null. Task may have already been cancelled.");
    }

    // Add dynamic configuration to the job state
    _dynamicConfig.entrySet().forEach(e -> _jobState.setProp(e.getKey(), e.getValue().unwrapped().toString()));

    Config jobConfig = getConfigFromJobState(_jobState);

    _logger.debug("SingleTask.run: jobId {} workUnitFilePath {} jobStateFilePath {} jobState {} jobConfig {}",
        _jobId, _workUnitFilePath, _jobStateFilePath, _jobState, jobConfig);

    try (SharedResourcesBroker<GobblinScopeTypes> globalBroker = SharedResourcesBrokerFactory
        .createDefaultTopLevelBroker(jobConfig, GobblinScopeTypes.GLOBAL.defaultScopeInstance())) {
      SharedResourcesBroker<GobblinScopeTypes> jobBroker = getJobBroker(_jobState, globalBroker);

      // Secure atomicity of taskAttempt's execution.
      // Signaling blocking threads if any whenever taskAttempt is nonNull.
      _taskAttempt = _taskAttemptBuilder.build(getWorkUnits().iterator(), _jobId, _jobState, jobBroker);

      _lock.lock();
      try {
        _taskAttemptBuilt.signal();
      } finally {
        _lock.unlock();
      }

      // This is a blocking call.
      _taskAttempt.runAndOptionallyCommitTaskAttempt(GobblinMultiTaskAttempt.CommitPolicy.IMMEDIATE);
    } finally {
      // BUGFIX: _taskAttempt is null if build() (or broker creation) threw; the unconditional
      // cleanMetrics() call would NPE here and mask the original exception.
      if (_taskAttempt != null) {
        _logger.info("Clearing all metrics object in cache.");
        _taskAttempt.cleanMetrics();
      }
    }
  }

  private SharedResourcesBroker<GobblinScopeTypes> getJobBroker(JobState jobState,
      SharedResourcesBroker<GobblinScopeTypes> globalBroker) {
    return globalBroker.newSubscopedBuilder(new JobScopeInstance(jobState.getJobName(), jobState.getJobId())).build();
  }

  private Config getConfigFromJobState(JobState jobState) {
    Properties jobProperties = jobState.getProperties();
    return ConfigFactory.parseProperties(jobProperties);
  }

  /**
   * Loads the {@link JobState}, preferring the job state store when configured and falling back to
   * direct deserialization from {@code _jobStateFilePath}.
   */
  protected JobState getJobState() throws IOException {
    JobState jobState;

    // read the state from the state store if present, otherwise deserialize directly from the file
    if (_stateStores.haveJobStateStore()) {
      jobState = _stateStores.getJobStateStore().get(_jobStateFilePath.getParent().getName(),
          _jobStateFilePath.getName(), _jobStateFilePath.getParent().getName());
    } else {
      jobState = new JobState();
      SerializationUtils.deserializeState(_fs, _jobStateFilePath, jobState);
    }

    return jobState;
  }

  /**
   * Deserialize {@link WorkUnit}s from a path, flattening any {@link MultiWorkUnit}.
   */
  protected List<WorkUnit> getWorkUnits() throws IOException {
    String fileName = _workUnitFilePath.getName();
    String storeName = _workUnitFilePath.getParent().getName();
    WorkUnit workUnit;

    try {
      if (JobLauncherUtils.hasMultiWorkUnitExtension(_workUnitFilePath)) {
        workUnit = _stateStores.getMwuStateStore().getAll(storeName, fileName).get(0);
      } else {
        workUnit = _stateStores.getWuStateStore().getAll(storeName, fileName).get(0);
      }
    } catch (IOException e) {
      //Add workunitFilePath to the IOException message to aid debugging
      throw new IOException("Exception retrieving state from state store for workunit: " + _workUnitFilePath.toString(), e);
    }

    // The list of individual WorkUnits (flattened) to run
    List<WorkUnit> workUnits = Lists.newArrayList();

    if (workUnit instanceof MultiWorkUnit) {
      // Flatten the MultiWorkUnit so the job configuration properties can be added to each individual WorkUnits
      List<WorkUnit> flattenedWorkUnits = JobLauncherUtils.flattenWorkUnits(((MultiWorkUnit) workUnit).getWorkUnits());
      workUnits.addAll(flattenedWorkUnits);
    } else {
      workUnits.add(workUnit);
    }
    return workUnits;
  }

  /**
   * Cancels the running task attempt, blocking (bounded by {@value #MAX_RETRY_WAITING_FOR_INIT_KEY}
   * rounds of 5 seconds) until {@link #run()} has built the attempt object.
   */
  public void cancel() {
    int retryCount = 0;
    int maxRetry = ConfigUtils.getInt(_dynamicConfig, MAX_RETRY_WAITING_FOR_INIT_KEY, DEFAULT_MAX_RETRY_WAITING_FOR_INIT);

    try {
      _lock.lock();
      try {
        while (_taskAttempt == null) {
          // await return false if timeout on this around
          if (!_taskAttemptBuilt.await(5, TimeUnit.SECONDS) && ++retryCount > maxRetry) {
            throw new IllegalStateException("Failed to initialize taskAttempt object before cancel");
          }
        }
      } finally {
        _lock.unlock();
      }
      if (_taskAttempt != null) {
        _logger.info("Task cancelled: Shutdown starting for tasks with jobId: {}", _jobId);
        _taskAttempt.shutdownTasks();
        _logger.info("Task cancelled: Shutdown complete for tasks with jobId: {}", _jobId);
      } else {
        throw new IllegalStateException("TaskAttempt not initialized while passing conditional barrier");
      }
    } catch (InterruptedException e) {
      throw new RuntimeException("Interrupted while shutting down task with jobId: " + _jobId, e);
    }
  }
}
2,231
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/ContainerHealthMetrics.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.cluster; public class ContainerHealthMetrics { public static final String CONTAINER_METRICS_PREFIX = "container.health.metrics."; public static final String PROCESS_CPU_LOAD = CONTAINER_METRICS_PREFIX + "processCpuLoad"; public static final String PROCESS_CPU_TIME = CONTAINER_METRICS_PREFIX + "processCpuTime"; public static final String PROCESS_HEAP_USED_SIZE = CONTAINER_METRICS_PREFIX + "processHeapUsedSize"; public static final String SYSTEM_CPU_LOAD = CONTAINER_METRICS_PREFIX + "systemCpuLoad"; public static final String SYSTEM_LOAD_AVG = CONTAINER_METRICS_PREFIX + "systemLoadAvg"; public static final String COMMITTED_VMEM_SIZE = CONTAINER_METRICS_PREFIX + "committedVmemSize"; public static final String FREE_SWAP_SPACE_SIZE = CONTAINER_METRICS_PREFIX + "freeSwapSpaceSize"; public static final String TOTAL_SWAP_SPACE_SIZE = CONTAINER_METRICS_PREFIX + "totalSwapSpaceSize"; public static final String NUM_AVAILABLE_PROCESSORS = CONTAINER_METRICS_PREFIX + "numAvailableProcessors"; public static final String TOTAL_PHYSICAL_MEM_SIZE = CONTAINER_METRICS_PREFIX + "totalPhysicalMemSize"; public static final String FREE_PHYSICAL_MEM_SIZE = CONTAINER_METRICS_PREFIX + 
"freePhysicalMemSize"; public static final String MINOR_GC_COUNT = CONTAINER_METRICS_PREFIX + "minorGcCount"; public static final String MINOR_GC_DURATION = CONTAINER_METRICS_PREFIX + "minorGcDuration"; public static final String MAJOR_GC_COUNT = CONTAINER_METRICS_PREFIX + "majorGcCount"; public static final String MAJOR_GC_DURATION = CONTAINER_METRICS_PREFIX + "majorGcDuration"; public static final String UNKNOWN_GC_COUNT = CONTAINER_METRICS_PREFIX + "unknownGcCount"; public static final String UNKNOWN_GC_DURATION = CONTAINER_METRICS_PREFIX + "unknownGcDuration"; }
2,232
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/ScheduledJobConfigurationManager.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.cluster;

import java.lang.reflect.InvocationTargetException;
import java.net.URI;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

import org.apache.commons.lang3.reflect.ConstructorUtils;
import org.apache.commons.lang3.tuple.Pair;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.common.base.Optional;
import com.google.common.collect.Maps;
import com.google.common.eventbus.EventBus;
import com.typesafe.config.Config;

import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.runtime.api.JobSpec;
import org.apache.gobblin.runtime.api.Spec;
import org.apache.gobblin.runtime.api.SpecConsumer;
import org.apache.gobblin.runtime.api.SpecExecutor;
import org.apache.gobblin.util.ClassAliasResolver;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.ExecutorsUtils;

/**
 * A {@link JobConfigurationManager} that periodically polls a {@link SpecConsumer} for changed
 * {@link JobSpec}s and posts the corresponding add/update/delete/cancel events on the event bus.
 */
@Alpha
public class ScheduledJobConfigurationManager extends JobConfigurationManager {

  private static final Logger LOGGER = LoggerFactory.getLogger(ScheduledJobConfigurationManager.class);

  private static final long DEFAULT_JOB_SPEC_REFRESH_INTERVAL = 60;

  // Cache of the job specs seen so far, keyed by spec URI.
  private Map<URI, JobSpec> jobSpecs;

  private final long refreshIntervalInSeconds;

  private final ScheduledExecutorService fetchJobSpecExecutor;

  protected final SpecConsumer _specConsumer;

  private final ClassAliasResolver<SpecConsumer> aliasResolver;

  public ScheduledJobConfigurationManager(EventBus eventBus, Config config) {
    super(eventBus, config);
    this.jobSpecs = Maps.newHashMap();
    this.refreshIntervalInSeconds = ConfigUtils.getLong(config, GobblinClusterConfigurationKeys.JOB_SPEC_REFRESH_INTERVAL,
        DEFAULT_JOB_SPEC_REFRESH_INTERVAL);

    this.fetchJobSpecExecutor = Executors.newSingleThreadScheduledExecutor(
        ExecutorsUtils.newThreadFactory(Optional.of(LOGGER), Optional.of("FetchJobSpecExecutor")));

    this.aliasResolver = new ClassAliasResolver<>(SpecConsumer.class);
    try {
      String specConsumerClassName = GobblinClusterConfigurationKeys.DEFAULT_SPEC_CONSUMER_CLASS;
      if (config.hasPath(GobblinClusterConfigurationKeys.SPEC_CONSUMER_CLASS_KEY)) {
        specConsumerClassName = config.getString(GobblinClusterConfigurationKeys.SPEC_CONSUMER_CLASS_KEY);
      }
      // BUGFIX: log message previously read "Using SpecConsumer ClassNameclass name/alias".
      LOGGER.info("Using SpecConsumer class name/alias " + specConsumerClassName);
      this._specConsumer = (SpecConsumer) ConstructorUtils
          .invokeConstructor(Class.forName(this.aliasResolver.resolve(specConsumerClassName)), config);
    } catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException | InstantiationException
        | ClassNotFoundException e) {
      throw new RuntimeException(e);
    }
  }

  @Override
  protected void startUp() throws Exception {
    LOGGER.info("Starting the " + ScheduledJobConfigurationManager.class.getSimpleName());

    LOGGER.info(String.format("Scheduling the job spec refresh task with an interval of %d second(s)",
        this.refreshIntervalInSeconds));

    // Schedule the job config fetch task
    // NOTE(review): rethrowing from the Runnable makes the ScheduledExecutorService suppress all
    // subsequent executions of this periodic task — confirm that stopping refreshes on the first
    // fetch failure is intended.
    this.fetchJobSpecExecutor.scheduleAtFixedRate(new Runnable() {
      @Override
      public void run() {
        try {
          fetchJobSpecs();
        } catch (InterruptedException | ExecutionException e) {
          LOGGER.error("Failed to fetch job specs", e);
          throw new RuntimeException("Failed to fetch specs", e);
        }
      }
    }, 0, this.refreshIntervalInSeconds, TimeUnit.SECONDS);
  }

  /***
   * TODO: Change cluster code to handle Spec. Right now all job properties are needed to be in config and template is not honored
   * TODO: Materialized JobSpec and make use of ResolvedJobSpec
   * @throws ExecutionException
   * @throws InterruptedException
   */
  protected void fetchJobSpecs() throws ExecutionException, InterruptedException {
    List<Pair<SpecExecutor.Verb, Spec>> changesSpecs =
        (List<Pair<SpecExecutor.Verb, Spec>>) this._specConsumer.changedSpecs().get();

    for (Pair<SpecExecutor.Verb, Spec> entry : changesSpecs) {
      SpecExecutor.Verb verb = entry.getKey();
      if (verb.equals(SpecExecutor.Verb.ADD)) {
        // Handle addition
        JobSpec jobSpec = (JobSpec) entry.getValue();
        postNewJobConfigArrival(jobSpec.getUri().toString(), jobSpec.getConfigAsProperties());
        jobSpecs.put(jobSpec.getUri(), jobSpec);
      } else if (verb.equals(SpecExecutor.Verb.UPDATE)) {
        // Handle update
        JobSpec jobSpec = (JobSpec) entry.getValue();
        postUpdateJobConfigArrival(jobSpec.getUri().toString(), jobSpec.getConfigAsProperties());
        jobSpecs.put(jobSpec.getUri(), jobSpec);
      } else if (verb.equals(SpecExecutor.Verb.DELETE)) {
        // Handle delete
        Spec anonymousSpec = entry.getValue();
        postDeleteJobConfigArrival(anonymousSpec.getUri().toString(), new Properties());
        jobSpecs.remove(anonymousSpec.getUri());
      } else if (verb.equals(SpecExecutor.Verb.CANCEL)) {
        // Handle cancel
        Spec anonymousSpec = entry.getValue();
        postCancelJobConfigArrival(anonymousSpec.getUri().toString());
      }
    }
  }

  @Override
  protected void shutDown() throws Exception {
    ExecutorsUtils.shutdownExecutorService(this.fetchJobSpecExecutor, Optional.of(LOGGER));
  }
}
2,233
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/GobblinHelixMessagingService.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.cluster;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
import java.util.regex.Pattern;

import org.apache.helix.Criteria;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.HelixManager;
import org.apache.helix.HelixProperty;
import org.apache.helix.InstanceType;
import org.apache.helix.PropertyKey;
import org.apache.helix.messaging.CriteriaEvaluator;
import org.apache.helix.messaging.DefaultMessagingService;
import org.apache.helix.messaging.ZNRecordRow;
import org.apache.helix.model.LiveInstance;
import org.apache.helix.model.Message;

import com.google.common.base.Strings;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;

/**
 * #HELIX-0.6.7-WORKAROUND
 * The GobblinHelixMessagingService is a temporary workaround for missing messaging support for INSTANCES in helix 0.6.7
 */
public class GobblinHelixMessagingService extends DefaultMessagingService {
  private GobblinHelixCriteriaEvaluator _gobblinHelixCriteriaEvaluator;
  private HelixManager _manager;

  public GobblinHelixMessagingService(HelixManager manager) {
    super(manager);
    _manager = manager;
    _gobblinHelixCriteriaEvaluator = new GobblinHelixCriteriaEvaluator();
  }

  /** Wraps the given message in a fresh-id copy addressed to the cluster controller. */
  private List<Message> generateMessagesForController(Message message) {
    List<Message> messages = new ArrayList<Message>();
    String id = UUID.randomUUID().toString();
    Message newMessage = new Message(message.getRecord(), id);
    newMessage.setMsgId(id);
    newMessage.setSrcName(_manager.getInstanceName());
    newMessage.setTgtName("Controller");
    messages.add(newMessage);
    return messages;
  }

  /**
   * Generates per-recipient copies of {@code message} for the instances matched by
   * {@code recipientCriteria}. Only CONTROLLER and PARTICIPANT recipient types are handled;
   * other instance types yield an empty map.
   */
  @Override
  public Map<InstanceType, List<Message>> generateMessage(final Criteria recipientCriteria,
      final Message message) {
    Map<InstanceType, List<Message>> messagesToSendMap = new HashMap<InstanceType, List<Message>>();
    InstanceType instanceType = recipientCriteria.getRecipientInstanceType();

    if (instanceType == InstanceType.CONTROLLER) {
      List<Message> messages = generateMessagesForController(message);
      messagesToSendMap.put(InstanceType.CONTROLLER, messages);
    } else if (instanceType == InstanceType.PARTICIPANT) {
      List<Message> messages = new ArrayList<Message>();
      List<Map<String, String>> matchedList =
          _gobblinHelixCriteriaEvaluator.evaluateCriteria(recipientCriteria, _manager);

      if (!matchedList.isEmpty()) {
        Map<String, String> sessionIdMap = new HashMap<String, String>();
        if (recipientCriteria.isSessionSpecific()) {
          // Snapshot live-instance session ids so each message can be pinned to its target's session.
          HelixDataAccessor accessor = _manager.getHelixDataAccessor();
          PropertyKey.Builder keyBuilder = accessor.keyBuilder();
          List<LiveInstance> liveInstances = accessor.getChildValues(keyBuilder.liveInstances());
          for (LiveInstance liveInstance : liveInstances) {
            sessionIdMap.put(liveInstance.getInstanceName(), liveInstance.getSessionId());
          }
        }
        for (Map<String, String> map : matchedList) {
          String id = UUID.randomUUID().toString();
          Message newMessage = new Message(message.getRecord(), id);
          String srcInstanceName = _manager.getInstanceName();
          String tgtInstanceName = map.get("instanceName");
          // Don't send message to self
          if (recipientCriteria.isSelfExcluded() && srcInstanceName.equalsIgnoreCase(tgtInstanceName)) {
            continue;
          }
          newMessage.setSrcName(srcInstanceName);
          newMessage.setTgtName(tgtInstanceName);
          newMessage.setResourceName(map.get("resourceName"));
          newMessage.setPartitionName(map.get("partitionName"));
          if (recipientCriteria.isSessionSpecific()) {
            newMessage.setTgtSessionId(sessionIdMap.get(tgtInstanceName));
          }
          messages.add(newMessage);
        }
        messagesToSendMap.put(InstanceType.PARTICIPANT, messages);
      }
    }
    return messagesToSendMap;
  }

  /**
   * A {@link CriteriaEvaluator} that matches criteria against live instances only (the workaround
   * this class exists for).
   */
  public static class GobblinHelixCriteriaEvaluator extends CriteriaEvaluator {
    /**
     * Examine persisted data to match wildcards in {@link Criteria}
     * @param recipientCriteria Criteria specifying the message destinations
     * @param manager connection to the persisted data
     * @return map of evaluated criteria
     */
    public List<Map<String, String>> evaluateCriteria(Criteria recipientCriteria, HelixManager manager) {
      // get the data
      HelixDataAccessor accessor = manager.getHelixDataAccessor();
      PropertyKey.Builder keyBuilder = accessor.keyBuilder();
      List<HelixProperty> properties;
      properties = accessor.getChildValues(keyBuilder.liveInstances());

      // flatten the data
      List<ZNRecordRow> allRows = ZNRecordRow.flatten(HelixProperty.convertToList(properties));

      // save the matches
      Set<String> liveParticipants = accessor.getChildValuesMap(keyBuilder.liveInstances()).keySet();
      List<ZNRecordRow> result = Lists.newArrayList();
      for (ZNRecordRow row : allRows) {
        // The participant instance name is stored in the return value of either getRecordId() or getMapSubKey()
        if (rowMatches(recipientCriteria, row) && (liveParticipants.contains(row.getRecordId())
            || liveParticipants.contains(row.getMapSubKey()))) {
          result.add(row);
        }
      }

      Set<Map<String, String>> selected = Sets.newHashSet();

      // deduplicate and convert the matches into the required format
      for (ZNRecordRow row : result) {
        Map<String, String> resultRow = new HashMap<String, String>();
        resultRow.put("instanceName", !recipientCriteria.getInstanceName().equals("")
            ? (!Strings.isNullOrEmpty(row.getMapSubKey()) ? row.getMapSubKey() : row.getRecordId()) : "");
        resultRow.put("resourceName", !recipientCriteria.getResource().equals("") ? row.getRecordId() : "");
        resultRow.put("partitionName", !recipientCriteria.getPartition().equals("") ? row.getMapKey() : "");
        resultRow.put("partitionState", !recipientCriteria.getPartitionState().equals("") ? row.getMapValue() : "");
        selected.add(resultRow);
      }
      return Lists.newArrayList(selected);
    }

    /**
     * Check if a given row matches the specified criteria
     * @param criteria the criteria
     * @param row row of currently persisted data
     * @return true if it matches, false otherwise
     */
    private boolean rowMatches(Criteria criteria, ZNRecordRow row) {
      String instanceName = normalizePattern(criteria.getInstanceName());
      String resourceName = normalizePattern(criteria.getResource());
      String partitionName = normalizePattern(criteria.getPartition());
      String partitionState = normalizePattern(criteria.getPartitionState());
      return (stringMatches(instanceName, Strings.nullToEmpty(row.getMapSubKey()))
          || stringMatches(instanceName, Strings.nullToEmpty(row.getRecordId())))
          && stringMatches(resourceName, Strings.nullToEmpty(row.getRecordId()))
          && stringMatches(partitionName, Strings.nullToEmpty(row.getMapKey()))
          && stringMatches(partitionState, Strings.nullToEmpty(row.getMapValue()));
    }

    /**
     * Convert an SQL like expression into a Java matches expression
     * @param pattern SQL like match pattern (i.e. contains '%'s and '_'s)
     * @return Java matches expression (i.e. contains ".*?"s and '.'s)
     */
    private String normalizePattern(String pattern) {
      // Empty, null or "*" criteria all mean "match anything".
      if (pattern == null || pattern.equals("") || pattern.equals("*")) {
        pattern = "%";
      }
      StringBuilder builder = new StringBuilder();
      for (int i = 0; i < pattern.length(); i++) {
        char ch = pattern.charAt(i);
        if ("[](){}.*+?$^|#\\".indexOf(ch) != -1) {
          // escape any reserved characters
          builder.append("\\");
        }
        // append the character
        builder.append(ch);
      }
      // SQL wildcards: '_' -> any single char, '%' -> any (lazy) sequence.
      pattern = builder.toString().toLowerCase().replace("_", ".").replace("%", ".*?");
      return pattern;
    }

    /**
     * Check if a string matches a pattern
     * @param pattern pattern allowed by Java regex matching
     * @param value the string to check
     * @return true if they match, false otherwise
     */
    private boolean stringMatches(String pattern, String value) {
      // NOTE(review): Pattern.compile runs on every call; consider caching if this shows up in profiles.
      Pattern p = Pattern.compile(pattern, Pattern.CASE_INSENSITIVE | Pattern.DOTALL);
      return p.matcher(value).matches();
    }
  }
}
2,234
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/ContainerHealthMetricsService.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.cluster; import java.lang.management.GarbageCollectorMXBean; import java.lang.management.ManagementFactory; import java.lang.management.MemoryMXBean; import java.util.ArrayList; import java.util.HashSet; import java.util.List; import java.util.Set; import java.util.concurrent.TimeUnit; import com.google.common.util.concurrent.AbstractScheduledService; import com.google.common.util.concurrent.AtomicDouble; import com.sun.management.OperatingSystemMXBean; import com.typesafe.config.Config; import lombok.Data; import lombok.Getter; import org.apache.gobblin.metrics.ContextAwareGauge; import org.apache.gobblin.metrics.RootMetricContext; import org.apache.gobblin.util.ConfigUtils; /** * A utility class that periodically emits system level metrics that report the health of the container. * Reported metrics include CPU/Memory usage of the JVM, system load etc. * * <p> * This class extends the {@link AbstractScheduledService} so it can be used with a * {@link com.google.common.util.concurrent.ServiceManager} that manages the lifecycle of * a {@link ContainerHealthMetricsService}. 
 * </p>
 */
public class ContainerHealthMetricsService extends AbstractScheduledService {

  //Container metrics service configurations
  // Config key controlling how often (in seconds) container health metrics are sampled.
  private static final String CONTAINER_METRICS_SERVICE_REPORTING_INTERVAL_SECONDS = "container.health.metrics.service.reportingIntervalSeconds";
  private static final Long DEFAULT_CONTAINER_METRICS_REPORTING_INTERVAL = 30L;

  // Collector names used to classify each GarbageCollectorMXBean as young-generation,
  // old-generation, or unknown. Collectors not listed here count as "unknown".
  private static final Set<String> YOUNG_GC_TYPES = new HashSet<>(3);
  private static final Set<String> OLD_GC_TYPES = new HashSet<String>(3);

  static {
    // young generation GC names
    YOUNG_GC_TYPES.add("PS Scavenge");
    YOUNG_GC_TYPES.add("ParNew");
    YOUNG_GC_TYPES.add("G1 Young Generation");

    // old generation GC names
    OLD_GC_TYPES.add("PS MarkSweep");
    OLD_GC_TYPES.add("ConcurrentMarkSweep");
    OLD_GC_TYPES.add("G1 Old Generation");
  }

  // Interval (seconds) between successive metric samples; drives scheduler().
  private final long metricReportingInterval;
  private final OperatingSystemMXBean operatingSystemMXBean;
  private final MemoryMXBean memoryMXBean;
  private final List<GarbageCollectorMXBean> garbageCollectorMXBeans;

  // GC counters/durations captured at the previous iteration; used to turn the
  // cumulative MXBean values into per-interval deltas.
  @Getter
  private GcStats lastGcStats;
  // GC counters/durations captured at the current iteration.
  @Getter
  private GcStats currentGcStats;

  //Heap stats
  // Each AtomicDouble below backs one registered gauge; runOneIteration() refreshes them.
  AtomicDouble processCpuLoad = new AtomicDouble(0);
  AtomicDouble systemCpuLoad = new AtomicDouble(0);
  AtomicDouble systemLoadAvg = new AtomicDouble(0);
  AtomicDouble committedVmemSize = new AtomicDouble(0);
  AtomicDouble processCpuTime = new AtomicDouble(0);
  AtomicDouble freeSwapSpaceSize = new AtomicDouble(0);
  AtomicDouble numAvailableProcessors = new AtomicDouble(0);
  AtomicDouble totalPhysicalMemSize = new AtomicDouble(0);
  AtomicDouble totalSwapSpaceSize = new AtomicDouble(0);
  AtomicDouble freePhysicalMemSize = new AtomicDouble(0);
  AtomicDouble processHeapUsedSize = new AtomicDouble(0);

  //GC stats and counters
  // Per-interval deltas (not cumulative totals) computed in runOneIteration().
  AtomicDouble minorGcCount = new AtomicDouble(0);
  AtomicDouble majorGcCount = new AtomicDouble(0);
  AtomicDouble unknownGcCount = new AtomicDouble(0);
  AtomicDouble minorGcDuration = new AtomicDouble(0);
  AtomicDouble majorGcDuration = new AtomicDouble(0);
  AtomicDouble unknownGcDuration = new AtomicDouble(0);

  /**
   * Constructs the service, resolves the reporting interval from {@code config}, grabs the
   * platform MXBeans to sample from, and registers one context-aware gauge per tracked metric
   * with the root metric context.
   */
  public ContainerHealthMetricsService(Config config) {
    this.metricReportingInterval = ConfigUtils.getLong(config, CONTAINER_METRICS_SERVICE_REPORTING_INTERVAL_SECONDS,
        DEFAULT_CONTAINER_METRICS_REPORTING_INTERVAL);
    this.operatingSystemMXBean = ManagementFactory.getPlatformMXBean(OperatingSystemMXBean.class);
    this.memoryMXBean = ManagementFactory.getMemoryMXBean();
    this.garbageCollectorMXBeans = ManagementFactory.getGarbageCollectorMXBeans();
    this.lastGcStats = new GcStats();
    this.currentGcStats = new GcStats();

    //Build all the gauges and register them with the metrics registry.
    List<ContextAwareGauge<Double>> systemMetrics = buildGaugeList();
    systemMetrics.forEach(metric -> RootMetricContext.get().register(metric));
  }

  /** Simple holder for cumulative GC counts/durations, bucketed by generation. */
  @Data
  public static class GcStats {
    long minorCount;
    double minorDuration;
    long majorCount;
    double majorDuration;
    long unknownCount;
    double unknownDuration;
  }

  /**
   * Run one iteration of the scheduled task. If any invocation of this method throws an exception,
   * the service will transition to the {@link com.google.common.util.concurrent.Service.State#FAILED} state and this method will no
   * longer be called.
   */
  @Override
  protected void runOneIteration() throws Exception {
    // Refresh CPU/memory gauges straight from the OS and memory MXBeans.
    this.processCpuLoad.set(this.operatingSystemMXBean.getProcessCpuLoad());
    this.systemCpuLoad.set(this.operatingSystemMXBean.getSystemCpuLoad());
    this.systemLoadAvg.set(this.operatingSystemMXBean.getSystemLoadAverage());
    this.committedVmemSize.set(this.operatingSystemMXBean.getCommittedVirtualMemorySize());
    this.processCpuTime.set(this.operatingSystemMXBean.getProcessCpuTime());
    this.freeSwapSpaceSize.set(this.operatingSystemMXBean.getFreeSwapSpaceSize());
    this.numAvailableProcessors.set(this.operatingSystemMXBean.getAvailableProcessors());
    this.totalPhysicalMemSize.set(this.operatingSystemMXBean.getTotalPhysicalMemorySize());
    this.totalSwapSpaceSize.set(this.operatingSystemMXBean.getTotalSwapSpaceSize());
    this.freePhysicalMemSize.set(this.operatingSystemMXBean.getFreePhysicalMemorySize());
    this.processHeapUsedSize.set(this.memoryMXBean.getHeapMemoryUsage().getUsed());

    //Get the new GC stats
    this.currentGcStats = collectGcStats();

    // Since GC Beans report accumulated counts/durations, we need to subtract the previous values to obtain the counts/durations
    // since the last measurement time.
    this.minorGcCount.set(this.currentGcStats.getMinorCount() - this.lastGcStats.getMinorCount());
    this.minorGcDuration.set(this.currentGcStats.getMinorDuration() - this.lastGcStats.getMinorDuration());
    this.majorGcCount.set(this.currentGcStats.getMajorCount() - this.lastGcStats.getMajorCount());
    this.majorGcDuration.set(this.currentGcStats.getMajorDuration() - this.lastGcStats.getMajorDuration());
    this.unknownGcCount.set(this.currentGcStats.getUnknownCount() - this.lastGcStats.getUnknownCount());
    this.unknownGcDuration.set(this.currentGcStats.getUnknownDuration() - this.lastGcStats.getUnknownDuration());

    //Update last collected stats
    this.lastGcStats = this.currentGcStats;
  }

  /**
   * Builds one {@link ContextAwareGauge} per tracked metric, each reading from the
   * corresponding {@link AtomicDouble} field above.
   */
  protected List<ContextAwareGauge<Double>> buildGaugeList() {
    List<ContextAwareGauge<Double>> gaugeList = new ArrayList<>();
    gaugeList.add(createGauge(ContainerHealthMetrics.PROCESS_CPU_LOAD, this.processCpuLoad));
    gaugeList.add(createGauge(ContainerHealthMetrics.SYSTEM_CPU_LOAD, this.systemCpuLoad));
    gaugeList.add(createGauge(ContainerHealthMetrics.SYSTEM_LOAD_AVG, this.systemLoadAvg));
    gaugeList.add(createGauge(ContainerHealthMetrics.COMMITTED_VMEM_SIZE, this.committedVmemSize));
    gaugeList.add(createGauge(ContainerHealthMetrics.PROCESS_CPU_TIME, this.processCpuTime));
    gaugeList.add(createGauge(ContainerHealthMetrics.FREE_SWAP_SPACE_SIZE, this.freeSwapSpaceSize));
    gaugeList.add(createGauge(ContainerHealthMetrics.NUM_AVAILABLE_PROCESSORS, this.numAvailableProcessors));
    gaugeList.add(createGauge(ContainerHealthMetrics.TOTAL_PHYSICAL_MEM_SIZE, this.totalPhysicalMemSize));
    gaugeList.add(createGauge(ContainerHealthMetrics.TOTAL_SWAP_SPACE_SIZE, this.totalSwapSpaceSize));
    gaugeList.add(createGauge(ContainerHealthMetrics.FREE_PHYSICAL_MEM_SIZE, this.freePhysicalMemSize));
    gaugeList.add(createGauge(ContainerHealthMetrics.PROCESS_HEAP_USED_SIZE, this.processHeapUsedSize));
    gaugeList.add(createGauge(ContainerHealthMetrics.MINOR_GC_COUNT, this.minorGcCount));
    gaugeList.add(createGauge(ContainerHealthMetrics.MINOR_GC_DURATION, this.minorGcDuration));
    gaugeList.add(createGauge(ContainerHealthMetrics.MAJOR_GC_COUNT, this.majorGcCount));
    gaugeList.add(createGauge(ContainerHealthMetrics.MAJOR_GC_DURATION, this.majorGcDuration));
    gaugeList.add(createGauge(ContainerHealthMetrics.UNKNOWN_GC_COUNT, this.unknownGcCount));
    gaugeList.add(createGauge(ContainerHealthMetrics.UNKNOWN_GC_DURATION, this.unknownGcDuration));
    return gaugeList;
  }

  // Wraps a single AtomicDouble in a gauge registered under the given metric name.
  private ContextAwareGauge<Double> createGauge(String name, AtomicDouble metric) {
    return RootMetricContext.get().newContextAwareGauge(name, () -> metric.get());
  }

  /**
   * Snapshots cumulative GC counts and durations from all GC MXBeans, bucketing each
   * collector into minor/major/unknown by name. Beans reporting a negative count
   * (collection count unavailable) are skipped.
   */
  private GcStats collectGcStats() {
    //Collect GC stats by iterating over all GC beans.
    GcStats gcStats = new GcStats();
    for (GarbageCollectorMXBean garbageCollectorMXBean : this.garbageCollectorMXBeans) {
      long count = garbageCollectorMXBean.getCollectionCount();
      double duration = (double) garbageCollectorMXBean.getCollectionTime();
      if (count >= 0) {
        if (YOUNG_GC_TYPES.contains(garbageCollectorMXBean.getName())) {
          gcStats.setMinorCount(gcStats.getMinorCount() + count);
          gcStats.setMinorDuration(gcStats.getMinorDuration() + duration);
        } else if (OLD_GC_TYPES.contains(garbageCollectorMXBean.getName())) {
          gcStats.setMajorCount(gcStats.getMajorCount() + count);
          gcStats.setMajorDuration(gcStats.getMajorDuration() + duration);
        } else {
          gcStats.setUnknownCount(gcStats.getUnknownCount() + count);
          gcStats.setUnknownDuration(gcStats.getUnknownDuration() + duration);
        }
      }
    }
    return gcStats;
  }

  /**
   * Returns the {@link Scheduler} object used to configure this service. This method will only be
   * called once.
   */
  @Override
  protected Scheduler scheduler() {
    return Scheduler.newFixedRateSchedule(0, this.metricReportingInterval, TimeUnit.SECONDS);
  }
}
2,235
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/GobblinTaskStateModelFactory.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.cluster; import java.util.Map; import java.util.concurrent.ScheduledExecutorService; import org.apache.helix.HelixManager; import org.apache.helix.task.TaskFactory; import org.apache.helix.task.TaskStateModel; import org.apache.helix.task.TaskStateModelFactory; import org.apache.gobblin.annotation.Alpha; /** * A {@link TaskStateModelFactory} for {@link GobblinTaskStateModel}s. * * <p> * This class is currently not used but may get used in the future if we decide to plugin our own * custom {@link TaskStateModel}. So currently this is like a place holder class. 
* </p> * * @author Yinan Li */ @Alpha public class GobblinTaskStateModelFactory extends TaskStateModelFactory { private final HelixManager helixManager; private final Map<String, TaskFactory> taskFactoryRegistry; private final ScheduledExecutorService taskExecutor; public GobblinTaskStateModelFactory(HelixManager helixManager, Map<String, TaskFactory> taskFactoryRegistry, ScheduledExecutorService taskExecutor) { super(helixManager, taskFactoryRegistry); this.taskExecutor = taskExecutor; this.helixManager = helixManager; this.taskFactoryRegistry = taskFactoryRegistry; } @Override public TaskStateModel createNewStateModel(String resourceName, String partitionKey) { return new GobblinTaskStateModel(this.helixManager, this.taskFactoryRegistry, this.taskExecutor); } }
2,236
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/HelixMessageSubTypes.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.cluster; import org.apache.gobblin.annotation.Alpha; /** * An enumeration of Helix message sub types. * * @author Yinan Li */ @Alpha public enum HelixMessageSubTypes { /** * This type is for messages sent when the {@link GobblinClusterManager} is to be shutdown. */ APPLICATION_MASTER_SHUTDOWN, /** * This type is for messages sent when the {@link GobblinTaskRunner}s are to be shutdown. */ WORK_UNIT_RUNNER_SHUTDOWN, /** * This type is for messages sent when the file storing the delegation token has been updated. */ TOKEN_FILE_UPDATED }
2,237
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/GobblinHelixManagerFactory.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.cluster; import org.apache.helix.InstanceType; public class GobblinHelixManagerFactory { public static GobblinReferenceCountingZkHelixManager getZKHelixManager(String clusterName, String instanceName, InstanceType type, String zkAddr) { return new GobblinReferenceCountingZkHelixManager(clusterName, instanceName, type, zkAddr); } }
2,238
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/TaskRunnerSuiteThreadModel.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.cluster; import java.net.URI; import java.util.Collection; import java.util.List; import java.util.Map; import java.util.Properties; import org.apache.helix.task.TaskFactory; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Maps; import com.google.common.util.concurrent.Service; import com.typesafe.config.Config; import com.typesafe.config.ConfigValueFactory; import org.apache.gobblin.configuration.ConfigurationKeys; import org.apache.gobblin.instrumented.StandardMetricsBridge; import org.apache.gobblin.metrics.MetricContext; import org.apache.gobblin.runtime.TaskStateTracker; import org.apache.gobblin.runtime.services.JMXReportingService; import org.apache.gobblin.util.ConfigUtils; import org.apache.gobblin.util.PathUtils; /** * A implementation of {@link TaskRunnerSuiteBase} suite which runs tasks in a thread pool. 
*/ class TaskRunnerSuiteThreadModel extends TaskRunnerSuiteBase { protected final GobblinHelixTaskFactory taskFactory; protected final GobblinHelixJobFactory jobFactory; TaskRunnerSuiteThreadModel(TaskRunnerSuiteBase.Builder builder) { super(builder); this.taskFactory = createTaskFactory(builder, this.metricContext); this.jobFactory = new GobblinHelixJobFactory(builder, this.metricContext); } @Override protected Collection<StandardMetricsBridge.StandardMetrics> getMetricsCollection() { return ImmutableList.of(this.taskFactory.getTaskMetrics(), this.jobFactory.getJobTaskMetrics(), this.jobFactory.getLauncherMetrics(), this.jobFactory.getHelixMetrics()); } @Override protected Map<String, TaskFactory> getTaskFactoryMap() { Map<String, TaskFactory> taskFactoryMap = Maps.newHashMap(); taskFactoryMap.put(GobblinTaskRunner.GOBBLIN_TASK_FACTORY_NAME, taskFactory); taskFactoryMap.put(GobblinTaskRunner.GOBBLIN_JOB_FACTORY_NAME, jobFactory); return taskFactoryMap; } @Override protected List<Service> getServices() { return this.services; } private GobblinHelixTaskFactory createTaskFactory(Builder builder, MetricContext metricContext) { Properties properties = ConfigUtils.configToProperties(builder.getConfig()); URI rootPathUri = PathUtils.getRootPath(builder.getAppWorkPath()).toUri(); Config stateStoreJobConfig = ConfigUtils.propertiesToConfig(properties) .withValue(ConfigurationKeys.STATE_STORE_FS_URI_KEY, ConfigValueFactory.fromAnyRef(rootPathUri.toString())); TaskStateTracker taskStateTracker = new GobblinHelixTaskStateTracker(properties); GobblinHelixTaskFactory taskFactory = new GobblinHelixTaskFactory(builder, metricContext, taskStateTracker, stateStoreJobConfig); services.add(taskFactory.getTaskExecutor()); services.add(taskStateTracker); services.add(new JMXReportingService( ImmutableMap.of("task.executor", taskFactory.getTaskExecutor().getTaskExecutorQueueMetricSet()))); return taskFactory; } }
2,239
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/ClusterEventMetadataGenerator.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.cluster;

import java.util.List;
import java.util.Map;

import com.google.common.base.Strings;
import com.google.common.collect.ImmutableMap;

import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.metrics.event.EventName;
import org.apache.gobblin.metrics.event.TimingEvent;
import org.apache.gobblin.runtime.EventMetadataUtils;
import org.apache.gobblin.runtime.JobContext;
import org.apache.gobblin.runtime.TaskState;
import org.apache.gobblin.runtime.api.EventMetadataGenerator;


/**
 * {@link EventMetadataGenerator} that outputs the processed message count and error messages
 * used for job status tracking.
 */
@Alias("cluster")
public class ClusterEventMetadataGenerator implements EventMetadataGenerator {
  public static final String PROCESSED_COUNT_KEY = "processedCount";
  public static final String MESSAGE_KEY = "message";

  /**
   * Builds the event metadata map for the given job and event type.
   *
   * Always includes the high/low watermark fields when present in the job state. For
   * {@code JOB_COMPLETE} it adds the total processed record count; for {@code JOB_FAILED}
   * it adds the task failure exceptions if any, otherwise the job failure exceptions.
   *
   * @param jobContext context of the job the event belongs to
   * @param eventName  the event type being emitted
   * @return an immutable metadata map (possibly containing only the watermark fields)
   */
  public Map<String, String> getMetadata(JobContext jobContext, EventName eventName) {
    List<TaskState> taskStates = jobContext.getJobState().getTaskStates();
    // nullToEmpty guards against a null return from the helpers, which previously
    // could NPE on .length() below or on ImmutableMap.Builder.put (null-hostile).
    String taskException = Strings.nullToEmpty(EventMetadataUtils.getTaskFailureExceptions(taskStates));
    String jobException = Strings.nullToEmpty(EventMetadataUtils.getJobFailureExceptions(jobContext.getJobState()));

    ImmutableMap.Builder<String, String> metadataBuilder = ImmutableMap.builder();
    if (jobContext.getJobState().contains(TimingEvent.FlowEventConstants.HIGH_WATERMARK_FIELD)) {
      metadataBuilder.put(TimingEvent.FlowEventConstants.HIGH_WATERMARK_FIELD,
          jobContext.getJobState().getProp(TimingEvent.FlowEventConstants.HIGH_WATERMARK_FIELD));
    }
    if (jobContext.getJobState().contains(TimingEvent.FlowEventConstants.LOW_WATERMARK_FIELD)) {
      metadataBuilder.put(TimingEvent.FlowEventConstants.LOW_WATERMARK_FIELD,
          jobContext.getJobState().getProp(TimingEvent.FlowEventConstants.LOW_WATERMARK_FIELD));
    }

    switch (eventName) {
      case JOB_COMPLETE:
        return metadataBuilder
            .put(PROCESSED_COUNT_KEY, Long.toString(EventMetadataUtils.getProcessedCount(taskStates)))
            .build();
      case JOB_FAILED:
        // Prefer task-level exceptions; fall back to job-level ones.
        return metadataBuilder
            .put(MESSAGE_KEY, !taskException.isEmpty() ? taskException : jobException)
            .build();
      default:
        break;
    }
    return metadataBuilder.build();
  }
}
2,240
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/SingleTaskRunnerBuilder.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.cluster; class SingleTaskRunnerBuilder { private String clusterConfigFilePath; private String jobId; private String workUnitFilePath; SingleTaskRunnerBuilder setClusterConfigFilePath(final String clusterConfigFilePath) { this.clusterConfigFilePath = clusterConfigFilePath; return this; } SingleTaskRunnerBuilder setJobId(final String jobId) { this.jobId = jobId; return this; } SingleTaskRunnerBuilder setWorkUnitFilePath(final String workUnitFilePath) { this.workUnitFilePath = workUnitFilePath; return this; } SingleTaskRunner createSingleTaskRunner() { return new SingleTaskRunner(this.clusterConfigFilePath, this.jobId, this.workUnitFilePath); } }
2,241
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/GobblinHelixTaskStateTracker.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.cluster; import java.util.Map; import java.util.Properties; import java.util.concurrent.RejectedExecutionException; import java.util.concurrent.ScheduledFuture; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Throwables; import com.google.common.collect.Maps; import lombok.extern.slf4j.Slf4j; import org.apache.gobblin.metrics.GobblinMetrics; import org.apache.gobblin.runtime.AbstractTaskStateTracker; import org.apache.gobblin.runtime.Task; /** * A concrete extension to {@link AbstractTaskStateTracker} for a Gobblin Cluster. * * <p> * This class is currently still primitive and will be enhanced once we add more monitoring in place. 
* </p> * * @author Yinan Li */ @Slf4j public class GobblinHelixTaskStateTracker extends AbstractTaskStateTracker { @VisibleForTesting static final String IS_TASK_METRICS_SCHEDULING_FAILURE_FATAL = "helixTaskTracker.isNewTaskRegFailureFatal"; private static final String DEFAULT_TASK_METRICS_SCHEDULING_FAILURE_FATAL = "false"; // Mapping between tasks and the task state reporters associated with them private final Map<String, ScheduledFuture<?>> scheduledReporters = Maps.newHashMap(); private boolean isNewTaskRegFailureFatal; public GobblinHelixTaskStateTracker(Properties properties) { super(properties, log); isNewTaskRegFailureFatal = Boolean.parseBoolean(properties.getProperty(IS_TASK_METRICS_SCHEDULING_FAILURE_FATAL, DEFAULT_TASK_METRICS_SCHEDULING_FAILURE_FATAL)); } @Override public void registerNewTask(Task task) { try { if (GobblinMetrics.isEnabled(task.getTaskState().getWorkunit())) { this.scheduledReporters.put(task.getTaskId(), scheduleTaskMetricsUpdater(new TaskMetricsUpdater(task), task)); } } catch (RejectedExecutionException ree) { // Propagate the exception to caller that has full control of the life-cycle of a helix task. log.error(String.format("Scheduling of task state reporter for task %s was rejected", task.getTaskId())); if (isNewTaskRegFailureFatal) { Throwables.propagate(ree); } } catch (Throwable t) { String errorMsg = "Failure occurred for scheduling task state reporter, "; if (isNewTaskRegFailureFatal) { throw new RuntimeException(errorMsg, t); } else { log.error(errorMsg, t); } } } @Override public void onTaskRunCompletion(Task task) { task.markTaskCompletion(); } @Override public void onTaskCommitCompletion(Task task) { if (GobblinMetrics.isEnabled(task.getTaskState().getWorkunit())) { // Update record-level metrics after the task is done task.updateRecordMetrics(); task.updateByteMetrics(); } // Cancel the task state reporter associated with this task. 
The reporter might // not be found for the given task because the task fails before the task is // registered. So we need to make sure the reporter exists before calling cancel. if (this.scheduledReporters.containsKey(task.getTaskId())) { this.scheduledReporters.remove(task.getTaskId()).cancel(false); } log.info(String .format("Task %s completed in %dms with state %s", task.getTaskId(), task.getTaskState().getTaskDuration(), task.getTaskState().getWorkingState())); } /** * An extension to {@link AbstractTaskStateTracker.TaskMetricsUpdater}. */ class TaskMetricsUpdater extends AbstractTaskStateTracker.TaskMetricsUpdater { public TaskMetricsUpdater(Task task) { super(task); } } }
2,242
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/SingleTaskLauncher.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.cluster;

import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.List;

import org.apache.commons.lang3.text.StrTokenizer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.typesafe.config.Config;

import org.apache.gobblin.util.GobblinProcessBuilder;
import org.apache.gobblin.util.SystemPropertiesWrapper;

import static org.apache.gobblin.cluster.SingleTaskRunnerMainOptions.CLUSTER_CONFIG_FILE_PATH;
import static org.apache.gobblin.cluster.SingleTaskRunnerMainOptions.JOB_ID;
import static org.apache.gobblin.cluster.SingleTaskRunnerMainOptions.WORK_UNIT_FILE_PATH;


/**
 * Launches a {@link SingleTaskRunnerMain} in a separate JVM process, assembling the
 * java command line (JVM options, classpath, log config, and runner options) from
 * the system config and this launcher's fields.
 */
class SingleTaskLauncher {

  private static final Logger logger = LoggerFactory.getLogger(SingleTaskLauncher.class);

  // Abstraction over ProcessBuilder so tests can intercept process creation.
  private final GobblinProcessBuilder processBuilder;
  // Wrapper over System properties (java.home, java.class.path) for testability.
  private final SystemPropertiesWrapper propertiesWrapper;
  // Path to the serialized cluster config handed to the child process.
  private final Path clusterConfigFilePath;
  // System-level config; consulted for task JVM options, classpath and log config overrides.
  private final Config sysConfig;

  SingleTaskLauncher(final GobblinProcessBuilder processBuilder, final SystemPropertiesWrapper propertiesWrapper,
      final Path clusterConfigFilePath, Config sysConfig) {
    this.processBuilder = propertiesWrapper == null ? processBuilder : processBuilder;
    this.propertiesWrapper = propertiesWrapper;
    this.clusterConfigFilePath = clusterConfigFilePath;
    this.sysConfig = sysConfig;
  }

  /**
   * Starts a child JVM running the given job's single task.
   *
   * @param jobId            id of the job to run
   * @param workUnitFilePath path to the serialized work unit file
   * @return the started child {@link Process}
   * @throws IOException if the process cannot be started
   */
  Process launch(final String jobId, final Path workUnitFilePath)
      throws IOException {
    final SingleTaskLauncher.CmdBuilder cmdBuilder = this.new CmdBuilder(jobId, workUnitFilePath);
    final List<String> command = cmdBuilder.build();
    logger.info("Launching a task process.");
    // The -cp parameter list can be very long.
    final String completeCmdLine = String.join(" ", command);
    logger.info("cmd line:\n{}", completeCmdLine);
    final Process taskProcess = this.processBuilder.start(command);
    return taskProcess;
  }

  /**
   * Assembles the child-process command line. The ordering of build() steps matters:
   * java binary, then JVM options, then classpath, then log config, then the main
   * class name, then the runner's own options.
   */
  private class CmdBuilder {
    private final String jobId;
    private final Path workUnitFilePath;
    private final List<String> cmd = new ArrayList<>();

    private CmdBuilder(final String jobId, final Path workUnitFilePath) {
      this.jobId = jobId;
      this.workUnitFilePath = workUnitFilePath;
    }

    List<String> build() {
      addJavaBin();
      addJavaOptions();
      addClassPath();
      addLogConfig();
      addClassName();
      addOptions();
      return this.cmd;
    }

    // Splits TASK_JVM_OPTIONS on spaces, honoring double-quoted segments.
    // NOTE(review): StrTokenizer is deprecated in recent commons-lang3; consider
    // commons-text's StringTokenizer when dependencies allow.
    private void addJavaOptions() {
      if (sysConfig.hasPath(GobblinClusterConfigurationKeys.TASK_JVM_OPTIONS)) {
        final String taskJvmOptions = sysConfig.getString(GobblinClusterConfigurationKeys.TASK_JVM_OPTIONS);
        StrTokenizer tokenizer = new StrTokenizer(taskJvmOptions, ' ', '"');
        while (tokenizer.hasNext()) {
          this.cmd.add(tokenizer.next());
        }
      }
    }

    private void addClassName() {
      final String runnerClassName = SingleTaskRunnerMain.class.getCanonicalName();
      this.cmd.add(runnerClassName);
    }

    // Uses the same JVM that runs this launcher ($JAVA_HOME/bin/java).
    private void addJavaBin() {
      final String javaHomeDir = SingleTaskLauncher.this.propertiesWrapper.getJavaHome();
      final Path javaBinPath = Paths.get(javaHomeDir, "bin", "java");
      this.cmd.add(javaBinPath.toString());
    }

    private void addLogConfig() {
      if (sysConfig.hasPath(GobblinClusterConfigurationKeys.TASK_LOG_CONFIG)) {
        String logConfig = sysConfig.getString(GobblinClusterConfigurationKeys.TASK_LOG_CONFIG);
        this.cmd.add(logConfig);
      }
    }

    // Classpath comes from config when overridden, otherwise this JVM's own classpath.
    private void addClassPath() {
      this.cmd.add("-cp");
      String classPath;
      if (sysConfig.hasPath(GobblinClusterConfigurationKeys.TASK_CLASSPATH)) {
        classPath = sysConfig.getString(GobblinClusterConfigurationKeys.TASK_CLASSPATH);
      } else {
        classPath = SingleTaskLauncher.this.propertiesWrapper.getJavaClassPath();
      }
      this.cmd.add(classPath);
    }

    private void addOptions() {
      addClusterConfigPath();
      addJobId();
      addWorkUnitPath();
    }

    private void addClusterConfigPath() {
      addOneOption(CLUSTER_CONFIG_FILE_PATH, SingleTaskLauncher.this.clusterConfigFilePath.toString());
    }

    private void addWorkUnitPath() {
      addOneOption(WORK_UNIT_FILE_PATH, this.workUnitFilePath.toString());
    }

    private void addJobId() {
      addOneOption(JOB_ID, this.jobId);
    }

    // Emits "--key value" option pairs as expected by SingleTaskRunnerMainOptions.
    private void addOneOption(final String key, final String value) {
      this.cmd.add("--" + key);
      this.cmd.add(value);
    }
  }
}
2,243
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/GobblinHelixTask.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.cluster; import java.util.Map; import java.util.concurrent.Callable; import org.apache.gobblin.broker.SharedResourcesBrokerFactory; import org.apache.gobblin.configuration.ConfigurationKeys; import org.apache.gobblin.runtime.TaskCreationException; import org.apache.gobblin.runtime.TaskState; import org.apache.gobblin.runtime.util.StateStores; import org.apache.gobblin.source.workunit.MultiWorkUnit; import org.apache.gobblin.source.workunit.WorkUnit; import org.apache.gobblin.util.Id; import org.apache.gobblin.util.event.ContainerHealthCheckFailureEvent; import org.apache.gobblin.util.eventbus.EventBusFactory; import org.apache.gobblin.util.retry.RetryerFactory; import org.apache.commons.lang.exception.ExceptionUtils; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.helix.task.JobContext; import org.apache.helix.task.Task; import org.apache.helix.task.TaskCallbackContext; import org.apache.helix.task.TaskConfig; import org.apache.helix.task.TaskDriver; import org.apache.helix.task.TaskResult; import org.slf4j.MDC; import com.github.rholder.retry.Retryer; import com.google.common.base.Throwables; import 
com.google.common.eventbus.EventBus; import com.google.common.io.Closer; import com.typesafe.config.Config; import com.typesafe.config.ConfigFactory; import com.typesafe.config.ConfigValueFactory; import lombok.extern.slf4j.Slf4j; import static org.apache.gobblin.cluster.HelixTaskEventMetadataGenerator.HELIX_INSTANCE_KEY; import static org.apache.gobblin.cluster.HelixTaskEventMetadataGenerator.HELIX_JOB_ID_KEY; import static org.apache.gobblin.cluster.HelixTaskEventMetadataGenerator.HELIX_TASK_ID_KEY; /** * An implementation of Helix's {@link org.apache.helix.task.Task} that wraps and runs one or more Gobblin * {@link org.apache.gobblin.runtime.Task}s. * * <p> * Upon startup, a {@link GobblinHelixTask} reads the property * {@link GobblinClusterConfigurationKeys#WORK_UNIT_FILE_PATH} for the path of the file storing a serialized * {@link WorkUnit} on the {@link FileSystem} of choice and de-serializes the {@link WorkUnit}. Depending on * if the serialized {@link WorkUnit} is a {@link MultiWorkUnit}, it then creates one or more Gobblin * {@link org.apache.gobblin.runtime.Task}s to run the {@link WorkUnit}(s) (possibly wrapped in the {@link MultiWorkUnit}) * and waits for the Gobblin {@link org.apache.gobblin.runtime.Task}(s) to finish. Upon completion of the Gobblin * {@link org.apache.gobblin.runtime.Task}(s), it persists the {@link TaskState} of each {@link org.apache.gobblin.runtime.Task} to * a file that will be collected by the {@link GobblinHelixJobLauncher} later upon completion of the job. 
 * </p>
 */
@Slf4j
public class GobblinHelixTask implements Task {

  // Immutable identity of this task, resolved from the Helix task callback at construction time.
  private final TaskConfig taskConfig;
  private final String applicationName;
  private final String instanceName;

  // Gobblin-side identifiers, extracted from the Helix TaskConfig's config map.
  private String jobName;
  private String jobId;
  private String helixJobId;
  private String jobKey;
  private String taskId;
  // Path to the serialized WorkUnit this task will deserialize and run.
  private Path workUnitFilePath;
  private GobblinHelixTaskMetrics taskMetrics;
  // The wrapped Gobblin task; may be a SingleFailInCreationTask if creation failed (see constructor).
  private SingleTask task;
  private String helixTaskId;
  // Event bus used to report container health-check failures (e.g. task creation failures).
  private EventBus eventBus;
  // Set by cancel(); checked after run() returns since graceful cancellation may not raise an exception.
  private boolean isCanceled;

  /**
   * Builds the task identity from the Helix callback context, resolves the Helix partition for this
   * task, assembles the dynamic config passed down to the {@link SingleTask}, and creates the
   * {@link SingleTask} with retries. If creation keeps failing, a {@link SingleFailInCreationTask}
   * is substituted so that run() reports a deterministic failure instead of throwing here.
   */
  public GobblinHelixTask(TaskRunnerSuiteBase.Builder builder,
      TaskCallbackContext taskCallbackContext,
      TaskAttemptBuilder taskAttemptBuilder,
      StateStores stateStores,
      GobblinHelixTaskMetrics taskMetrics,
      TaskDriver taskDriver) {
    this.taskConfig = taskCallbackContext.getTaskConfig();
    this.helixJobId = taskCallbackContext.getJobConfig().getJobId();
    this.applicationName = builder.getApplicationName();
    this.instanceName = builder.getInstanceName();
    this.taskMetrics = taskMetrics;
    // Populates jobName/jobId/helixTaskId/jobKey/taskId/workUnitFilePath before they are used below.
    getInfoFromTaskConfig();

    Path jobStateFilePath = GobblinClusterUtils
        .getJobStateFilePath(stateStores.haveJobStateStore(), builder.getAppWorkPath(), this.jobId);

    Integer partitionNum = getPartitionForHelixTask(taskDriver);
    if (partitionNum == null) {
      // Without a partition assignment the task cannot participate in Helix bookkeeping; fail fast.
      throw new IllegalStateException(String.format("Task %s, job %s on instance %s has no partition assigned",
          this.helixTaskId, builder.getInstanceName(), this.helixJobId));
    }

    // Dynamic config is considered as part of JobState in SingleTask
    // Important to distinguish between dynamicConfig and Config
    final Config dynamicConfig = builder.getDynamicConfig()
        .withValue(GobblinClusterConfigurationKeys.TASK_RUNNER_HOST_NAME_KEY,
            ConfigValueFactory.fromAnyRef(builder.getHostName()))
        .withValue(GobblinClusterConfigurationKeys.CONTAINER_ID_KEY,
            ConfigValueFactory.fromAnyRef(builder.getContainerId()))
        .withValue(GobblinClusterConfigurationKeys.HELIX_INSTANCE_NAME_KEY,
            ConfigValueFactory.fromAnyRef(builder.getInstanceName()))
        .withValue(GobblinClusterConfigurationKeys.HELIX_JOB_ID_KEY,
            ConfigValueFactory.fromAnyRef(this.helixJobId))
        .withValue(GobblinClusterConfigurationKeys.HELIX_TASK_ID_KEY,
            ConfigValueFactory.fromAnyRef(this.helixTaskId))
        .withValue(GobblinClusterConfigurationKeys.HELIX_PARTITION_ID_KEY,
            ConfigValueFactory.fromAnyRef(partitionNum));

    // Retry policy for SingleTask creation is driven by the cluster config.
    Retryer<SingleTask> retryer = RetryerFactory.newInstance(builder.getConfig());

    try {
      eventBus = EventBusFactory.get(ContainerHealthCheckFailureEvent.CONTAINER_HEALTH_CHECK_EVENT_BUS_NAME,
          SharedResourcesBrokerFactory.getImplicitBroker());
      this.task = retryer.call(new Callable<SingleTask>() {
        @Override
        public SingleTask call() {
          return new SingleTask(jobId, workUnitFilePath, jobStateFilePath, builder.getFs(), taskAttemptBuilder,
              stateStores, dynamicConfig);
        }
      });
    } catch (Exception e) {
      // Creation failed even after retries: substitute a task that fails deterministically when run,
      // so the failure surfaces through the normal run()/TaskResult path.
      log.error("Execution in creating a SingleTask-with-retry failed, will create a failing task", e);
      this.task = new SingleFailInCreationTask(jobId, workUnitFilePath, jobStateFilePath, builder.getFs(),
          taskAttemptBuilder, stateStores, dynamicConfig);
    }
  }

  /**
   * Extracts Gobblin job/task identifiers and the work-unit file path from the Helix
   * {@link TaskConfig}'s config map.
   */
  private void getInfoFromTaskConfig() {
    Map<String, String> configMap = this.taskConfig.getConfigMap();
    this.jobName = configMap.get(ConfigurationKeys.JOB_NAME_KEY);
    this.jobId = configMap.get(ConfigurationKeys.JOB_ID_KEY);
    this.helixTaskId = this.taskConfig.getId();
    // jobKey is the sequence component of the job id (used for MDC log correlation below).
    this.jobKey = Long.toString(Id.parse(this.jobId).getSequence());
    this.taskId = configMap.get(ConfigurationKeys.TASK_ID_KEY);
    this.workUnitFilePath =
        new Path(configMap.get(GobblinClusterConfigurationKeys.WORK_UNIT_FILE_PATH));
  }

  /**
   * Runs the wrapped {@link SingleTask} and maps its outcome to a Helix {@link TaskResult}.
   * MDC keys are set (and auto-removed via the Closer) so log lines carry job name/key.
   */
  @Override
  public TaskResult run() {
    this.taskMetrics.helixTaskTotalRunning.incrementAndGet();
    this.isCanceled = false;
    long startTime = System.currentTimeMillis();
    log.info("Actual task {} started. [{} {}]", this.taskId, this.applicationName, this.instanceName);
    try (Closer closer = Closer.create()) {
      closer.register(MDC.putCloseable(ConfigurationKeys.JOB_NAME_KEY, this.jobName));
      closer.register(MDC.putCloseable(ConfigurationKeys.JOB_KEY_KEY, this.jobKey));
      this.task.run();
      // Since we enable gracefully cancel, when task get cancelled, we might not see any exception,
      // so we check the isCanceled flag to make sure we return the correct task status
      if (this.isCanceled) {
        log.error("Actual task {} canceled.", this.taskId);
        this.taskMetrics.helixTaskTotalCancelled.incrementAndGet();
        return new TaskResult(TaskResult.Status.CANCELED, "");
      }
      log.info("Actual task {} completed.", this.taskId);
      this.taskMetrics.helixTaskTotalCompleted.incrementAndGet();
      return new TaskResult(TaskResult.Status.COMPLETED, "");
    } catch (InterruptedException ie) {
      log.error("Interrupting task {}", this.taskId);
      Thread.currentThread().interrupt();
      log.error("Actual task {} interrupted.", this.taskId);
      // NOTE(review): interruption increments helixTaskTotalFailed but returns CANCELED — confirm
      // whether the counter or the status is the intended one; they disagree.
      this.taskMetrics.helixTaskTotalFailed.incrementAndGet();
      return new TaskResult(TaskResult.Status.CANCELED, "");
    } catch (TaskCreationException te) {
      // Creation-phase failure: notify the health-check bus so a new container can be requested,
      // and return FAILED so Helix reschedules the task.
      eventBus.post(createTaskCreationEvent("Task Execution"));
      log.error("Actual task {} failed in creation due to {}, will request new container to schedule it",
          this.taskId, te.getMessage());
      // NOTE(review): this path and the generic Throwable path below increment
      // helixTaskTotalCancelled while returning Status.FAILED — looks like it should be
      // helixTaskTotalFailed; confirm intent before relying on these metrics.
      this.taskMetrics.helixTaskTotalCancelled.incrementAndGet();
      return new TaskResult(TaskResult.Status.FAILED, "Root cause:" + ExceptionUtils.getRootCauseMessage(te));
    } catch (Throwable t) {
      log.error(String.format("Actual task %s failed due to:", this.taskId), t);
      this.taskMetrics.helixTaskTotalCancelled.incrementAndGet();
      return new TaskResult(TaskResult.Status.FAILED, "");
    } finally {
      this.taskMetrics.helixTaskTotalRunning.decrementAndGet();
      this.taskMetrics.updateTimeForTaskExecution(startTime);
    }
  }

  /**
   * Builds a {@link ContainerHealthCheckFailureEvent} describing this task, tagged with the
   * lifecycle phase in which the failure occurred.
   */
  private ContainerHealthCheckFailureEvent createTaskCreationEvent(String phase) {
    ContainerHealthCheckFailureEvent event = new ContainerHealthCheckFailureEvent(
        ConfigFactory.parseMap(this.taskConfig.getConfigMap()), getClass().getName());
    event.addMetadata("jobName", this.jobName);
    event.addMetadata("AppName", this.applicationName);
    event.addMetadata(HELIX_INSTANCE_KEY, this.instanceName);
    event.addMetadata(HELIX_JOB_ID_KEY, this.helixJobId);
    event.addMetadata(HELIX_TASK_ID_KEY, this.helixTaskId);
    event.addMetadata("WUPath", this.workUnitFilePath.toString());
    event.addMetadata("Phase", phase);
    return event;
  }

  /**
   * Looks up the Helix partition id for this task from the job context; returns null when the
   * job context is unavailable or the task has no partition assigned.
   */
  private Integer getPartitionForHelixTask(TaskDriver taskDriver) {
    //Get Helix partition id for this task
    JobContext jobContext = taskDriver.getJobContext(this.helixJobId);
    if (jobContext != null) {
      return jobContext.getTaskIdPartitionMap().get(this.helixTaskId);
    }
    return null;
  }

  /**
   * Invoked by Helix to cancel this task. Sets the isCanceled flag (read by run()) and delegates
   * to the wrapped task's cancel; any failure is rethrown after logging.
   */
  @Override
  public void cancel() {
    log.info("Gobblin helix task cancellation invoked for jobId {}.", jobId);
    if (this.task != null) {
      try {
        this.isCanceled = true;
        this.task.cancel();
        log.info("Gobblin helix task cancellation completed for jobId {}.", jobId);
      } catch (Throwable t) {
        log.info("Gobblin helix task cancellation for jobId {} failed with exception.", jobId, t);
        Throwables.propagate(t);
      }
    } else {
      // cancel() can race ahead of the constructor; nothing to do if the task was never created.
      log.warn("Cancel called for an uninitialized Gobblin helix task for jobId {}.", jobId);
    }
  }
}
2,244
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/SleepingTask.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.cluster; import java.io.File; import java.io.IOException; import com.google.common.base.Throwables; import com.google.common.io.Files; import lombok.extern.slf4j.Slf4j; import org.apache.gobblin.runtime.TaskContext; import org.apache.gobblin.runtime.TaskState; import org.apache.gobblin.runtime.task.BaseAbstractTask; @Slf4j public class SleepingTask extends BaseAbstractTask { public static final String TASK_STATE_FILE_KEY = "task.state.file.path"; public static final String SLEEP_TIME_IN_SECONDS = "data.publisher.sleep.time.in.seconds"; private final long sleepTime; private File taskStateFile; public SleepingTask(TaskContext taskContext) { super(taskContext); TaskState taskState = taskContext.getTaskState(); sleepTime = taskState.getPropAsLong(SLEEP_TIME_IN_SECONDS, 10L); taskStateFile = new File(taskState.getProp(TASK_STATE_FILE_KEY)); try { if (taskStateFile.exists()) { if (!taskStateFile.delete()) { log.error("Unable to delete {}", taskStateFile); throw new IOException("File Delete Exception"); } } else { Files.createParentDirs(taskStateFile); } } catch (IOException e) { log.error("Unable to create directory: ", taskStateFile.getParent()); Throwables.propagate(e); } 
taskStateFile.deleteOnExit(); } @Override public void run() { try { if (!taskStateFile.createNewFile()) { throw new IOException("File creation error: " + taskStateFile.getName()); } long endTime = System.currentTimeMillis() + sleepTime * 1000; while (System.currentTimeMillis() <= endTime) { Thread.sleep(1000L); log.warn("Sleeping for {} seconds", sleepTime); } log.info("Hello World!"); super.run(); } catch (InterruptedException e) { log.error("Sleep interrupted."); Thread.currentThread().interrupt(); Throwables.propagate(e); } catch (IOException e) { log.error("IOException encountered when creating {}", taskStateFile.getName(), e); Throwables.propagate(e); } } }
2,245
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/GobblinHelixConstants.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.cluster; /** * Class for defining constants used while interacting with Helix */ public class GobblinHelixConstants { public static final String SHUTDOWN_MESSAGE_TYPE = "SHUTDOWN"; }
2,246
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/GobblinHelixJobSchedulerMetrics.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.cluster;

import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;

import com.google.common.base.Optional;

import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.instrumented.StandardMetricsBridge;
import org.apache.gobblin.metrics.ContextAwareTimer;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.runtime.api.JobExecutionLauncher;

/**
 * Metrics for the Helix job scheduler: schedule-cancellation counters, timers around job
 * scheduling/launching latency, and gauges mirroring the state of the job executor thread pool.
 */
class GobblinHelixJobSchedulerMetrics extends StandardMetricsBridge.StandardMetrics {
  public static final String SCHEDULE_CANCELLATION_START = "scheduleCancellationStart";
  public static final String SCHEDULE_CANCELLATION_END = "scheduleCancellationEnd";
  public static final String TIMER_BEFORE_JOB_SCHEDULING = "timerBeforeJobScheduling";
  public static final String TIMER_BEFORE_JOB_LAUNCHING = "timerBeforeJobLaunching";
  // NOTE(review): "Betwen" is a typo, but it is the published metric name — keep as-is so
  // existing dashboards/alerts that reference this name do not break.
  public static final String TIMER_BETWEEN_JOB_SCHEDULING_AND_LAUNCHING = "timerBetwenJobSchedulingAndLaunching";

  // Count of cancellation attempts started vs completed; a persistent gap suggests stuck cancels.
  final AtomicLong numCancellationStart;
  final AtomicLong numCancellationComplete;
  // Time from job creation (FLOW_EXECUTION_ID, assumed to be a creation timestamp in millis —
  // TODO confirm) until scheduling / launching respectively.
  final ContextAwareTimer timeBeforeJobScheduling;
  final ContextAwareTimer timeBeforeJobLaunching;
  // Field name carries the same "Betwen" typo as the metric name; kept for consistency.
  final ContextAwareTimer timeBetwenJobSchedulingAndLaunching;
  final ThreadPoolExecutor threadPoolExecutor;

  public GobblinHelixJobSchedulerMetrics(final ExecutorService jobExecutor, final MetricContext metricContext,
      int windowSizeInMin) {
    // Sliding-window timers so reported latencies reflect recent activity only.
    this.timeBeforeJobScheduling =
        metricContext.contextAwareTimer(TIMER_BEFORE_JOB_SCHEDULING, windowSizeInMin, TimeUnit.MINUTES);
    this.timeBeforeJobLaunching =
        metricContext.contextAwareTimer(TIMER_BEFORE_JOB_LAUNCHING, windowSizeInMin, TimeUnit.MINUTES);
    this.timeBetwenJobSchedulingAndLaunching =
        metricContext.contextAwareTimer(TIMER_BETWEEN_JOB_SCHEDULING_AND_LAUNCHING, windowSizeInMin, TimeUnit.MINUTES);

    this.numCancellationStart = new AtomicLong(0);
    this.numCancellationComplete = new AtomicLong(0);

    this.contextAwareMetrics.add(
        metricContext.newContextAwareGauge(SCHEDULE_CANCELLATION_START, () -> this.numCancellationStart.get()));
    this.contextAwareMetrics.add(
        metricContext.newContextAwareGauge(SCHEDULE_CANCELLATION_END, () -> this.numCancellationComplete.get()));
    this.contextAwareMetrics.add(timeBeforeJobScheduling);
    this.contextAwareMetrics.add(timeBeforeJobLaunching);
    this.contextAwareMetrics.add(timeBetwenJobSchedulingAndLaunching);

    // Downcast is assumed safe: callers are expected to pass a ThreadPoolExecutor so its pool
    // statistics can be exposed as gauges.
    this.threadPoolExecutor = (ThreadPoolExecutor) jobExecutor;

    // executor metrics
    this.contextAwareMetrics.add(metricContext.newContextAwareGauge(
        JobExecutionLauncher.StandardMetrics.EXECUTOR_ACTIVE_COUNT, () -> this.threadPoolExecutor.getActiveCount()));
    this.contextAwareMetrics.add(metricContext.newContextAwareGauge(
        JobExecutionLauncher.StandardMetrics.EXECUTOR_MAX_POOL_SIZE,
        () -> this.threadPoolExecutor.getMaximumPoolSize()));
    this.contextAwareMetrics.add(metricContext.newContextAwareGauge(
        JobExecutionLauncher.StandardMetrics.EXECUTOR_POOL_SIZE, () -> this.threadPoolExecutor.getPoolSize()));
    this.contextAwareMetrics.add(metricContext.newContextAwareGauge(
        JobExecutionLauncher.StandardMetrics.EXECUTOR_CORE_POOL_SIZE,
        () -> this.threadPoolExecutor.getCorePoolSize()));
    this.contextAwareMetrics.add(metricContext.newContextAwareGauge(
        JobExecutionLauncher.StandardMetrics.EXECUTOR_QUEUE_SIZE, () -> this.threadPoolExecutor.getQueue().size()));
  }

  /** Records elapsed time from job creation (FLOW_EXECUTION_ID) to the moment of scheduling. */
  void updateTimeBeforeJobScheduling(Properties jobProps) {
    long jobCreationTime = Long.parseLong(jobProps.getProperty(ConfigurationKeys.FLOW_EXECUTION_ID_KEY, "0"));
    Instrumented.updateTimer(Optional.of(timeBeforeJobScheduling), System.currentTimeMillis() - jobCreationTime,
        TimeUnit.MILLISECONDS);
  }

  /** Records elapsed time from job creation (FLOW_EXECUTION_ID) to the moment of launching. */
  void updateTimeBeforeJobLaunching(Properties jobProps) {
    long jobCreationTime = Long.parseLong(jobProps.getProperty(ConfigurationKeys.FLOW_EXECUTION_ID_KEY, "0"));
    Instrumented.updateTimer(Optional.of(timeBeforeJobLaunching), System.currentTimeMillis() - jobCreationTime,
        TimeUnit.MILLISECONDS);
  }

  /** Records the gap between when a job was scheduled and when it actually launched. */
  void updateTimeBetweenJobSchedulingAndJobLaunching(long scheduledTime, long launchingTime) {
    Instrumented.updateTimer(Optional.of(timeBetwenJobSchedulingAndLaunching), launchingTime - scheduledTime,
        TimeUnit.MILLISECONDS);
  }
}
2,247
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/HelixAssignedParticipantCheck.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.cluster; import java.util.concurrent.Callable; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import org.apache.helix.HelixManager; import org.apache.helix.HelixManagerFactory; import org.apache.helix.InstanceType; import org.apache.helix.task.JobContext; import org.apache.helix.task.TaskDriver; import com.github.rholder.retry.AttemptTimeLimiters; import com.github.rholder.retry.RetryException; import com.github.rholder.retry.Retryer; import com.github.rholder.retry.RetryerBuilder; import com.github.rholder.retry.StopStrategies; import com.google.common.annotations.VisibleForTesting; import com.typesafe.config.Config; import lombok.Getter; import lombok.extern.slf4j.Slf4j; import org.apache.gobblin.annotation.Alias; import org.apache.gobblin.commit.CommitStep; import org.apache.gobblin.commit.CommitStepException; /** * A {@link CommitStep} that checks with Helix if a particular Helix instance is still the assigned participant for a given * Helix Partition. This {@link CommitStep} implementation is a safety check against Helix and is intended to be used * before data is published and state is committed. 
The primiary motivation for this {@link CommitStep} is to avoid a "split-brain" * scenario where a runaway Helix task continues to process a partition even though Helix has assigned the same * partition to a different Helix task. This can happen due to inconsistency between the state of a task as maintained * by Helix on ZK vs the local state of the task. */ @Slf4j @Alias (value = "HelixParticipantCheck") public class HelixAssignedParticipantCheck implements CommitStep { @Getter @VisibleForTesting private static volatile HelixManager helixManager = null; private static volatile Retryer<Boolean> retryer = RetryerBuilder.<Boolean>newBuilder() .retryIfException() .withStopStrategy(StopStrategies.stopAfterAttempt(3)) .withAttemptTimeLimiter(AttemptTimeLimiters.fixedTimeLimit(3000, TimeUnit.MILLISECONDS)).build(); private final String helixInstanceName; private final String helixJob; private final int partitionNum; private final Config config; /** * A method that uses the Singleton pattern to instantiate a {@link HelixManager} instance. * @param config * @return */ public static void initHelixManager(Config config) throws Exception { if (helixManager == null) { synchronized (HelixAssignedParticipantCheck.class) { if (helixManager == null) { String zkConnectString = config.getString(GobblinClusterConfigurationKeys.ZK_CONNECTION_STRING_KEY); String clusterName = config.getString(GobblinClusterConfigurationKeys.HELIX_CLUSTER_NAME_KEY); helixManager = HelixManagerFactory.getZKHelixManager(clusterName, HelixAssignedParticipantCheck.class.getSimpleName(), InstanceType.SPECTATOR, zkConnectString); helixManager.connect(); } } } } /** * Refresh {@link HelixManager} instance. Invoked when the underlying ZkClient is closed causing Helix * APIs to throw an Exception. * @throws Exception */ private void refreshHelixManager() throws Exception { synchronized (HelixAssignedParticipantCheck.class) { //Ensure existing instance is disconnected to close any open connections. 
helixManager.disconnect(); helixManager = null; initHelixManager(config); } } public HelixAssignedParticipantCheck(Config config) throws Exception { this.config = config; initHelixManager(config); this.helixInstanceName = config.getString(GobblinClusterConfigurationKeys.HELIX_INSTANCE_NAME_KEY); this.helixJob = config.getString(GobblinClusterConfigurationKeys.HELIX_JOB_ID_KEY); this.partitionNum = config.getInt(GobblinClusterConfigurationKeys.HELIX_PARTITION_ID_KEY); } /** * Determine whether the commit step has been completed. */ @Override public boolean isCompleted() { return false; } /** * Execute the commit step. */ @Override public void execute() throws CommitStepException { log.info(String.format("HelixParticipantCheck step called for Helix Instance: %s, Helix job: %s, Helix partition: %d", this.helixInstanceName, this.helixJob, this.partitionNum)); //Query Helix to get the currently assigned participant for the Helix partitionNum Callable callable = () -> { JobContext jobContext; try { TaskDriver taskDriver = new TaskDriver(helixManager); jobContext = taskDriver.getJobContext(helixJob); } catch (Exception e) { log.info("Encountered exception when executing " + getClass().getSimpleName(), e); log.info("Refreshing Helix manager.."); refreshHelixManager(); //Rethrow the exception to trigger a retry. throw e; } if (jobContext != null) { String participant = jobContext.getAssignedParticipant(partitionNum); if (participant == null) { log.error("The current assigned participant is null. This implies that \n" + "\t\t(a)Helix failed to write to zookeeper, which is often caused by lack of compression leading / exceeding zookeeper jute max buffer size (Default 1MB)\n" + "\t\t(b)Helix reassigned the task (unlikely if this current task has been running without issue. 
Helix does not have code for reassigning \"running\" tasks)\n" + "\t\tNote: This logic is true as of Helix version 1.0.2 and ZK version 3.6"); return false; } boolean isAssignedParticipant = participant.equalsIgnoreCase(helixInstanceName); if (!isAssignedParticipant) { log.info("The current helix instance is not the assigned participant. helixInstanceName={}, assignedParticipant={}", helixInstanceName, participant); } return isAssignedParticipant; } return false; }; boolean isParticipant; try { isParticipant = retryer.call(callable); } catch (ExecutionException | RetryException e) { log.error("Cannot complete participant assignment check within the retry limit due to: {}", e); //Set isParticipant to true; since we cannot verify the status of the Helix Participant at this time. isParticipant = true; } if (!isParticipant) { throw new CommitStepException(String.format("Helix instance %s not the assigned participant for partition %d",this.helixInstanceName, this.partitionNum)); } } }
2,248
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/SingleTaskRunnerMain.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.cluster; import java.io.IOException; import java.io.OutputStreamWriter; import java.io.PrintWriter; import java.util.Arrays; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.google.common.base.Charsets; class SingleTaskRunnerMain { private static final Logger logger = LoggerFactory.getLogger(SingleTaskRunnerMain.class); private final SingleTaskRunnerBuilder builder; SingleTaskRunnerMain(final SingleTaskRunnerBuilder builder) { this.builder = builder; } public static void main(final String[] args) { logger.info("SingleTaskRunnerMain starting. 
args: " + Arrays.toString(args)); final SingleTaskRunnerMain runnerMain = new SingleTaskRunnerMain(new SingleTaskRunnerBuilder()); try { runnerMain.run(args); } catch (final Exception e) { logger.error("Got an exception running a single task.", e); System.exit(1); } } void run(final String[] args) throws IOException, InterruptedException { final OutputStreamWriter streamWriter = new OutputStreamWriter(System.out, Charsets.UTF_8); final PrintWriter writer = new PrintWriter(streamWriter, true); final SingleTaskRunnerMainOptions options = new SingleTaskRunnerMainOptions(args, writer); final SingleTaskRunner runner = this.builder.setClusterConfigFilePath(options.getClusterConfigFilePath()) .setJobId(options.getJobId()) .setWorkUnitFilePath(options.getWorkUnitFilePath()) .createSingleTaskRunner(); runner.run(); } }
2,249
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/GobblinClusterManager.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.cluster; import java.io.IOException; import java.util.ArrayList; import java.util.Collection; import java.util.List; import java.util.Properties; import java.util.UUID; import java.util.concurrent.atomic.AtomicBoolean; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.DefaultParser; import org.apache.commons.cli.HelpFormatter; import org.apache.commons.cli.Options; import org.apache.commons.cli.ParseException; import org.apache.commons.lang.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.helix.Criteria; import org.apache.helix.HelixManager; import org.apache.helix.InstanceType; import org.apache.helix.messaging.handling.MultiTypeMessageHandlerFactory; import org.apache.helix.model.ClusterConfig; import org.apache.helix.model.Message; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Optional; import com.google.common.base.Splitter; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import 
com.google.common.eventbus.EventBus; import com.google.common.eventbus.Subscribe; import com.google.common.util.concurrent.Service; import com.typesafe.config.Config; import com.typesafe.config.ConfigFactory; import com.typesafe.config.ConfigValueFactory; import lombok.AccessLevel; import lombok.Getter; import lombok.Setter; import lombok.extern.slf4j.Slf4j; import org.apache.gobblin.annotation.Alpha; import org.apache.gobblin.cluster.event.ClusterManagerShutdownRequest; import org.apache.gobblin.configuration.ConfigurationKeys; import org.apache.gobblin.instrumented.StandardMetricsBridge; import org.apache.gobblin.metrics.Tag; import org.apache.gobblin.runtime.api.MutableJobCatalog; import org.apache.gobblin.runtime.app.ApplicationException; import org.apache.gobblin.runtime.app.ApplicationLauncher; import org.apache.gobblin.runtime.app.ServiceBasedAppLauncher; import org.apache.gobblin.scheduler.SchedulerService; import org.apache.gobblin.util.ConfigUtils; import org.apache.gobblin.util.JvmUtils; import org.apache.gobblin.util.reflection.GobblinConstructorUtils; /** * The central cluster manager for Gobblin Clusters. * * <p> * This class runs the {@link GobblinHelixJobScheduler} for scheduling and running Gobblin jobs. * This class serves as the Helix controller and it uses a {@link HelixManager} to work with Helix. * </p> * * <p> * This class will initiates a graceful shutdown of the cluster in the following conditions: * * <ul> * <li>A shutdown request is received via a Helix message of subtype * {@link HelixMessageSubTypes#APPLICATION_MASTER_SHUTDOWN}. Upon receiving such a message, * it will call {@link #stop()} to initiate a graceful shutdown of the cluster</li> * <li>The shutdown hook gets called. 
The shutdown hook will call {@link #stop()}, which will * start a graceful shutdown of the cluster.</li> * </ul> * </p> * * @author Yinan Li */ @Alpha @Slf4j public class GobblinClusterManager implements ApplicationLauncher, StandardMetricsBridge, LeadershipChangeAwareComponent { private static final Logger LOGGER = LoggerFactory.getLogger(GobblinClusterManager.class); private StopStatus stopStatus = new StopStatus(false); protected ServiceBasedAppLauncher applicationLauncher; // An EventBus used for communications between services running in the ApplicationMaster @Getter(AccessLevel.PUBLIC) protected final EventBus eventBus = new EventBus(GobblinClusterManager.class.getSimpleName()); protected final Path appWorkDir; @Getter protected final FileSystem fs; protected final String applicationId; // thread used to keep process up for an idle controller private Thread idleProcessThread; // set to true to stop the idle process thread private volatile boolean stopIdleProcessThread = false; private final boolean isStandaloneMode; @Getter protected GobblinHelixMultiManager multiManager; @Getter private MutableJobCatalog jobCatalog; @Getter private GobblinHelixJobScheduler jobScheduler; @Getter private JobConfigurationManager jobConfigurationManager; @Getter private volatile boolean started = false; protected final String clusterName; @Getter protected final Config config; public GobblinClusterManager(String clusterName, String applicationId, Config sysConfig, Optional<Path> appWorkDirOptional) throws Exception { // Set system properties passed in via application config. As an example, Helix uses System#getProperty() for ZK configuration // overrides such as sessionTimeout. In this case, the overrides specified // in the application configuration have to be extracted and set before initializing HelixManager. 
GobblinClusterUtils.setSystemProperties(sysConfig); //Add dynamic config this.config = GobblinClusterUtils.addDynamicConfig(sysConfig); this.clusterName = clusterName; this.isStandaloneMode = ConfigUtils.getBoolean(this.config, GobblinClusterConfigurationKeys.STANDALONE_CLUSTER_MODE_KEY, GobblinClusterConfigurationKeys.DEFAULT_STANDALONE_CLUSTER_MODE); this.applicationId = applicationId; initializeHelixManager(); this.fs = GobblinClusterUtils.buildFileSystem(this.config, new Configuration()); this.appWorkDir = appWorkDirOptional.isPresent() ? appWorkDirOptional.get() : GobblinClusterUtils.getAppWorkDirPathFromConfig(this.config, this.fs, clusterName, applicationId); LOGGER.info("Configured GobblinClusterManager work dir to: {}", this.appWorkDir); initializeAppLauncherAndServices(); } /** * Create the service based application launcher and other associated services * @throws Exception */ private void initializeAppLauncherAndServices() throws Exception { // Done to preserve backwards compatibility with the previously hard-coded timeout of 5 minutes Properties properties = ConfigUtils.configToProperties(this.config); if (!properties.contains(ServiceBasedAppLauncher.APP_STOP_TIME_SECONDS)) { properties.setProperty(ServiceBasedAppLauncher.APP_STOP_TIME_SECONDS, Long.toString(300)); } this.applicationLauncher = new ServiceBasedAppLauncher(properties, this.clusterName); // create a job catalog for keeping track of received jobs if a job config path is specified if (this.config.hasPath(GobblinClusterConfigurationKeys.GOBBLIN_CLUSTER_PREFIX + ConfigurationKeys.JOB_CONFIG_FILE_GENERAL_PATH_KEY)) { String jobCatalogClassName = ConfigUtils.getString(config, GobblinClusterConfigurationKeys.JOB_CATALOG_KEY, GobblinClusterConfigurationKeys.DEFAULT_JOB_CATALOG); this.jobCatalog = (MutableJobCatalog) GobblinConstructorUtils.invokeFirstConstructor(Class.forName(jobCatalogClassName), ImmutableList.of(config 
.getConfig(StringUtils.removeEnd(GobblinClusterConfigurationKeys.GOBBLIN_CLUSTER_PREFIX, ".")) .withFallback(this.config))); } else { this.jobCatalog = null; } SchedulerService schedulerService = new SchedulerService(properties); this.applicationLauncher.addService(schedulerService); this.jobScheduler = buildGobblinHelixJobScheduler(config, this.appWorkDir, getMetadataTags(clusterName, applicationId), schedulerService); this.applicationLauncher.addService(this.jobScheduler); this.jobConfigurationManager = buildJobConfigurationManager(config); this.applicationLauncher.addService(this.jobConfigurationManager); if (ConfigUtils.getBoolean(this.config, GobblinClusterConfigurationKeys.CONTAINER_HEALTH_METRICS_SERVICE_ENABLED, GobblinClusterConfigurationKeys.DEFAULT_CONTAINER_HEALTH_METRICS_SERVICE_ENABLED)) { this.applicationLauncher.addService(new ContainerHealthMetricsService(config)); } } /** * Start any services required by the application launcher then start the application launcher */ private void startAppLauncherAndServices() { // other services such as the job configuration manager have a dependency on the job catalog, so it has be be // started first if (this.jobCatalog instanceof Service) { ((Service) this.jobCatalog).startAsync().awaitRunning(); } this.applicationLauncher.start(); } /** * Stop the application launcher then any services that were started outside of the application launcher */ private void stopAppLauncherAndServices() { try { this.applicationLauncher.stop(); } catch (ApplicationException ae) { LOGGER.error("Error while stopping Gobblin Cluster application launcher", ae); } if (this.jobCatalog instanceof Service) { ((Service) this.jobCatalog).stopAsync().awaitTerminated(); } } /** * Configure Helix quota-based task scheduling. * This config controls the number of tasks that are concurrently assigned to a single Helix instance. 
* Reference: https://helix.apache.org/1.0.3-docs/quota_scheduling.html */ @VisibleForTesting void configureHelixQuotaBasedTaskScheduling() { // set up the cluster quota config List<String> quotaConfigList = ConfigUtils.getStringList(this.config, GobblinClusterConfigurationKeys.HELIX_TASK_QUOTA_CONFIG_KEY); if (quotaConfigList.isEmpty()) { return; } // retrieve the cluster config for updating ClusterConfig clusterConfig = this.multiManager.getJobClusterHelixManager().getConfigAccessor() .getClusterConfig(this.clusterName); clusterConfig.resetTaskQuotaRatioMap(); for (String entry : quotaConfigList) { List<String> quotaConfig = Splitter.on(":").limit(2).trimResults().omitEmptyStrings().splitToList(entry); if (quotaConfig.size() < 2) { throw new IllegalArgumentException( "Quota configurations must be of the form <key1>:<value1>,<key2>:<value2>,..."); } clusterConfig.setTaskQuotaRatio(quotaConfig.get(0), Integer.parseInt(quotaConfig.get(1))); } this.multiManager.getJobClusterHelixManager().getConfigAccessor() .setClusterConfig(this.clusterName, clusterConfig); // Set the new ClusterConfig } /** * Start the Gobblin Cluster Manager. */ @Override public synchronized void start() { LOGGER.info("Starting the Gobblin Cluster Manager"); this.eventBus.register(this); setupHelix(); if (this.isStandaloneMode) { // standalone mode starts non-daemon threads later, so need to have this thread to keep process up this.idleProcessThread = new Thread(new Runnable() { @Override public void run() { while (!GobblinClusterManager.this.stopStatus.isStopInProgress() && !GobblinClusterManager.this.stopIdleProcessThread) { try { Thread.sleep(300); } catch (InterruptedException e) { Thread.currentThread().interrupt(); break; } } } }); this.idleProcessThread.start(); // Need this in case a kill is issued to the process so that the idle thread does not keep the process up // since GobblinClusterManager.stop() is not called this case. 
Runtime.getRuntime().addShutdownHook(new Thread(() -> GobblinClusterManager.this.stopIdleProcessThread = true)); } else { startAppLauncherAndServices(); } this.started = true; } public synchronized void setupHelix() { this.multiManager.connect(); // Standalone mode registers a handler to clean up on manager leadership change, so only clean up for non-standalone // mode, such as YARN mode if (!this.isStandaloneMode) { this.multiManager.cleanUpJobs(); } configureHelixQuotaBasedTaskScheduling(); } /** * Stop the Gobblin Cluster Manager. */ @Override public synchronized void stop() { if (this.stopStatus.isStopInProgress()) { return; } this.stopStatus.setStopInprogress(true); LOGGER.info("Stopping the Gobblin Cluster Manager"); if (this.idleProcessThread != null) { try { this.idleProcessThread.join(); } catch (InterruptedException ie) { Thread.currentThread().interrupt(); } } // Send a shutdown request to all GobblinTaskRunners unless running in standalone mode. // In standalone mode a failing manager should not stop the whole cluster. if (!this.isStandaloneMode) { sendShutdownRequest(); } stopAppLauncherAndServices(); this.multiManager.disconnect(); } /** * Get additional {@link Tag}s required for any type of reporting. */ private List<? extends Tag<?>> getMetadataTags(String applicationName, String applicationId) { return Tag.fromMap( new ImmutableMap.Builder<String, Object>().put(GobblinClusterMetricTagNames.APPLICATION_NAME, applicationName) .put(GobblinClusterMetricTagNames.APPLICATION_ID, applicationId).build()); } /** * Build the {@link GobblinHelixJobScheduler} for the Application Master. */ private GobblinHelixJobScheduler buildGobblinHelixJobScheduler(Config sysConfig, Path appWorkDir, List<? 
extends Tag<?>> metadataTags, SchedulerService schedulerService) throws Exception { return new GobblinHelixJobScheduler(sysConfig, this.multiManager.getJobClusterHelixManager(), this.multiManager.getTaskDriverHelixManager(), this.eventBus, appWorkDir, metadataTags, schedulerService, this.jobCatalog); } /** * Build the {@link JobConfigurationManager} for the Application Master. */ private JobConfigurationManager buildJobConfigurationManager(Config config) { try { List<Object> argumentList = (this.jobCatalog != null)? ImmutableList.of(this.eventBus, config, this.jobCatalog, this.fs) : ImmutableList.of(this.eventBus, config, this.fs); if (config.hasPath(GobblinClusterConfigurationKeys.JOB_CONFIGURATION_MANAGER_KEY)) { return (JobConfigurationManager) GobblinConstructorUtils.invokeLongestConstructor(Class.forName( config.getString(GobblinClusterConfigurationKeys.JOB_CONFIGURATION_MANAGER_KEY)), argumentList.toArray(new Object[argumentList.size()])); } else { return new JobConfigurationManager(this.eventBus, config); } } catch (ReflectiveOperationException e) { throw new RuntimeException(e); } } @SuppressWarnings("unused") @Subscribe public void handleApplicationMasterShutdownRequest(ClusterManagerShutdownRequest shutdownRequest) { stop(); } /** * Creates and returns a {@link MultiTypeMessageHandlerFactory} for handling of Helix * {@link org.apache.helix.model.Message.MessageType#USER_DEFINE_MSG}s. * * @returns a {@link MultiTypeMessageHandlerFactory}. 
*/ protected MultiTypeMessageHandlerFactory getUserDefinedMessageHandlerFactory() { return new GobblinHelixMultiManager.ControllerUserDefinedMessageHandlerFactory(); } @VisibleForTesting void connectHelixManager() { this.multiManager.connect(); } @VisibleForTesting void disconnectHelixManager() { this.multiManager.disconnect(); } @VisibleForTesting boolean isHelixManagerConnected() { return this.multiManager.isConnected(); } /** * In separate controller mode, one controller will manage manager's HA, the other will handle the job dispatching and * work unit assignment. */ @VisibleForTesting void initializeHelixManager() { this.multiManager = createMultiManager(); this.multiManager.addLeadershipChangeAwareComponent(this); } /*** * Can be overriden to inject mock GobblinHelixMultiManager * @return a new GobblinHelixMultiManager */ public GobblinHelixMultiManager createMultiManager() { return new GobblinHelixMultiManager(this.config, aVoid -> GobblinClusterManager.this.getUserDefinedMessageHandlerFactory(), this.eventBus, stopStatus); } @VisibleForTesting void sendShutdownRequest() { Criteria criteria = new Criteria(); criteria.setInstanceName("%"); criteria.setResource("%"); criteria.setPartition("%"); criteria.setPartitionState("%"); criteria.setRecipientInstanceType(InstanceType.PARTICIPANT); // #HELIX-0.6.7-WORKAROUND // Add this back when messaging to instances is ported to 0.6 branch //criteria.setDataSource(Criteria.DataSource.LIVEINSTANCES); criteria.setSessionSpecific(true); Message shutdownRequest = new Message(GobblinHelixConstants.SHUTDOWN_MESSAGE_TYPE, HelixMessageSubTypes.WORK_UNIT_RUNNER_SHUTDOWN.toString().toLowerCase() + UUID.randomUUID().toString()); shutdownRequest.setMsgSubType(HelixMessageSubTypes.WORK_UNIT_RUNNER_SHUTDOWN.toString()); shutdownRequest.setMsgState(Message.MessageState.NEW); // Wait for 5 minutes final int timeout = 300000; // #HELIX-0.6.7-WORKAROUND // Temporarily bypass the default messaging service to allow upgrade to 0.6.7 which 
is missing support // for messaging to instances //int messagesSent = this.helixManager.getMessagingService().send(criteria, shutdownRequest, // new NoopReplyHandler(), timeout); GobblinHelixMessagingService messagingService = new GobblinHelixMessagingService(this.multiManager.getJobClusterHelixManager()); int messagesSent = messagingService.send(criteria, shutdownRequest, new NoopReplyHandler(), timeout); if (messagesSent == 0) { LOGGER.error(String.format("Failed to send the %s message to the participants", shutdownRequest.getMsgSubType())); } } @Override public void close() throws IOException { this.applicationLauncher.close(); } @Override public Collection<StandardMetrics> getStandardMetricsCollection() { List<StandardMetrics> list = new ArrayList(); list.addAll(this.jobScheduler.getStandardMetricsCollection()); list.addAll(this.multiManager.getStandardMetricsCollection()); list.addAll(this.jobCatalog.getStandardMetricsCollection()); list.addAll(this.jobConfigurationManager.getStandardMetricsCollection()); return list; } /** * TODO for now the cluster id is hardcoded to 1 both here and in the {@link GobblinTaskRunner}. In the future, the * cluster id should be created by the {@link GobblinClusterManager} and passed to each {@link GobblinTaskRunner} via * Helix (at least that would be the easiest approach, there are certainly others ways to do it). 
*/ private static String getApplicationId() { return "1"; } private static Options buildOptions() { Options options = new Options(); options.addOption("a", GobblinClusterConfigurationKeys.APPLICATION_NAME_OPTION_NAME, true, "Gobblin application name"); options.addOption("s", GobblinClusterConfigurationKeys.STANDALONE_CLUSTER_MODE, true, "Standalone cluster mode"); options.addOption("i", GobblinClusterConfigurationKeys.HELIX_INSTANCE_NAME_OPTION_NAME, true, "Helix instance name"); return options; } private static void printUsage(Options options) { HelpFormatter formatter = new HelpFormatter(); formatter.printHelp(GobblinClusterManager.class.getSimpleName(), options); } public static void main(String[] args) throws Exception { Options options = buildOptions(); try { CommandLine cmd = new DefaultParser().parse(options, args); if (!cmd.hasOption(GobblinClusterConfigurationKeys.APPLICATION_NAME_OPTION_NAME)) { printUsage(options); System.exit(1); } boolean isStandaloneClusterManager = false; if (cmd.hasOption(GobblinClusterConfigurationKeys.STANDALONE_CLUSTER_MODE)) { isStandaloneClusterManager = Boolean.parseBoolean(cmd.getOptionValue(GobblinClusterConfigurationKeys.STANDALONE_CLUSTER_MODE, "false")); } LOGGER.info(JvmUtils.getJvmInputArguments()); Config config = ConfigFactory.load(); if (cmd.hasOption(GobblinClusterConfigurationKeys.HELIX_INSTANCE_NAME_OPTION_NAME)) { config = config.withValue(GobblinClusterConfigurationKeys.HELIX_INSTANCE_NAME_KEY, ConfigValueFactory.fromAnyRef(cmd.getOptionValue( GobblinClusterConfigurationKeys.HELIX_INSTANCE_NAME_OPTION_NAME))); } if (isStandaloneClusterManager) { config = config.withValue(GobblinClusterConfigurationKeys.STANDALONE_CLUSTER_MODE_KEY, ConfigValueFactory.fromAnyRef(true)); } try (GobblinClusterManager gobblinClusterManager = new GobblinClusterManager( cmd.getOptionValue(GobblinClusterConfigurationKeys.APPLICATION_NAME_OPTION_NAME), getApplicationId(), config, Optional.<Path>absent())) { // In AWS / Yarn mode, the 
cluster Launcher takes care of setting up Helix cluster /// .. but for Standalone mode, we go via this main() method, so setup the cluster here if (isStandaloneClusterManager) { // Create Helix cluster and connect to it String zkConnectionString = config.getString(GobblinClusterConfigurationKeys.ZK_CONNECTION_STRING_KEY); String helixClusterName = config.getString(GobblinClusterConfigurationKeys.HELIX_CLUSTER_NAME_KEY); HelixUtils.createGobblinHelixCluster(zkConnectionString, helixClusterName, false); LOGGER.info("Created Helix cluster " + helixClusterName); } gobblinClusterManager.start(); } } catch (ParseException pe) { printUsage(options); System.exit(1); } } @Override public void becomeActive() { startAppLauncherAndServices(); } @Override public void becomeStandby() { stopAppLauncherAndServices(); try { initializeAppLauncherAndServices(); } catch (Exception e) { throw new RuntimeException("Exception reinitializing app launcher services ", e); } } static class StopStatus { @Getter @Setter AtomicBoolean isStopInProgress; public StopStatus(boolean inProgress) { isStopInProgress = new AtomicBoolean(inProgress); } public void setStopInprogress (boolean inProgress) { isStopInProgress.set(inProgress); } public boolean isStopInProgress () { return isStopInProgress.get(); } } }
2,250
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/TaskRunnerSuiteBase.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.cluster; import java.util.Collection; import java.util.List; import java.util.Map; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.helix.HelixManager; import org.apache.helix.task.TaskFactory; import com.google.common.base.Optional; import com.google.common.collect.Lists; import com.google.common.util.concurrent.Service; import com.typesafe.config.Config; import lombok.Getter; import lombok.extern.slf4j.Slf4j; import org.apache.gobblin.annotation.Alpha; import org.apache.gobblin.instrumented.Instrumented; import org.apache.gobblin.instrumented.StandardMetricsBridge; import org.apache.gobblin.metrics.MetricContext; import org.apache.gobblin.util.ConfigUtils; /** * This suite class contains multiple components used by {@link GobblinTaskRunner}. * Here is the list of components it contains: * A {@link TaskFactory} : register Helix task state model. * A {@link MetricContext} : create task related metrics. * A {@link StandardMetricsBridge.StandardMetrics} : report task metrics. * A list of {@link Service} : register any runtime services necessary to run the tasks. 
*/ @Slf4j @Alpha public abstract class TaskRunnerSuiteBase { protected MetricContext metricContext; protected String applicationId; protected String applicationName; protected List<Service> services = Lists.newArrayList(); protected TaskRunnerSuiteBase(Builder builder) { this.metricContext = Instrumented.getMetricContext(ConfigUtils.configToState(builder.config), this.getClass()); this.applicationId = builder.getApplicationId(); this.applicationName = builder.getApplicationName(); } protected MetricContext getMetricContext() { return this.metricContext; } protected abstract Collection<StandardMetricsBridge.StandardMetrics> getMetricsCollection(); protected abstract Map<String, TaskFactory> getTaskFactoryMap(); protected abstract List<Service> getServices(); protected String getApplicationId() { return this.applicationId; } protected String getApplicationName() { return this.applicationName; } @Getter public static class Builder { private final Config config; private final Config dynamicConfig; private HelixManager jobHelixManager; private Optional<ContainerMetrics> containerMetrics; private FileSystem fs; private Path appWorkPath; private String applicationId; private String applicationName; private String instanceName; private String hostName; private String containerId; public Builder(Config config) { this.dynamicConfig = GobblinClusterUtils.getDynamicConfig(config); this.config = config; } public Builder setJobHelixManager(HelixManager jobHelixManager) { this.jobHelixManager = jobHelixManager; return this; } public Builder setApplicationName(String applicationName) { this.applicationName = applicationName; return this; } public Builder setInstanceName(String instanceName) { this.instanceName = instanceName; return this; } public Builder setContainerId (String containerId) { this.containerId = containerId; return this; } public Builder setHostName(String hostName) { this.hostName = hostName; return this; } public Builder setApplicationId(String applicationId) { 
this.applicationId = applicationId; return this; } public Builder setContainerMetrics(Optional<ContainerMetrics> containerMetrics) { this.containerMetrics = containerMetrics; return this; } public Builder setFileSystem(FileSystem fs) { this.fs = fs; return this; } public Builder setAppWorkPath(Path appWorkPath) { this.appWorkPath = appWorkPath; return this; } public TaskRunnerSuiteBase build() { if (getIsRunTaskInSeparateProcessEnabled(config)) { return new TaskRunnerSuiteProcessModel(this); } else { return new TaskRunnerSuiteThreadModel(this); } } private Boolean getIsRunTaskInSeparateProcessEnabled(Config config) { return ConfigUtils.getBoolean(config, GobblinClusterConfigurationKeys.ENABLE_TASK_IN_SEPARATE_PROCESS, false); } } }
2,251
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/InMemorySingleTaskRunner.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.cluster; import java.io.IOException; import org.apache.gobblin.runtime.util.StateStores; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import com.google.common.annotations.VisibleForTesting; import com.typesafe.config.Config; import com.typesafe.config.ConfigFactory; /** * An taskRunner for in-memory {@link SingleTask} that can be switched to run a meant-to-failed task. * This class is primarily used for testing purpose. */ public class InMemorySingleTaskRunner extends SingleTaskRunner { // Inject configuration by calling set method. private Config injectedConfig = ConfigFactory.empty(); public InMemorySingleTaskRunner(String clusterConfigFilePath, String jobId, String workUnitFilePath) { super(clusterConfigFilePath, jobId, workUnitFilePath); } @Override protected SingleTask createSingleTaskHelper(TaskAttemptBuilder taskAttemptBuilder, FileSystem fs, StateStores stateStores, Path jobStateFilePath, boolean fail) throws IOException { return !fail ? 
new InMemoryWuSingleTask(this.jobId, new Path(this.workUnitFilePath), jobStateFilePath, fs, taskAttemptBuilder, stateStores, GobblinClusterUtils.getDynamicConfig(this.clusterConfig).withFallback(injectedConfig)) : new InMemoryWuFailedSingleTask(this.jobId, new Path(this.workUnitFilePath), jobStateFilePath, fs, taskAttemptBuilder, stateStores, GobblinClusterUtils.getDynamicConfig(this.clusterConfig).withFallback(injectedConfig)); } @VisibleForTesting void setInjectedConfig(Config injectedConfig) { this.injectedConfig = injectedConfig; } }
2,252
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/SingleFailInCreationTask.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.cluster; import java.io.IOException; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import com.typesafe.config.Config; import org.apache.gobblin.runtime.TaskCreationException; import org.apache.gobblin.runtime.util.StateStores; /** * A simple extension for {@link SingleTask} to directly throw exception for the case of task-creation failure. * We need this since Helix couldn't handle failure before startTask call as part of state transition and trigger * task reassignment. */ public class SingleFailInCreationTask extends SingleTask { public SingleFailInCreationTask(String jobId, Path workUnitFilePath, Path jobStateFilePath, FileSystem fs, TaskAttemptBuilder taskAttemptBuilder, StateStores stateStores, Config dynamicConfig) { //Since this is a dummy task that is designed to fail immediately on run(), we skip fetching the job state. super(jobId, workUnitFilePath, jobStateFilePath, fs, taskAttemptBuilder, stateStores, dynamicConfig, true); } @Override public void run() throws IOException, InterruptedException { throw new TaskCreationException("Failing task directly due to fatal issue in task-creation"); } }
2,253
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/GobblinHelixMultiManager.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.cluster; import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ScheduledThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.function.Function; import org.apache.helix.HelixDataAccessor; import org.apache.helix.HelixAdmin; import org.apache.helix.HelixManager; import org.apache.helix.HelixProperty; import org.apache.helix.InstanceType; import org.apache.helix.NotificationContext; import org.apache.helix.api.listeners.ControllerChangeListener; import org.apache.helix.api.listeners.LiveInstanceChangeListener; import org.apache.helix.manager.zk.ZKHelixAdmin; import org.apache.helix.messaging.handling.HelixTaskResult; import org.apache.helix.messaging.handling.MessageHandler; import org.apache.helix.messaging.handling.MultiTypeMessageHandlerFactory; import org.apache.helix.model.LiveInstance; import org.apache.helix.model.Message; import org.apache.helix.task.TargetState; import org.apache.helix.task.TaskDriver; import org.apache.helix.task.WorkflowConfig; import 
com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.base.Throwables; import com.google.common.collect.ImmutableList; import com.google.common.collect.Lists; import com.google.common.eventbus.EventBus; import com.google.common.util.concurrent.MoreExecutors; import com.typesafe.config.Config; import lombok.Getter; import lombok.extern.slf4j.Slf4j; import org.apache.gobblin.cluster.event.ClusterManagerShutdownRequest; import org.apache.gobblin.configuration.ConfigurationKeys; import org.apache.gobblin.instrumented.Instrumented; import org.apache.gobblin.instrumented.StandardMetricsBridge; import org.apache.gobblin.metrics.ContextAwareHistogram; import org.apache.gobblin.metrics.MetricContext; import org.apache.gobblin.util.ConfigUtils; /** * Encapsulate all Helix related components: controller, participants, etc. * Provide all kinds of callbacks, listeners, message handlers that each Helix components need to register. */ @Slf4j public class GobblinHelixMultiManager implements StandardMetricsBridge { /** * Helix manager to handle cluster manager leader election. * Corresponds to cluster with key name {@link GobblinClusterConfigurationKeys#MANAGER_CLUSTER_NAME_KEY} iff dedicatedManagerCluster is true. * Corresponds to cluster with key name {@link GobblinClusterConfigurationKeys#HELIX_CLUSTER_NAME_KEY} iff dedicatedManagerCluster is false. */ @Getter private HelixManager managerClusterHelixManager = null; /** * Helix manager to handle job distribution. * Corresponds to cluster with key name {@link GobblinClusterConfigurationKeys#HELIX_CLUSTER_NAME_KEY}. */ @Getter private HelixManager jobClusterHelixManager = null; @Getter private HelixAdmin jobClusterHelixAdmin = null; /** * Helix manager to handle planning job distribution. * Corresponds to cluster with key name {@link GobblinClusterConfigurationKeys#HELIX_CLUSTER_NAME_KEY}. 
*/ @Getter private Optional<HelixManager> taskDriverHelixManager = Optional.empty(); /** * Helix controller for job distribution. Effective only iff below two conditions are established: * 1. In {@link GobblinHelixMultiManager#dedicatedManagerCluster} mode. * 2. {@link GobblinHelixMultiManager#dedicatedJobClusterController} is turned on. * Typically used for unit test and local deployment. */ private Optional<HelixManager> jobClusterController = Optional.empty(); /** * Helix controller for planning job distribution. Effective only iff below two conditions are established: * 1. In {@link GobblinHelixMultiManager#dedicatedManagerCluster} mode. * 2. {@link GobblinHelixMultiManager#dedicatedTaskDriverCluster} is turned on. * Typically used for unit test and local deployment. */ private Optional<HelixManager> taskDriverClusterController = Optional.empty(); /** * Separate manager cluster and job distribution cluster iff this flag is turned on. Otherwise {@link GobblinHelixMultiManager#jobClusterHelixManager} * is same as {@link GobblinHelixMultiManager#managerClusterHelixManager}. */ private boolean dedicatedManagerCluster = false; private boolean dedicatedTaskDriverCluster = false; /** * Create a dedicated controller for job distribution. 
*/ private boolean dedicatedJobClusterController = true; @Getter boolean isLeader = false; boolean isStandaloneMode = false; private final GobblinClusterManager.StopStatus stopStatus; private final Config config; private final EventBus eventBus; private final HelixManagerMetrics metrics; private final MultiTypeMessageHandlerFactory userDefinedMessageHandlerFactory; private final List<LeadershipChangeAwareComponent> leadershipChangeAwareComponents = Lists.newArrayList(); public GobblinHelixMultiManager( Config config, Function<Void, MultiTypeMessageHandlerFactory> messageHandlerFactoryFunction, EventBus eventBus, GobblinClusterManager.StopStatus stopStatus) { this.config = config; this.eventBus = eventBus; this.stopStatus = stopStatus; this.isStandaloneMode = ConfigUtils.getBoolean(config, GobblinClusterConfigurationKeys.STANDALONE_CLUSTER_MODE_KEY, GobblinClusterConfigurationKeys.DEFAULT_STANDALONE_CLUSTER_MODE); MetricContext metricContext = Instrumented.getMetricContext(ConfigUtils.configToState(config), this.getClass()); this.metrics = new HelixManagerMetrics(metricContext, this.config); this.dedicatedManagerCluster = ConfigUtils.getBoolean(config, GobblinClusterConfigurationKeys.DEDICATED_MANAGER_CLUSTER_ENABLED,false); this.dedicatedTaskDriverCluster = ConfigUtils.getBoolean(config, GobblinClusterConfigurationKeys.DEDICATED_TASK_DRIVER_CLUSTER_ENABLED, false); this.userDefinedMessageHandlerFactory = messageHandlerFactoryFunction.apply(null); initialize(); } protected void addLeadershipChangeAwareComponent (LeadershipChangeAwareComponent component) { this.leadershipChangeAwareComponents.add(component); } /** * Build the {@link HelixManager} for the Application Master. 
*/ protected static HelixManager buildHelixManager(Config config, String clusterName, InstanceType type) { Preconditions.checkArgument(config.hasPath(GobblinClusterConfigurationKeys.ZK_CONNECTION_STRING_KEY)); String zkConnectionString = config.getString(GobblinClusterConfigurationKeys.ZK_CONNECTION_STRING_KEY); log.info("Using ZooKeeper connection string: " + zkConnectionString); String helixInstanceName = ConfigUtils.getString(config, GobblinClusterConfigurationKeys.HELIX_INSTANCE_NAME_KEY, GobblinClusterManager.class.getSimpleName()); return GobblinHelixManagerFactory.getZKHelixManager( config.getString(clusterName), helixInstanceName, type, zkConnectionString); } /** * Build the {@link org.apache.helix.HelixAdmin} for the AM */ protected static HelixAdmin buildHelixAdmin(Config cfg) { String zkConnectionString = cfg.getString(GobblinClusterConfigurationKeys.ZK_CONNECTION_STRING_KEY); return new ZKHelixAdmin.Builder() .setZkAddress(zkConnectionString) .build(); } public void initialize() { if (this.dedicatedManagerCluster) { Preconditions.checkArgument(this.config.hasPath(GobblinClusterConfigurationKeys.MANAGER_CLUSTER_NAME_KEY)); log.info("We will use separate clusters to manage GobblinClusterManager and job distribution."); // This will create and register a Helix controller in ZooKeeper this.managerClusterHelixManager = buildHelixManager(this.config, GobblinClusterConfigurationKeys.MANAGER_CLUSTER_NAME_KEY, InstanceType.CONTROLLER); // This will create a Helix administrator to dispatch jobs to ZooKeeper this.jobClusterHelixManager = buildHelixManager(this.config, GobblinClusterConfigurationKeys.HELIX_CLUSTER_NAME_KEY, InstanceType.ADMINISTRATOR); this.jobClusterHelixAdmin = buildHelixAdmin(this.config); // This will create a dedicated controller for job distribution this.dedicatedJobClusterController = ConfigUtils.getBoolean( this.config, GobblinClusterConfigurationKeys.DEDICATED_JOB_CLUSTER_CONTROLLER_ENABLED, true); if (this.dedicatedJobClusterController) { 
this.jobClusterController = Optional.of(GobblinHelixMultiManager .buildHelixManager(this.config, GobblinClusterConfigurationKeys.HELIX_CLUSTER_NAME_KEY, InstanceType.CONTROLLER)); } if (this.dedicatedTaskDriverCluster) { // This will create a Helix administrator to dispatch jobs to ZooKeeper this.taskDriverHelixManager = Optional.of(buildHelixManager(this.config, GobblinClusterConfigurationKeys.TASK_DRIVER_CLUSTER_NAME_KEY, InstanceType.ADMINISTRATOR)); /** * Create a dedicated controller for planning job distribution. */ boolean dedicatedTaskDriverClusterController = ConfigUtils .getBoolean(this.config, GobblinClusterConfigurationKeys.DEDICATED_TASK_DRIVER_CLUSTER_CONTROLLER_ENABLED, true); // This will create a dedicated controller for planning job distribution if (dedicatedTaskDriverClusterController) { this.taskDriverClusterController = Optional.of(GobblinHelixMultiManager .buildHelixManager(this.config, GobblinClusterConfigurationKeys.TASK_DRIVER_CLUSTER_NAME_KEY, InstanceType.CONTROLLER)); } } } else { log.info("We will use same cluster to manage GobblinClusterManager and job distribution."); // This will create and register a Helix controller in ZooKeeper boolean isHelixClusterManaged = ConfigUtils.getBoolean(this.config, GobblinClusterConfigurationKeys.IS_HELIX_CLUSTER_MANAGED, GobblinClusterConfigurationKeys.DEFAULT_IS_HELIX_CLUSTER_MANAGED); this.managerClusterHelixManager = buildHelixManager(this.config, GobblinClusterConfigurationKeys.HELIX_CLUSTER_NAME_KEY, isHelixClusterManaged ? 
InstanceType.PARTICIPANT : InstanceType.CONTROLLER); this.jobClusterHelixManager = this.managerClusterHelixManager; this.jobClusterHelixAdmin = buildHelixAdmin(this.config); } } @VisibleForTesting protected void connect() { try { this.isLeader = false; this.managerClusterHelixManager.connect(); if (this.dedicatedManagerCluster) { if (jobClusterController.isPresent()) { this.jobClusterController.get().connect(); } if (this.dedicatedTaskDriverCluster) { if (taskDriverClusterController.isPresent()) { this.taskDriverClusterController.get().connect(); } } this.jobClusterHelixManager.connect(); if (this.taskDriverHelixManager.isPresent()) { this.taskDriverHelixManager.get().connect(); } } this.jobClusterHelixManager.addLiveInstanceChangeListener(new GobblinLiveInstanceChangeListener()); this.jobClusterHelixManager.getMessagingService() .registerMessageHandlerFactory(Message.MessageType.USER_DEFINE_MSG.toString(), userDefinedMessageHandlerFactory); this.jobClusterHelixManager.getMessagingService() .registerMessageHandlerFactory(GobblinHelixConstants.SHUTDOWN_MESSAGE_TYPE, new ControllerShutdownMessageHandlerFactory()); // standalone mode listens for controller change if (this.isStandaloneMode) { // Subscribe to leadership changes this.managerClusterHelixManager.addControllerListener((ControllerChangeListener) this::handleLeadershipChange); } } catch (Exception e) { log.error("HelixManager failed to connect", e); throw Throwables.propagate(e); } } protected boolean isConnected() { return managerClusterHelixManager.isConnected() && jobClusterHelixManager.isConnected(); } protected void disconnect() { if (managerClusterHelixManager.isConnected()) { this.managerClusterHelixManager.disconnect(); } if (this.dedicatedManagerCluster) { if (jobClusterHelixManager.isConnected()) { this.jobClusterHelixManager.disconnect(); } if (taskDriverHelixManager.isPresent()) { this.taskDriverHelixManager.get().disconnect(); } if (jobClusterController.isPresent() && 
jobClusterController.get().isConnected()) { this.jobClusterController.get().disconnect(); } if (taskDriverClusterController.isPresent() && taskDriverClusterController.get().isConnected()) { this.taskDriverClusterController.get().disconnect(); } } } /** * A custom implementation of {@link LiveInstanceChangeListener}. */ private static class GobblinLiveInstanceChangeListener implements LiveInstanceChangeListener { @Override public void onLiveInstanceChange(List<LiveInstance> liveInstances, NotificationContext changeContext) { if (log.isDebugEnabled()) { for (LiveInstance liveInstance : liveInstances) { log.debug("Live Helix participant instance: " + liveInstance.getInstanceName()); } } } } /** * Handle leadership change. * The applicationLauncher is only started on the leader. * The leader cleans up existing jobs before starting the applicationLauncher. * @param changeContext notification context */ @VisibleForTesting void handleLeadershipChange(NotificationContext changeContext) { this.metrics.clusterLeadershipChange.update(1); if (this.managerClusterHelixManager.isLeader()) { // can get multiple notifications on a leadership change, // so only start the application launcher the first time // the notification is received log.info("Leader notification for {} isLeader {} HM.isLeader {}", managerClusterHelixManager.getInstanceName(), isLeader, managerClusterHelixManager.isLeader()); if (!isLeader) { log.info("New Helix Controller leader {}", this.managerClusterHelixManager.getInstanceName()); cleanUpJobs(); for (LeadershipChangeAwareComponent c: this.leadershipChangeAwareComponents) { c.becomeActive(); } isLeader = true; } } else { // stop and reinitialize services since they are not restartable // this prepares them to start when this cluster manager becomes a leader if (isLeader) { isLeader = false; for (LeadershipChangeAwareComponent c: this.leadershipChangeAwareComponents) { c.becomeStandby(); } } } } /** * Delete jobs from the helix cluster */ @VisibleForTesting 
public void cleanUpJobs() { cleanUpJobs(this.jobClusterHelixManager); this.taskDriverHelixManager.ifPresent(this::cleanUpJobs); } private void cleanUpJobs(HelixManager helixManager) { // Clean up existing jobs TaskDriver taskDriver = new TaskDriver(helixManager); Map<String, WorkflowConfig> workflows = taskDriver.getWorkflows(); log.debug("cleanUpJobs workflow count {} workflows {}", workflows.size(), workflows.keySet()); boolean cleanupDistJobs = ConfigUtils.getBoolean(this.config, GobblinClusterConfigurationKeys.CLEAN_ALL_DIST_JOBS, GobblinClusterConfigurationKeys.DEFAULT_CLEAN_ALL_DIST_JOBS); for (Map.Entry<String, WorkflowConfig> entry : workflows.entrySet()) { String workflowName = entry.getKey(); if (workflowName.contains(GobblinClusterConfigurationKeys.PLANNING_JOB_NAME_PREFIX) || workflowName.contains(GobblinClusterConfigurationKeys.ACTUAL_JOB_NAME_PREFIX)) { if (!cleanupDistJobs) { log.info("Distributed job {} won't be deleted.", workflowName); continue; } } WorkflowConfig workflowConfig = entry.getValue(); // request delete if not already requested if (workflowConfig.getTargetState() != TargetState.DELETE) { taskDriver.delete(workflowName); log.info("Requested delete of workflowName {}", workflowName); } } } /** * A custom {@link MultiTypeMessageHandlerFactory} for {@link MessageHandler}s that handle messages of type * "SHUTDOWN" for shutting down the controller. 
*/ private class ControllerShutdownMessageHandlerFactory implements MultiTypeMessageHandlerFactory { @Override public MessageHandler createHandler(Message message, NotificationContext context) { return new ControllerShutdownMessageHandler(message, context); } @Override public String getMessageType() { return GobblinHelixConstants.SHUTDOWN_MESSAGE_TYPE; } public List<String> getMessageTypes() { return Collections.singletonList(getMessageType()); } @Override public void reset() { } /** * A custom {@link MessageHandler} for handling messages of sub type * {@link HelixMessageSubTypes#APPLICATION_MASTER_SHUTDOWN}. */ private class ControllerShutdownMessageHandler extends MessageHandler { public ControllerShutdownMessageHandler(Message message, NotificationContext context) { super(message, context); } @Override public HelixTaskResult handleMessage() { String messageSubType = this._message.getMsgSubType(); Preconditions.checkArgument( messageSubType.equalsIgnoreCase(HelixMessageSubTypes.APPLICATION_MASTER_SHUTDOWN.toString()), String.format("Unknown %s message subtype: %s", GobblinHelixConstants.SHUTDOWN_MESSAGE_TYPE, messageSubType)); HelixTaskResult result = new HelixTaskResult(); if (stopStatus.isStopInProgress()) { result.setSuccess(true); return result; } log.info("Handling message " + HelixMessageSubTypes.APPLICATION_MASTER_SHUTDOWN.toString()); ScheduledExecutorService shutdownMessageHandlingCompletionWatcher = MoreExecutors.getExitingScheduledExecutorService(new ScheduledThreadPoolExecutor(1)); // Schedule the task for watching on the removal of the shutdown message, which indicates that // the message has been successfully processed and it's safe to disconnect the HelixManager. // This is a hacky way of watching for the completion of processing the shutdown message and // should be replaced by a fix to https://issues.apache.org/jira/browse/HELIX-611. 
shutdownMessageHandlingCompletionWatcher.scheduleAtFixedRate(new Runnable() { @Override public void run() { HelixManager helixManager = _notificationContext.getManager(); HelixDataAccessor helixDataAccessor = helixManager.getHelixDataAccessor(); HelixProperty helixProperty = helixDataAccessor .getProperty(_message.getKey(helixDataAccessor.keyBuilder(), helixManager.getInstanceName())); // The absence of the shutdown message indicates it has been removed if (helixProperty == null) { eventBus.post(new ClusterManagerShutdownRequest()); } } }, 0, 1, TimeUnit.SECONDS); result.setSuccess(true); return result; } @Override public void onError(Exception e, ErrorCode code, ErrorType type) { log.error( String.format("Failed to handle message with exception %s, error code %s, error type %s", e, code, type)); } } } /** * A custom {@link MultiTypeMessageHandlerFactory} for {@link ControllerUserDefinedMessageHandler}s that * handle messages of type {@link org.apache.helix.model.Message.MessageType#USER_DEFINE_MSG}. */ static class ControllerUserDefinedMessageHandlerFactory implements MultiTypeMessageHandlerFactory { @Override public MessageHandler createHandler(Message message, NotificationContext context) { return new ControllerUserDefinedMessageHandler(message, context); } @Override public String getMessageType() { return Message.MessageType.USER_DEFINE_MSG.toString(); } public List<String> getMessageTypes() { return Collections.singletonList(getMessageType()); } @Override public void reset() { } /** * A custom {@link MessageHandler} for handling user-defined messages to the controller. * * <p> * Currently does not handle any user-defined messages. If this class is passed a custom message, it will simply * print out a warning and return successfully. Sub-classes of {@link GobblinClusterManager} should override * {@link GobblinClusterManager#getUserDefinedMessageHandlerFactory()}. 
* </p> */ private static class ControllerUserDefinedMessageHandler extends MessageHandler { public ControllerUserDefinedMessageHandler(Message message, NotificationContext context) { super(message, context); } @Override public HelixTaskResult handleMessage() { log.warn(String .format("No handling setup for %s message of subtype: %s", Message.MessageType.USER_DEFINE_MSG.toString(), this._message.getMsgSubType())); HelixTaskResult helixTaskResult = new HelixTaskResult(); helixTaskResult.setSuccess(true); return helixTaskResult; } @Override public void onError(Exception e, ErrorCode code, ErrorType type) { log.error( String.format("Failed to handle message with exception %s, error code %s, error type %s", e, code, type)); } } } /** * Helix related metrics */ private static class HelixManagerMetrics extends StandardMetricsBridge.StandardMetrics { public static final String CLUSTER_LEADERSHIP_CHANGE = "clusterLeadershipChange"; private final ContextAwareHistogram clusterLeadershipChange; public HelixManagerMetrics(final MetricContext metricContext, final Config config) { int timeWindowSizeInMinutes = ConfigUtils.getInt(config, ConfigurationKeys.METRIC_TIMER_WINDOW_SIZE_IN_MINUTES, ConfigurationKeys.DEFAULT_METRIC_TIMER_WINDOW_SIZE_IN_MINUTES); this.clusterLeadershipChange = metricContext.contextAwareHistogram(CLUSTER_LEADERSHIP_CHANGE, timeWindowSizeInMinutes, TimeUnit.MINUTES); this.contextAwareMetrics.add(clusterLeadershipChange); } @Override public String getName() { return GobblinClusterManager.class.getName(); } } @Override public Collection<StandardMetrics> getStandardMetricsCollection() { return ImmutableList.of(this.metrics); } }
2,254
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/SingleTaskRunnerMainOptions.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.cluster;

import java.io.PrintWriter;
import java.util.Map;

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.common.collect.ImmutableMap;


/**
 * Command-line options for {@link SingleTaskRunnerMain}.
 *
 * <p>All three options (job id, work unit file path, cluster config file path) are required
 * long options that each take an argument. Parsing happens eagerly in the constructor; when
 * the arguments cannot be parsed, the expected usage is printed to the supplied writer and a
 * {@link GobblinClusterException} is thrown.</p>
 */
class SingleTaskRunnerMainOptions {
  private static final Logger logger = LoggerFactory.getLogger(SingleTaskRunnerMainOptions.class);

  static final String CLUSTER_CONFIG_FILE_PATH = "cluster_config_file_path";
  static final String WORK_UNIT_FILE_PATH = "work_unit_file_path";
  static final String JOB_ID = "job_id";

  // Long-option name -> human-readable description; drives both option
  // construction and the usage help output.
  private static final ImmutableMap<String, String> OPTIONS_MAP = ImmutableMap
      .of(JOB_ID, "job id",
          WORK_UNIT_FILE_PATH, "work unit file path",
          CLUSTER_CONFIG_FILE_PATH, "cluster configuration file path");

  private static final int CHARACTERS_PER_LINE = 80;

  private final PrintWriter writer;
  private CommandLine cmd;
  private Options options;

  SingleTaskRunnerMainOptions(final String[] args, final PrintWriter writer) {
    this.writer = writer;
    parseArguments(args);
  }

  /** Parses {@code args} against the expected options, failing fast on bad input. */
  private void parseArguments(final String[] args) {
    this.options = createExpectedOptions();
    try {
      this.cmd = new DefaultParser().parse(this.options, args);
    } catch (final ParseException e) {
      logger.error("failed to parse command options.", e);
      printUsage(this.options);
      throw new GobblinClusterException("Failed to parse command line options", e);
    }
  }

  /** Builds the set of required long options from {@link #OPTIONS_MAP}. */
  private Options createExpectedOptions() {
    final Options expected = new Options();
    OPTIONS_MAP.forEach((longOpt, description) -> {
      // No short option name is used, hence the null passed to the builder.
      final Option option = Option.builder(null)
          .required(true)
          .longOpt(longOpt)
          .desc(description)
          .hasArg()
          .build();
      expected.addOption(option);
    });
    return expected;
  }

  /** Writes a one-line usage summary for {@code options} to the configured writer. */
  private void printUsage(final Options options) {
    final HelpFormatter formatter = new HelpFormatter();
    formatter.printUsage(this.writer, CHARACTERS_PER_LINE,
        SingleTaskRunnerMain.class.getSimpleName(), options);
  }

  String getJobId() {
    return this.cmd.getOptionValue(JOB_ID);
  }

  String getWorkUnitFilePath() {
    return this.cmd.getOptionValue(WORK_UNIT_FILE_PATH);
  }

  String getClusterConfigFilePath() {
    return this.cmd.getOptionValue(CLUSTER_CONFIG_FILE_PATH);
  }
}
2,255
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/GobblinTaskStateModel.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.cluster; import java.util.Map; import java.util.concurrent.ScheduledExecutorService; import org.apache.helix.HelixManager; import org.apache.helix.task.TaskFactory; import org.apache.helix.task.TaskStateModel; import org.apache.gobblin.annotation.Alpha; /** * A state model for a Gobblin task that implements all supported state transitions. * * <p> * This class is currently not used but may get used in the future if we decide to plugin our own * custom {@link TaskStateModel}. So currently this is like a place holder class. * </p> * * @author Yinan Li */ @Alpha public class GobblinTaskStateModel extends TaskStateModel { private final ScheduledExecutorService taskExecutor; public GobblinTaskStateModel(HelixManager helixManager, Map<String, TaskFactory> taskFactoryRegistry, ScheduledExecutorService taskExecutor) { super(helixManager, taskFactoryRegistry, taskExecutor); this.taskExecutor = taskExecutor; } }
2,256
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/TaskRunnerSuiteProcessModel.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.cluster;

import java.util.Collection;
import java.util.List;
import java.util.Map;

import org.apache.helix.task.TaskCallbackContext;
import org.apache.helix.task.TaskFactory;

import com.google.common.collect.ImmutableList;
import com.google.common.collect.Maps;
import com.google.common.util.concurrent.Service;

import lombok.extern.slf4j.Slf4j;

import org.apache.gobblin.instrumented.StandardMetricsBridge;


/**
 * A {@link TaskRunnerSuiteBase} variant that executes every task in a separate JVM.
 *
 * <p>Process creation is delegated to {@link HelixTaskFactory#createNewTask(TaskCallbackContext)}.</p>
 */
@Slf4j
class TaskRunnerSuiteProcessModel extends TaskRunnerSuiteBase {

  private final HelixTaskFactory taskFactory;

  TaskRunnerSuiteProcessModel(TaskRunnerSuiteBase.Builder builder) {
    super(builder);
    log.info("Running a task in a separate process is enabled.");
    this.taskFactory = new HelixTaskFactory(
        builder.getContainerMetrics(),
        GobblinTaskRunner.CLUSTER_CONF_PATH,
        builder.getConfig());
  }

  /** The per-process model exposes no suite-level metrics of its own. */
  @Override
  protected Collection<StandardMetricsBridge.StandardMetrics> getMetricsCollection() {
    return ImmutableList.of();
  }

  /** Registers the process-spawning task factory under the standard Gobblin task factory name. */
  @Override
  protected Map<String, TaskFactory> getTaskFactoryMap() {
    Map<String, TaskFactory> factories = Maps.newHashMap();
    factories.put(GobblinTaskRunner.GOBBLIN_TASK_FACTORY_NAME, this.taskFactory);
    //TODO: taskFactoryMap.put(GOBBLIN_JOB_FACTORY_NAME, jobFactory);
    return factories;
  }

  @Override
  protected List<Service> getServices() {
    return this.services;
  }
}
2,257
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/GobblinHelixJobTask.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.cluster;

import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Properties;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;

import org.apache.commons.lang.exception.ExceptionUtils;
import org.apache.hadoop.fs.Path;
import org.apache.helix.HelixException;
import org.apache.helix.HelixManager;
import org.apache.helix.task.Task;
import org.apache.helix.task.TaskCallbackContext;
import org.apache.helix.task.TaskConfig;
import org.apache.helix.task.TaskResult;

import com.google.common.collect.ImmutableMap;
import com.google.common.io.Closer;
import com.typesafe.config.Config;

import lombok.SneakyThrows;
import lombok.extern.slf4j.Slf4j;

import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.instrumented.StandardMetricsBridge;
import org.apache.gobblin.metrics.ContextAwareTimer;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.Tag;
import org.apache.gobblin.runtime.JobException;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.PropertiesUtils;


/**
 * An implementation of Helix's {@link org.apache.helix.task.Task} that runs the original
 * {@link GobblinHelixJobLauncher}.
 *
 * <p>This "planning job" task is dispatched by Helix; when run, it launches (and if needed
 * re-launches on early stop) an "actual job" via {@link GobblinHelixJobLauncher}, recording
 * the planning-id/actual-id relationship in {@link HelixJobsMapping}.</p>
 */
@Slf4j
class GobblinHelixJobTask implements Task {
  // Helix task configuration for this planning job; its config map carries the
  // JOB_PROPS_PREFIX-prefixed job properties (see constructor).
  private final TaskConfig taskConfig;
  private final Config sysConfig;
  // System config flattened to Properties, overlaid with the job properties
  // extracted from the Helix task config.
  private final Properties jobPlusSysConfig;
  // State store mapping job URI -> (planning job id, actual job id).
  private final HelixJobsMapping jobsMapping;
  private final String applicationName;
  private final String instanceName;
  private final String planningJobId;
  private final HelixManager jobHelixManager;
  private final Path appWorkDir;
  private final String jobUri;
  private final List<? extends Tag<?>> metadataTags;
  // Set in run(); read by cancel(), which may be invoked from another thread by Helix.
  private GobblinHelixJobLauncher launcher;
  private GobblinHelixJobTaskMetrics jobTaskMetrics;
  private GobblinHelixMetrics helixMetrics;
  private GobblinHelixJobLauncherListener jobLauncherListener;

  /**
   * Builds the task from the Helix callback context plus suite-level collaborators.
   *
   * <p>Job properties are recovered from the Helix {@link TaskConfig} config map: every key
   * starting with {@link GobblinHelixDistributeJobExecutionLauncher#JOB_PROPS_PREFIX} is
   * stripped of the prefix and merged into the system properties. A missing
   * {@code PLANNING_ID_KEY} is treated as a programming error and fails fast.</p>
   */
  public GobblinHelixJobTask (TaskCallbackContext context,
                              HelixJobsMapping jobsMapping,
                              TaskRunnerSuiteBase.Builder builder,
                              GobblinHelixJobLauncherMetrics launcherMetrics,
                              GobblinHelixJobTaskMetrics jobTaskMetrics,
                              GobblinHelixMetrics helixMetrics) {
    this.applicationName = builder.getApplicationName();
    this.instanceName = builder.getInstanceName();
    this.jobTaskMetrics = jobTaskMetrics;
    this.helixMetrics = helixMetrics;
    this.taskConfig = context.getTaskConfig();
    this.sysConfig = builder.getConfig();
    this.jobHelixManager = builder.getJobHelixManager();
    this.jobPlusSysConfig = ConfigUtils.configToProperties(sysConfig);
    this.jobLauncherListener = new GobblinHelixJobLauncherListener(launcherMetrics);

    // Overlay job-specific properties (prefixed in the Helix task config) onto the
    // system properties; later keys win over the sysConfig-derived values.
    Map<String, String> configMap = this.taskConfig.getConfigMap();
    for (Map.Entry<String, String> entry: configMap.entrySet()) {
      if (entry.getKey().startsWith(GobblinHelixDistributeJobExecutionLauncher.JOB_PROPS_PREFIX)) {
        String key = entry.getKey().replaceFirst(GobblinHelixDistributeJobExecutionLauncher.JOB_PROPS_PREFIX, "");
        jobPlusSysConfig.put(key, entry.getValue());
      }
    }

    if (!jobPlusSysConfig.containsKey(GobblinClusterConfigurationKeys.PLANNING_ID_KEY)) {
      throw new RuntimeException("Job doesn't have planning ID");
    }

    this.jobUri = jobPlusSysConfig.getProperty(GobblinClusterConfigurationKeys.JOB_SPEC_URI);
    this.planningJobId = jobPlusSysConfig.getProperty(GobblinClusterConfigurationKeys.PLANNING_ID_KEY);
    this.jobsMapping = jobsMapping;
    this.appWorkDir = builder.getAppWorkPath();
    this.metadataTags = Tag.fromMap(new ImmutableMap.Builder<String, Object>()
        .put(GobblinClusterMetricTagNames.APPLICATION_NAME, builder.getApplicationName())
        .put(GobblinClusterMetricTagNames.APPLICATION_ID, builder.getApplicationId())
        .build());
  }

  /** Metrics for planning-job tasks, currently tracking submission-to-execution latency. */
  static class GobblinHelixJobTaskMetrics extends StandardMetricsBridge.StandardMetrics {
    static final String TIME_BETWEEN_JOB_SUBMISSION_AND_EXECUTION = "timeBetweenJobSubmissionAndExecution";
    final ContextAwareTimer timeBetweenJobSubmissionAndExecution;

    public GobblinHelixJobTaskMetrics(MetricContext metricContext, int windowSizeInMin) {
      timeBetweenJobSubmissionAndExecution = metricContext.contextAwareTimer(TIME_BETWEEN_JOB_SUBMISSION_AND_EXECUTION,
          windowSizeInMin, TimeUnit.MINUTES);
      this.contextAwareMetrics.add(timeBetweenJobSubmissionAndExecution);
    }

    /**
     * Records the elapsed time between planning-job creation (the
     * {@code PLANNING_JOB_CREATE_TIME} property, epoch millis) and now.
     * A missing/zero create time is silently skipped.
     */
    public void updateTimeBetweenJobSubmissionAndExecution(Properties jobProps) {
      long jobSubmitTime = Long.parseLong(jobProps.getProperty(GobblinClusterConfigurationKeys.PLANNING_JOB_CREATE_TIME, "0"));
      if (jobSubmitTime != 0) {
        Instrumented.updateTimer(com.google.common.base.Optional.of(this.timeBetweenJobSubmissionAndExecution),
            System.currentTimeMillis() - jobSubmitTime,
            TimeUnit.MILLISECONDS);
      }
    }
  }

  /** Creates the launcher for the actual job using the merged job + system properties. */
  private GobblinHelixJobLauncher createJobLauncher()
      throws Exception {
    return new GobblinHelixJobLauncher(jobPlusSysConfig,
        this.jobHelixManager,
        this.appWorkDir,
        this.metadataTags,
        new ConcurrentHashMap<>(),
        Optional.of(this.helixMetrics));
  }

  /**
   * Launch the actual {@link GobblinHelixJobLauncher}.
   *
   * <p>Flow: verify the planning id in the state store matches this task's; if a previous
   * actual job exists and is unfinished, delete its workflow first; then launch a new actual
   * job, looping while the launcher reports an early stop. The jobs-mapping entry for this
   * job URI is always deleted in the finally block.</p>
   *
   * <p>NOTE(review): the finally block both disconnects the HelixManager and may RETURN a
   * FAILED result, which overrides whatever the try/catch produced — intentional "cleanup
   * failure trumps job result" semantics, presumably; confirm before changing.</p>
   */
  @SneakyThrows
  @Override
  public TaskResult run() {
    log.info("Running planning job {} [{} {}]", this.planningJobId, this.applicationName, this.instanceName);
    this.jobTaskMetrics.updateTimeBetweenJobSubmissionAndExecution(this.jobPlusSysConfig);
    this.jobHelixManager.connect();

    try (Closer closer = Closer.create()) {
      Optional<String> planningIdFromStateStore = this.jobsMapping.getPlanningJobId(jobUri);
      // timeOut is converted from seconds (config) to milliseconds here.
      long timeOut = PropertiesUtils.getPropAsLong(jobPlusSysConfig,
          GobblinClusterConfigurationKeys.HELIX_WORKFLOW_DELETE_TIMEOUT_SECONDS,
          GobblinClusterConfigurationKeys.DEFAULT_HELIX_WORKFLOW_DELETE_TIMEOUT_SECONDS) * 1000;

      // A different planning id in the state store means another planning job owns this
      // job URI; bail out instead of racing it.
      if (planningIdFromStateStore.isPresent() && !planningIdFromStateStore.get().equals(this.planningJobId)) {
        return new TaskResult(TaskResult.Status.FAILED, "Exception occurred for job " + planningJobId + ": because "
            + "planning job in state store has different id (" + planningIdFromStateStore.get() + ")");
      }

      while (true) {
        Optional<String> actualJobIdFromStateStore = this.jobsMapping.getActualJobId(jobUri);
        if (actualJobIdFromStateStore.isPresent()) {
          String previousActualJobId = actualJobIdFromStateStore.get();
          // NOTE(review): the same id is passed for both the workflow name and the job
          // name — assumes actual-job workflow and job share the id; TODO confirm.
          if (HelixUtils.isJobFinished(previousActualJobId, previousActualJobId, this.jobHelixManager)) {
            log.info("Previous actual job {} [plan: {}] finished, will launch a new job.", previousActualJobId, this.planningJobId);
          } else {
            log.info("Previous actual job {} [plan: {}] not finished, kill it now.", previousActualJobId, this.planningJobId);
            try {
              HelixUtils.deleteWorkflow(previousActualJobId, this.jobHelixManager, timeOut);
            } catch (HelixException e) {
              log.error("Helix cannot delete previous actual job id {} within {} seconds.", previousActualJobId, timeOut / 1000, e);
              return new TaskResult(TaskResult.Status.FAILED, ExceptionUtils.getFullStackTrace(e));
            }
          }
        } else {
          log.info("No previous actual job [plan: {}]. First time run.", this.planningJobId);
        }

        String actualJobId = HelixJobsMapping.createActualJobId(jobPlusSysConfig);
        log.info("Planning job {} creates actual job {}", this.planningJobId, actualJobId);
        this.jobPlusSysConfig.setProperty(ConfigurationKeys.JOB_ID_KEY, actualJobId);

        this.launcher = createJobLauncher();
        // Persist the mapping before launching so a crash leaves a discoverable actual id.
        this.jobsMapping.setActualJobId(jobUri, this.planningJobId, this.launcher.getJobId());

        closer.register(launcher).launchJob(this.jobLauncherListener);
        // An "early stop" means the launcher intentionally stopped before finishing all
        // work; loop to launch the remaining runs under a fresh actual job id.
        if (!this.launcher.isEarlyStopped()) {
          break;
        } else {
          log.info("Planning job {} has more runs due to early stop.", this.planningJobId);
        }
      }

      log.info("Completing planning job {}", this.planningJobId);
      return new TaskResult(TaskResult.Status.COMPLETED, "");
    } catch (Exception e) {
      log.warn("Failing planning job {}", this.planningJobId, e);
      return new TaskResult(TaskResult.Status.FAILED, "Exception occurred for job " + planningJobId + ":" + ExceptionUtils
          .getFullStackTrace(e));
    } finally {
      this.jobHelixManager.disconnect();
      // always cleanup the job mapping for current job name.
      try {
        this.jobsMapping.deleteMapping(jobUri);
      } catch (Exception e) {
        log.warn("Failed to delete jobs mapping for job: {}", jobUri, e);
        // This return overrides any result/exception from the try block above.
        return new TaskResult(TaskResult.Status.FAILED,"Cannot delete jobs mapping for job : " + jobUri);
      }
    }
  }

  /**
   * Cancels the running planning job by cancelling the underlying launcher (which cancels
   * the Helix job submitted by {@link #run()}), then removes the jobs-mapping entry.
   * No-op when {@link #run()} has not yet created a launcher.
   */
  @Override
  public void cancel() {
    log.info("Cancelling planning job {}", this.planningJobId);
    if (launcher != null) {
      try {
        // Cancels the actual Helix job that run() submitted via the launcher.
        launcher.cancelJob(this.jobLauncherListener);
      } catch (JobException e) {
        throw new RuntimeException("Unable to cancel planning job " + this.planningJobId + ": ", e);
      } finally {
        // always cleanup the job mapping for current job name.
        try {
          this.jobsMapping.deleteMapping(jobUri);
        } catch (Exception e) {
          throw new RuntimeException("Cannot delete jobs mapping for job : " + jobUri, e);
        }
      }
    }
  }
}
2,258
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/StreamingJobConfigurationManager.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.cluster;

import java.lang.reflect.InvocationTargetException;
import java.util.Collection;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

import org.apache.commons.lang3.tuple.Pair;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.common.base.Optional;
import com.google.common.collect.ImmutableList;
import com.google.common.eventbus.EventBus;
import com.google.common.util.concurrent.Service;
import com.typesafe.config.Config;

import lombok.Getter;

import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.instrumented.StandardMetricsBridge;
import org.apache.gobblin.runtime.api.JobSpec;
import org.apache.gobblin.runtime.api.MutableJobCatalog;
import org.apache.gobblin.runtime.api.Spec;
import org.apache.gobblin.runtime.api.SpecConsumer;
import org.apache.gobblin.runtime.api.SpecExecutor;
import org.apache.gobblin.util.ClassAliasResolver;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.ExecutorsUtils;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;


/**
 * A {@link JobConfigurationManager} that fetches job specs from a {@link SpecConsumer} in a
 * continuous loop on a dedicated single-threaded executor, and posts the corresponding
 * add/update/delete/cancel notifications for each changed spec.
 *
 * <p>The concrete {@link SpecConsumer} is instantiated reflectively from the
 * {@code SPEC_CONSUMER_CLASS_KEY} config (class name or alias), falling back to
 * {@code DEFAULT_STREAMING_SPEC_CONSUMER_CLASS}. If the consumer is also a Guava
 * {@link Service}, it is started after the fetch loop (see {@link #startUp()} for why
 * ordering matters) and stopped with a bounded wait on shutdown.</p>
 */
@Alpha
public class StreamingJobConfigurationManager extends JobConfigurationManager {
  private static final Logger LOGGER = LoggerFactory.getLogger(StreamingJobConfigurationManager.class);

  // Single thread that drains the consumer's changed-spec stream in a loop.
  private final ExecutorService fetchJobSpecExecutor;

  @Getter
  private final SpecConsumer specConsumer;

  // Maximum seconds to wait for the consumer Service to terminate during shutDown().
  private final long stopTimeoutSeconds;

  /**
   * @param eventBus   bus on which job-config arrival events are posted (via the superclass)
   * @param config     cluster configuration; supplies the consumer class and stop timeout
   * @param jobCatalog catalog passed to the consumer constructor when it accepts one
   * @throws RuntimeException when the configured {@link SpecConsumer} cannot be constructed
   */
  public StreamingJobConfigurationManager(EventBus eventBus, Config config, MutableJobCatalog jobCatalog) {
    super(eventBus, config);

    this.stopTimeoutSeconds = ConfigUtils.getLong(config, GobblinClusterConfigurationKeys.STOP_TIMEOUT_SECONDS,
        GobblinClusterConfigurationKeys.DEFAULT_STOP_TIMEOUT_SECONDS);

    this.fetchJobSpecExecutor = Executors.newSingleThreadExecutor(
        ExecutorsUtils.newThreadFactory(Optional.of(LOGGER), Optional.of("FetchJobSpecExecutor")));

    String specExecutorInstanceConsumerClassName =
        ConfigUtils.getString(config, GobblinClusterConfigurationKeys.SPEC_CONSUMER_CLASS_KEY,
            GobblinClusterConfigurationKeys.DEFAULT_STREAMING_SPEC_CONSUMER_CLASS);

    // Fixed garbled message ("SpecConsumer ClassNameclass name/alias").
    LOGGER.info("Using SpecConsumer class name/alias " + specExecutorInstanceConsumerClassName);

    try {
      ClassAliasResolver<SpecConsumer> aliasResolver =
          new ClassAliasResolver<>(SpecConsumer.class);
      // Prefer the (config, jobCatalog) constructor; fall back to (config) only.
      this.specConsumer = (SpecConsumer) GobblinConstructorUtils.invokeFirstConstructor(
          Class.forName(aliasResolver.resolve(specExecutorInstanceConsumerClassName)),
          ImmutableList.<Object>of(config, jobCatalog),
          ImmutableList.<Object>of(config));
    } catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException | InstantiationException
        | ClassNotFoundException e) {
      throw new RuntimeException("Could not construct SpecConsumer " + specExecutorInstanceConsumerClassName, e);
    }
  }

  /** Delegates to the consumer's metrics when it is a {@link StandardMetricsBridge}; empty otherwise. */
  @Override
  public Collection<StandardMetrics> getStandardMetricsCollection() {
    if (this.specConsumer instanceof StandardMetricsBridge) {
      return ((StandardMetricsBridge)specConsumer).getStandardMetricsCollection();
    } else {
      return ImmutableList.of();
    }
  }

  @Override
  protected void startUp() throws Exception {
    LOGGER.info("Starting the " + StreamingJobConfigurationManager.class.getSimpleName());

    // submit command to fetch job specs
    this.fetchJobSpecExecutor.execute(new Runnable() {
      @Override
      public void run() {
        try {
          while (true) {
            fetchJobSpecs();
          }
        } catch (InterruptedException e) {
          LOGGER.info("Fetch thread interrupted... will exit");
        } catch (ExecutionException e) {
          LOGGER.error("Failed to fetch job specs", e);
          throw new RuntimeException("Failed to fetch specs", e);
        }
      }
    });

    // if the instance consumer is a service then need to start it to consume job specs
    // IMPORTANT: StreamingKafkaSpecConsumer needs to be launched after a fetching thread is created.
    // This is because StreamingKafkaSpecConsumer will invoke addListener(new JobSpecListener()) during startup,
    // which will push job specs into a blocking queue _jobSpecQueue. A fetching thread will help to consume the
    // blocking queue to prevent a hanging issue.
    if (this.specConsumer instanceof Service) {
      ((Service) this.specConsumer).startAsync().awaitRunning();
    }
  }

  /**
   * Blocks on the consumer's next batch of changed specs and posts one notification per
   * spec according to its {@link SpecExecutor.Verb} (ADD/UPDATE/DELETE/CANCEL).
   *
   * @throws InterruptedException when the fetch thread was interrupted (propagated so the
   *         caller's loop exits)
   * @throws ExecutionException when the underlying future fails
   */
  @SuppressWarnings("unchecked") // changedSpecs() returns an untyped Future; cast per its contract
  private void fetchJobSpecs() throws ExecutionException, InterruptedException {
    List<Pair<SpecExecutor.Verb, Spec>> changesSpecs =
        (List<Pair<SpecExecutor.Verb, Spec>>) this.specConsumer.changedSpecs().get();

    // propagate thread interruption so that caller will exit from loop
    if (Thread.interrupted()) {
      throw new InterruptedException();
    }

    for (Pair<SpecExecutor.Verb, Spec> entry : changesSpecs) {
      SpecExecutor.Verb verb = entry.getKey();
      if (verb.equals(SpecExecutor.Verb.ADD)) {
        // Handle addition
        JobSpec jobSpec = (JobSpec) entry.getValue();
        postNewJobConfigArrival(jobSpec.getUri().toString(), jobSpec.getConfigAsProperties());
      } else if (verb.equals(SpecExecutor.Verb.UPDATE)) {
        // Handle update
        JobSpec jobSpec = (JobSpec) entry.getValue();
        postUpdateJobConfigArrival(jobSpec.getUri().toString(), jobSpec.getConfigAsProperties());
      } else if (verb.equals(SpecExecutor.Verb.DELETE)) {
        // Handle delete; only the URI is known for a deleted spec.
        Spec anonymousSpec = entry.getValue();
        postDeleteJobConfigArrival(anonymousSpec.getUri().toString(), new Properties());
      } else if (verb.equals(SpecExecutor.Verb.CANCEL)) {
        Spec anonymousSpec = entry.getValue();
        postCancelJobConfigArrival(anonymousSpec.getUri().toString());
      }
    }
  }

  /** Stops the consumer (bounded by {@code stopTimeoutSeconds}) and shuts down the fetch executor. */
  @Override
  protected void shutDown() throws Exception {
    if (this.specConsumer instanceof Service) {
      ((Service) this.specConsumer).stopAsync().awaitTerminated(this.stopTimeoutSeconds, TimeUnit.SECONDS);
    }
    ExecutorsUtils.shutdownExecutorService(this.fetchJobSpecExecutor, Optional.of(LOGGER));
  }
}
2,259
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/GobblinReferenceCountingZkHelixManager.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.cluster; import java.util.concurrent.atomic.AtomicInteger; import org.apache.helix.InstanceType; import org.apache.helix.manager.zk.ZKHelixManager; import lombok.extern.slf4j.Slf4j; /** * A {@link ZKHelixManager} which keeps a reference count of users. * Every user should call connect and disconnect to increase and decrease the count. * Calls to connect and disconnect to the underlying ZKHelixManager are made only for the first and last usage respectively. */ @Slf4j public class GobblinReferenceCountingZkHelixManager extends ZKHelixManager { private final AtomicInteger usageCount = new AtomicInteger(0); public GobblinReferenceCountingZkHelixManager(String clusterName, String instanceName, InstanceType instanceType, String zkAddress) { super(clusterName, instanceName, instanceType, zkAddress); } @Override public void connect() throws Exception { if (usageCount.incrementAndGet() == 1) { super.connect(); } } @Override public void disconnect() { if (usageCount.decrementAndGet() <= 0) { super.disconnect(); } } }
2,260
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/GobblinHelixUnexpectedStateException.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.cluster; /** * Exception to describe situations where Gobblin sees unexpected state from Helix. Historically, we've seen unexpected * null values, which bubble up as NPE. This exception is explicitly used to differentiate bad Gobblin code from * Helix failures (i.e. seeing a NPE implies Gobblin bug) */ public class GobblinHelixUnexpectedStateException extends Exception { public GobblinHelixUnexpectedStateException(String message, Object... args) { super(String.format(message, args)); } }
2,261
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/GobblinTaskRunner.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.cluster; import java.io.IOException; import java.net.InetAddress; import java.net.UnknownHostException; import java.nio.file.Paths; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Properties; import java.util.Set; import java.util.UUID; import java.util.concurrent.Callable; import java.util.concurrent.ExecutionException; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ScheduledThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.DefaultParser; import org.apache.commons.cli.HelpFormatter; import org.apache.commons.cli.Option; import org.apache.commons.cli.Options; import org.apache.commons.cli.ParseException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.helix.HelixAdmin; import org.apache.helix.HelixDataAccessor; import org.apache.helix.HelixException; import org.apache.helix.HelixManager; import 
org.apache.helix.HelixProperty; import org.apache.helix.InstanceType; import org.apache.helix.NotificationContext; import org.apache.helix.manager.zk.ZKHelixAdmin; import org.apache.helix.messaging.handling.HelixTaskResult; import org.apache.helix.messaging.handling.MessageHandler; import org.apache.helix.messaging.handling.MultiTypeMessageHandlerFactory; import org.apache.helix.model.Message; import org.apache.helix.task.TaskFactory; import org.apache.helix.task.TaskStateModelFactory; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.github.rholder.retry.RetryException; import com.github.rholder.retry.Retryer; import com.github.rholder.retry.RetryerBuilder; import com.github.rholder.retry.StopStrategies; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Optional; import com.google.common.base.Preconditions; import com.google.common.base.Throwables; import com.google.common.collect.Lists; import com.google.common.eventbus.EventBus; import com.google.common.eventbus.Subscribe; import com.google.common.util.concurrent.MoreExecutors; import com.google.common.util.concurrent.Service; import com.google.common.util.concurrent.ServiceManager; import com.typesafe.config.Config; import com.typesafe.config.ConfigFactory; import com.typesafe.config.ConfigValueFactory; import lombok.Getter; import lombok.Setter; import org.apache.gobblin.annotation.Alpha; import org.apache.gobblin.broker.SharedResourcesBrokerFactory; import org.apache.gobblin.configuration.ConfigurationKeys; import org.apache.gobblin.configuration.State; import org.apache.gobblin.instrumented.StandardMetricsBridge; import org.apache.gobblin.metrics.GobblinMetrics; import org.apache.gobblin.metrics.MultiReporterException; import org.apache.gobblin.metrics.RootMetricContext; import org.apache.gobblin.metrics.event.EventSubmitter; import org.apache.gobblin.metrics.event.GobblinEventBuilder; import org.apache.gobblin.metrics.reporter.util.MetricReportUtils; 
import org.apache.gobblin.runtime.api.TaskEventMetadataGenerator; import org.apache.gobblin.util.ClassAliasResolver; import org.apache.gobblin.util.ConfigUtils; import org.apache.gobblin.util.FileUtils; import org.apache.gobblin.util.HadoopUtils; import org.apache.gobblin.util.JvmUtils; import org.apache.gobblin.util.TaskEventMetadataUtils; import org.apache.gobblin.util.event.ContainerHealthCheckFailureEvent; import org.apache.gobblin.util.eventbus.EventBusFactory; import org.apache.gobblin.util.reflection.GobblinConstructorUtils; /** * The main class running in the containers managing services for running Gobblin * {@link org.apache.gobblin.source.workunit.WorkUnit}s. * * <p> * This class presents a Helix participant that uses a {@link HelixManager} to communicate with Helix. * It uses Helix task execution framework and details are encapsulated in {@link TaskRunnerSuiteBase}. * </p> * * <p> * This class responds to a graceful shutdown initiated by the {@link GobblinClusterManager} via * a Helix message of subtype {@link HelixMessageSubTypes#WORK_UNIT_RUNNER_SHUTDOWN}, or it does a * graceful shutdown when the shutdown hook gets called. In both cases, {@link #stop()} will be * called to start the graceful shutdown. * </p> * * <p> * If for some reason, the container exits or gets killed, the {@link GobblinClusterManager} will * be notified for the completion of the container and will start a new container to replace this one. * </p> * * @author Yinan Li */ @Alpha public class GobblinTaskRunner implements StandardMetricsBridge { // Working directory key for applications. This config is set dynamically. 
public static final String CLUSTER_APP_WORK_DIR = GobblinClusterConfigurationKeys.GOBBLIN_CLUSTER_PREFIX + "appWorkDir"; private static final Logger logger = LoggerFactory.getLogger(GobblinTaskRunner.class); static final java.nio.file.Path CLUSTER_CONF_PATH = Paths.get("generated-gobblin-cluster.conf"); static final String GOBBLIN_TASK_FACTORY_NAME = "GobblinTaskFactory"; static final String GOBBLIN_JOB_FACTORY_NAME = "GobblinJobFactory"; private final String helixInstanceName; private final String clusterName; private final Optional<ContainerMetrics> containerMetrics; private final List<Service> services = Lists.newArrayList(); private final Path appWorkPath; //An EventBus instance that can be accessed from any component running within the worker process. The individual components can // use the EventBus stream to communicate back application level health check results to the // GobblinTaskRunner. private final EventBus containerHealthEventBus; @Getter private HelixManager jobHelixManager; private Optional<HelixManager> taskDriverHelixManager = Optional.absent(); private ServiceManager serviceManager; private TaskStateModelFactory taskStateModelFactory; private boolean isTaskDriver; private boolean dedicatedTaskDriverCluster; private boolean isContainerExitOnHealthCheckFailureEnabled; private Collection<StandardMetricsBridge.StandardMetrics> metricsCollection; @Getter private volatile boolean started = false; private volatile boolean stopInProgress = false; private volatile boolean isStopped = false; @Getter @Setter private volatile boolean healthCheckFailed = false; protected final String taskRunnerId; protected final EventBus eventBus = new EventBus(GobblinTaskRunner.class.getSimpleName()); protected final Config clusterConfig; @Getter protected final FileSystem fs; protected final String applicationName; protected final String applicationId; private final boolean isMetricReportingFailureFatal; private final boolean isEventReportingFailureFatal; public 
GobblinTaskRunner(String applicationName, String helixInstanceName, String applicationId, String taskRunnerId, Config config, Optional<Path> appWorkDirOptional) throws Exception { // Set system properties passed in via application config. As an example, Helix uses System#getProperty() for ZK configuration // overrides such as sessionTimeout. In this case, the overrides specified // in the application configuration have to be extracted and set before initializing HelixManager. GobblinClusterUtils.setSystemProperties(config); //Add dynamic config config = GobblinClusterUtils.addDynamicConfig(config); this.isTaskDriver = ConfigUtils.getBoolean(config, GobblinClusterConfigurationKeys.TASK_DRIVER_ENABLED,false); this.helixInstanceName = helixInstanceName; this.taskRunnerId = taskRunnerId; this.applicationName = applicationName; this.applicationId = applicationId; this.dedicatedTaskDriverCluster = ConfigUtils.getBoolean(config, GobblinClusterConfigurationKeys.DEDICATED_TASK_DRIVER_CLUSTER_ENABLED, false); Configuration conf = HadoopUtils.newConfiguration(); this.fs = GobblinClusterUtils.buildFileSystem(config, conf); this.appWorkPath = initAppWorkDir(config, appWorkDirOptional); this.clusterConfig = saveConfigToFile(config); this.clusterName = this.clusterConfig.getString(GobblinClusterConfigurationKeys.HELIX_CLUSTER_NAME_KEY); this.isMetricReportingFailureFatal = ConfigUtils.getBoolean(this.clusterConfig, ConfigurationKeys.GOBBLIN_TASK_METRIC_REPORTING_FAILURE_FATAL, ConfigurationKeys.DEFAULT_GOBBLIN_TASK_METRIC_REPORTING_FAILURE_FATAL); this.isEventReportingFailureFatal = ConfigUtils.getBoolean(this.clusterConfig, ConfigurationKeys.GOBBLIN_TASK_EVENT_REPORTING_FAILURE_FATAL, ConfigurationKeys.DEFAULT_GOBBLIN_TASK_EVENT_REPORTING_FAILURE_FATAL); logger.info("Configured GobblinTaskRunner work dir to: {}", this.appWorkPath.toString()); this.isContainerExitOnHealthCheckFailureEnabled = ConfigUtils.getBoolean(config, 
GobblinClusterConfigurationKeys.CONTAINER_EXIT_ON_HEALTH_CHECK_FAILURE_ENABLED, GobblinClusterConfigurationKeys.DEFAULT_CONTAINER_EXIT_ON_HEALTH_CHECK_FAILURE_ENABLED); if (this.isContainerExitOnHealthCheckFailureEnabled) { EventBus eventBus; try { eventBus = EventBusFactory.get(ContainerHealthCheckFailureEvent.CONTAINER_HEALTH_CHECK_EVENT_BUS_NAME, SharedResourcesBrokerFactory.getImplicitBroker()); } catch (IOException e) { logger.error("Could not find EventBus instance for container health check", e); eventBus = null; } this.containerHealthEventBus = eventBus; } else { this.containerHealthEventBus = null; } initHelixManager(); this.containerMetrics = buildContainerMetrics(); logger.info("GobblinTaskRunner({}): applicationName {}, helixInstanceName {}, applicationId {}, taskRunnerId {}, config {}, appWorkDir {}", this.isTaskDriver ? "taskDriver" : "worker", applicationName, helixInstanceName, applicationId, taskRunnerId, config, appWorkDirOptional); } private TaskRunnerSuiteBase initTaskRunnerSuiteBase() throws ReflectiveOperationException { String builderStr = ConfigUtils.getString(this.clusterConfig, GobblinClusterConfigurationKeys.TASK_RUNNER_SUITE_BUILDER, TaskRunnerSuiteBase.Builder.class.getName()); String hostName = ""; try { hostName = InetAddress.getLocalHost().getHostName(); } catch (UnknownHostException e) { logger.warn("Cannot find host name for Helix instance: {}", this.helixInstanceName); } TaskRunnerSuiteBase.Builder builder = GobblinConstructorUtils.<TaskRunnerSuiteBase.Builder>invokeLongestConstructor( new ClassAliasResolver(TaskRunnerSuiteBase.Builder.class) .resolveClass(builderStr), this.clusterConfig); return builder.setAppWorkPath(this.appWorkPath) .setContainerMetrics(this.containerMetrics) .setFileSystem(this.fs) .setJobHelixManager(this.jobHelixManager) .setApplicationId(applicationId) .setApplicationName(applicationName) .setInstanceName(helixInstanceName) .setContainerId(taskRunnerId) .setHostName(hostName) .build(); } private Path 
initAppWorkDir(Config config, Optional<Path> appWorkDirOptional) { return appWorkDirOptional.isPresent() ? appWorkDirOptional.get() : GobblinClusterUtils .getAppWorkDirPathFromConfig(config, this.fs, this.applicationName, this.applicationId); } private void initHelixManager() { String zkConnectionString = this.clusterConfig.getString(GobblinClusterConfigurationKeys.ZK_CONNECTION_STRING_KEY); logger.info("Using ZooKeeper connection string: " + zkConnectionString); if (this.isTaskDriver && this.dedicatedTaskDriverCluster) { // This will create a Helix manager to receive the planning job this.taskDriverHelixManager = Optional.of(GobblinHelixManagerFactory.getZKHelixManager( ConfigUtils.getString(this.clusterConfig, GobblinClusterConfigurationKeys.TASK_DRIVER_CLUSTER_NAME_KEY, ""), this.helixInstanceName, InstanceType.PARTICIPANT, zkConnectionString)); this.jobHelixManager = GobblinHelixManagerFactory.getZKHelixManager( this.clusterName, this.helixInstanceName, InstanceType.ADMINISTRATOR, zkConnectionString); } else { this.jobHelixManager = GobblinHelixManagerFactory.getZKHelixManager( this.clusterName, this.helixInstanceName, InstanceType.PARTICIPANT, zkConnectionString); } } private HelixManager getReceiverManager() { return taskDriverHelixManager.isPresent() ? 
taskDriverHelixManager.get() : this.jobHelixManager; } private TaskStateModelFactory createTaskStateModelFactory(Map<String, TaskFactory> taskFactoryMap) { HelixManager receiverManager = getReceiverManager(); TaskStateModelFactory taskStateModelFactory = new TaskStateModelFactory(receiverManager, taskFactoryMap); receiverManager.getStateMachineEngine() .registerStateModelFactory("Task", taskStateModelFactory); return taskStateModelFactory; } private Config saveConfigToFile(Config config) throws IOException { Config newConf = config .withValue(CLUSTER_APP_WORK_DIR, ConfigValueFactory.fromAnyRef(this.appWorkPath.toString())); ConfigUtils configUtils = new ConfigUtils(new FileUtils()); configUtils.saveConfigToFile(newConf, CLUSTER_CONF_PATH); return newConf; } /** * Start this {@link GobblinTaskRunner} instance. */ public void start() throws ContainerHealthCheckException { logger.info(String.format("Starting %s in container %s", this.helixInstanceName, this.taskRunnerId)); // Add a shutdown hook so the task scheduler gets properly shutdown addShutdownHook(); connectHelixManagerWithRetry(); TaskRunnerSuiteBase suite; try { suite = initTaskRunnerSuiteBase(); synchronized (this) { this.taskStateModelFactory = createTaskStateModelFactory(suite.getTaskFactoryMap()); } } catch (Exception e) { throw new RuntimeException(e); } this.metricsCollection = suite.getMetricsCollection(); this.services.addAll(suite.getServices()); this.services.addAll(getServices()); if (this.services.isEmpty()) { this.serviceManager = null; } else { this.serviceManager = new ServiceManager(services); } addInstanceTags(); // Start metric reporting initMetricReporter(); if (this.containerHealthEventBus != null) { //Register itself with the container health event bus instance to receive container health events logger.info("Registering GobblinTaskRunner with ContainerHealthCheckEventBus.."); this.containerHealthEventBus.register(this); } if (this.serviceManager != null) { 
this.serviceManager.startAsync(); started = true; this.serviceManager.awaitStopped(); } else { started = true; } //Check if the TaskRunner shutdown is invoked due to a health check failure. If yes, throw a RuntimeException // that will be propagated to the caller. if (this.isContainerExitOnHealthCheckFailureEnabled && GobblinTaskRunner.this.isHealthCheckFailed()) { logger.error("GobblinTaskRunner finished due to health check failure."); throw new ContainerHealthCheckException(); } } private void initMetricReporter() { if (this.containerMetrics.isPresent()) { try { this.containerMetrics.get() .startMetricReportingWithFileSuffix(ConfigUtils.configToState(this.clusterConfig), this.taskRunnerId); } catch (MultiReporterException ex) { if (MetricReportUtils.shouldThrowException(logger, ex, this.isMetricReportingFailureFatal, this.isEventReportingFailureFatal)) { throw new RuntimeException(ex); } } } } public synchronized void stop() { if (this.isStopped) { logger.info("Gobblin Task runner is already stopped."); return; } if (this.stopInProgress) { logger.info("Gobblin Task runner stop already in progress."); return; } this.stopInProgress = true; logger.info("Stopping the Gobblin Task runner"); // Stop metric reporting if (this.containerMetrics.isPresent()) { this.containerMetrics.get().stopMetricsReporting(); } try { stopServices(); } finally { logger.info("All services are stopped."); this.taskStateModelFactory.shutdown(); disconnectHelixManager(); } this.isStopped = true; } private void stopServices() { if (this.serviceManager != null) { try { // Give the services 5 minutes to stop to ensure that we are responsive to shutdown requests this.serviceManager.stopAsync().awaitStopped(5, TimeUnit.MINUTES); } catch (TimeoutException te) { logger.error("Timeout in stopping the service manager", te); } } } /** * Creates and returns a {@link List} of additional {@link Service}s that should be run in this * {@link GobblinTaskRunner}. 
Sub-classes that need additional {@link Service}s to run, should override this method * * @return a {@link List} of additional {@link Service}s to run. */ protected List<Service> getServices() { List<Service> serviceList = new ArrayList<>(); if (ConfigUtils.getBoolean(this.clusterConfig, GobblinClusterConfigurationKeys.CONTAINER_HEALTH_METRICS_SERVICE_ENABLED, GobblinClusterConfigurationKeys.DEFAULT_CONTAINER_HEALTH_METRICS_SERVICE_ENABLED)) { serviceList.add(new ContainerHealthMetricsService(clusterConfig)); } return serviceList; } @VisibleForTesting boolean isStopped() { return this.isStopped; } @VisibleForTesting void connectHelixManager() throws Exception { this.jobHelixManager.connect(); if (!(this.isTaskDriver && this.dedicatedTaskDriverCluster)) { // Ensure the instance is enabled when jobHelixManager is a PARTICIPANT this.jobHelixManager.getClusterManagmentTool().enableInstance(clusterName, helixInstanceName, true); } this.jobHelixManager.getMessagingService() .registerMessageHandlerFactory(GobblinHelixConstants.SHUTDOWN_MESSAGE_TYPE, new ParticipantShutdownMessageHandlerFactory()); this.jobHelixManager.getMessagingService() .registerMessageHandlerFactory(Message.MessageType.USER_DEFINE_MSG.toString(), getUserDefinedMessageHandlerFactory()); if (this.taskDriverHelixManager.isPresent()) { this.taskDriverHelixManager.get().connect(); //Ensure the instance is enabled. this.taskDriverHelixManager.get().getClusterManagmentTool().enableInstance(this.taskDriverHelixManager.get().getClusterName(), helixInstanceName, true); } } /** * A method to handle failures joining Helix cluster. 
The method will perform the following steps before attempting * to re-join the cluster: * <li> * <ul>Disconnect from Helix cluster, which would close any open clients</ul> * <ul>Drop instance from Helix cluster, to remove any partial instance structure from Helix</ul> * <ul>Re-construct helix manager instances, used to re-join the cluster</ul> * </li> */ private void onClusterJoinFailure() { logger.warn("Disconnecting Helix manager.."); disconnectHelixManager(); HelixAdmin admin = new ZKHelixAdmin(clusterConfig.getString(GobblinClusterConfigurationKeys.ZK_CONNECTION_STRING_KEY)); //Drop the helix Instance logger.warn("Dropping instance: {} from cluster: {}", helixInstanceName, clusterName); HelixUtils.dropInstanceIfExists(admin, clusterName, helixInstanceName); if (this.taskDriverHelixManager.isPresent()) { String taskDriverCluster = clusterConfig.getString(GobblinClusterConfigurationKeys.TASK_DRIVER_CLUSTER_NAME_KEY); logger.warn("Dropping instance: {} from task driver cluster: {}", helixInstanceName, taskDriverCluster); HelixUtils.dropInstanceIfExists(admin, clusterName, helixInstanceName); } admin.close(); logger.warn("Reinitializing Helix manager.."); initHelixManager(); } @VisibleForTesting void connectHelixManagerWithRetry() { Callable<Void> connectHelixManagerCallable = () -> { try { logger.info("Instance: {} attempting to join cluster: {}", helixInstanceName, clusterName); connectHelixManager(); } catch (HelixException e) { logger.error("Exception encountered when joining cluster", e); onClusterJoinFailure(); throw e; } return null; }; Retryer<Void> retryer = RetryerBuilder.<Void>newBuilder() .retryIfException() .withStopStrategy(StopStrategies.stopAfterAttempt(5)).build(); try { retryer.call(connectHelixManagerCallable); } catch (ExecutionException | RetryException e) { Throwables.propagate(e); } } /** * Helix participant cannot pre-configure tags before it connects to ZK. So this method can only be invoked after * {@link HelixManager#connect()}. 
However this will still work because tagged jobs won't be sent to a non-tagged instance. Hence * the job with EXAMPLE_INSTANCE_TAG will remain in the ZK until an instance with EXAMPLE_INSTANCE_TAG was found. */ private void addInstanceTags() { HelixManager receiverManager = getReceiverManager(); if (receiverManager.isConnected()) { try { Set<String> desiredTags = new HashSet<>( ConfigUtils.getStringList(this.clusterConfig, GobblinClusterConfigurationKeys.HELIX_INSTANCE_TAGS_KEY)); if (!desiredTags.isEmpty()) { // The helix instance associated with this container should be consistent on helix tag List<String> existedTags = receiverManager.getClusterManagmentTool() .getInstanceConfig(this.clusterName, this.helixInstanceName).getTags(); // Remove tag assignments for the current Helix instance from a previous run for (String tag : existedTags) { if (!desiredTags.contains(tag)) { receiverManager.getClusterManagmentTool().removeInstanceTag(this.clusterName, this.helixInstanceName, tag); logger.info("Removed unrelated helix tag {} for instance {}", tag, this.helixInstanceName); } } desiredTags.forEach(desiredTag -> receiverManager.getClusterManagmentTool() .addInstanceTag(this.clusterName, this.helixInstanceName, desiredTag)); logger.info("Actual tags binding " + receiverManager.getClusterManagmentTool() .getInstanceConfig(this.clusterName, this.helixInstanceName).getTags()); } } catch (HelixException e) { logger.warn("Error with Helix getting instance config tags used in YARN cluster configuration. Ensure YARN is being used. Will ignore and attempt to move on {}", e); } } } /** * Creates and returns a {@link MultiTypeMessageHandlerFactory} for handling of Helix * {@link org.apache.helix.model.Message.MessageType#USER_DEFINE_MSG}s. * * @returns a {@link MultiTypeMessageHandlerFactory}. 
*/ protected MultiTypeMessageHandlerFactory getUserDefinedMessageHandlerFactory() { return new ParticipantUserDefinedMessageHandlerFactory(); } @VisibleForTesting void disconnectHelixManager() { if (this.jobHelixManager.isConnected()) { this.jobHelixManager.disconnect(); } if (this.taskDriverHelixManager.isPresent()) { this.taskDriverHelixManager.get().disconnect(); } } private void addShutdownHook() { Runtime.getRuntime().addShutdownHook(new Thread() { @Override public void run() { logger.info("Running the shutdown hook"); GobblinTaskRunner.this.stop(); } }); } private Optional<ContainerMetrics> buildContainerMetrics() { Properties properties = ConfigUtils.configToProperties(this.clusterConfig); if (GobblinMetrics.isEnabled(properties)) { logger.info("Container metrics are enabled"); return Optional.of(ContainerMetrics .get(ConfigUtils.configToState(clusterConfig), this.applicationName, this.taskRunnerId)); } else { return Optional.absent(); } } @Override public Collection<StandardMetrics> getStandardMetricsCollection() { return this.metricsCollection; } /** * A custom {@link MultiTypeMessageHandlerFactory} for {@link ParticipantShutdownMessageHandler}s that handle messages * of type "SHUTDOWN" for shutting down the participants. */ private class ParticipantShutdownMessageHandlerFactory implements MultiTypeMessageHandlerFactory { @Override public MessageHandler createHandler(Message message, NotificationContext context) { return new ParticipantShutdownMessageHandler(message, context); } @Override public String getMessageType() { return GobblinHelixConstants.SHUTDOWN_MESSAGE_TYPE; } public List<String> getMessageTypes() { return Collections.singletonList(getMessageType()); } @Override public void reset() { } /** * A custom {@link MessageHandler} for handling messages of sub type * {@link HelixMessageSubTypes#WORK_UNIT_RUNNER_SHUTDOWN}. 
*/ private class ParticipantShutdownMessageHandler extends MessageHandler { public ParticipantShutdownMessageHandler(Message message, NotificationContext context) { super(message, context); } @Override public HelixTaskResult handleMessage() { String messageSubType = this._message.getMsgSubType(); Preconditions.checkArgument(messageSubType .equalsIgnoreCase(HelixMessageSubTypes.WORK_UNIT_RUNNER_SHUTDOWN.toString()), String .format("Unknown %s message subtype: %s", GobblinHelixConstants.SHUTDOWN_MESSAGE_TYPE, messageSubType)); HelixTaskResult result = new HelixTaskResult(); if (stopInProgress) { result.setSuccess(true); return result; } logger.info("Handling message " + HelixMessageSubTypes.WORK_UNIT_RUNNER_SHUTDOWN.toString()); ScheduledExecutorService shutdownMessageHandlingCompletionWatcher = MoreExecutors.getExitingScheduledExecutorService(new ScheduledThreadPoolExecutor(1)); // Schedule the task for watching on the removal of the shutdown message, which indicates that // the message has been successfully processed and it's safe to disconnect the HelixManager. // This is a hacky way of watching for the completion of processing the shutdown message and // should be replaced by a fix to https://issues.apache.org/jira/browse/HELIX-611. 
shutdownMessageHandlingCompletionWatcher.scheduleAtFixedRate(new Runnable() { @Override public void run() { HelixManager helixManager = _notificationContext.getManager(); HelixDataAccessor helixDataAccessor = helixManager.getHelixDataAccessor(); HelixProperty helixProperty = helixDataAccessor.getProperty( _message.getKey(helixDataAccessor.keyBuilder(), helixManager.getInstanceName())); // The absence of the shutdown message indicates it has been removed if (helixProperty == null) { GobblinTaskRunner.this.stop(); } } }, 0, 1, TimeUnit.SECONDS); result.setSuccess(true); return result; } @Override public void onError(Exception e, ErrorCode code, ErrorType type) { logger.error(String .format("Failed to handle message with exception %s, error code %s, error type %s", e, code, type)); } } } /** * A custom {@link MultiTypeMessageHandlerFactory} for {@link ParticipantUserDefinedMessageHandler}s that * handle messages of type {@link org.apache.helix.model.Message.MessageType#USER_DEFINE_MSG}. */ private static class ParticipantUserDefinedMessageHandlerFactory implements MultiTypeMessageHandlerFactory { @Override public MessageHandler createHandler(Message message, NotificationContext context) { return new ParticipantUserDefinedMessageHandler(message, context); } @Override public String getMessageType() { return Message.MessageType.USER_DEFINE_MSG.toString(); } public List<String> getMessageTypes() { return Collections.singletonList(getMessageType()); } @Override public void reset() { } /** * A custom {@link MessageHandler} for handling user-defined messages to the controller. * * <p> * Currently does not handle any user-defined messages. If this class is passed a custom message, it will simply * print out a warning and return successfully. Sub-classes of {@link GobblinClusterManager} should override * {@link #getUserDefinedMessageHandlerFactory}. 
* </p> */ private static class ParticipantUserDefinedMessageHandler extends MessageHandler { public ParticipantUserDefinedMessageHandler(Message message, NotificationContext context) { super(message, context); } @Override public HelixTaskResult handleMessage() { logger.warn(String.format("No handling setup for %s message of subtype: %s", Message.MessageType.USER_DEFINE_MSG.toString(), this._message.getMsgSubType())); HelixTaskResult helixTaskResult = new HelixTaskResult(); helixTaskResult.setSuccess(true); return helixTaskResult; } @Override public void onError(Exception e, ErrorCode code, ErrorType type) { logger.error(String .format("Failed to handle message with exception %s, error code %s, error type %s", e, code, type)); } } } @Subscribe public void handleContainerHealthCheckFailureEvent(ContainerHealthCheckFailureEvent event) { logger.error("Received {} from: {}", event.getClass().getSimpleName(), event.getClassName()); logger.error("Submitting a ContainerHealthCheckFailureEvent.."); submitEvent(event); logger.error("Stopping GobblinTaskRunner..."); GobblinTaskRunner.this.setHealthCheckFailed(true); GobblinTaskRunner.this.stop(); } private void submitEvent(ContainerHealthCheckFailureEvent event) { EventSubmitter eventSubmitter = new EventSubmitter.Builder(RootMetricContext.get(), getClass().getPackage().getName()).build(); GobblinEventBuilder eventBuilder = new GobblinEventBuilder(event.getClass().getSimpleName()); State taskState = ConfigUtils.configToState(event.getConfig()); //Add task metadata such as Helix taskId, containerId, and workflowId if configured TaskEventMetadataGenerator taskEventMetadataGenerator = TaskEventMetadataUtils.getTaskEventMetadataGenerator(taskState); eventBuilder.addAdditionalMetadata(taskEventMetadataGenerator.getMetadata(taskState, event.getClass().getSimpleName())); eventBuilder.addAdditionalMetadata(event.getMetadata()); eventSubmitter.submit(eventBuilder); } private static String getApplicationId() { return "1"; } private 
static String getTaskRunnerId() { return UUID.randomUUID().toString(); } public static Options buildOptions() { Options options = new Options(); options.addOption("a", GobblinClusterConfigurationKeys.APPLICATION_NAME_OPTION_NAME, true, "Application name"); options.addOption("d", GobblinClusterConfigurationKeys.APPLICATION_ID_OPTION_NAME, true, "Application id"); options.addOption("i", GobblinClusterConfigurationKeys.HELIX_INSTANCE_NAME_OPTION_NAME, true, "Helix instance name"); options.addOption(Option.builder("t").longOpt(GobblinClusterConfigurationKeys.HELIX_INSTANCE_TAGS_OPTION_NAME) .hasArg(true).required(false).desc("Helix instance tags").build()); return options; } public static void printUsage(Options options) { HelpFormatter formatter = new HelpFormatter(); formatter.printHelp(GobblinClusterManager.class.getSimpleName(), options); } public static void main(String[] args) throws Exception { Options options = buildOptions(); try { CommandLine cmd = new DefaultParser().parse(options, args); if (!cmd.hasOption(GobblinClusterConfigurationKeys.APPLICATION_NAME_OPTION_NAME) || !cmd .hasOption(GobblinClusterConfigurationKeys.HELIX_INSTANCE_NAME_OPTION_NAME)) { printUsage(options); System.exit(1); } logger.info(JvmUtils.getJvmInputArguments()); String applicationName = cmd.getOptionValue(GobblinClusterConfigurationKeys.APPLICATION_NAME_OPTION_NAME); String helixInstanceName = cmd.getOptionValue(GobblinClusterConfigurationKeys.HELIX_INSTANCE_NAME_OPTION_NAME); GobblinTaskRunner gobblinWorkUnitRunner = new GobblinTaskRunner(applicationName, helixInstanceName, getApplicationId(), getTaskRunnerId(), ConfigFactory.load(), Optional.<Path>absent()); gobblinWorkUnitRunner.start(); } catch (ParseException pe) { printUsage(options); System.exit(1); } } }
2,262
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/GobblinClusterConfigurationKeys.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.cluster;

import java.time.Duration;

import org.apache.gobblin.annotation.Alpha;

/**
 * A central place for configuration related constants of a Gobblin Cluster.
 *
 * @author Yinan Li
 */
@Alpha
public class GobblinClusterConfigurationKeys {

  /** Prefix shared by most Gobblin Cluster configuration keys. */
  public static final String GOBBLIN_CLUSTER_PREFIX = "gobblin.cluster.";

  // Task separation properties (running tasks in a child process separate from the worker).
  public static final String ENABLE_TASK_IN_SEPARATE_PROCESS = GOBBLIN_CLUSTER_PREFIX + "enableTaskInSeparateProcess";
  public static final String TASK_CLASSPATH = GOBBLIN_CLUSTER_PREFIX + "task.classpath";
  public static final String TASK_LOG_CONFIG = GOBBLIN_CLUSTER_PREFIX + "task.log.config";
  public static final String TASK_JVM_OPTIONS = GOBBLIN_CLUSTER_PREFIX + "task.jvm.options";

  // General Gobblin Cluster application configuration properties.
  public static final String APPLICATION_NAME_OPTION_NAME = "app_name";
  public static final String APPLICATION_ID_OPTION_NAME = "app_id";
  public static final String STANDALONE_CLUSTER_MODE = "standalone_cluster";
  public static final String STANDALONE_CLUSTER_MODE_KEY = GOBBLIN_CLUSTER_PREFIX + "standaloneMode";
  public static final boolean DEFAULT_STANDALONE_CLUSTER_MODE = false;

  // Root working directory for Gobblin cluster.
  public static final String CLUSTER_WORK_DIR = GOBBLIN_CLUSTER_PREFIX + "workDir";

  public static final String DISTRIBUTED_JOB_LAUNCHER_ENABLED = GOBBLIN_CLUSTER_PREFIX + "distributedJobLauncherEnabled";
  public static final boolean DEFAULT_DISTRIBUTED_JOB_LAUNCHER_ENABLED = false;
  public static final String DISTRIBUTED_JOB_LAUNCHER_BUILDER = GOBBLIN_CLUSTER_PREFIX + "distributedJobLauncherBuilder";

  // Helix configuration properties.
  public static final String DEDICATED_JOB_CLUSTER_CONTROLLER_ENABLED =
      GOBBLIN_CLUSTER_PREFIX + "dedicatedJobClusterController.enabled";
  public static final String HELIX_CLUSTER_NAME_KEY = GOBBLIN_CLUSTER_PREFIX + "helix.cluster.name";
  public static final String MANAGER_CLUSTER_NAME_KEY = GOBBLIN_CLUSTER_PREFIX + "manager.cluster.name";
  public static final String DEDICATED_MANAGER_CLUSTER_ENABLED =
      GOBBLIN_CLUSTER_PREFIX + "dedicatedManagerCluster.enabled";
  public static final String DEDICATED_TASK_DRIVER_CLUSTER_CONTROLLER_ENABLED =
      GOBBLIN_CLUSTER_PREFIX + "dedicatedTaskDriverClusterController.enabled";
  public static final String TASK_DRIVER_CLUSTER_NAME_KEY = GOBBLIN_CLUSTER_PREFIX + "taskDriver.cluster.name";
  public static final String DEDICATED_TASK_DRIVER_CLUSTER_ENABLED =
      GOBBLIN_CLUSTER_PREFIX + "dedicatedTaskDriverCluster.enabled";
  public static final String TASK_DRIVER_ENABLED = GOBBLIN_CLUSTER_PREFIX + "taskDriver.enabled";
  public static final String ZK_CONNECTION_STRING_KEY = GOBBLIN_CLUSTER_PREFIX + "zk.connection.string";
  public static final String WORK_UNIT_FILE_PATH = GOBBLIN_CLUSTER_PREFIX + "work.unit.file.path";
  public static final String HELIX_INSTANCE_NAME_OPTION_NAME = "helix_instance_name";
  public static final String HELIX_INSTANCE_NAME_KEY = GOBBLIN_CLUSTER_PREFIX + "helixInstanceName";
  public static final String HELIX_INSTANCE_TAGS_OPTION_NAME = "helix_instance_tags";

  // The number of tasks that can be running concurrently in the same worker process.
  public static final String HELIX_CLUSTER_TASK_CONCURRENCY = GOBBLIN_CLUSTER_PREFIX + "helix.taskConcurrency";
  public static final int HELIX_CLUSTER_TASK_CONCURRENCY_DEFAULT = 40;

  // Should job be executed in the scheduler thread?
  public static final String JOB_EXECUTE_IN_SCHEDULING_THREAD = GOBBLIN_CLUSTER_PREFIX + "job.executeInSchedulingThread";
  public static final boolean JOB_EXECUTE_IN_SCHEDULING_THREAD_DEFAULT = true;

  // Helix tagging.
  public static final String HELIX_JOB_TAG_KEY = GOBBLIN_CLUSTER_PREFIX + "helixJobTag";
  public static final String HELIX_PLANNING_JOB_TAG_KEY = GOBBLIN_CLUSTER_PREFIX + "helixPlanningJobTag";
  public static final String HELIX_INSTANCE_TAGS_KEY = GOBBLIN_CLUSTER_PREFIX + "helixInstanceTags";
  public static final String HELIX_DEFAULT_TAG = "GobblinHelixDefaultTag";

  // Helix job quota.
  public static final String HELIX_JOB_TYPE_KEY = GOBBLIN_CLUSTER_PREFIX + "helixJobType";
  public static final String HELIX_PLANNING_JOB_TYPE_KEY = GOBBLIN_CLUSTER_PREFIX + "helixPlanningJobType";

  // Planning job properties.
  public static final String PLANNING_JOB_NAME_PREFIX = "PlanningJob";
  public static final String PLANNING_CONF_PREFIX = GOBBLIN_CLUSTER_PREFIX + "planning.";
  public static final String PLANNING_ID_KEY = PLANNING_CONF_PREFIX + "idKey";
  public static final String PLANNING_JOB_CREATE_TIME = PLANNING_CONF_PREFIX + "createTime";

  // Actual job properties.
  public static final String ACTUAL_JOB_NAME_PREFIX = "ActualJob";

  // Job spec operation.
  public static final String JOB_ALWAYS_DELETE = GOBBLIN_CLUSTER_PREFIX + "job.alwaysDelete";

  // Job quota configuration as a comma separated list of name value pairs separated by a colon.
  // Example: A:1,B:38,DEFAULT:1
  public static final String HELIX_TASK_QUOTA_CONFIG_KEY = "gobblin.cluster.helixTaskQuotaConfig";

  /**
   * A path pointing to a directory that contains job execution files to be executed by Gobblin. This directory can
   * have a nested structure.
   *
   * @see <a href="https://gobblin.readthedocs.io/en/latest/user-guide/Working-with-Job-Configuration-Files/">Job Config Files</a>
   */
  public static final String JOB_CONF_PATH_KEY = GOBBLIN_CLUSTER_PREFIX + "job.conf.path";

  // A java.util.regex specifying the subset of jobs under JOB_CONF_PATH to be run.
  public static final String JOBS_TO_RUN = GOBBLIN_CLUSTER_PREFIX + "jobsToRun";

  public static final String INPUT_WORK_UNIT_DIR_NAME = "_workunits";
  public static final String OUTPUT_TASK_STATE_DIR_NAME = "_taskstates";

  // This is the directory to store job.state files when a state store is used.
  // Note that a .job.state file is not the same thing as a .jst file.
  public static final String JOB_STATE_DIR_NAME = "_jobstates";

  public static final String TAR_GZ_FILE_SUFFIX = ".tar.gz";

  // Other misc configuration properties.
  public static final String TASK_SUCCESS_OPTIONAL_KEY = "TASK_SUCCESS_OPTIONAL";
  public static final String GOBBLIN_CLUSTER_LOG4J_CONFIGURATION_FILE = "log4j-cluster.properties";
  public static final String JOB_CONFIGURATION_MANAGER_KEY = GOBBLIN_CLUSTER_PREFIX + "job.configuration.manager";

  public static final String JOB_SPEC_REFRESH_INTERVAL = GOBBLIN_CLUSTER_PREFIX + "job.spec.refresh.interval";
  public static final String JOB_SPEC_URI = GOBBLIN_CLUSTER_PREFIX + "job.spec.uri";
  public static final String SPEC_CONSUMER_CLASS_KEY = GOBBLIN_CLUSTER_PREFIX + "specConsumer.class";
  public static final String DEFAULT_SPEC_CONSUMER_CLASS = "org.apache.gobblin.service.SimpleKafkaSpecConsumer";
  public static final String DEFAULT_STREAMING_SPEC_CONSUMER_CLASS =
      "org.apache.gobblin.service.StreamingKafkaSpecConsumer";
  public static final String JOB_CATALOG_KEY = GOBBLIN_CLUSTER_PREFIX + "job.catalog";
  public static final String DEFAULT_JOB_CATALOG = "org.apache.gobblin.runtime.job_catalog.NonObservingFSJobCatalog";

  public static final String STOP_TIMEOUT_SECONDS = GOBBLIN_CLUSTER_PREFIX + "stopTimeoutSeconds";
  public static final long DEFAULT_STOP_TIMEOUT_SECONDS = 60;

  public static final String HELIX_WORKFLOW_EXPIRY_TIME_SECONDS = GOBBLIN_CLUSTER_PREFIX + "workflow.expirySeconds";
  public static final long DEFAULT_HELIX_WORKFLOW_EXPIRY_TIME_SECONDS = 6 * 60 * 60;

  public static final String HELIX_JOB_STOP_TIMEOUT_SECONDS = GOBBLIN_CLUSTER_PREFIX + "helix.job.stopTimeoutSeconds";
  public static final long DEFAULT_HELIX_JOB_STOP_TIMEOUT_SECONDS = 10L;

  public static final String TASK_RUNNER_SUITE_BUILDER = GOBBLIN_CLUSTER_PREFIX + "taskRunnerSuite.builder";

  public static final String HELIX_JOB_NAME_KEY = GOBBLIN_CLUSTER_PREFIX + "helixJobName";

  // NOTE: the following timeout keys are not prefixed with GOBBLIN_CLUSTER_PREFIX (preserved as-is).
  public static final String HELIX_JOB_TIMEOUT_ENABLED_KEY = "helix.job.timeout.enabled";
  public static final String DEFAULT_HELIX_JOB_TIMEOUT_ENABLED = "false";
  public static final String HELIX_JOB_TIMEOUT_SECONDS = "helix.job.timeout.seconds";
  public static final String DEFAULT_HELIX_JOB_TIMEOUT_SECONDS = "10800";

  public static final String HELIX_TASK_NAME_KEY = GOBBLIN_CLUSTER_PREFIX + "helixTaskName";
  public static final String HELIX_TASK_TIMEOUT_SECONDS = "helix.task.timeout.seconds";
  public static final String HELIX_TASK_MAX_ATTEMPTS_KEY = "helix.task.maxAttempts";

  public static final String HELIX_WORKFLOW_SUBMISSION_TIMEOUT_SECONDS =
      GOBBLIN_CLUSTER_PREFIX + "workflowSubmissionTimeoutSeconds";
  public static final long DEFAULT_HELIX_WORKFLOW_SUBMISSION_TIMEOUT_SECONDS = 300;

  public static final String HELIX_WORKFLOW_DELETE_TIMEOUT_SECONDS =
      GOBBLIN_CLUSTER_PREFIX + "workflowDeleteTimeoutSeconds";
  public static final long DEFAULT_HELIX_WORKFLOW_DELETE_TIMEOUT_SECONDS = 300;

  public static final String HELIX_WORKFLOW_LISTING_TIMEOUT_SECONDS =
      GOBBLIN_CLUSTER_PREFIX + "workflowListingTimeoutSeconds";
  public static final long DEFAULT_HELIX_WORKFLOW_LISTING_TIMEOUT_SECONDS = 60;

  public static final String CLEAN_ALL_DIST_JOBS = GOBBLIN_CLUSTER_PREFIX + "bootup.clean.dist.jobs";
  public static final boolean DEFAULT_CLEAN_ALL_DIST_JOBS = false;

  public static final String NON_BLOCKING_PLANNING_JOB_ENABLED =
      GOBBLIN_CLUSTER_PREFIX + "nonBlocking.planningJob.enabled";
  public static final boolean DEFAULT_NON_BLOCKING_PLANNING_JOB_ENABLED = false;

  public static final String KILL_DUPLICATE_PLANNING_JOB = GOBBLIN_CLUSTER_PREFIX + "kill.duplicate.planningJob";
  public static final boolean DEFAULT_KILL_DUPLICATE_PLANNING_JOB = true;

  public static final String CANCEL_RUNNING_JOB_ON_DELETE = GOBBLIN_CLUSTER_PREFIX + "job.cancelRunningJobOnDelete";
  public static final String DEFAULT_CANCEL_RUNNING_JOB_ON_DELETE = "false";

  // Job Execution ID for Helix jobs is inferred from Flow Execution IDs, but there are scenarios in earlyStop jobs
  // where this behavior needs to be avoided due to concurrent planning and actual jobs sharing the same execution ID.
  public static final String USE_GENERATED_JOBEXECUTION_IDS =
      GOBBLIN_CLUSTER_PREFIX + "job.useGeneratedJobExecutionIds";

  // By default we cancel job by calling helix stop API. In some cases, jobs just hang in STOPPING state and prevent
  // new jobs from being launched. This config gives an option to cancel jobs by calling the Delete API instead.
  // Directly deleting a Helix workflow is safe in the Gobblin world, as a Gobblin job is stateless for Helix since
  // Gobblin implements its own state store.
  public static final String CANCEL_HELIX_JOB_BY_DELETE = GOBBLIN_CLUSTER_PREFIX + "job.cancelHelixJobByDelete";
  public static final boolean DEFAULT_CANCEL_HELIX_JOB_BY_DELETE = false;

  public static final String HELIX_JOB_STOPPING_STATE_TIMEOUT_SECONDS =
      GOBBLIN_CLUSTER_PREFIX + "job.stoppingStateTimeoutSeconds";
  public static final long DEFAULT_HELIX_JOB_STOPPING_STATE_TIMEOUT_SECONDS = 300;

  public static final String CONTAINER_HEALTH_METRICS_SERVICE_ENABLED =
      GOBBLIN_CLUSTER_PREFIX + "container.health.metrics.service.enabled";
  public static final boolean DEFAULT_CONTAINER_HEALTH_METRICS_SERVICE_ENABLED = false;

  // Config to enable/disable container "suicide" on health check failures. To be used in execution modes where the
  // exiting container can be replaced with another container, e.g. Gobblin-on-Yarn mode.
  public static final String CONTAINER_EXIT_ON_HEALTH_CHECK_FAILURE_ENABLED =
      GOBBLIN_CLUSTER_PREFIX + "container.exitOnHealthCheckFailure";
  public static final boolean DEFAULT_CONTAINER_EXIT_ON_HEALTH_CHECK_FAILURE_ENABLED = false;

  // Config to specify the resource requirement for each Gobblin job run, so that helix tasks within this job will
  // be assigned to containers with desired resource. This config needs to cooperate with a helix job tag, so that
  // the helix cluster knows how to distribute tasks to the correct containers.
  public static final String HELIX_JOB_CONTAINER_MEMORY_MBS = GOBBLIN_CLUSTER_PREFIX + "job.container.memory.mbs";
  public static final String HELIX_JOB_CONTAINER_CORES = GOBBLIN_CLUSTER_PREFIX + "job.container.cores";

  // Config to enable/disable reuse of an existing Helix cluster.
  public static final String HELIX_CLUSTER_OVERWRITE_KEY = GOBBLIN_CLUSTER_PREFIX + "helix.overwrite";
  public static final boolean DEFAULT_HELIX_CLUSTER_OVERWRITE = true;

  // Config to enable/disable cluster creation. Should be set to false if Helix-as-a-Service is used to manage
  // the cluster.
  public static final String IS_HELIX_CLUSTER_MANAGED = GOBBLIN_CLUSTER_PREFIX + "isHelixClusterManaged";
  public static final boolean DEFAULT_IS_HELIX_CLUSTER_MANAGED = false;

  public static final String HADOOP_CONFIG_OVERRIDES_PREFIX = GOBBLIN_CLUSTER_PREFIX + "hadoop.inject";

  // Configurations that will be set dynamically when a GobblinTaskRunner/GobblinHelixTask are instantiated.
  public static final String GOBBLIN_HELIX_PREFIX = "gobblin.helix.";
  public static final String HELIX_JOB_ID_KEY = GOBBLIN_HELIX_PREFIX + "jobId";
  public static final String HELIX_TASK_ID_KEY = GOBBLIN_HELIX_PREFIX + "taskId";
  public static final String HELIX_PARTITION_ID_KEY = GOBBLIN_HELIX_PREFIX + "partitionId";
  public static final String TASK_RUNNER_HOST_NAME_KEY = GOBBLIN_HELIX_PREFIX + "hostName";
  public static final String CONTAINER_ID_KEY = GOBBLIN_HELIX_PREFIX + "containerId";

  public static final String GOBBLIN_CLUSTER_SYSTEM_PROPERTY_PREFIX = GOBBLIN_CLUSTER_PREFIX + "sysProps";

  public static final String HELIX_JOB_SCHEDULING_THROTTLE_ENABLED_KEY = "helix.job.scheduling.throttle.enabled";
  public static final boolean DEFAULT_HELIX_JOB_SCHEDULING_THROTTLE_ENABLED_KEY = false;

  public static final String HELIX_JOB_SCHEDULING_THROTTLE_TIMEOUT_SECONDS_KEY =
      "helix.job.scheduling.throttle.timeout.seconds";
  // Fixed: removed a stray second semicolon that followed this initializer.
  public static final long DEFAULT_HELIX_JOB_SCHEDULING_THROTTLE_TIMEOUT_SECONDS_KEY =
      Duration.ofMinutes(40).getSeconds();

  /** Constants holder; not meant to be instantiated. */
  private GobblinClusterConfigurationKeys() {
  }
}
2,263
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/event/CancelJobConfigArrivalEvent.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.cluster.event; public class CancelJobConfigArrivalEvent { private final String jobUri; public CancelJobConfigArrivalEvent(String jobUri) { this.jobUri = jobUri; } /** * Get the job uri. * * @return the job uri */ public String getJoburi() { return this.jobUri; } }
2,264
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/event/DeleteJobConfigArrivalEvent.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.cluster.event;

import java.util.Properties;

import org.apache.gobblin.annotation.Alpha;

/**
 * A type of events for the deletion of a job configuration to be used with a
 * {@link com.google.common.eventbus.EventBus}.
 */
@Alpha
public class DeleteJobConfigArrivalEvent {

  private final String jobName;
  private final Properties jobConfig;

  public DeleteJobConfigArrivalEvent(String jobName, Properties jobConfig) {
    this.jobName = jobName;
    // Snapshot the config so later mutations by the caller cannot leak into this event;
    // a null input is tolerated and yields an empty Properties.
    Properties snapshot = new Properties();
    if (jobConfig != null) {
      snapshot.putAll(jobConfig);
    }
    this.jobConfig = snapshot;
  }

  /**
   * Get the job name.
   *
   * @return the job name
   */
  public String getJobName() {
    return this.jobName;
  }

  /**
   * Get the job config in a {@link Properties} object.
   *
   * @return the job config in a {@link Properties} object
   */
  public Properties getJobConfig() {
    return this.jobConfig;
  }
}
2,265
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/event/UpdateJobConfigArrivalEvent.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.cluster.event;

import java.util.Properties;

import org.apache.gobblin.annotation.Alpha;

/**
 * A type of events for the update of a job configuration to be used with a
 * {@link com.google.common.eventbus.EventBus}.
 */
@Alpha
public class UpdateJobConfigArrivalEvent {

  private final String jobName;
  private final Properties jobConfig;

  /**
   * @param jobName name of the job whose configuration was updated
   * @param jobConfig the updated configuration; defensively copied, may be {@code null}
   */
  public UpdateJobConfigArrivalEvent(String jobName, Properties jobConfig) {
    this.jobName = jobName;
    this.jobConfig = new Properties();
    // Null-check added for consistency with DeleteJobConfigArrivalEvent; the original
    // threw a NullPointerException when jobConfig was null.
    if (null != jobConfig) {
      this.jobConfig.putAll(jobConfig);
    }
  }

  /**
   * Get the job name.
   *
   * @return the job name
   */
  public String getJobName() {
    return this.jobName;
  }

  /**
   * Get the job config in a {@link Properties} object.
   *
   * @return the job config in a {@link Properties} object
   */
  public Properties getJobConfig() {
    return this.jobConfig;
  }
}
2,266
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/event/ClusterManagerShutdownRequest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.cluster.event;

import org.apache.gobblin.annotation.Alpha;

/**
 * A dummy (marker) class representing an ApplicationMaster shutdown request to be used with a
 * {@link com.google.common.eventbus.EventBus}. It carries no payload: posting an instance is
 * the signal itself.
 *
 * @author Yinan Li
 */
@Alpha
public class ClusterManagerShutdownRequest {
}
2,267
0
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster
Create_ds/gobblin/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/event/NewJobConfigArrivalEvent.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.cluster.event;

import java.util.Properties;

import org.apache.gobblin.annotation.Alpha;

/**
 * A type of events for the arrival of a new job configuration to be used with a
 * {@link com.google.common.eventbus.EventBus}.
 *
 * @author Yinan Li
 */
@Alpha
public class NewJobConfigArrivalEvent {

  private final String jobName;
  private final Properties jobConfig;

  /**
   * @param jobName name of the newly arrived job
   * @param jobConfig the job configuration; defensively copied, may be {@code null}
   */
  public NewJobConfigArrivalEvent(String jobName, Properties jobConfig) {
    this.jobName = jobName;
    this.jobConfig = new Properties();
    // Null-check added for consistency with DeleteJobConfigArrivalEvent; the original
    // threw a NullPointerException when jobConfig was null.
    if (null != jobConfig) {
      this.jobConfig.putAll(jobConfig);
    }
  }

  /**
   * Get the job name.
   *
   * @return the job name
   */
  public String getJobName() {
    return this.jobName;
  }

  /**
   * Get the job config in a {@link Properties} object.
   *
   * @return the job config in a {@link Properties} object
   */
  public Properties getJobConfig() {
    return this.jobConfig;
  }
}
2,268
0
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/util
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/util/commit/SetPermissionCommitStepTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.util.commit;

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;

import org.apache.gobblin.data.management.copy.OwnerAndPermission;

/**
 * Test for {@link SetPermissionCommitStep}.
 */
@Test(groups = { "gobblin.commit" })
public class SetPermissionCommitStepTest {

  private static final String ROOT_DIR = "set-permission-commit-step-test";

  private FileSystem fs;
  private SetPermissionCommitStep step;
  Path dir1;
  FsPermission permission = new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL);

  @BeforeClass
  public void setUp() throws IOException {
    this.fs = FileSystem.getLocal(new Configuration());

    // Start from a clean slate in case a previous run left state behind.
    Path root = new Path(ROOT_DIR);
    this.fs.delete(root, true);

    dir1 = new Path(ROOT_DIR, "dir1");
    this.fs.mkdirs(dir1);

    // Map the test directory to the owner/group/permission the step should apply.
    OwnerAndPermission desired = new OwnerAndPermission("owner", "group", permission);
    Map<String, OwnerAndPermission> permissionsByPath = new HashMap<>();
    permissionsByPath.put(dir1.toString(), desired);

    this.step = new SetPermissionCommitStep(this.fs, permissionsByPath, new Properties());
  }

  @AfterClass
  public void tearDown() throws IOException {
    this.fs.delete(new Path(ROOT_DIR), true);
  }

  @Test
  public void testExecute() throws IOException {
    // Precondition: the freshly-created directory does not yet carry the target permission.
    Assert.assertNotEquals(this.fs.getFileStatus(dir1).getPermission(), permission);

    this.step.execute();

    Assert.assertEquals(this.fs.getFileStatus(dir1).getPermission(), permission);
  }
}
2,269
0
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/util
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/util/request_allocation/SimpleHiveDatasetTieringPrioritizerTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.util.request_allocation;

import java.util.Properties;

import org.apache.hadoop.hive.ql.metadata.Table;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.Test;

import org.apache.gobblin.data.management.copy.hive.HiveDataset;
import org.apache.gobblin.data.management.partition.CopyableDatasetRequestor;

/**
 * Unit test for {@link SimpleHiveDatasetTieringPrioritizer}: verifies the relative ordering of
 * dataset requestors given a two-tier configuration.
 */
public class SimpleHiveDatasetTieringPrioritizerTest {

  @Test
  public void test() throws Exception {
    // Tier 0: everything under "importantdb" plus one specific table; tier 1: "adb".
    Properties tierProps = new Properties();
    tierProps.put(SimpleHiveDatasetTieringPrioritizer.TIER_KEY + ".0", "importantdb,somedb.importanttable");
    tierProps.put(SimpleHiveDatasetTieringPrioritizer.TIER_KEY + ".1", "adb");

    SimpleHiveDatasetTieringPrioritizer prioritizer = new SimpleHiveDatasetTieringPrioritizer(tierProps);

    Assert.assertEquals(prioritizer.compareRequestors(getRequestor("importantdb", "tablea"),
        getRequestor("importantdb", "tableb")), 0);
    Assert.assertEquals(prioritizer.compareRequestors(getRequestor("importantdb", "tablea"),
        getRequestor("otherdb", "tableb")), -1);
    Assert.assertEquals(prioritizer.compareRequestors(getRequestor("somedb", "importanttable"),
        getRequestor("importantdb", "tableb")), 0);
    Assert.assertEquals(prioritizer.compareRequestors(getRequestor("somedb", "importanttable"),
        getRequestor("somedb", "tableb")), -1);
    Assert.assertEquals(prioritizer.compareRequestors(getRequestor("adb", "tablea"),
        getRequestor("importantdb", "tableb")), 1);
    Assert.assertEquals(prioritizer.compareRequestors(getRequestor("adb", "tablea"),
        getRequestor("somedb", "tableb")), -1);
  }

  /** Builds a mock requestor whose dataset resolves to the given Hive db/table name. */
  private CopyableDatasetRequestor getRequestor(String dbName, String tableName) {
    CopyableDatasetRequestor mockRequestor = Mockito.mock(CopyableDatasetRequestor.class);
    HiveDataset mockDataset = Mockito.mock(HiveDataset.class);

    Table hiveTable = new Table(new org.apache.hadoop.hive.metastore.api.Table());
    hiveTable.setDbName(dbName);
    hiveTable.setTableName(tableName);

    Mockito.when(mockDataset.getTable()).thenReturn(hiveTable);
    Mockito.when(mockRequestor.getDataset()).thenReturn(mockDataset);
    return mockRequestor;
  }
}
2,270
0
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/util
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/util/test/RetentionTestHelper.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.util.test;

import java.io.InputStream;
import java.util.Map.Entry;
import java.util.Properties;

import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import com.google.common.base.Optional;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;

import org.apache.gobblin.config.client.ConfigClient;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.data.management.retention.DatasetCleaner;
import org.apache.gobblin.data.management.retention.dataset.CleanableDataset;
import org.apache.gobblin.data.management.retention.dataset.CleanableDatasetBase;
import org.apache.gobblin.data.management.retention.profile.MultiCleanableDatasetFinder;
import org.apache.gobblin.dataset.Dataset;
import org.apache.gobblin.dataset.DatasetsFinder;
import org.apache.gobblin.util.PathUtils;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;

import static org.mockito.Mockito.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

/**
 * Helper methods for Retention integration tests.
 */
public class RetentionTestHelper {

  /**
   *
   * Does gobblin retention for test data. {@link DatasetCleaner} which does retention in production can not be
   * directly called as we need to resolve some runtime properties like ${testNameTempPath}. This directory contains
   * all the setup data created for a test by {@link RetentionTestDataGenerator#setup()}. It is unique for each test.
   * The default {@link ConfigClient} used by {@link DatasetCleaner} connects to config store configs. We need to
   * provide a mock {@link ConfigClient} since the configs are in classpath and not on config store.
   *
   * @param retentionConfigClasspathResource this is the same jobProps/config files used while running a real
   *        retention job
   * @param additionalJobPropsClasspathResource optional classpath resource with extra job properties to overlay
   * @param testNameTempPath temp path for this test where test data is generated
   */
  public static void clean(FileSystem fs, Path retentionConfigClasspathResource,
      Optional<Path> additionalJobPropsClasspathResource, Path testNameTempPath) throws Exception {

    // Load any extra job properties from the classpath, if provided.
    Properties additionalJobProps = new Properties();
    if (additionalJobPropsClasspathResource.isPresent()) {
      try (final InputStream stream = RetentionTestHelper.class.getClassLoader()
          .getResourceAsStream(additionalJobPropsClasspathResource.get().toString())) {
        additionalJobProps.load(stream);
      }
    }

    if (retentionConfigClasspathResource.getName().endsWith(".job")) {
      // ".job" resource: properties-based retention. Load the job file and substitute the
      // ${testNameTempPath} placeholder with this test's temp directory.
      Properties jobProps = new Properties();
      try (final InputStream stream = RetentionTestHelper.class.getClassLoader()
          .getResourceAsStream(retentionConfigClasspathResource.toString())) {
        jobProps.load(stream);
        for (Entry<Object, Object> entry : jobProps.entrySet()) {
          jobProps.put(entry.getKey(),
              StringUtils.replace((String) entry.getValue(), "${testNameTempPath}", testNameTempPath.toString()));
        }
      }

      MultiCleanableDatasetFinder finder = new MultiCleanableDatasetFinder(fs, jobProps);
      for (Dataset dataset : finder.findDatasets()) {
        ((CleanableDataset) dataset).clean();
      }
    } else {
      // Typesafe-config based retention: resolve the config with testNameTempPath injected
      // (scheme/authority stripped so substitution yields a plain path).
      Config testConfig = ConfigFactory.parseResources(retentionConfigClasspathResource.toString())
          .withFallback(ConfigFactory.parseMap(ImmutableMap.of("testNameTempPath",
              PathUtils.getPathWithoutSchemeAndAuthority(testNameTempPath).toString()))).resolve();

      // Mock the config client so every config lookup returns the resolved classpath config
      // instead of hitting a real config store.
      ConfigClient client = mock(ConfigClient.class);
      when(client.getConfig(any(String.class))).thenReturn(testConfig);

      Properties jobProps = new Properties();
      jobProps.setProperty(CleanableDatasetBase.SKIP_TRASH_KEY, Boolean.toString(true));
      jobProps.setProperty(ConfigurationKeys.CONFIG_MANAGEMENT_STORE_URI, "dummy");
      jobProps.setProperty(ConfigurationKeys.CONFIG_MANAGEMENT_STORE_ENABLED, "true");
      jobProps.putAll(additionalJobProps);

      // Instantiate the configured dataset finder reflectively, trying the 4-arg constructor
      // (fs, props, config, client) first and falling back to the 3-arg (fs, props, client) one.
      @SuppressWarnings("unchecked")
      DatasetsFinder<CleanableDataset> finder =
          (DatasetsFinder<CleanableDataset>) GobblinConstructorUtils.invokeFirstConstructor(
              Class.forName(testConfig.getString(MultiCleanableDatasetFinder.DATASET_FINDER_CLASS_KEY)),
              ImmutableList.of(fs, jobProps, testConfig, client), ImmutableList.of(fs, jobProps, client));

      for (CleanableDataset dataset : finder.findDatasets()) {
        dataset.clean();
      }
    }
  }

  /** Convenience overload of {@link #clean(FileSystem, Path, Optional, Path)} with no additional job properties. */
  public static void clean(FileSystem fs, Path retentionConfigClasspathResource, Path testNameTempPath)
      throws Exception {
    clean(fs, retentionConfigClasspathResource, Optional.<Path>absent(), testNameTempPath);
  }
}
2,271
0
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/util
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/util/test/RetentionTestDataGenerator.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.util.test; import java.io.File; import java.io.IOException; import java.util.Collections; import java.util.Comparator; import java.util.List; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.joda.time.DateTimeUtils; import org.joda.time.DateTimeUtils.MillisProvider; import org.joda.time.DateTimeZone; import org.joda.time.format.DateTimeFormat; import org.joda.time.format.DateTimeFormatter; import org.testng.Assert; import com.typesafe.config.Config; import com.typesafe.config.ConfigFactory; import org.apache.gobblin.configuration.ConfigurationKeys; import org.apache.gobblin.util.PathUtils; /** * A class to setup files and folders for a retention test. * Class reads a setup-validate.conf file at <code>testSetupConfPath</code> to create data for a retention test. * The config file needs to be in HOCON and parsed using {@link ConfigFactory}. * All the paths listed at {@link #TEST_DATA_CREATE_KEY} in the config file will be created under a <code>testTempDirPath</code>. 
* <br> * After retention job is run on this data, the {@link #validate()} method can be used to validate deleted files and retained files * listed in {@link #TEST_DATA_VALIDATE_DELETED_KEY} and {@link #TEST_DATA_VALIDATE_RETAINED_KEY} respectively. * <br> * Below is the format of the config file this class parses. * <br> * <pre> * gobblin.test : { // Time the system clock is set before starting the test currentTime : "02/19/2016 11:00:00" // Paths to create for the test //{path: path of test file in the dataset, modTime: set the modification time of this path in "MM/dd/yyyy HH:mm:ss"} create : [ {path:"/user/gobblin/Dataset1/Version1", modTime:"02/10/2016 10:00:00"}, {path:"/user/gobblin/Dataset1/Version2", modTime:"02/11/2016 10:00:00"}, {path:"/user/gobblin/Dataset1/Version3", modTime:"02/12/2016 10:00:00"}, {path:"/user/gobblin/Dataset1/Version4", modTime:"02/13/2016 10:00:00"}, {path:"/user/gobblin/Dataset1/Version5", modTime:"02/14/2016 10:00:00"} ] // Validation configs to use after the test validate : { // Paths that should exist after retention is run retained : [ {path:"/user/gobblin/Dataset1/Version4", modTime:"02/13/2016 10:00:00"}, {path:"/user/gobblin/Dataset1/Version5", modTime:"02/14/2016 10:00:00"} ] // Paths that should be deleted after retention is run deleted : [ {path:"/user/gobblin/Dataset1/Version1", modTime:"02/10/2016 10:00:00"}, {path:"/user/gobblin/Dataset1/Version2", modTime:"02/11/2016 10:00:00"}, {path:"/user/gobblin/Dataset1/Version3", modTime:"02/12/2016 10:00:00"} ] } } * </pre> */ public class RetentionTestDataGenerator { private static final String DATA_GENERATOR_KEY = "gobblin.test"; private static final String DATA_GENERATOR_PREFIX = DATA_GENERATOR_KEY + "."; private static final String TEST_CURRENT_TIME_KEY = DATA_GENERATOR_PREFIX + "currentTime"; private static final String TEST_DATA_CREATE_KEY = DATA_GENERATOR_PREFIX + "create"; private static final String TEST_DATA_VALIDATE_RETAINED_KEY = DATA_GENERATOR_PREFIX + 
"validate.retained"; private static final String TEST_DATA_VALIDATE_DELETED_KEY = DATA_GENERATOR_PREFIX + "validate.deleted"; private static final String TEST_DATA_VALIDATE_PERMISSIONS_KEY = DATA_GENERATOR_PREFIX + "validate.permissions"; private static final String TEST_DATA_PATH_LOCAL_KEY = "path"; private static final String TEST_DATA_MOD_TIME_LOCAL_KEY = "modTime"; private static final String TEST_DATA_PERMISSIONS_KEY = "permission"; private static final DateTimeFormatter FORMATTER = DateTimeFormat. forPattern("MM/dd/yyyy HH:mm:ss").withZone(DateTimeZone.forID(ConfigurationKeys.PST_TIMEZONE_NAME)); private final Path testTempDirPath; private final FileSystem fs; private final Config setupConfig; /** * @param testTempDirPath under which all test files are created on the FileSystem * @param testSetupConfPath setup config file path in classpath */ public RetentionTestDataGenerator(Path testTempDirPath, Path testSetupConfPath, FileSystem fs) { this.fs = fs; this.testTempDirPath = testTempDirPath; this.setupConfig = ConfigFactory.parseResources(PathUtils.getPathWithoutSchemeAndAuthority(testSetupConfPath).toString()); if (!this.setupConfig.hasPath(DATA_GENERATOR_KEY)) { throw new RuntimeException(String.format("Failed to load setup config at %s", testSetupConfPath.toString())); } } /** * Create all the paths listed under {@link #TEST_DATA_CREATE_KEY}. If a path's config has a {@link #TEST_DATA_MOD_TIME_LOCAL_KEY} specified, * the modification time of this path is updated to this value. */ public void setup() throws IOException { if (this.setupConfig.hasPath(TEST_CURRENT_TIME_KEY)) { DateTimeUtils.setCurrentMillisProvider(new FixedThreadLocalMillisProvider(FORMATTER.parseDateTime( setupConfig.getString(TEST_CURRENT_TIME_KEY)).getMillis())); } List<? 
extends Config> createConfigs = setupConfig.getConfigList(TEST_DATA_CREATE_KEY); Collections.sort(createConfigs, new Comparator<Config>() { @Override public int compare(Config o1, Config o2) { return o1.getString(TEST_DATA_PATH_LOCAL_KEY).compareTo(o2.getString(TEST_DATA_PATH_LOCAL_KEY)); } }); for (Config fileToCreate : createConfigs) { Path fullFilePath = new Path(testTempDirPath, PathUtils.withoutLeadingSeparator(new Path(fileToCreate .getString(TEST_DATA_PATH_LOCAL_KEY)))); if (!this.fs.mkdirs(fullFilePath)) { throw new RuntimeException("Failed to create test file " + fullFilePath); } if (fileToCreate.hasPath(TEST_DATA_MOD_TIME_LOCAL_KEY)) { File file = new File(PathUtils.getPathWithoutSchemeAndAuthority(fullFilePath).toString()); boolean modifiedFile = file.setLastModified(FORMATTER.parseMillis(fileToCreate.getString(TEST_DATA_MOD_TIME_LOCAL_KEY))); if (!modifiedFile) { throw new IOException(String.format("Unable to set the last modified time for file %s!", file)); } } if (fileToCreate.hasPath(TEST_DATA_PERMISSIONS_KEY)) { this.fs.setPermission(fullFilePath, new FsPermission(fileToCreate.getString(TEST_DATA_PERMISSIONS_KEY))); } } } /** * Validate that all paths in {@link #TEST_DATA_VALIDATE_DELETED_KEY} are deleted and * all paths in {@link #TEST_DATA_VALIDATE_RETAINED_KEY} still exist */ public void validate() throws IOException { List<? extends Config> retainedConfigs = setupConfig.getConfigList(TEST_DATA_VALIDATE_RETAINED_KEY); for (Config retainedConfig : retainedConfigs) { Path fullFilePath = new Path(testTempDirPath, PathUtils.withoutLeadingSeparator(new Path(retainedConfig .getString(TEST_DATA_PATH_LOCAL_KEY)))); Assert.assertTrue(this.fs.exists(fullFilePath), String.format("%s should not be deleted", fullFilePath.toString())); } List<? 
extends Config> deletedConfigs = setupConfig.getConfigList(TEST_DATA_VALIDATE_DELETED_KEY); for (Config retainedConfig : deletedConfigs) { Path fullFilePath = new Path(testTempDirPath, PathUtils.withoutLeadingSeparator(new Path(retainedConfig .getString(TEST_DATA_PATH_LOCAL_KEY)))); Assert.assertFalse(this.fs.exists(fullFilePath), String.format("%s should be deleted", fullFilePath.toString())); } // Validate permissions if (setupConfig.hasPath(TEST_DATA_VALIDATE_PERMISSIONS_KEY)) { List<? extends Config> permissionsConfigs = setupConfig.getConfigList(TEST_DATA_VALIDATE_PERMISSIONS_KEY); for (Config permissionsConfig : permissionsConfigs) { Path fullFilePath = new Path(testTempDirPath, PathUtils.withoutLeadingSeparator(new Path(permissionsConfig .getString(TEST_DATA_PATH_LOCAL_KEY)))); Assert.assertEquals(this.fs.getFileStatus(fullFilePath).getPermission(), new FsPermission(permissionsConfig.getString(TEST_DATA_PERMISSIONS_KEY)), String.format("Permissions check failed for %s", fullFilePath)); } } this.cleanup(); } public void cleanup() throws IOException { DateTimeUtils.setCurrentMillisSystem(); if (this.fs.exists(testTempDirPath)) { if (!this.fs.delete(testTempDirPath, true)) { throw new IOException("Failed to clean up path " + this.testTempDirPath); } } } /** * A Joda time {@link MillisProvider} used provide a mock fixed current time. * Needs to be thread local as TestNg tests may run in parallel */ public static class FixedThreadLocalMillisProvider implements MillisProvider { private ThreadLocal<Long> currentTimeThreadLocal = new ThreadLocal<Long>() { @Override protected Long initialValue() { return System.currentTimeMillis(); } }; public FixedThreadLocalMillisProvider(Long millis) { currentTimeThreadLocal.set(millis); } @Override public long getMillis() { return currentTimeThreadLocal.get(); } } }
2,272
0
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/runtime
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/runtime/embedded/EmbeddedGobblinDistcpTest.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.runtime.embedded; import java.io.File; import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; import java.sql.SQLException; import java.sql.Statement; import java.util.HashMap; import java.util.Map; import java.util.Set; import java.util.concurrent.TimeUnit; import org.apache.avro.Schema; import org.apache.avro.file.DataFileWriter; import org.apache.avro.generic.GenericData; import org.apache.avro.generic.GenericDatumWriter; import org.apache.avro.generic.GenericRecord; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.HiveMetaStoreClient; import org.apache.hadoop.hive.metastore.IMetaStoreClient; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.session.SessionState; import org.testng.Assert; import org.testng.annotations.AfterClass; import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; import com.google.api.client.util.Charsets; import com.google.common.collect.Sets; import com.google.common.io.Files; import 
com.typesafe.config.Config; import org.apache.gobblin.configuration.ConfigurationKeys; import org.apache.gobblin.converter.GobblinMetricsPinotFlattenerConverter; import org.apache.gobblin.data.management.copy.CopyConfiguration; import org.apache.gobblin.data.management.copy.CopySource; import org.apache.gobblin.data.management.copy.SchemaCheckedCopySource; import org.apache.gobblin.runtime.api.JobExecutionResult; import org.apache.gobblin.util.HiveJdbcConnector; import org.apache.gobblin.util.PathUtils; import org.apache.gobblin.util.filesystem.DataFileVersionStrategy; public class EmbeddedGobblinDistcpTest { private HiveJdbcConnector jdbcConnector; private IMetaStoreClient metaStoreClient; private static final String TEST_DB = "testdb"; private static final String TEST_TABLE = "test_table"; private static final String TARGET_PATH = "/tmp/target"; private static final String TARGET_DB = "target"; @BeforeClass public void setup() throws Exception { try { HiveConf hiveConf = new HiveConf(); // Start a Hive session in this thread and register the UDF SessionState.start(hiveConf); SessionState.get().initTxnMgr(hiveConf); metaStoreClient = new HiveMetaStoreClient(new HiveConf()); jdbcConnector = HiveJdbcConnector.newEmbeddedConnector(2); } catch (HiveException he) { throw new RuntimeException("Failed to start Hive session.", he); } catch (SQLException se) { throw new RuntimeException("Cannot initialize the jdbc-connector due to: ", se); } } @Test public void test() throws Exception { String fileName = "file"; File tmpSource = Files.createTempDir(); tmpSource.deleteOnExit(); File tmpTarget = Files.createTempDir(); tmpTarget.deleteOnExit(); File tmpFile = new File(tmpSource, fileName); tmpFile.createNewFile(); FileOutputStream os = new FileOutputStream(tmpFile); for (int i = 0; i < 100; i++) { os.write("myString".getBytes(Charsets.UTF_8)); } os.close(); Assert.assertTrue(new File(tmpSource, fileName).exists()); Assert.assertFalse(new File(tmpTarget, fileName).exists()); 
EmbeddedGobblinDistcp embedded = new EmbeddedGobblinDistcp(new Path(tmpSource.getAbsolutePath()), new Path(tmpTarget.getAbsolutePath())); embedded.setLaunchTimeout(30, TimeUnit.SECONDS); embedded.run(); Assert.assertTrue(new File(tmpSource, fileName).exists()); Assert.assertTrue(new File(tmpTarget, fileName).exists()); } @Test public void hiveTest() throws Exception { Statement statement = jdbcConnector.getConnection().createStatement(); // Start from a fresh Hive backup: No DB, no table. // Create a DB. statement.execute("CREATE database if not exists " + TEST_DB); // Create a table. String tableCreationSQL = "CREATE TABLE IF NOT EXISTS $testdb.$test_table (id int, name String)\n" + "ROW FORMAT DELIMITED\n" + "FIELDS TERMINATED BY '\\t'\n" + "LINES TERMINATED BY '\\n'\n" + "STORED AS TEXTFILE"; statement.execute(tableCreationSQL.replace("$testdb",TEST_DB).replace("$test_table", TEST_TABLE)); // Insert data String dataInsertionSQL = "INSERT INTO TABLE $testdb.$test_table VALUES (1, 'one'), (2, 'two'), (3, 'three')"; statement.execute(dataInsertionSQL.replace("$testdb",TEST_DB).replace("$test_table", TEST_TABLE)); String templateLoc = "templates/hiveDistcp.template"; // Either of the "from" or "to" will be used here since it is a Hive Distcp. 
EmbeddedGobblinDistcp embeddedHiveDistcp = new EmbeddedGobblinDistcp(templateLoc, new Path("a"), new Path("b")); embeddedHiveDistcp.setConfiguration("hive.dataset.copy.target.database", TARGET_DB); embeddedHiveDistcp.setConfiguration("hive.dataset.copy.target.table.prefixReplacement", TARGET_PATH); String dbPathTemplate = "/$testdb.db/$test_table"; String rootPathOfSourceDate = metaStoreClient.getConfigValue("hive.metastore.warehouse.dir", "") .concat(dbPathTemplate.replace("$testdb", TEST_DB).replace("$test_table",TEST_TABLE) ); embeddedHiveDistcp.setConfiguration("hive.dataset.copy.target.table.prefixToBeReplaced", rootPathOfSourceDate); embeddedHiveDistcp.run(); // Verify the table is existed in the target and file exists in the target location. metaStoreClient.tableExists(TARGET_DB, TEST_TABLE); FileSystem fs = FileSystem.getLocal(new Configuration()); fs.exists(new Path(TARGET_PATH)); } // Tearing down the Hive components from derby driver if there's anything generated through the test. @AfterClass public void hiveTearDown() throws Exception { FileSystem fs = FileSystem.getLocal(new Configuration()); Path targetPath = new Path(TARGET_PATH); if (fs.exists(targetPath)) { fs.delete(targetPath, true); } if (metaStoreClient != null) { // Clean out all tables in case there are any, to avoid db-drop failure. 
for (String tblName : metaStoreClient.getAllTables(TEST_DB)) { metaStoreClient.dropTable(TEST_DB, tblName); } if (metaStoreClient.getAllDatabases().contains(TEST_DB)) { metaStoreClient.dropDatabase(TEST_DB); } // Clean the target table and DB if (metaStoreClient.tableExists("target", TEST_TABLE)) { metaStoreClient.dropTable("target", TEST_TABLE, true, true); } if (metaStoreClient.getAllDatabases().contains(TARGET_DB)) { metaStoreClient.dropDatabase(TARGET_DB); } metaStoreClient.close(); } jdbcConnector.close(); } @Test public void testCheckSchema() throws Exception { Schema schema = null; try (InputStream is = GobblinMetricsPinotFlattenerConverter.class.getClassLoader().getResourceAsStream("avroSchemaManagerTest/expectedSchema.avsc")) { schema = new Schema.Parser().parse(is); } catch (IOException e) { e.printStackTrace(); } String fileName = "file.avro"; File tmpSource = Files.createTempDir(); tmpSource.deleteOnExit(); File tmpTarget = Files.createTempDir(); tmpTarget.deleteOnExit(); File tmpFile = new File(tmpSource, fileName); tmpFile.createNewFile(); GenericDatumWriter<GenericRecord> datumWriter = new GenericDatumWriter<>(schema); DataFileWriter<GenericRecord> dataFileWriter = new DataFileWriter<>(datumWriter); dataFileWriter.create(schema, tmpFile); for(int i = 0; i < 100; i++) { GenericRecord record = new GenericData.Record(schema); record.put("foo", i); dataFileWriter.append(record); } Assert.assertTrue(new File(tmpSource, fileName).exists()); Assert.assertFalse(new File(tmpTarget, fileName).exists()); EmbeddedGobblinDistcp embedded = new EmbeddedGobblinDistcp(new Path(tmpSource.getAbsolutePath()), new Path(tmpTarget.getAbsolutePath())); embedded.setConfiguration(CopySource.SCHEMA_CHECK_ENABLED, "true"); embedded.setLaunchTimeout(30, TimeUnit.SECONDS); embedded.setConfiguration(ConfigurationKeys.SOURCE_CLASS_KEY, SchemaCheckedCopySource.class.getName()); embedded.setConfiguration(ConfigurationKeys.AVRO_SCHEMA_CHECK_STRATEGY, 
"org.apache.gobblin.util.schema_check.AvroSchemaCheckDefaultStrategy"); //test when schema is not the expected one, the job will be aborted. embedded.setConfiguration(ConfigurationKeys.COPY_EXPECTED_SCHEMA, "{\"type\":\"record\",\"name\":\"baseRecord\",\"fields\":[{\"name\":\"foo1\",\"type\":[\"null\",\"int\"],\"doc\":\"this is for test\",\"default\":null}]}"); JobExecutionResult result = embedded.run(); Assert.assertTrue(new File(tmpSource, fileName).exists()); Assert.assertFalse(result.isSuccessful()); Assert.assertFalse(new File(tmpTarget, fileName).exists()); embedded.setConfiguration(ConfigurationKeys.COPY_EXPECTED_SCHEMA, "{\"type\":\"record\",\"name\":\"baseRecord\",\"fields\":[{\"name\":\"foo\",\"type\":[\"string\",\"int\"],\"doc\":\"this is for test\",\"default\":null}]}"); result = embedded.run(); Assert.assertTrue(new File(tmpSource, fileName).exists()); Assert.assertFalse(result.isSuccessful()); Assert.assertFalse(new File(tmpTarget, fileName).exists()); //test when schema is the expected one, the job will succeed. 
embedded.setConfiguration(ConfigurationKeys.COPY_EXPECTED_SCHEMA, "{\"type\":\"record\",\"name\":\"baseRecord\",\"fields\":[{\"name\":\"foo\",\"type\":[\"null\",\"int\"],\"doc\":\"this is for test\",\"default\":null}]}"); result = embedded.run(); Assert.assertTrue(result.isSuccessful()); Assert.assertTrue(new File(tmpSource, fileName).exists()); Assert.assertTrue(new File(tmpTarget, fileName).exists()); } @Test public void testWithVersionPreserve() throws Exception { String fileName = "file"; File tmpSource = Files.createTempDir(); tmpSource.deleteOnExit(); File tmpTarget = Files.createTempDir(); tmpTarget.deleteOnExit(); File tmpFile = new File(tmpSource, fileName); tmpFile.createNewFile(); FileOutputStream os = new FileOutputStream(tmpFile); for (int i = 0; i < 100; i++) { os.write("myString".getBytes(Charsets.UTF_8)); } os.close(); MyDataFileVersion versionStrategy = new MyDataFileVersion(); versionStrategy.setVersion(new Path(tmpFile.getAbsolutePath()), 123L); Assert.assertTrue(new File(tmpSource, fileName).exists()); Assert.assertFalse(new File(tmpTarget, fileName).exists()); EmbeddedGobblinDistcp embedded = new EmbeddedGobblinDistcp(new Path(tmpSource.getAbsolutePath()), new Path(tmpTarget.getAbsolutePath())); embedded.setLaunchTimeout(30, TimeUnit.SECONDS); embedded.setConfiguration(DataFileVersionStrategy.DATA_FILE_VERSION_STRATEGY_KEY, MyDataFileVersion.class.getName()); embedded.setConfiguration(CopyConfiguration.PRESERVE_ATTRIBUTES_KEY, "v"); embedded.run(); Assert.assertTrue(new File(tmpSource, fileName).exists()); Assert.assertTrue(new File(tmpTarget, fileName).exists()); Assert.assertEquals((long) versionStrategy.getVersion(new Path(tmpTarget.getAbsolutePath(), fileName)), 123l); } @Test public void testWithModTimePreserve() throws Exception { FileSystem fs = FileSystem.getLocal(new Configuration()); String fileName = "file"; File tmpSource = Files.createTempDir(); tmpSource.deleteOnExit(); File tmpTarget = Files.createTempDir(); 
tmpTarget.deleteOnExit(); File tmpFile = new File(tmpSource, fileName); Assert.assertTrue(tmpFile.createNewFile()); FileOutputStream os = new FileOutputStream(tmpFile); for (int i = 0; i < 100; i++) { os.write("myString".getBytes(Charsets.UTF_8)); } os.close(); long originalModTime = fs.getFileStatus(new Path(tmpFile.getPath())).getModificationTime(); Assert.assertNotNull(originalModTime); Assert.assertTrue(new File(tmpSource, fileName).exists()); Assert.assertFalse(new File(tmpTarget, fileName).exists()); EmbeddedGobblinDistcp embedded = new EmbeddedGobblinDistcp(new Path(tmpSource.getAbsolutePath()), new Path(tmpTarget.getAbsolutePath())); embedded.setLaunchTimeout(30, TimeUnit.SECONDS); embedded.setConfiguration(CopyConfiguration.PRESERVE_ATTRIBUTES_KEY, "t"); embedded.run(); Assert.assertTrue(new File(tmpSource, fileName).exists()); Assert.assertTrue(new File(tmpTarget, fileName).exists()); Assert.assertEquals(fs.getFileStatus(new Path(new File(tmpTarget, fileName).getAbsolutePath())).getModificationTime() , originalModTime); } @Test public void testWithModTimePreserveNegative() throws Exception { FileSystem fs = FileSystem.getLocal(new Configuration()); String fileName = "file_oh"; File tmpSource = Files.createTempDir(); tmpSource.deleteOnExit(); File tmpTarget = Files.createTempDir(); tmpTarget.deleteOnExit(); File tmpFile = new File(tmpSource, fileName); Assert.assertTrue(tmpFile.createNewFile()); FileOutputStream os = new FileOutputStream(tmpFile); for (int i = 0; i < 100; i++) { os.write("myString".getBytes(Charsets.UTF_8)); } os.close(); long originalModTime = fs.getFileStatus(new Path(tmpFile.getPath())).getModificationTime(); Assert.assertFalse(new File(tmpTarget, fileName).exists()); // Give a minimal gap between file creation and copy Thread.sleep(1000); // Negative case, not preserving the timestamp. 
tmpTarget.deleteOnExit(); EmbeddedGobblinDistcp embedded = new EmbeddedGobblinDistcp(new Path(tmpSource.getAbsolutePath()), new Path(tmpTarget.getAbsolutePath())); embedded.setLaunchTimeout(30, TimeUnit.SECONDS); embedded.run(); Assert.assertTrue(new File(tmpSource, fileName).exists()); Assert.assertTrue(new File(tmpTarget, fileName).exists()); long newModTime = fs.getFileStatus(new Path(new File(tmpTarget, fileName).getAbsolutePath())).getModificationTime(); Assert.assertTrue(newModTime != originalModTime); } public static class MyDataFileVersion implements DataFileVersionStrategy<Long>, DataFileVersionStrategy.DataFileVersionFactory<Long> { private static final Map<Path, Long> versions = new HashMap<>(); @Override public DataFileVersionStrategy<Long> createDataFileVersionStrategy(FileSystem fs, Config config) { return this; } @Override public Long getVersion(Path path) throws IOException { return versions.get(PathUtils.getPathWithoutSchemeAndAuthority(path)); } @Override public boolean setVersion(Path path, Long version) throws IOException { versions.put(PathUtils.getPathWithoutSchemeAndAuthority(path), version); return true; } @Override public boolean setDefaultVersion(Path path) throws IOException { return false; } @Override public Set<Characteristic> applicableCharacteristics() { return Sets.newHashSet(Characteristic.SETTABLE); } } }
2,273
0
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/ConversionHiveTestUtils.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.data.management; import java.io.IOException; import java.io.InputStream; import org.apache.avro.Schema; import org.apache.commons.io.IOUtils; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.fs.Path; import org.apache.gobblin.configuration.ConfigurationKeys; import org.apache.gobblin.configuration.WorkUnitState; import org.apache.gobblin.source.extractor.extract.LongWatermark; public class ConversionHiveTestUtils { public static Schema readSchemaFromJsonFile(String directory, String filename) throws IOException { return new Schema.Parser() .parse(ConversionHiveTestUtils.class.getClassLoader() .getResourceAsStream(StringUtils.removeEnd(directory, Path.SEPARATOR) + Path.SEPARATOR + filename)); } public static String readQueryFromFile(String directory, String filename) throws IOException { InputStream is = ConversionHiveTestUtils.class.getClassLoader() .getResourceAsStream(StringUtils.removeEnd(directory, Path.SEPARATOR) + Path.SEPARATOR + filename); return IOUtils.toString(is, "UTF-8"); } public static WorkUnitState createWus(String dbName, String tableName, long watermark) { WorkUnitState wus = new WorkUnitState(); wus.setActualHighWatermark(new 
LongWatermark(watermark)); wus.setProp(ConfigurationKeys.DATASET_URN_KEY, dbName + "@" + tableName); wus.setProp(ConfigurationKeys.JOB_ID_KEY, "jobId"); return wus; } }
2,274
0
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/util/PathUtilsTest.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.data.management.util; import org.apache.gobblin.util.PathUtils; import org.apache.hadoop.fs.Path; import org.testng.Assert; import org.testng.annotations.Test; public class PathUtilsTest { @Test public void testMergePaths() throws Exception { Path path1 = new Path("/some/path"); Path path2 = new Path("/path2/file"); Assert.assertEquals(PathUtils.mergePaths(path1, path2).toString(), "/some/path/path2/file"); } @Test public void testRelativizePath() throws Exception { Path prefix = new Path("/prefix/path"); Path suffix = new Path("suffix/elements"); Path path1 = new Path(prefix, suffix); Assert.assertEquals(PathUtils.relativizePath(path1, prefix), suffix); Path path2 = new Path("/unrelated/path/with/more/elements"); Assert.assertEquals(PathUtils.relativizePath(path2, prefix), path2); } @Test public void testIsAncestor() throws Exception { Path ancestor = new Path("/some/path"); Assert.assertTrue(PathUtils.isAncestor(ancestor, new Path(ancestor, "more/elements"))); Assert.assertTrue(PathUtils.isAncestor(ancestor, ancestor)); Assert.assertFalse(PathUtils.isAncestor(ancestor, new Path("/unrelated/path"))); Assert.assertFalse(PathUtils.isAncestor(ancestor, new 
Path("relative/path"))); Assert.assertFalse(PathUtils.isAncestor(ancestor, ancestor.getParent())); Path relativeAncestor = new Path("relative/ancestor"); Assert.assertTrue(PathUtils.isAncestor(relativeAncestor, new Path(relativeAncestor, "more/elements"))); Assert.assertTrue(PathUtils.isAncestor(relativeAncestor, relativeAncestor)); Assert.assertFalse(PathUtils.isAncestor(relativeAncestor, new Path("/unrelated/path"))); Assert.assertFalse(PathUtils.isAncestor(relativeAncestor, new Path("relative/path"))); Assert.assertFalse(PathUtils.isAncestor(relativeAncestor, relativeAncestor.getParent())); } @Test public void testIsNotAncestor() throws Exception { Path ancestor = new Path("/user/gobblin/hourly"); Assert.assertFalse(PathUtils.isAncestor(ancestor, new Path("hdfs://clus-nn01.company.com:10000/user/gobblin/"))); } @Test public void testGetPathWithoutSchemeAndAuthority() throws Exception { Path schemeAndAuthority = new Path("hdfs://example.hdfs:9000/"); Path path = new Path("/some/path"); Path fullPath = new Path(schemeAndAuthority, path); Assert.assertTrue(fullPath.toString().startsWith("hdfs")); Assert.assertEquals(PathUtils.getPathWithoutSchemeAndAuthority(fullPath), path); } @Test public void testDeepestNonGlobPath() throws Exception { Assert.assertEquals(PathUtils.deepestNonGlobPath(new Path("/path/*")), new Path("/path")); Assert.assertEquals(PathUtils.deepestNonGlobPath(new Path("/path/*/*")), new Path("/path")); Assert.assertEquals(PathUtils.deepestNonGlobPath(new Path("/path/a?b")), new Path("/path")); Assert.assertEquals(PathUtils.deepestNonGlobPath(new Path("/path/*.avro")), new Path("/path")); Assert.assertEquals(PathUtils.deepestNonGlobPath(new Path("/path/[abc]")), new Path("/path")); Assert.assertEquals(PathUtils.deepestNonGlobPath(new Path("/path/{ab,bc}")), new Path("/path")); Assert.assertEquals(PathUtils.deepestNonGlobPath(new Path("/path/*/files")), new Path("/path")); Assert.assertEquals(PathUtils.deepestNonGlobPath(new Path("/*")), new 
Path("/")); } @Test public void testRemoveExtension() throws Exception { Path path = PathUtils.removeExtension(new Path("file.txt"), ".txt"); Assert.assertEquals(path, new Path("file")); path = PathUtils.removeExtension(new Path("file.txt"), ".abc"); Assert.assertEquals(path, new Path("file.txt")); path = PathUtils.removeExtension(new Path("file.txt.gpg"), ".txt", ".gpg"); Assert.assertEquals(path, new Path("file")); path = PathUtils.removeExtension(new Path("file.txt.gpg"), ".gpg", ".txt"); Assert.assertEquals(path, new Path("file")); path = PathUtils.removeExtension(new Path("file.txt.gpg"), ".txt"); Assert.assertEquals(path, new Path("file.gpg")); path = PathUtils.removeExtension(new Path("file.txt.gpg"), ".gpg"); Assert.assertEquals(path, new Path("file.txt")); path = PathUtils.removeExtension(new Path("file"), ".txt", ".gpg"); Assert.assertEquals(path, new Path("file")); } @Test public void testAddExtension() throws Exception { Path path = PathUtils.addExtension(new Path("file"), ".txt"); Assert.assertEquals(path, new Path("file.txt")); path = PathUtils.addExtension(new Path("file.txt"), ".abc"); Assert.assertEquals(path, new Path("file.txt.abc")); path = PathUtils.addExtension(new Path("file.txt.gpg"), ".txt", ".gpg"); Assert.assertEquals(path, new Path("file.txt.gpg.txt.gpg")); path = PathUtils.addExtension(new Path("file.txt.gpg"), ".tar.gz"); Assert.assertEquals(path, new Path("file.txt.gpg.tar.gz")); } }
2,275
0
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/util/AvroSchemaCheckDefaultStrategyTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.data.management.util;

import java.io.File;

import org.apache.avro.Schema;
// Use TestNG assertions consistently; the original mixed org.junit.Assert into a TestNG suite.
import org.testng.Assert;
import org.testng.annotations.Test;

import org.apache.gobblin.util.schema_check.AvroSchemaCheckDefaultStrategy;


/**
 * Unit tests for {@link AvroSchemaCheckDefaultStrategy}.
 */
public class AvroSchemaCheckDefaultStrategyTest {

  @Test
  public void testSchemCheckStrategy() throws Exception {
    AvroSchemaCheckDefaultStrategy strategy = new AvroSchemaCheckDefaultStrategy();

    // Compatible: the expected schema differs only by a "doc" attribute.
    Schema toValidate = new Schema.Parser().parse(
        "{\"type\":\"record\",\"name\":\"baseRecord\",\"fields\":[{\"name\":\"foo\",\"type\":[\"null\",\"long\"],\"default\":null}]}");
    Schema expected = new Schema.Parser().parse(
        "{\"type\":\"record\",\"name\":\"baseRecord\",\"fields\":[{\"name\":\"foo\",\"type\":[\"null\",\"long\"],\"doc\":\"this is for test\",\"default\":null}]}");
    Assert.assertTrue(strategy.compare(expected, toValidate));

    // Incompatible: the field name differs.
    expected = new Schema.Parser().parse(
        "{\"type\":\"record\",\"name\":\"baseRecord\",\"fields\":[{\"name\":\"foo1\",\"type\":[\"null\",\"long\"],\"doc\":\"this is for test\",\"default\":null}]}");
    Assert.assertFalse(strategy.compare(expected, toValidate));

    // Incompatible: the field type changes (long vs int, long vs float).
    expected = new Schema.Parser().parse(
        "{\"type\":\"record\",\"name\":\"baseRecord\",\"fields\":[{\"name\":\"foo\",\"type\":[\"null\",\"int\"],\"doc\":\"this is for test\",\"default\":null}]}");
    Assert.assertFalse(strategy.compare(expected, toValidate));
    expected = new Schema.Parser().parse(
        "{\"type\":\"record\",\"name\":\"baseRecord\",\"fields\":[{\"name\":\"foo\",\"type\":[\"null\",\"float\"],\"doc\":\"this is for test\",\"default\":null}]}");
    Assert.assertFalse(strategy.compare(expected, toValidate));

    // Complex, compatible schemas loaded from test resource files.
    toValidate = new Schema.Parser().parse(new File(
        AvroSchemaCheckDefaultStrategy.class.getClassLoader().getResource("avroSchemaCheckStrategyTest/toValidateSchema.avsc").getFile()));
    expected = new Schema.Parser().parse(new File(
        AvroSchemaCheckDefaultStrategy.class.getClassLoader().getResource("avroSchemaCheckStrategyTest/expectedSchema.avsc").getFile()));
    Assert.assertTrue(strategy.compare(expected, toValidate));
  }
}
2,276
0
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/retention/WatermarkDatasetVersionFinderTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.data.management.retention;

import java.util.Properties;

import org.apache.hadoop.fs.Path;
import org.testng.Assert;
import org.testng.annotations.Test;

import org.apache.gobblin.data.management.retention.version.StringDatasetVersion;
import org.apache.gobblin.data.management.retention.version.finder.WatermarkDatasetVersionFinder;


/**
 * Unit tests for {@link WatermarkDatasetVersionFinder}.
 */
public class WatermarkDatasetVersionFinderTest {

  @Test
  public void testVersionParser() {
    // Without a watermark regex configured, the whole path name is the version.
    WatermarkDatasetVersionFinder finder = new WatermarkDatasetVersionFinder(null, new Properties());

    Assert.assertEquals(finder.versionClass(), StringDatasetVersion.class);
    Assert.assertEquals(finder.globVersionPattern(), new Path("*"));
    Assert.assertEquals(finder.getDatasetVersion(new Path("datasetVersion"), new Path("fullPath")).getVersion(),
        "datasetVersion");
    // The full path is what gets deleted for this version.
    Assert.assertEquals(
        finder.getDatasetVersion(new Path("datasetVersion"), new Path("fullPath")).getPathsToDelete().iterator().next(),
        new Path("fullPath"));
  }

  @Test
  public void testRegex() {
    // With a regex configured, the first capture group becomes the version string.
    Properties props = new Properties();
    props.put(WatermarkDatasetVersionFinder.DEPRECATED_WATERMARK_REGEX_KEY, "watermark-([A-Za-z]*)-[a-z]*");
    WatermarkDatasetVersionFinder finder = new WatermarkDatasetVersionFinder(null, props);

    Assert.assertEquals(finder.versionClass(), StringDatasetVersion.class);
    Assert.assertEquals(finder.globVersionPattern(), new Path("*"));
    Assert.assertEquals(
        finder.getDatasetVersion(new Path("watermark-actualVersion-test"), new Path("fullPath")).getVersion(),
        "actualVersion");
  }
}
2,277
0
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/retention/TimeBasedRetentionPolicyTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.data.management.retention;

import static org.hamcrest.MatcherAssert.assertThat;

import java.util.List;

import org.apache.commons.collections.ListUtils;
import org.apache.hadoop.fs.Path;
import org.hamcrest.Matchers;
import org.joda.time.DateTime;
import org.joda.time.DateTimeUtils;
import org.testng.annotations.AfterGroups;
import org.testng.annotations.Test;

import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;

import org.apache.gobblin.data.management.retention.policy.TimeBasedRetentionPolicy;
import org.apache.gobblin.data.management.version.TimestampedDatasetVersion;


/**
 * Unit tests for {@link TimeBasedRetentionPolicy} with ISO-8601 retention durations.
 *
 * <p>Runs in the "SystemTimeTests" group because it pins the joda-time clock to a fixed
 * instant and must restore it when the group finishes.</p>
 */
@Test(groups = { "SystemTimeTests"})
public class TimeBasedRetentionPolicyTest {

  @Test
  public void testISORetentionDuration() throws Exception {
    // Freeze "now" at 2016-02-11T10:00 so version ages are deterministic.
    DateTimeUtils.setCurrentMillisFixed(new DateTime(2016, 2, 11, 10, 0, 0, 0).getMillis());
    try {
      // 20 Days
      checkPolicy("P20D",
          ImmutableList.of(versionAt(new DateTime(2016, 1, 5, 10, 0, 0, 0)),
              versionAt(new DateTime(2016, 1, 6, 10, 0, 0, 0))),
          ImmutableList.of(versionAt(new DateTime(2016, 2, 10, 10, 0, 0, 0)),
              versionAt(new DateTime(2016, 2, 11, 10, 0, 0, 0))));

      // 2 Months
      checkPolicy("P2M",
          ImmutableList.of(versionAt(new DateTime(2015, 12, 5, 10, 0, 0, 0)),
              versionAt(new DateTime(2015, 11, 5, 10, 0, 0, 0))),
          ImmutableList.of(versionAt(new DateTime(2016, 2, 10, 10, 0, 0, 0)),
              versionAt(new DateTime(2016, 1, 10, 10, 0, 0, 0))));

      // 2 Years
      checkPolicy("P2Y",
          ImmutableList.of(versionAt(new DateTime(2014, 1, 5, 10, 0, 0, 0)),
              versionAt(new DateTime(2013, 1, 5, 10, 0, 0, 0))),
          ImmutableList.of(versionAt(new DateTime(2016, 2, 10, 10, 0, 0, 0)),
              versionAt(new DateTime(2015, 2, 10, 10, 0, 0, 0))));

      // 20 Hours
      checkPolicy("PT20H",
          ImmutableList.of(versionAt(new DateTime(2016, 2, 10, 11, 0, 0, 0)),
              versionAt(new DateTime(2016, 2, 9, 11, 0, 0, 0))),
          ImmutableList.of(versionAt(new DateTime(2016, 2, 11, 8, 0, 0, 0)),
              versionAt(new DateTime(2016, 2, 11, 9, 0, 0, 0))));
    } finally {
      // Restore the real clock even if an assertion above fails.
      DateTimeUtils.setCurrentMillisSystem();
    }
  }

  /** Builds a dataset version stamped with the given time; the path is irrelevant here. */
  private static TimestampedDatasetVersion versionAt(DateTime dt) {
    return new TimestampedDatasetVersion(dt, new Path("test"));
  }

  /**
   * Asserts that a policy with the given ISO duration deletes exactly {@code toBeDeleted}
   * and none of {@code toBeRetained}.
   */
  private void checkPolicy(String duration, List<TimestampedDatasetVersion> toBeDeleted,
      List<TimestampedDatasetVersion> toBeRetained) {
    @SuppressWarnings("unchecked")
    List<TimestampedDatasetVersion> allVersions = ListUtils.union(toBeRetained, toBeDeleted);
    List<TimestampedDatasetVersion> deletableVersions =
        Lists.newArrayList(new TimeBasedRetentionPolicy(duration).listDeletableVersions(allVersions));

    assertThat(deletableVersions, Matchers.containsInAnyOrder(toBeDeleted.toArray()));
    assertThat(deletableVersions, Matchers.not(Matchers.containsInAnyOrder(toBeRetained.toArray())));
  }

  @AfterGroups("SystemTimeTests")
  public void resetSystemCurrentTime() {
    DateTimeUtils.setCurrentMillisSystem();
  }
}
2,278
0
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/retention/FsCleanableHelperTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.data.management.retention;

import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.instanceOf;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.List;
import java.util.Properties;
import java.util.Set;

import lombok.AllArgsConstructor;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;

import com.google.common.collect.ImmutableList;
import com.google.common.collect.Sets;
import com.google.common.io.Files;
import com.typesafe.config.ConfigFactory;

import org.apache.gobblin.data.management.retention.dataset.FsCleanableHelper;
import org.apache.gobblin.data.management.trash.MockTrash;
import org.apache.gobblin.data.management.version.FileSystemDatasetVersion;
import org.apache.gobblin.dataset.FileSystemDataset;


/**
 * Unit tests for {@link FsCleanableHelper} against a local {@link FileSystem}.
 */
@Slf4j
@Test(groups = { "gobblin.data.management.retention" })
public class FsCleanableHelperTest {

  private FileSystem fs;
  // Scratch directory on the local FS; created in setup() and removed in cleanUp().
  private Path testTempPath;

  @BeforeClass
  public void setup() throws Exception {
    this.fs = FileSystem.getLocal(new Configuration());
    this.testTempPath = new Path(Files.createTempDir().getAbsolutePath(), "FsCleanableHelperTest");
    this.fs.mkdirs(this.testTempPath);
  }

  @AfterClass
  public void cleanUp() {
    try {
      this.fs.delete(this.testTempPath, true);
    } catch (Exception e) {
      // Cleanup is best-effort, but log the failure instead of swallowing it silently
      // so leftover temp directories can be diagnosed.
      log.warn("Failed to delete test directory " + this.testTempPath, e);
    }
  }

  @Test
  public void testSimulateTrash() throws Exception {
    // When simulate is enabled, the helper must use a MockTrash that performs no deletes.
    Properties props = new Properties();
    props.setProperty(FsCleanableHelper.SIMULATE_KEY, Boolean.toString(true));
    FsCleanableHelper fsCleanableHelper = new FsCleanableHelper(this.fs, props, ConfigFactory.empty(), log);
    assertThat(fsCleanableHelper.getTrash(), instanceOf(MockTrash.class));
  }

  @Test
  public void testDeleteEmptyDirs() throws Exception {
    // Skip trash so versions are deleted directly, then verify that parent
    // directories left empty by the deletion are also removed.
    Properties props = new Properties();
    props.setProperty(FsCleanableHelper.SKIP_TRASH_KEY, Boolean.toString(true));
    FsCleanableHelper fsCleanableHelper = new FsCleanableHelper(this.fs, props, ConfigFactory.empty(), log);
    FileSystemDataset fsDataset = mock(FileSystemDataset.class);
    Path datasetRoot = new Path(testTempPath, "dataset1");
    when(fsDataset.datasetRoot()).thenReturn(datasetRoot);

    // To delete
    Path deleted1 = new Path(datasetRoot, "2016/01/01/13");
    Path deleted2 = new Path(datasetRoot, "2016/01/01/14");
    Path deleted3 = new Path(datasetRoot, "2016/01/02/15");
    // Do not delete
    Path notDeleted1 = new Path(datasetRoot, "2016/01/02/16");

    this.fs.mkdirs(deleted1);
    this.fs.mkdirs(deleted2);
    this.fs.mkdirs(deleted3);
    this.fs.mkdirs(notDeleted1);

    // Make sure all paths are created
    Assert.assertTrue(this.fs.exists(deleted1));
    Assert.assertTrue(this.fs.exists(deleted2));
    Assert.assertTrue(this.fs.exists(deleted3));
    Assert.assertTrue(this.fs.exists(notDeleted1));

    List<FileSystemDatasetVersion> deletableVersions = ImmutableList.<FileSystemDatasetVersion> of(
        new MockFileSystemDatasetVersion(deleted1), new MockFileSystemDatasetVersion(deleted2),
        new MockFileSystemDatasetVersion(deleted3));

    fsCleanableHelper.clean(deletableVersions, fsDataset);

    // Verify versions are deleted
    Assert.assertFalse(this.fs.exists(deleted1));
    Assert.assertFalse(this.fs.exists(deleted2));
    Assert.assertFalse(this.fs.exists(deleted3));

    // Verify versions are not deleted
    Assert.assertTrue(this.fs.exists(notDeleted1));

    // Verify empty parent dir "2016/01/01" is deleted
    Assert.assertFalse(this.fs.exists(deleted1.getParent()));

    // Verify non empty parent dir "2016/01/02" exists
    Assert.assertTrue(this.fs.exists(notDeleted1.getParent()));
  }

  /** Minimal {@link FileSystemDatasetVersion} wrapping a single path. */
  @AllArgsConstructor
  private static class MockFileSystemDatasetVersion implements FileSystemDatasetVersion {

    @Getter
    private final Path path;

    @Override
    public Object getVersion() {
      return null;
    }

    @Override
    public int compareTo(FileSystemDatasetVersion o) {
      // Ordering is irrelevant for these tests.
      return 0;
    }

    @Override
    public Set<Path> getPaths() {
      return Sets.newHashSet(this.path);
    }
  }
}
2,279
0
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/retention/CleanableDatasetBaseTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.data.management.retention;

import java.io.IOException;
import java.util.Collection;
import java.util.List;

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.slf4j.LoggerFactory;
import org.testng.Assert;
import org.testng.annotations.Test;

import com.google.common.collect.Lists;

import static org.mockito.Mockito.*;

import org.apache.gobblin.data.management.retention.dataset.CleanableDatasetBase;
import org.apache.gobblin.data.management.retention.policy.RetentionPolicy;
import org.apache.gobblin.data.management.retention.version.DatasetVersion;
import org.apache.gobblin.data.management.retention.version.StringDatasetVersion;
import org.apache.gobblin.data.management.retention.version.finder.DatasetVersionFinder;
import org.apache.gobblin.data.management.retention.version.finder.VersionFinder;
import org.apache.gobblin.data.management.trash.TestTrash;


/**
 * Unit tests for {@link CleanableDatasetBase} using a mocked {@link FileSystem} and a
 * {@link TestTrash} that records delete operations instead of performing them.
 *
 * <p>Each test wires up a {@link DatasetImpl} whose retention policy deletes the first
 * version of the (sorted) version list, then asserts on the recorded trash operations.</p>
 */
public class CleanableDatasetBaseTest {

  @Test
  public void test() throws IOException {
    FileSystem fs = mock(FileSystem.class);

    Path datasetRoot = new Path("/test/dataset");
    DatasetVersion dataset1Version1 = new StringDatasetVersion("version1", new Path(datasetRoot, "version1"));
    DatasetVersion dataset1Version2 = new StringDatasetVersion("version2", new Path(datasetRoot, "version2"));

    when(fs.delete(any(Path.class), anyBoolean())).thenReturn(true);
    when(fs.exists(any(Path.class))).thenReturn(true);

    DatasetImpl dataset = new DatasetImpl(fs, false, false, false, false, datasetRoot);
    when(dataset.versionFinder.findDatasetVersions(dataset)).
        thenReturn(Lists.newArrayList(dataset1Version1, dataset1Version2));

    dataset.clean();

    // Exactly one version is deleted, and it is version2's path.
    Assert.assertEquals(dataset.getTrash().getDeleteOperations().size(), 1);
    Assert.assertTrue(dataset.getTrash().getDeleteOperations().get(0).getPath()
        .equals(dataset1Version2.getPathsToDelete().iterator().next()));
  }

  @Test
  public void testSkipTrash() throws IOException {
    FileSystem fs = mock(FileSystem.class);

    Path datasetRoot = new Path("/test/dataset");
    DatasetVersion dataset1Version1 = new StringDatasetVersion("version1", new Path(datasetRoot, "version1"));
    DatasetVersion dataset1Version2 = new StringDatasetVersion("version2", new Path(datasetRoot, "version2"));

    when(fs.delete(any(Path.class), anyBoolean())).thenReturn(true);
    when(fs.exists(any(Path.class))).thenReturn(true);

    // skipTrash = true; the dataset's own TestTrash must reflect that flag.
    DatasetImpl dataset = new DatasetImpl(fs, false, true, false, false, datasetRoot);
    when(dataset.versionFinder.findDatasetVersions(dataset)).
        thenReturn(Lists.newArrayList(dataset1Version1, dataset1Version2));

    dataset.clean();

    Assert.assertEquals(dataset.getTrash().getDeleteOperations().size(), 1);
    Assert.assertTrue(dataset.getTrash().getDeleteOperations().get(0).getPath()
        .equals(dataset1Version2.getPathsToDelete().iterator().next()));
    Assert.assertTrue(dataset.getTrash().isSkipTrash());
  }

  @Test
  public void testSimulate() throws IOException {
    FileSystem fs = mock(FileSystem.class);

    Path datasetRoot = new Path("/test/dataset");
    DatasetVersion dataset1Version1 = new StringDatasetVersion("version1", new Path(datasetRoot, "version1"));
    DatasetVersion dataset1Version2 = new StringDatasetVersion("version2", new Path(datasetRoot, "version2"));

    when(fs.delete(any(Path.class), anyBoolean())).thenReturn(true);
    when(fs.exists(any(Path.class))).thenReturn(true);

    // simulate = true; deletes are recorded but flagged as simulated.
    DatasetImpl dataset = new DatasetImpl(fs, true, false, false, false, datasetRoot);
    when(dataset.versionFinder.findDatasetVersions(dataset)).
        thenReturn(Lists.newArrayList(dataset1Version1, dataset1Version2));

    dataset.clean();

    Assert.assertEquals(dataset.getTrash().getDeleteOperations().size(), 1);
    Assert.assertTrue(dataset.getTrash().getDeleteOperations().get(0).getPath()
        .equals(dataset1Version2.getPathsToDelete().iterator().next()));
    Assert.assertTrue(dataset.getTrash().isSimulate());
  }

  @Test
  public void testDeleteEmptyDirectories() throws IOException {
    FileSystem fs = mock(FileSystem.class);

    Path datasetRoot = new Path("/test/dataset");
    DatasetVersion dataset1Version1 = new StringDatasetVersion("version1", new Path(datasetRoot, "parent/version1"));
    DatasetVersion dataset1Version2 = new StringDatasetVersion("version2", new Path(datasetRoot, "parent/version2"));

    when(fs.delete(any(Path.class), anyBoolean())).thenReturn(true);
    when(fs.exists(any(Path.class))).thenReturn(true);

    // deleteEmptyDirectories = true; the now-empty parent dir must be removed as well.
    DatasetImpl dataset = new DatasetImpl(fs, false, false, true, false, datasetRoot);
    when(dataset.versionFinder.findDatasetVersions(dataset)).
        thenReturn(Lists.newArrayList(dataset1Version1, dataset1Version2));
    // Report the parent directory as empty after the version is deleted.
    when(fs.listStatus(any(Path.class))).thenReturn(new FileStatus[]{});

    dataset.clean();

    Assert.assertEquals(dataset.getTrash().getDeleteOperations().size(), 1);
    Assert.assertTrue(dataset.getTrash().getDeleteOperations().get(0).getPath()
        .equals(dataset1Version2.getPathsToDelete().iterator().next()));

    // The parent is checked for emptiness once and deleted non-recursively.
    verify(fs).listStatus(dataset1Version2.getPathsToDelete().iterator().next().getParent());
    verify(fs, times(1)).listStatus(any(Path.class));
    verify(fs).delete(dataset1Version2.getPathsToDelete().iterator().next().getParent(), false);
    verify(fs, times(1)).delete(any(Path.class), eq(false));
    verify(fs, never()).delete(any(Path.class), eq(true));
  }

  /** Retention policy that always deletes the first version in the supplied list. */
  private static class DeleteFirstRetentionPolicy implements RetentionPolicy<StringDatasetVersion> {

    @Override
    public Class<? extends DatasetVersion> versionClass() {
      return StringDatasetVersion.class;
    }

    @Override
    public Collection<StringDatasetVersion> listDeletableVersions(List<StringDatasetVersion> allVersions) {
      return Lists.newArrayList(allVersions.get(0));
    }
  }

  /**
   * Concrete {@link CleanableDatasetBase} with a mocked version finder and a
   * {@link DeleteFirstRetentionPolicy}; uses {@link TestTrash} to record deletes.
   */
  private static class DatasetImpl extends CleanableDatasetBase {

    public DatasetVersionFinder versionFinder = mock(DatasetVersionFinder.class);
    public RetentionPolicy retentionPolicy = new DeleteFirstRetentionPolicy();
    public Path path;

    public DatasetImpl(FileSystem fs, boolean simulate, boolean skipTrash, boolean deleteEmptyDirectories,
        boolean deleteAsOwner, Path path) throws IOException {
      super(fs, TestTrash.propertiesForTestTrash(), simulate, skipTrash, deleteEmptyDirectories, deleteAsOwner,
          LoggerFactory.getLogger(DatasetImpl.class));
      when(versionFinder.versionClass()).thenReturn(StringDatasetVersion.class);
      this.path = path;
    }

    @Override
    public VersionFinder getVersionFinder() {
      return this.versionFinder;
    }

    @Override
    public RetentionPolicy getRetentionPolicy() {
      return this.retentionPolicy;
    }

    @Override
    public Path datasetRoot() {
      return this.path;
    }

    public TestTrash getTrash() {
      return (TestTrash) this.trash;
    }
  }
}
2,280
0
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/retention/ConfigurableCleanableDatasetTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.data.management.retention;

import java.net.URI;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.slf4j.LoggerFactory;
import org.testng.Assert;
import org.testng.annotations.Test;

import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;

import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.data.management.policy.EmbeddedRetentionSelectionPolicy;
import org.apache.gobblin.data.management.policy.NewestKSelectionPolicy;
import org.apache.gobblin.data.management.retention.dataset.ConfigurableCleanableDataset;
import org.apache.gobblin.data.management.version.FileSystemDatasetVersion;
import org.apache.gobblin.data.management.version.finder.WatermarkDatasetVersionFinder;


/**
 * Unit tests for {@link ConfigurableCleanableDataset} configuration parsing.
 */
public class ConfigurableCleanableDatasetTest {

  @Test
  public void testConfigureWithRetentionPolicy() throws Exception {
    // A legacy retention.policy config is wrapped in an EmbeddedRetentionSelectionPolicy.
    Config conf =
        ConfigFactory.parseMap(ImmutableMap.<String, String> of("gobblin.retention.version.finder.class",
            "org.apache.gobblin.data.management.version.finder.WatermarkDatasetVersionFinder",
            "gobblin.retention.retention.policy.class",
            "org.apache.gobblin.data.management.retention.policy.NewestKRetentionPolicy",
            "gobblin.retention.newestK.versions.retained", "2"));

    ConfigurableCleanableDataset<FileSystemDatasetVersion> dataset = createDataset(conf);

    Assert.assertEquals(dataset.getVersionFindersAndPolicies().get(0).getVersionSelectionPolicy().getClass(),
        EmbeddedRetentionSelectionPolicy.class);
    Assert.assertEquals(dataset.getVersionFindersAndPolicies().get(0).getVersionFinder().getClass(),
        WatermarkDatasetVersionFinder.class);
    Assert.assertEquals(dataset.isDatasetBlacklisted(), false);
  }

  @Test
  public void testConfigureWithSelectionPolicy() throws Exception {
    // A selection.policy config is used directly.
    Config conf =
        ConfigFactory.parseMap(ImmutableMap.<String, String> of("gobblin.retention.version.finder.class",
            "org.apache.gobblin.data.management.version.finder.WatermarkDatasetVersionFinder",
            "gobblin.retention.selection.policy.class",
            "org.apache.gobblin.data.management.policy.NewestKSelectionPolicy",
            "gobblin.retention.selection.newestK.versionsSelected", "2"));

    ConfigurableCleanableDataset<FileSystemDatasetVersion> dataset = createDataset(conf);

    Assert.assertEquals(dataset.getVersionFindersAndPolicies().get(0).getVersionSelectionPolicy().getClass(),
        NewestKSelectionPolicy.class);
    Assert.assertEquals(dataset.getVersionFindersAndPolicies().get(0).getVersionFinder().getClass(),
        WatermarkDatasetVersionFinder.class);
    Assert.assertEquals(dataset.isDatasetBlacklisted(), false);
  }

  @Test
  public void testConfigureWithMulitplePolicies() throws Exception {
    // Each entry under dataset.partitions yields its own finder/policy pair.
    Map<String, String> partitionConf = ImmutableMap.<String, String> of("version.finder.class",
        "org.apache.gobblin.data.management.version.finder.WatermarkDatasetVersionFinder",
        "selection.policy.class", "org.apache.gobblin.data.management.policy.NewestKSelectionPolicy",
        "selection.newestK.versionsSelected", "2");
    Config conf = ConfigFactory.parseMap(ImmutableMap.<String, List<Map<String, String>>> of(
        "gobblin.retention.dataset.partitions", ImmutableList.of(partitionConf, partitionConf)));

    ConfigurableCleanableDataset<FileSystemDatasetVersion> dataset = createDataset(conf);

    Assert.assertEquals(dataset.getVersionFindersAndPolicies().get(0).getVersionSelectionPolicy().getClass(),
        NewestKSelectionPolicy.class);
    Assert.assertEquals(dataset.getVersionFindersAndPolicies().get(0).getVersionFinder().getClass(),
        WatermarkDatasetVersionFinder.class);
    Assert.assertEquals(dataset.getVersionFindersAndPolicies().get(1).getVersionSelectionPolicy().getClass(),
        NewestKSelectionPolicy.class);
    Assert.assertEquals(dataset.getVersionFindersAndPolicies().get(1).getVersionFinder().getClass(),
        WatermarkDatasetVersionFinder.class);
    Assert.assertEquals(dataset.isDatasetBlacklisted(), false);
  }

  @Test
  public void testDatasetIsBlacklisted() throws Exception {
    Config conf =
        ConfigFactory.parseMap(ImmutableMap.<String, String> of("gobblin.retention.version.finder.class",
            "org.apache.gobblin.data.management.version.finder.WatermarkDatasetVersionFinder",
            "gobblin.retention.selection.policy.class",
            "org.apache.gobblin.data.management.policy.NewestKSelectionPolicy",
            "gobblin.retention.selection.newestK.versionsSelected", "2",
            "gobblin.retention.dataset.is.blacklisted", "true"));

    ConfigurableCleanableDataset<FileSystemDatasetVersion> dataset = createDataset(conf);

    Assert.assertEquals(dataset.isDatasetBlacklisted(), true);
  }

  /**
   * Builds a dataset on the local FS rooted at a dummy path; these tests only exercise
   * configuration parsing, never actual file operations.
   */
  private ConfigurableCleanableDataset<FileSystemDatasetVersion> createDataset(Config conf) throws Exception {
    return new ConfigurableCleanableDataset<FileSystemDatasetVersion>(
        FileSystem.get(new URI(ConfigurationKeys.LOCAL_FS_URI), new Configuration()), new Properties(),
        new Path("/someroot"), conf, LoggerFactory.getLogger(ConfigurableCleanableDatasetTest.class));
  }
}
2,281
0
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/retention/UnixTimestampVersionFinderTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.data.management.retention;

import org.apache.gobblin.data.management.retention.version.TimestampedDatasetVersion;
import org.apache.gobblin.data.management.retention.version.finder.UnixTimestampVersionFinder;
import org.apache.gobblin.data.management.retention.version.finder.WatermarkDatasetVersionFinder;

import java.util.Properties;

import org.apache.hadoop.fs.Path;
import org.joda.time.DateTime;
import org.testng.Assert;
import org.testng.annotations.Test;


/**
 * Unit tests for {@link UnixTimestampVersionFinder}.
 */
public class UnixTimestampVersionFinderTest {

  @Test
  public void test() {
    // Configure the finder to pull the unix timestamp out of names shaped like
    // "watermark-<millis>-<suffix>" via the (deprecated) watermark regex key.
    Properties properties = new Properties();
    properties.put(WatermarkDatasetVersionFinder.DEPRECATED_WATERMARK_REGEX_KEY, "watermark-([0-9]*)-[a-z]*");

    UnixTimestampVersionFinder finder = new UnixTimestampVersionFinder(null, properties);
    DateTime expectedTime = new DateTime(2015, 1, 2, 10, 15);

    // The finder produces timestamped versions and matches a single level of children.
    Assert.assertEquals(finder.versionClass(), TimestampedDatasetVersion.class);
    Assert.assertEquals(finder.globVersionPattern(), new Path("*"));

    // The millis embedded in the path name should round-trip back to the original instant.
    Assert.assertEquals(
        finder.getDatasetVersion(new Path("watermark-" + expectedTime.getMillis() + "-test"), new Path("fullPath"))
            .getDateTime(),
        expectedTime);
  }
}
2,282
0
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/retention/TimestampedDatasetVersionFinderTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.data.management.retention;

import java.util.Properties;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;

import com.google.common.io.Files;

import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.data.management.retention.version.TimestampedDatasetVersion;
import org.apache.gobblin.data.management.retention.version.finder.DateTimeDatasetVersionFinder;
import org.apache.gobblin.util.PathUtils;


/**
 * Unit tests for {@link DateTimeDatasetVersionFinder}, covering both the default
 * time zone and an explicitly configured one.
 */
public class TimestampedDatasetVersionFinderTest {

  private FileSystem fs;
  private Path testDataPathDummyPath;

  @BeforeClass
  public void setup() throws Exception {
    // A real (temp) directory is needed because the finder resolves versions
    // against an actual FileStatus.
    this.fs = FileSystem.get(new Configuration());
    this.testDataPathDummyPath = new Path(Files.createTempDir().getAbsolutePath());
    this.fs.mkdirs(this.testDataPathDummyPath);
  }

  @Test
  public void testVersionParser() throws Exception {
    Properties properties = new Properties();
    properties.put(DateTimeDatasetVersionFinder.RETENTION_DATE_TIME_PATTERN_KEY, "yyyy/MM/dd/hh/mm");

    DateTimeDatasetVersionFinder finder = new DateTimeDatasetVersionFinder(this.fs, properties);

    Assert.assertEquals(finder.versionClass(), TimestampedDatasetVersion.class);
    // One glob level per date-time pattern component.
    Assert.assertEquals(finder.globVersionPattern(), new Path("*/*/*/*/*"));

    DateTime parsed =
        finder.getDatasetVersion(new Path("2015/06/01/10/12"), this.fs.getFileStatus(testDataPathDummyPath))
            .getDateTime();

    // Without an explicit timezone the finder defaults to PST.
    Assert.assertEquals(parsed.getZone(), DateTimeZone.forID(ConfigurationKeys.PST_TIMEZONE_NAME));
    Assert.assertEquals(parsed,
        new DateTime(2015, 6, 1, 10, 12, 0, 0, DateTimeZone.forID(ConfigurationKeys.PST_TIMEZONE_NAME)));

    // The version's deletable path should point at the status it was resolved from.
    Assert.assertEquals(
        PathUtils.getPathWithoutSchemeAndAuthority(
            finder.getDatasetVersion(new Path("2015/06/01/10/12"), this.fs.getFileStatus(testDataPathDummyPath))
                .getPathsToDelete().iterator().next()),
        PathUtils.getPathWithoutSchemeAndAuthority(this.testDataPathDummyPath));
  }

  @Test
  public void testVersionParserWithTimeZone() throws Exception {
    Properties properties = new Properties();
    properties.put(DateTimeDatasetVersionFinder.RETENTION_DATE_TIME_PATTERN_KEY, "yyyy/MM/dd/hh/mm");
    // An explicitly configured timezone overrides the PST default.
    properties.put(DateTimeDatasetVersionFinder.RETENTION_DATE_TIME_PATTERN_TIMEZONE_KEY, "UTC");

    DateTimeDatasetVersionFinder finder = new DateTimeDatasetVersionFinder(this.fs, properties);

    Assert.assertEquals(finder.versionClass(), TimestampedDatasetVersion.class);
    Assert.assertEquals(finder.globVersionPattern(), new Path("*/*/*/*/*"));

    DateTime parsed =
        finder.getDatasetVersion(new Path("2015/06/01/10/12"), this.fs.getFileStatus(testDataPathDummyPath))
            .getDateTime();

    Assert.assertEquals(parsed.getZone(), DateTimeZone.forID("UTC"));
    Assert.assertEquals(parsed, new DateTime(2015, 6, 1, 10, 12, 0, 0, DateTimeZone.forID("UTC")));

    Assert.assertEquals(
        PathUtils.getPathWithoutSchemeAndAuthority(
            finder.getDatasetVersion(new Path("2015/06/01/10/12"), this.fs.getFileStatus(testDataPathDummyPath))
                .getPathsToDelete().iterator().next()),
        PathUtils.getPathWithoutSchemeAndAuthority(this.testDataPathDummyPath));
  }

  @AfterClass
  public void after() throws Exception {
    // Clean up the temp directory created in setup().
    this.fs.delete(this.testDataPathDummyPath, true);
  }
}
2,283
0
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/retention/CombineRetentionPolicyTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.data.management.retention;

import org.apache.gobblin.data.management.retention.policy.CombineRetentionPolicy;
import org.apache.gobblin.data.management.retention.test.ContainsARetentionPolicy;
import org.apache.gobblin.data.management.retention.test.ContainsBRetentionPolicy;
import org.apache.gobblin.data.management.retention.test.ContainsCRetentionPolicy;
import org.apache.gobblin.data.management.version.DatasetVersion;
import org.apache.gobblin.data.management.version.FileStatusDatasetVersion;
import org.apache.gobblin.data.management.version.StringDatasetVersion;
import org.apache.gobblin.data.management.version.TimestampedDatasetVersion;

import java.io.IOException;
import java.util.Collection;
import java.util.Properties;
import java.util.Set;
import java.util.stream.Collectors;

import org.apache.hadoop.fs.Path;
import org.testng.Assert;
import org.testng.annotations.Test;

import com.google.common.collect.Lists;
import com.google.common.collect.Sets;


/**
 * Unit tests for {@link CombineRetentionPolicy}: policy-list configuration,
 * UNION / INTERSECT combination of deletable sets, and common-superclass resolution.
 */
public class CombineRetentionPolicyTest {

  @Test
  public void testConfig() throws IOException {
    // Policies may be listed in a single comma-separated property; the first
    // entry is a short alias, the others fully-qualified class names.
    Properties properties = new Properties();
    properties.setProperty(CombineRetentionPolicy.COMBINE_RETENTION_POLICIES, "ContainsA,"
        + ContainsBRetentionPolicy.class.getCanonicalName() + "," + ContainsCRetentionPolicy.class.getCanonicalName());
    properties.setProperty(CombineRetentionPolicy.DELETE_SETS_COMBINE_OPERATION,
        CombineRetentionPolicy.DeletableCombineOperation.UNION.name());

    CombineRetentionPolicy<DatasetVersion> combined = new CombineRetentionPolicy<>(properties);

    Collection<DatasetVersion> deletable = combined.listDeletableVersions(Lists.newArrayList(
        new StringDatasetVersion("a", new Path("/")), new StringDatasetVersion("abc", new Path("/")),
        new StringDatasetVersion("abcd", new Path("/")), new StringDatasetVersion("bc", new Path("/")),
        new StringDatasetVersion("d", new Path("/"))));

    Set<String> deletableNames =
        deletable.stream().map(version -> ((StringDatasetVersion) version).getVersion()).collect(Collectors.toSet());

    Assert.assertEquals(combined.versionClass(), StringDatasetVersion.class);
    // UNION of contains-A, contains-B and contains-C leaves only "d" retained.
    Assert.assertEquals(deletable.size(), 4);
    Assert.assertEquals(deletableNames, Sets.newHashSet("abcd", "abc", "a", "bc"));
  }

  @Test
  public void testIntersect() throws IOException {
    Properties properties = new Properties();
    properties.setProperty(CombineRetentionPolicy.RETENTION_POLICIES_PREFIX + "1",
        ContainsARetentionPolicy.class.getCanonicalName());
    properties.setProperty(CombineRetentionPolicy.RETENTION_POLICIES_PREFIX + "2",
        ContainsBRetentionPolicy.class.getCanonicalName());
    properties.setProperty(CombineRetentionPolicy.RETENTION_POLICIES_PREFIX + "3",
        ContainsCRetentionPolicy.class.getCanonicalName());
    properties.setProperty(CombineRetentionPolicy.DELETE_SETS_COMBINE_OPERATION,
        CombineRetentionPolicy.DeletableCombineOperation.INTERSECT.name());

    CombineRetentionPolicy<DatasetVersion> combined = new CombineRetentionPolicy<>(properties);

    Collection<DatasetVersion> deletable = combined.listDeletableVersions(Lists.newArrayList(
        new StringDatasetVersion("a", new Path("/")), new StringDatasetVersion("abc", new Path("/")),
        new StringDatasetVersion("abcd", new Path("/")), new StringDatasetVersion("bc", new Path("/")),
        new StringDatasetVersion("d", new Path("/"))));

    Set<String> deletableNames =
        deletable.stream().map(version -> ((StringDatasetVersion) version).getVersion()).collect(Collectors.toSet());

    Assert.assertEquals(combined.versionClass(), StringDatasetVersion.class);
    // Only versions containing all of a, b and c survive the INTERSECT.
    Assert.assertEquals(deletable.size(), 2);
    Assert.assertEquals(deletableNames, Sets.newHashSet("abcd", "abc"));
  }

  @Test
  public void testUnion() throws IOException {
    Properties properties = new Properties();
    properties.setProperty(CombineRetentionPolicy.RETENTION_POLICIES_PREFIX + "1",
        ContainsARetentionPolicy.class.getCanonicalName());
    properties.setProperty(CombineRetentionPolicy.RETENTION_POLICIES_PREFIX + "2",
        ContainsBRetentionPolicy.class.getCanonicalName());
    properties.setProperty(CombineRetentionPolicy.RETENTION_POLICIES_PREFIX + "3",
        ContainsCRetentionPolicy.class.getCanonicalName());
    properties.setProperty(CombineRetentionPolicy.DELETE_SETS_COMBINE_OPERATION,
        CombineRetentionPolicy.DeletableCombineOperation.UNION.name());

    CombineRetentionPolicy<DatasetVersion> combined = new CombineRetentionPolicy<>(properties);

    Collection<DatasetVersion> deletable = combined.listDeletableVersions(Lists.newArrayList(
        new StringDatasetVersion("a", new Path("/")), new StringDatasetVersion("abc", new Path("/")),
        new StringDatasetVersion("abcd", new Path("/")), new StringDatasetVersion("bc", new Path("/")),
        new StringDatasetVersion("d", new Path("/"))));

    Set<String> deletableNames =
        deletable.stream().map(version -> ((StringDatasetVersion) version).getVersion()).collect(Collectors.toSet());

    // Any version containing a, b or c is deletable under UNION.
    Assert.assertEquals(deletable.size(), 4);
    Assert.assertEquals(deletableNames, Sets.newHashSet("abcd", "abc", "a", "bc"));
  }

  @Test
  public void testCommonSuperclass() throws IOException {
    Properties properties = new Properties();
    properties.setProperty(CombineRetentionPolicy.RETENTION_POLICIES_PREFIX + "1",
        ContainsARetentionPolicy.class.getCanonicalName());
    properties.setProperty(CombineRetentionPolicy.DELETE_SETS_COMBINE_OPERATION,
        CombineRetentionPolicy.DeletableCombineOperation.INTERSECT.name());

    // Raw type used deliberately so commonSuperclass can be probed with
    // differently-parameterized version classes.
    CombineRetentionPolicy combined = new CombineRetentionPolicy(properties);

    Assert.assertEquals(combined.commonSuperclass(StringDatasetVersion.class, StringDatasetVersion.class),
        StringDatasetVersion.class);
    Assert.assertEquals(combined.commonSuperclass(StringDatasetVersion.class, TimestampedDatasetVersion.class),
        DatasetVersion.class);
    Assert.assertEquals(combined.commonSuperclass(StringDatasetVersion.class, FileStatusDatasetVersion.class),
        StringDatasetVersion.class);
    Assert.assertEquals(combined.commonSuperclass(FileStatusDatasetVersion.class, StringDatasetVersion.class),
        StringDatasetVersion.class);
    Assert.assertEquals(combined.commonSuperclass(DatasetVersion.class, StringDatasetVersion.class),
        DatasetVersion.class);
  }
}
2,284
0
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/retention/NewestKRetentionPolicyTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.data.management.retention;

import java.util.List;
import java.util.Properties;

import org.apache.hadoop.fs.Path;
import org.testng.Assert;
import org.testng.annotations.Test;

import com.google.common.collect.Lists;

import org.apache.gobblin.data.management.retention.policy.NewestKRetentionPolicy;
import org.apache.gobblin.data.management.retention.version.DatasetVersion;
import org.apache.gobblin.data.management.retention.version.StringDatasetVersion;


/**
 * Unit test for {@link NewestKRetentionPolicy} with its default configuration.
 */
public class NewestKRetentionPolicyTest {

  @Test
  public void test() {
    // Default configuration: no properties set.
    NewestKRetentionPolicy policy = new NewestKRetentionPolicy(new Properties());

    StringDatasetVersion newest = new StringDatasetVersion("000_newest", new Path("test"));
    StringDatasetVersion middle = new StringDatasetVersion("001_mid", new Path("test"));
    StringDatasetVersion oldest = new StringDatasetVersion("002_oldest", new Path("test"));

    Assert.assertEquals(policy.versionClass(), org.apache.gobblin.data.management.version.DatasetVersion.class);

    List<DatasetVersion> versions = Lists.newArrayList();
    versions.add(newest);
    versions.add(middle);
    versions.add(oldest);

    List<DatasetVersion> deletable = Lists.newArrayList(policy.listDeletableVersions(versions));

    // Only the single version beyond the retained-newest window is deletable.
    Assert.assertEquals(deletable.size(), 1);
    Assert.assertEquals(((StringDatasetVersion) deletable.get(0)).getVersion(), "002_oldest");
  }
}
2,285
0
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/retention/DatasetVersionFinderTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.data.management.retention;

import java.io.IOException;
import java.util.List;
import java.util.Properties;

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.testng.Assert;
import org.testng.annotations.Test;

import com.google.common.collect.Lists;

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import org.apache.gobblin.data.management.retention.dataset.CleanableDataset;
import org.apache.gobblin.data.management.retention.version.DatasetVersion;
import org.apache.gobblin.data.management.retention.version.StringDatasetVersion;
import org.apache.gobblin.data.management.retention.version.finder.DatasetVersionFinder;
import org.apache.gobblin.dataset.FileSystemDataset;


/**
 * Unit test for {@link DatasetVersionFinder}: verifies that globbed children of a
 * dataset root are surfaced as versions, using a mocked {@link FileSystem}.
 */
public class DatasetVersionFinderTest {

  @Test
  public void test() throws IOException {
    FileSystem fs = mock(FileSystem.class);

    String root = "/path/to/dataset";
    String versionName1 = "datasetVersion1";
    String versionName2 = "datasetVersion2";

    Path rootPath = new Path(root);
    Path globbedPath = new Path(root + "/*");
    Path versionPath1 = new Path(root + "/" + versionName1);
    Path versionPath2 = new Path(root + "/" + versionName2);

    // Stub the glob over the dataset root to return two directory children.
    when(fs.globStatus(globbedPath)).thenReturn(new FileStatus[]{
        new FileStatus(0, true, 0, 0, 0, versionPath1), new FileStatus(0, true, 0, 0, 0, versionPath2)});

    DatasetVersionFinder<StringDatasetVersion> finder = new MockDatasetVersionFinder(fs, new Properties());
    List<StringDatasetVersion> found = Lists.newArrayList(finder.findDatasetVersions(new MockDataset(rootPath)));

    Assert.assertEquals(found.size(), 2);
    // Each version carries its directory name and the full path to delete.
    Assert.assertEquals(found.get(0).getVersion(), versionName1);
    Assert.assertEquals(found.get(0).getPathsToDelete().iterator().next(), versionPath1);
    Assert.assertEquals(found.get(1).getVersion(), versionName2);
    Assert.assertEquals(found.get(1).getPathsToDelete().iterator().next(), versionPath2);
  }

  /** Trivial finder: every direct child of the dataset root is one string version. */
  public static class MockDatasetVersionFinder extends DatasetVersionFinder<StringDatasetVersion> {
    public MockDatasetVersionFinder(FileSystem fs, Properties props) {
      super(fs, props);
    }

    @Override
    public Class<? extends DatasetVersion> versionClass() {
      return StringDatasetVersion.class;
    }

    @Override
    public Path globVersionPattern() {
      return new Path("*");
    }

    @Override
    public StringDatasetVersion getDatasetVersion(Path pathRelativeToDatasetRoot, Path fullPath) {
      // The version string is just the last path component.
      return new StringDatasetVersion(fullPath.getName(), fullPath);
    }
  }

  /** Minimal dataset stub exposing only a root path; clean() is a no-op. */
  public static class MockDataset implements CleanableDataset, FileSystemDataset {
    private final Path datasetRoot;

    public MockDataset(Path datasetRoot) {
      this.datasetRoot = datasetRoot;
    }

    @Override
    public void clean() throws IOException {
    }

    @Override
    public Path datasetRoot() {
      return this.datasetRoot;
    }

    @Override
    public String datasetURN() {
      return datasetRoot().toString();
    }
  }
}
2,286
0
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/retention/CleanableMysqlDatasetStoreDatasetTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.data.management.retention;

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;

import com.typesafe.config.Config;
import com.typesafe.config.ConfigValueFactory;
import com.zaxxer.hikari.HikariDataSource;

import org.apache.gobblin.config.ConfigBuilder;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.data.management.retention.dataset.CleanableDataset;
import org.apache.gobblin.data.management.retention.dataset.finder.TimeBasedDatasetStoreDatasetFinder;
import org.apache.gobblin.metastore.DatasetStateStore;
import org.apache.gobblin.metastore.DatasetStoreDataset;
import org.apache.gobblin.metastore.MysqlStateStore;
import org.apache.gobblin.metastore.testing.ITestMetastoreDatabase;
import org.apache.gobblin.metastore.testing.TestMetastoreDatabaseFactory;
import org.apache.gobblin.runtime.JobState;
import org.apache.gobblin.runtime.MysqlDatasetStateStore;
import org.apache.gobblin.runtime.TaskState;
import org.apache.gobblin.util.ClassAliasResolver;
import org.apache.gobblin.util.ConfigUtils;


/**
 * Unit test for cleaning {@link MysqlStateStore} and {@link MysqlDatasetStateStore}
 */
@Test(singleThreaded = true, groups = {"disabledOnCI"})
public class CleanableMysqlDatasetStoreDatasetTest {
  private static final String TEST_STATE_STORE = "TestStateStore";
  private static final String TEST_JOB_NAME1 = "TestJob1";
  private static final String TEST_JOB_NAME2 = "TestJob2";
  private static final String TEST_JOB_ID = "TestJobId";
  private static final String TEST_TASK_ID_PREFIX = "TestTask-";
  private static final String TEST_DATASET_URN1 = "TestDataset1";
  private static final String TEST_DATASET_URN2 = "TestDataset2";

  // Stores under test: one raw job-state store and one dataset-state store,
  // both backed by the same embedded test MySQL database.
  private MysqlStateStore<JobState> dbJobStateStore;
  private DatasetStateStore<JobState.DatasetState> dbDatasetStateStore;
  // Captured once so every state written in a test shares a common time origin.
  private long startTime = System.currentTimeMillis();

  private Config config;
  private ITestMetastoreDatabase testMetastoreDatabase;

  private static final String TEST_USER = "testUser";
  private static final String TEST_PASSWORD = "testPassword";

  // Builds a unique job id by suffixing an index onto the shared base id.
  private static String getJobId(String jobIdBase, int index) {
    return jobIdBase + index;
  }

  /**
   * Spins up the embedded test metastore database, creates the job-state and
   * dataset-state stores against it, and records the base retention config
   * (a 10-minute time-based lookback) used by the tests.
   */
  @BeforeClass
  public void setUp() throws Exception {
    this.testMetastoreDatabase = TestMetastoreDatabaseFactory.get();
    String jdbcUrl = this.testMetastoreDatabase.getJdbcUrl();
    ConfigBuilder configBuilder = ConfigBuilder.create();

    HikariDataSource dataSource = new HikariDataSource();
    dataSource.setDriverClassName(ConfigurationKeys.DEFAULT_STATE_STORE_DB_JDBC_DRIVER);
    dataSource.setAutoCommit(false);
    dataSource.setJdbcUrl(jdbcUrl);
    dataSource.setUsername(TEST_USER);
    dataSource.setPassword(TEST_PASSWORD);

    this.dbJobStateStore = new MysqlStateStore<>(dataSource, TEST_STATE_STORE, false, JobState.class);

    // States older than this lookback window are candidates for deletion.
    configBuilder.addPrimitive("selection.timeBased.lookbackTime", "10m");
    configBuilder.addPrimitive(ConfigurationKeys.STATE_STORE_TYPE_KEY, "mysql");
    configBuilder.addPrimitive(ConfigurationKeys.STATE_STORE_DB_TABLE_KEY, TEST_STATE_STORE);
    configBuilder.addPrimitive(ConfigurationKeys.STATE_STORE_DB_URL_KEY, jdbcUrl);
    configBuilder.addPrimitive(ConfigurationKeys.STATE_STORE_DB_USER_KEY, TEST_USER);
    configBuilder.addPrimitive(ConfigurationKeys.STATE_STORE_DB_PASSWORD_KEY, TEST_PASSWORD);

    // Resolve the mysql-backed DatasetStateStore implementation through its alias.
    ClassAliasResolver<DatasetStateStore.Factory> resolver =
        new ClassAliasResolver<>(DatasetStateStore.Factory.class);
    DatasetStateStore.Factory stateStoreFactory = resolver.resolveClass("mysql").newInstance();
    this.config = configBuilder.build();
    this.dbDatasetStateStore = stateStoreFactory.createStateStore(configBuilder.build());

    // clear data that may have been left behind by a prior test run
    this.dbJobStateStore.delete(TEST_JOB_NAME1);
    this.dbDatasetStateStore.delete(TEST_JOB_NAME1);
    this.dbJobStateStore.delete(TEST_JOB_NAME2);
    this.dbDatasetStateStore.delete(TEST_JOB_NAME2);
  }

  /**
   * Test cleanup of the job state store: a state stamped 20 minutes in the
   * past should fall outside the 10-minute lookback and be deleted, while a
   * state stamped "now" is retained.
   * @throws IOException
   */
  @Test(enabled = false)
  public void testCleanJobStateStore() throws IOException {
    JobState jobState = new JobState(TEST_JOB_NAME1, getJobId(TEST_JOB_ID, 1));
    jobState.setId(getJobId(TEST_JOB_ID, 1));
    jobState.setProp("foo", "bar");
    jobState.setState(JobState.RunningState.COMMITTED);
    jobState.setStartTime(this.startTime);
    jobState.setEndTime(this.startTime + 1000);
    jobState.setDuration(1000);

    for (int i = 0; i < 3; i++) {
      TaskState taskState = new TaskState();
      taskState.setJobId(getJobId(TEST_JOB_ID, 1));
      taskState.setTaskId(TEST_TASK_ID_PREFIX + i);
      taskState.setId(TEST_TASK_ID_PREFIX + i);
      taskState.setWorkingState(WorkUnitState.WorkingState.COMMITTED);
      jobState.addTaskState(taskState);
    }

    // set old time to test that this state is deleted (20 minutes ago, in seconds)
    this.dbJobStateStore.setTestTimestamp(System.currentTimeMillis()/1000 - (60 * 20));
    this.dbJobStateStore.put(TEST_JOB_NAME1,
        MysqlDatasetStateStore.CURRENT_DATASET_STATE_FILE_SUFFIX
            + MysqlDatasetStateStore.DATASET_STATE_STORE_TABLE_SUFFIX, jobState);

    // Reuse the same JobState object under the second job's identity.
    jobState.setJobName(TEST_JOB_NAME2);
    jobState.setJobId(getJobId(TEST_JOB_ID, 2));
    jobState.setId(getJobId(TEST_JOB_ID, 2));

    // set current time to test that the state is not deleted
    // NOTE(review): per this usage, 0 appears to mean "use the real clock" — confirm in MysqlStateStore
    this.dbJobStateStore.setTestTimestamp(0);
    this.dbJobStateStore.put(TEST_JOB_NAME2,
        MysqlDatasetStateStore.CURRENT_DATASET_STATE_FILE_SUFFIX
            + MysqlDatasetStateStore.DATASET_STATE_STORE_TABLE_SUFFIX, jobState);

    TimeBasedDatasetStoreDatasetFinder datasetFinder =
        new TimeBasedDatasetStoreDatasetFinder(FileSystem.get(new Configuration()),
            ConfigUtils.configToProperties(config));
    List<DatasetStoreDataset> datasets = datasetFinder.findDatasets();

    // Both entries exist before cleaning.
    Assert.assertTrue(this.dbJobStateStore.exists(TEST_JOB_NAME1,
        MysqlDatasetStateStore.CURRENT_DATASET_STATE_FILE_SUFFIX
            + MysqlDatasetStateStore.DATASET_STATE_STORE_TABLE_SUFFIX));
    Assert.assertTrue(this.dbJobStateStore.exists(TEST_JOB_NAME2,
        MysqlDatasetStateStore.CURRENT_DATASET_STATE_FILE_SUFFIX
            + MysqlDatasetStateStore.DATASET_STATE_STORE_TABLE_SUFFIX));

    for (DatasetStoreDataset dataset : datasets) {
      ((CleanableDataset) dataset).clean();
    }

    // The stale entry is gone after cleaning.
    Assert.assertFalse(this.dbJobStateStore.exists(TEST_JOB_NAME1,
        MysqlDatasetStateStore.CURRENT_DATASET_STATE_FILE_SUFFIX
            + MysqlDatasetStateStore.DATASET_STATE_STORE_TABLE_SUFFIX));

    // state with recent timestamp is not deleted
    Assert.assertTrue(this.dbJobStateStore.exists(TEST_JOB_NAME2,
        MysqlDatasetStateStore.CURRENT_DATASET_STATE_FILE_SUFFIX
            + MysqlDatasetStateStore.DATASET_STATE_STORE_TABLE_SUFFIX));
  }

  /**
   * Test cleanup of the dataset state store. This test uses the combined selection policy to test that the newest 2
   * entries are retained even when the timestamp is old.
   *
   * @throws IOException
   */
  @Test(enabled = false)
  public void testCleanDatasetStateStore() throws IOException {
    JobState.DatasetState datasetState =
        new JobState.DatasetState(TEST_JOB_NAME1, getJobId(TEST_JOB_ID, 1));

    datasetState.setDatasetUrn(TEST_DATASET_URN1);
    datasetState.setState(JobState.RunningState.COMMITTED);
    datasetState.setId(TEST_DATASET_URN1);
    datasetState.setStartTime(this.startTime);
    datasetState.setEndTime(this.startTime + 1000);
    datasetState.setDuration(1000);

    for (int i = 0; i < 3; i++) {
      TaskState taskState = new TaskState();
      taskState.setJobId(getJobId(TEST_JOB_ID, 1));
      taskState.setTaskId(TEST_TASK_ID_PREFIX + i);
      taskState.setId(TEST_TASK_ID_PREFIX + i);
      taskState.setWorkingState(WorkUnitState.WorkingState.COMMITTED);
      datasetState.addTaskState(taskState);
    }

    // set old time to test that this state is deleted
    // Three versions of URN1 are persisted with stale timestamps 2s apart, so the
    // NewestK part of the combined policy has an unambiguous newest-first ordering.
    ((MysqlDatasetStateStore)this.dbDatasetStateStore).setTestTimestamp(this.startTime/1000 + 2 - (60 * 20));
    datasetState.setJobId(getJobId(TEST_JOB_ID, 1));
    this.dbDatasetStateStore.persistDatasetState(TEST_DATASET_URN1, datasetState);
    ((MysqlDatasetStateStore)this.dbDatasetStateStore).setTestTimestamp(this.startTime/1000 + 4 - (60 * 20));
    datasetState.setJobId(getJobId(TEST_JOB_ID, 2));
    this.dbDatasetStateStore.persistDatasetState(TEST_DATASET_URN1, datasetState);
    ((MysqlDatasetStateStore)this.dbDatasetStateStore).setTestTimestamp(this.startTime/1000 + 6 - (60 * 20));
    datasetState.setJobId(getJobId(TEST_JOB_ID, 3));
    this.dbDatasetStateStore.persistDatasetState(TEST_DATASET_URN1, datasetState);
    datasetState.setJobId(getJobId(TEST_JOB_ID, 1));

    // persist a second dataset state to test that retrieval of multiple dataset states works
    datasetState.setDatasetUrn(TEST_DATASET_URN2);
    datasetState.setId(TEST_DATASET_URN2);
    datasetState.setDuration(2000);

    // set current time to test that the state is not deleted
    ((MysqlDatasetStateStore)this.dbDatasetStateStore).setTestTimestamp(0);
    this.dbDatasetStateStore.persistDatasetState(TEST_DATASET_URN2, datasetState);
    datasetState.setJobName(TEST_JOB_NAME2);
    this.dbDatasetStateStore.persistDatasetState(TEST_DATASET_URN2, datasetState);

    // Combined (intersect) policy: delete only versions that are BOTH older than the
    // 10m lookback AND outside the newest 2 — so the newest 2 survive even when stale.
    Config cleanerConfig = config
        .withValue("gobblin.retention.selection.policy.class",
            ConfigValueFactory.fromAnyRef("org.apache.gobblin.data.management.policy.CombineSelectionPolicy"))
        .withValue("gobblin.retention.selection.combine.operation",
            ConfigValueFactory.fromAnyRef("intersect"))
        .withValue("gobblin.retention.selection.combine.policy.classes",
            ConfigValueFactory.fromAnyRef("org.apache.gobblin.data.management.policy.SelectBeforeTimeBasedPolicy,org.apache.gobblin.data.management.policy.NewestKSelectionPolicy"))
        .withValue("gobblin.retention.selection.timeBased.lookbackTime",
            ConfigValueFactory.fromAnyRef("10m"))
        .withValue("gobblin.retention.selection.newestK.versionsNotSelected",
            ConfigValueFactory.fromAnyRef("2"));

    TimeBasedDatasetStoreDatasetFinder datasetFinder =
        new TimeBasedDatasetStoreDatasetFinder(FileSystem.get(new Configuration()),
            ConfigUtils.configToProperties(cleanerConfig));
    List<DatasetStoreDataset> datasets = datasetFinder.findDatasets();

    // All persisted entries exist before cleaning.
    Assert.assertTrue(this.dbDatasetStateStore.exists(TEST_JOB_NAME1, TEST_DATASET_URN1 + "-" +
        MysqlDatasetStateStore.CURRENT_DATASET_STATE_FILE_SUFFIX
        + MysqlDatasetStateStore.DATASET_STATE_STORE_TABLE_SUFFIX));
    Assert.assertTrue(this.dbDatasetStateStore.exists(TEST_JOB_NAME1, TEST_DATASET_URN1 + "-" +
        getJobId(TEST_JOB_ID, 1) + MysqlDatasetStateStore.DATASET_STATE_STORE_TABLE_SUFFIX));
    Assert.assertTrue(this.dbDatasetStateStore.exists(TEST_JOB_NAME1, TEST_DATASET_URN1 + "-" +
        getJobId(TEST_JOB_ID, 2) + MysqlDatasetStateStore.DATASET_STATE_STORE_TABLE_SUFFIX));
    Assert.assertTrue(this.dbDatasetStateStore.exists(TEST_JOB_NAME1, TEST_DATASET_URN1 + "-" +
        getJobId(TEST_JOB_ID, 3) + MysqlDatasetStateStore.DATASET_STATE_STORE_TABLE_SUFFIX));
    Assert.assertTrue(this.dbDatasetStateStore.exists(TEST_JOB_NAME1, TEST_DATASET_URN2 + "-" +
        MysqlDatasetStateStore.CURRENT_DATASET_STATE_FILE_SUFFIX
        + MysqlDatasetStateStore.DATASET_STATE_STORE_TABLE_SUFFIX));
    Assert.assertTrue(this.dbDatasetStateStore.exists(TEST_JOB_NAME1, TEST_DATASET_URN2 + "-" +
        getJobId(TEST_JOB_ID, 1) + MysqlDatasetStateStore.DATASET_STATE_STORE_TABLE_SUFFIX));
    Assert.assertTrue(this.dbDatasetStateStore.exists(TEST_JOB_NAME2, TEST_DATASET_URN2 + "-" +
        MysqlDatasetStateStore.CURRENT_DATASET_STATE_FILE_SUFFIX
        + MysqlDatasetStateStore.DATASET_STATE_STORE_TABLE_SUFFIX));
    Assert.assertTrue(this.dbDatasetStateStore.exists(TEST_JOB_NAME2, TEST_DATASET_URN2 + "-" +
        getJobId(TEST_JOB_ID, 1) + MysqlDatasetStateStore.DATASET_STATE_STORE_TABLE_SUFFIX));

    for (DatasetStoreDataset dataset : datasets) {
      ((CleanableDataset) dataset).clean();
    }

    // the most recent two entries (current and job id 3) should be retained
    Assert.assertTrue(this.dbDatasetStateStore.exists(TEST_JOB_NAME1, TEST_DATASET_URN1 + "-" +
        MysqlDatasetStateStore.CURRENT_DATASET_STATE_FILE_SUFFIX
        + MysqlDatasetStateStore.DATASET_STATE_STORE_TABLE_SUFFIX));
    Assert.assertTrue(this.dbDatasetStateStore.exists(TEST_JOB_NAME1, TEST_DATASET_URN1 + "-" +
        getJobId(TEST_JOB_ID, 3) + MysqlDatasetStateStore.DATASET_STATE_STORE_TABLE_SUFFIX));
    Assert.assertFalse(this.dbDatasetStateStore.exists(TEST_JOB_NAME1, TEST_DATASET_URN1 + "-" +
        getJobId(TEST_JOB_ID, 1) + MysqlDatasetStateStore.DATASET_STATE_STORE_TABLE_SUFFIX));
    Assert.assertFalse(this.dbDatasetStateStore.exists(TEST_JOB_NAME1, TEST_DATASET_URN1 + "-" +
        getJobId(TEST_JOB_ID, 2) + MysqlDatasetStateStore.DATASET_STATE_STORE_TABLE_SUFFIX));
    // URN2 entries were written with current timestamps, so all of them survive.
    Assert.assertTrue(this.dbDatasetStateStore.exists(TEST_JOB_NAME1, TEST_DATASET_URN2 + "-" +
        MysqlDatasetStateStore.CURRENT_DATASET_STATE_FILE_SUFFIX
        + MysqlDatasetStateStore.DATASET_STATE_STORE_TABLE_SUFFIX));
    Assert.assertTrue(this.dbDatasetStateStore.exists(TEST_JOB_NAME1, TEST_DATASET_URN2 + "-" +
        getJobId(TEST_JOB_ID, 1) + MysqlDatasetStateStore.DATASET_STATE_STORE_TABLE_SUFFIX));
    Assert.assertTrue(this.dbDatasetStateStore.exists(TEST_JOB_NAME2, TEST_DATASET_URN2 + "-" +
        MysqlDatasetStateStore.CURRENT_DATASET_STATE_FILE_SUFFIX
        + MysqlDatasetStateStore.DATASET_STATE_STORE_TABLE_SUFFIX));
    Assert.assertTrue(this.dbDatasetStateStore.exists(TEST_JOB_NAME2, TEST_DATASET_URN2 + "-" +
        getJobId(TEST_JOB_ID, 1) + MysqlDatasetStateStore.DATASET_STATE_STORE_TABLE_SUFFIX));
  }
}
2,287
0
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/retention/CleanableDatasetStoreDatasetTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.data.management.retention;

import java.io.File;
import java.io.IOException;
import java.util.List;
import java.util.Properties;

import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.data.management.retention.dataset.CleanableDataset;
import org.apache.gobblin.data.management.retention.dataset.finder.TimeBasedDatasetStoreDatasetFinder;
import org.apache.gobblin.metastore.DatasetStoreDataset;
import org.apache.gobblin.runtime.FsDatasetStateStore;
import org.apache.gobblin.runtime.JobState;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.testng.Assert;
import org.testng.annotations.Test;

import com.google.common.io.Files;


/**
 * Unit test for {@link org.apache.gobblin.data.management.retention.dataset.CleanableDatasetStoreDataset}
 */
public class CleanableDatasetStoreDatasetTest {

  /**
   * Persists several dataset states into a file-system state store, then runs a
   * time-based cleaner with a zero lookback and verifies that each store
   * directory is trimmed down to a single remaining entry.
   */
  @Test
  public void testCleanStateStore() throws IOException {
    File stateStoreRoot = Files.createTempDir();
    stateStoreRoot.deleteOnExit();

    FileSystem localFs = FileSystem.getLocal(new Configuration());
    FsDatasetStateStore stateStore = new FsDatasetStateStore(localFs, stateStoreRoot.getAbsolutePath());

    // Multiple versions for dataset1 under two job names, plus one state with an
    // empty dataset urn.
    stateStore.persistDatasetState("dataset1", new JobState.DatasetState("job1", "job1_id1"));
    stateStore.persistDatasetState("dataset1", new JobState.DatasetState("job1", "job1_id2"));
    stateStore.persistDatasetState("dataset1", new JobState.DatasetState("job2", "job2_id1"));
    stateStore.persistDatasetState("", new JobState.DatasetState("job3", "job3_id1"));

    // Zero-minute lookback means every non-current version is eligible for cleanup.
    Properties finderProps = new Properties();
    finderProps.setProperty(ConfigurationKeys.STATE_STORE_ROOT_DIR_KEY, stateStoreRoot.getAbsolutePath());
    finderProps.setProperty("selection.timeBased.lookbackTime", "0m");

    TimeBasedDatasetStoreDatasetFinder finder = new TimeBasedDatasetStoreDatasetFinder(localFs, finderProps);
    List<DatasetStoreDataset> foundDatasets = finder.findDatasets();

    for (DatasetStoreDataset found : foundDatasets) {
      ((CleanableDataset) found).clean();
      // After cleaning, exactly one file should be left in each job's store directory.
      File storeDir = new File(stateStoreRoot.getAbsolutePath(), found.getKey().getStoreName());
      Assert.assertEquals(storeDir.list().length, 1);
    }
  }
}
2,288
0
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/retention
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/retention/test/ContainsCRetentionPolicy.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.data.management.retention.test; import java.util.Properties; /** * RetentionPolivy that deletes versions containing the character "c" in its name. */ public class ContainsCRetentionPolicy extends ContainsStringRetentionPolicy { public ContainsCRetentionPolicy(Properties props) { super(props); } @Override protected String getSearchToken() { return "c"; } }
2,289
0
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/retention
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/retention/test/ContainsStringRetentionPolicy.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.data.management.retention.test; import org.apache.gobblin.data.management.retention.policy.RetentionPolicy; import org.apache.gobblin.data.management.version.DatasetVersion; import org.apache.gobblin.data.management.version.StringDatasetVersion; import java.util.Collection; import java.util.List; import java.util.Properties; import com.google.common.base.Predicate; import com.google.common.collect.Iterables; import com.google.common.collect.Lists; /** * RetentionPolivy that deletes versions containing a particular string token. */ public abstract class ContainsStringRetentionPolicy implements RetentionPolicy<StringDatasetVersion> { public ContainsStringRetentionPolicy(Properties props) { } @Override public Class<? extends DatasetVersion> versionClass() { return StringDatasetVersion.class; } @Override public Collection<StringDatasetVersion> listDeletableVersions(List<StringDatasetVersion> allVersions) { return Lists.newArrayList(Iterables.filter(allVersions, new Predicate<StringDatasetVersion>() { @Override public boolean apply(StringDatasetVersion input) { return input.getVersion().contains(getSearchToken()); } })); } protected abstract String getSearchToken(); }
2,290
0
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/retention
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/retention/test/ContainsBRetentionPolicy.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.data.management.retention.test; import java.util.Properties; /** * RetentionPolivy that deletes versions containing the character "b" in its name. */ public class ContainsBRetentionPolicy extends ContainsStringRetentionPolicy { public ContainsBRetentionPolicy(Properties props) { super(props); } @Override protected String getSearchToken() { return "b"; } }
2,291
0
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/retention
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/retention/test/ContainsARetentionPolicy.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.data.management.retention.test; import java.util.Properties; import org.apache.gobblin.annotation.Alias; /** * RetentionPolivy that deletes versions containing the character "a" in its name. */ @Alias("ContainsA") public class ContainsARetentionPolicy extends ContainsStringRetentionPolicy { public ContainsARetentionPolicy(Properties props) { super(props); } @Override protected String getSearchToken() { return "a"; } }
2,292
0
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/retention
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/retention/integration/RetentionIntegrationTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.data.management.retention.integration;

import lombok.extern.slf4j.Slf4j;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;

import com.google.common.io.Files;

import org.apache.gobblin.util.PathUtils;
import org.apache.gobblin.util.test.RetentionTestDataGenerator;
import org.apache.gobblin.util.test.RetentionTestHelper;


/**
 *
 * Integration tests for gobblin retention.
 * <ul>
 * <li> Reads a <code>setup_validate.conf</code> to setup files/dirs for the test.
 * See {@link RetentionTestDataGenerator#setup()}
 * <li> Then runs gobblin retention with configs in <code>retention.conf</code> file.
 * <li> Validates for deleted and retained files as specified by <code>setup_validate.conf</code> file.
 * See {@link RetentionTestDataGenerator#validate()}
 * </ul>
 *
 * The class is a parameterized test meaning a test {@link #testRetention(String, String)} is run for every row returned
 * by the data provider method {@link #retentionTestDataProvider()}
 *
 * <p>
 * <b>Adding a new test is simple</b>
 * <ul>
 * <li> Create a package under src/test/resources/retentionIntegrationTest/YOUR_NEW_TEST
 * <li> Create a <code>setup_validate.conf</code> file under this package to describe data to create
 * and data to validate after the test
 * <li> Create a <code>retention.conf</code> file under this package with retention configuration. Finders, policies etc.
 * <li> Add YOUR_TEST_NAME to the data provider {@link #retentionTestDataProvider()}
 * </ul>
 * </p>
 */
@Slf4j
@Test(groups = { "SystemTimeTests"})
public class RetentionIntegrationTest {

  private FileSystem fs;
  // Shared temp root; each test case gets its own sub-directory named after the test.
  private Path testClassTempPath;

  private static final String SETUP_VALIDATE_CONFIG_CLASSPATH_FILENAME = "setup_validate.conf";
  static final String TEST_PACKAGE_RESOURCE_NAME = "retentionIntegrationTest";
  private static final String TEST_DATA_DIR_NAME = "retentionIntegrationTestData";

  /** Creates the shared temp directory under which every test case generates its data. */
  @BeforeClass
  public void setupClass() throws Exception {
    this.fs = FileSystem.get(new Configuration());
    testClassTempPath = new Path(Files.createTempDir().getAbsolutePath(), TEST_DATA_DIR_NAME);

    if (!fs.mkdirs(testClassTempPath)) {
      throw new RuntimeException("Failed to create temp directory for the test at " + testClassTempPath.toString());
    }
  }

  /**
   *
   * The method is a data provider for {@link RetentionIntegrationTest#testRetention(String, String)},
   * Return a 2d string array. The pair of strings in each row is passed to {@link RetentionIntegrationTest#testRetention(String, String)}
   * The first element in the pair is the name of the test and 2nd string the name of job config file to use.
   * It may be a .properties file or a .conf file parsed by typesafe
   */
  @DataProvider
  public Object[][] retentionTestDataProvider() {

    return new Object[][] {
        { "testTimeBasedRetention", "retention.conf" },
        { "testTimeBasedRetention", "selection.conf" },
        { "testNewestKRetention", "retention.conf" },
        { "testNewestKRetention", "selection.conf" },
        { "testHourlyPatternRetention", "hourly-retention.job" },
        { "testDailyPatternRetention", "daily-retention.job" },
        { "testMultiVersionRetention", "daily-hourly-retention.conf" },
        { "testCombinePolicy", "retention.job" },
        { "testCombinePolicy", "selection.conf" },
        { "testTimeBasedAccessControl", "selection.conf" },
        { "testMultiVersionAccessControl", "daily-retention-with-accessControl.conf" } };
  }

  /**
   * Runs one retention scenario: generates the fixture data described by the test's
   * <code>setup_validate.conf</code>, applies retention using the given job config
   * file, validates the surviving/deleted files, and always cleans up the fixtures.
   */
  @Test(dataProvider = "retentionTestDataProvider")
  public void testRetention(final String testName, final String testConfFileName) throws Exception {
    // Temp path for this test under which test data is generated
    Path testNameTempPath = new Path(testClassTempPath, testName);

    RetentionTestDataGenerator dataGenerator =
        new RetentionTestDataGenerator(testNameTempPath, PathUtils.combinePaths(TEST_PACKAGE_RESOURCE_NAME,
            testName, SETUP_VALIDATE_CONFIG_CLASSPATH_FILENAME), fs);

    try {
      dataGenerator.setup();

      RetentionTestHelper.clean(fs, PathUtils.combinePaths(TEST_PACKAGE_RESOURCE_NAME, testName, testConfFileName),
          testNameTempPath);

      dataGenerator.validate();
    } finally {
      dataGenerator.cleanup();
    }
  }

  /** Best-effort removal of the shared temp directory; failures are logged, not thrown. */
  @AfterClass
  public void cleanUpClass() {
    try {
      this.fs.delete(testClassTempPath, true);
    } catch (Exception e) {
      log.error("Failed to cleanup test files", e);
    }
  }
}
0
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/retention
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/retention/integration/HiveRetentionTest.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.data.management.retention.integration; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.containsInAnyOrder; import java.util.List; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.joda.time.DateTimeUtils; import org.joda.time.DateTimeZone; import org.joda.time.format.DateTimeFormat; import org.joda.time.format.DateTimeFormatter; import org.testng.Assert; import org.testng.annotations.AfterClass; import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; import com.google.common.base.Function; import com.google.common.base.Optional; import com.google.common.collect.FluentIterable; import com.google.common.collect.ImmutableList; import com.google.common.io.Files; import org.apache.gobblin.configuration.ConfigurationKeys; import org.apache.gobblin.data.management.conversion.hive.LocalHiveMetastoreTestUtils; import org.apache.gobblin.util.PathUtils; import 
org.apache.gobblin.util.test.RetentionTestDataGenerator.FixedThreadLocalMillisProvider;

import org.apache.gobblin.util.test.RetentionTestHelper;

/**
 * Tests for time-based Hive retention: partitions older than the configured selection window are
 * purged and, in the replacement flavor, re-registered to point at the corresponding partitions of
 * a "_source" replacement table. Wall-clock time is frozen through Joda's {@link DateTimeUtils}
 * so that which partitions fall inside/outside the window is deterministic.
 */
@Test(groups = { "SystemTimeTests"})
public class HiveRetentionTest {

  // Local filesystem on which partition directories are created and verified.
  private FileSystem fs;
  // Scratch root for all table/partition data; removed in cleanUp().
  private Path testTempPath;
  // Helper around an embedded (local) Hive metastore.
  private LocalHiveMetastoreTestUtils hiveMetastoreTestUtils;

  // Pattern of the "datepartition" values; pinned to PST to match the retention config resources.
  private static final DateTimeFormatter FORMATTER = DateTimeFormat.forPattern("yyyy-MM-dd-HH").withZone(
      DateTimeZone.forID(ConfigurationKeys.PST_TIMEZONE_NAME));

  @BeforeClass
  public void setup() throws Exception {
    this.hiveMetastoreTestUtils = LocalHiveMetastoreTestUtils.getInstance();
    fs = FileSystem.getLocal(new Configuration());
    testTempPath = new Path(Files.createTempDir().getAbsolutePath(), "HiveRetentionTest");
    fs.mkdirs(testTempPath);
  }

  /**
   * Shared driver for all three retention tests.
   *
   * @param purgedDbName db whose partitions are subject to retention
   * @param purgedTableName table whose partitions are subject to retention
   * @param configFileName retention config resource under testHiveTimeBasedRetention/
   * @param isReplacementTest when true, purged partitions are expected to be re-registered
   *        pointing at the matching partitions of the "_source" replacement table
   */
  private void testTimeBasedHiveRetention(String purgedDbName, String purgedTableName, String configFileName,
      boolean isReplacementTest) throws Exception {
    try {
      // Freeze "now" at 2016-01-10-00 so the retention window is deterministic.
      DateTimeUtils.setCurrentMillisProvider(
          new FixedThreadLocalMillisProvider(FORMATTER.parseDateTime("2016-01-10-00").getMillis()));

      // Setup db, table to purge. Creating 4 partitions: 2 will be deleted and 2 will be retained.
      String purgedTableSdLoc = new Path(testTempPath, purgedDbName + purgedTableName).toString();
      this.hiveMetastoreTestUtils.dropDatabaseIfExists(purgedDbName);
      final Table purgedTbl = this.hiveMetastoreTestUtils.createTestAvroTable(purgedDbName, purgedTableName,
          purgedTableSdLoc, ImmutableList.of("datepartition"), false);

      // Setup db, table and partitions to act as the replacement-partitions source.
      String replacementSourceTableSdLoc = new Path(testTempPath, purgedDbName + purgedTableName + "_source").toString();
      String replacementDbName = purgedDbName + "_source";
      String replacementTableName = purgedTableName + "_source";
      this.hiveMetastoreTestUtils.dropDatabaseIfExists(replacementDbName);
      final Table replacementTbl = this.hiveMetastoreTestUtils.createTestAvroTable(replacementDbName,
          replacementTableName, replacementSourceTableSdLoc, ImmutableList.of("datepartition"), false);

      // Given the frozen "now", the configs are expected to expire the first two partitions and
      // keep the last two (exact window is defined in the config resources - see configFileName).
      String deleted1 = "2016-01-01-00";
      String deleted2 = "2016-01-02-02";
      String retained1 = "2016-01-03-04";
      String retained2 = "2016-01-07-06";

      // Create partitions in table being purged.
      // NOTE(review): the (int) cast truncates System.currentTimeMillis(); if a create-time in
      // seconds was intended this should likely be (int) (System.currentTimeMillis() / 1000) - confirm.
      Partition pDeleted1 =
          this.hiveMetastoreTestUtils.addTestPartition(purgedTbl, ImmutableList.of(deleted1), (int) System.currentTimeMillis());
      Partition pDeleted2 =
          this.hiveMetastoreTestUtils.addTestPartition(purgedTbl, ImmutableList.of(deleted2), (int) System.currentTimeMillis());
      Partition pRetained1 =
          this.hiveMetastoreTestUtils.addTestPartition(purgedTbl, ImmutableList.of(retained1), (int) System.currentTimeMillis());
      Partition pRetained2 =
          this.hiveMetastoreTestUtils.addTestPartition(purgedTbl, ImmutableList.of(retained2), (int) System.currentTimeMillis());
      this.fs.mkdirs(new Path(pDeleted1.getSd().getLocation()));
      this.fs.mkdirs(new Path(pDeleted2.getSd().getLocation()));
      this.fs.mkdirs(new Path(pRetained1.getSd().getLocation()));
      this.fs.mkdirs(new Path(pRetained2.getSd().getLocation()));

      // Create partitions in table that is replacement source.
      Partition rReplaced1 =
          this.hiveMetastoreTestUtils.addTestPartition(replacementTbl, ImmutableList.of(deleted1), (int) System.currentTimeMillis());
      Partition rReplaced2 =
          this.hiveMetastoreTestUtils.addTestPartition(replacementTbl, ImmutableList.of(deleted2), (int) System.currentTimeMillis());
      Partition rUntouched1 =
          this.hiveMetastoreTestUtils.addTestPartition(replacementTbl, ImmutableList.of(retained1), (int) System.currentTimeMillis());
      Partition rUntouched2 =
          this.hiveMetastoreTestUtils.addTestPartition(replacementTbl, ImmutableList.of(retained2), (int) System.currentTimeMillis());
      this.fs.mkdirs(new Path(rReplaced1.getSd().getLocation()));
      this.fs.mkdirs(new Path(rReplaced2.getSd().getLocation()));
      this.fs.mkdirs(new Path(rUntouched1.getSd().getLocation()));
      this.fs.mkdirs(new Path(rUntouched2.getSd().getLocation()));

      // Sanity check: both tables start with all 4 partitions registered in the metastore.
      List<Partition> pPartitions =
          this.hiveMetastoreTestUtils.getLocalMetastoreClient().listPartitions(purgedDbName, purgedTableName, (short) 10);
      Assert.assertEquals(pPartitions.size(), 4);
      List<Partition> rPartitions =
          this.hiveMetastoreTestUtils.getLocalMetastoreClient().listPartitions(replacementDbName, replacementTableName, (short) 10);
      Assert.assertEquals(rPartitions.size(), 4);

      // Run retention
      RetentionTestHelper.clean(fs,
          PathUtils.combinePaths(RetentionIntegrationTest.TEST_PACKAGE_RESOURCE_NAME, "testHiveTimeBasedRetention", configFileName),
          Optional.of(PathUtils.combinePaths(RetentionIntegrationTest.TEST_PACKAGE_RESOURCE_NAME, "testHiveTimeBasedRetention", "jobProps.properties")),
          testTempPath);

      pPartitions = this.hiveMetastoreTestUtils.getLocalMetastoreClient().listPartitions(purgedDbName, purgedTableName, (short) 10);

      String[] expectedRetainedPartitions;
      if (isReplacementTest) {
        // If replacement test, 2 partitions must be replaced (not dropped) - hence total count must be 4.
        Assert.assertEquals(pPartitions.size(), 4);
        expectedRetainedPartitions =
            new String[] { getQlPartition(purgedTbl, pRetained1).getName(), getQlPartition(purgedTbl, pRetained2).getName(),
                getQlPartition(purgedTbl, pDeleted1).getName(), getQlPartition(purgedTbl, pDeleted2).getName()};
      } else {
        // If not a replacement test, 2 partitions must be purged.
        Assert.assertEquals(pPartitions.size(), 2);
        expectedRetainedPartitions =
            new String[] { getQlPartition(purgedTbl, pRetained1).getName(), getQlPartition(purgedTbl, pRetained2).getName()};
      }

      // Check that the surviving partitions are exactly those expected (order-independent).
      assertThat(FluentIterable.from(pPartitions).transform(new Function<Partition, String>() {
        @Override
        public String apply(Partition input) {
          return getQlPartition(purgedTbl, input).getName();
        }
      }).toList(), containsInAnyOrder(expectedRetainedPartitions));

      // Check that replaced partitions are pointing to the replacement table's physical location.
      if (isReplacementTest) {
        for (Partition partition : pPartitions) {
          if (getQlPartition(purgedTbl, partition).getName().equalsIgnoreCase(getQlPartition(purgedTbl, pDeleted1).getName())) {
            Assert.assertEquals(partition.getSd().getLocation(), rReplaced1.getSd().getLocation(),
                "Replaced partition location not updated.");
          }
          if (getQlPartition(purgedTbl, partition).getName().equalsIgnoreCase(getQlPartition(purgedTbl, pDeleted2).getName())) {
            Assert.assertEquals(partition.getSd().getLocation(), rReplaced2.getSd().getLocation(),
                "Replaced partition location not updated.");
          }
        }
      }

      // Irrespective of whether it is a replacement test, purged partition directories must be deleted.
      Assert.assertTrue(this.fs.exists(new Path(pRetained1.getSd().getLocation())));
      Assert.assertTrue(this.fs.exists(new Path(pRetained2.getSd().getLocation())));
      Assert.assertFalse(this.fs.exists(new Path(pDeleted1.getSd().getLocation())));
      Assert.assertFalse(this.fs.exists(new Path(pDeleted2.getSd().getLocation())));

      // Replacement source partition directories must be left untouched.
      Assert.assertTrue(this.fs.exists(new Path(rReplaced1.getSd().getLocation())));
      Assert.assertTrue(this.fs.exists(new Path(rReplaced2.getSd().getLocation())));
      Assert.assertTrue(this.fs.exists(new Path(rUntouched1.getSd().getLocation())));
      Assert.assertTrue(this.fs.exists(new Path(rUntouched2.getSd().getLocation())));
    } finally {
      // Always restore the real system clock so later tests are unaffected.
      DateTimeUtils.setCurrentMillisSystem();
    }
  }

  // Chained via dependsOnMethods: the two ConfigStore tests share db/table names, so they must
  // not run concurrently or out of order.
  @Test(dependsOnMethods = { "testTimeBasedHiveRetentionWithConfigStore" })
  public void testTimeBasedHivePartitionReplacementWithConfigStore() throws Exception {
    String dbName = "hiveTestDbConfigStore";
    String tableName = "testTable";
    this.testTimeBasedHiveRetention(dbName, tableName, "replacement.conf", true);
  }

  @Test(dependsOnMethods = { "testTimeBasedHiveRetentionWithJobProps" })
  public void testTimeBasedHiveRetentionWithConfigStore() throws Exception {
    String dbName = "hiveTestDbConfigStore";
    String tableName = "testTable";
    this.testTimeBasedHiveRetention(dbName, tableName, "selection.conf", false);
  }

  @Test
  public void testTimeBasedHiveRetentionWithJobProps() throws Exception {
    String dbName = "hiveTestDb";
    String tableName = "testTable";
    this.testTimeBasedHiveRetention(dbName, tableName, "hive-retention.job", false);
  }

  /** Wraps a metastore-API partition into a ql-API partition, which exposes getName(). */
  private static org.apache.hadoop.hive.ql.metadata.Partition getQlPartition(final Table table, final Partition partition) {
    try {
      return new org.apache.hadoop.hive.ql.metadata.Partition(new org.apache.hadoop.hive.ql.metadata.Table(table), partition);
    } catch (HiveException e) {
      throw new RuntimeException(e);
    }
  }

  @AfterClass
  public void cleanUp() {
    try {
      fs.delete(this.testTempPath, true);
    } catch (Exception e) {
      // ignore - best-effort cleanup of the scratch directory
    }
  }
}
2,294
0
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/retention
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/retention/source/DatasetCleanerSourceTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.data.management.retention.source;

import java.util.List;
import java.util.Properties;

import org.testng.Assert;
import org.testng.annotations.Test;

import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.runtime.retention.DatasetCleanerTaskFactory;
import org.apache.gobblin.source.workunit.WorkUnit;


/**
 * Unit tests for {@link DatasetCleanerSource} work-unit creation and for the precedence of
 * base vs. source-scoped vs. configuration-scoped properties.
 */
public class DatasetCleanerSourceTest {
  private static final String KEY1 = "key1";
  private static final String KEY2 = "key2";
  private static final String VALUE1 = "value1";
  private static final String VALUE2 = "value2";
  private static final String BAD_VALUE = "bad_value";
  private static final String TASK_FACTORY_KEY = "org.apache.gobblin.runtime.taskFactoryClass";

  /** Prefixes {@code string} with the dataset-cleaner source namespace. */
  private String getSourcePrefixed(String string) {
    return DatasetCleanerSource.DATASET_CLEANER_SOURCE_PREFIX + "." + string;
  }

  /** Prefixes {@code string} with the source namespace plus a named configuration scope. */
  private String getConfigPrefixed(String configName, String string) {
    String scoped = configName + "." + string;
    return getSourcePrefixed(scoped);
  }

  @Test
  public void testSingleConfig() {
    DatasetCleanerSource cleanerSource = new DatasetCleanerSource();
    SourceState state = new SourceState();

    Properties jobProps = new Properties();
    jobProps.put(KEY1, VALUE1);
    jobProps.put(KEY2, VALUE2);
    state.setProps(jobProps, new Properties());

    List<WorkUnit> units = cleanerSource.getWorkunits(state);

    // With no named configurations declared, a single work unit is produced that carries the
    // cleaner task factory plus the unscoped job properties.
    Assert.assertEquals(units.size(), 1);
    WorkUnit unit = units.get(0);
    Assert.assertEquals(unit.getProp(TASK_FACTORY_KEY), DatasetCleanerTaskFactory.class.getName());
    Assert.assertEquals(unit.getProp(KEY1), VALUE1);
    Assert.assertEquals(unit.getProp(KEY2), VALUE2);
  }

  @Test
  public void testMultipleConfig() {
    DatasetCleanerSource cleanerSource = new DatasetCleanerSource();
    SourceState state = new SourceState();

    Properties jobProps = new Properties();
    jobProps.put(DatasetCleanerSource.DATASET_CLEANER_CONFIGURATIONS, "config1, config2");

    // Config-scoped value must override both the source-scoped and the base value for KEY1.
    jobProps.put(KEY1, BAD_VALUE);
    jobProps.put(getSourcePrefixed(KEY1), BAD_VALUE);
    jobProps.put(getConfigPrefixed("config1", KEY1), VALUE1);

    // Source-scoped value must override the base value for KEY2.
    jobProps.put(KEY2, BAD_VALUE);
    jobProps.put(getSourcePrefixed(KEY2), VALUE2);

    state.setProps(jobProps, new Properties());

    List<WorkUnit> units = cleanerSource.getWorkunits(state);

    // One work unit per named configuration, each tagged with the cleaner task factory.
    Assert.assertEquals(units.size(), 2);
    WorkUnit first = units.get(0);
    WorkUnit second = units.get(1);
    Assert.assertEquals(first.getProp(TASK_FACTORY_KEY), DatasetCleanerTaskFactory.class.getName());
    Assert.assertEquals(second.getProp(TASK_FACTORY_KEY), DatasetCleanerTaskFactory.class.getName());

    // "config1" sees its config-scoped override of KEY1; "config2" falls back to the base value.
    // Both inherit the source-scoped KEY2.
    Assert.assertEquals(first.getProp(KEY1), VALUE1);
    Assert.assertEquals(first.getProp(KEY2), VALUE2);
    Assert.assertEquals(second.getProp(KEY1), BAD_VALUE);
    Assert.assertEquals(second.getProp(KEY2), VALUE2);
  }
}
2,295
0
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/retention
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/retention/action/RetentionActionTest.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.data.management.retention.action; import org.testng.Assert; import org.testng.annotations.Test; import com.google.common.collect.ImmutableMap; import com.typesafe.config.ConfigFactory; import org.apache.gobblin.data.management.policy.SelectAfterTimeBasedPolicy; @Test(groups = { "gobblin.data.management.retention" }) public class RetentionActionTest { @Test public void testSelectionPolicyInit() throws Exception { // Using alias AccessControlAction testRetentionAction = new AccessControlAction(ConfigFactory.parseMap(ImmutableMap.<String, String> of("selection.policy.class", "SelectAfterTimeBasedPolicy", "selection.timeBased.lookbackTime", "7d")), null, ConfigFactory.empty()); Assert.assertEquals(testRetentionAction.getSelectionPolicy().getClass(), SelectAfterTimeBasedPolicy.class); // Using complete class name testRetentionAction = new AccessControlAction(ConfigFactory.parseMap(ImmutableMap.<String, String> of("selection.policy.class", SelectAfterTimeBasedPolicy.class.getName(), "selection.timeBased.lookbackTime", "7d")), null, ConfigFactory.empty()); Assert.assertEquals(testRetentionAction.getSelectionPolicy().getClass(), SelectAfterTimeBasedPolicy.class); } }
2,296
0
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/retention
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/retention/version/HiveDatasetVersionCleanerTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.data.management.retention.version;

import java.io.IOException;

import org.testng.Assert;
import org.testng.annotations.Test;

import com.google.common.base.Optional;
import com.google.common.collect.ImmutableMap;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;


/**
 * Unit tests for {@code HiveDatasetVersionCleaner.shouldReplacePartition}, the predicate deciding
 * whether a purged Hive partition should be re-registered against a replacement db/table.
 * Replacement requires: the config flag enabled, both replacement names present, and the
 * replacement db/table differing from the replaced db/table in at least one component.
 */
public class HiveDatasetVersionCleanerTest {

  // Base config with partition replacement enabled; one test below builds its own disabled config.
  private static Config config = ConfigFactory.parseMap(ImmutableMap.<String, String> of(
      HiveDatasetVersionCleaner.SHOULD_REPLACE_PARTITION_KEY, "true"));

  // Db / table being purged, and the candidate replacement db / table.
  private static String replacedDb = "db_orc";
  private static String replacedTable = "table_orc";
  private static String replacementDb = "db_avro";
  private static String replacementTable = "table_avro";

  @Test
  public void testShouldReplacePartitionHappyPath() throws IOException {
    // Happy path 1:
    // - Replacement is enabled
    // - Replacement DB, Table names are specified and are different than Replaced DB and Table names
    Assert.assertTrue(HiveDatasetVersionCleaner.shouldReplacePartition(config, replacedDb, replacedTable,
        Optional.of(replacementDb), Optional.of(replacementTable)),
        "Replaced and replacement db / table are different. " + "This should have been true. ");

    // Happy path 2:
    // - Replacement is enabled
    // - Replacement DB, Table names are specified and Replaced DB name is different
    Assert.assertTrue(HiveDatasetVersionCleaner.shouldReplacePartition(config, replacedDb, replacedTable,
        Optional.of(replacementDb), Optional.of(replacedTable)),
        "Replaced and replacement db / table are different. " + "This should have been true. ");

    // Happy path 3:
    // - Replacement is enabled
    // - Replacement DB, Table names are specified and Replaced Table name is different
    Assert.assertTrue(HiveDatasetVersionCleaner.shouldReplacePartition(config, replacedDb, replacedTable,
        Optional.of(replacedDb), Optional.of(replacementTable)),
        "Replaced and replacement db / table are different. " + "This should have been true. ");
  }

  @Test
  public void testShouldReplacePartitionDisabledByConfig() throws IOException {
    // Even with valid replacement names, the config flag alone must disable replacement.
    Config config = ConfigFactory.parseMap(ImmutableMap.<String, String> of(
        HiveDatasetVersionCleaner.SHOULD_REPLACE_PARTITION_KEY, "false"));
    Assert.assertFalse(HiveDatasetVersionCleaner.shouldReplacePartition(config, replacedDb, replacedTable,
        Optional.of(replacementDb), Optional.of(replacementTable)),
        "Property governing partition replacement is set to false. " + "This should have been false. ");
  }

  @Test
  public void testShouldReplacePartitionDisabledByCodePath() throws IOException {
    // Replacement DB and Table names are same as Replaced DB and Table names: nothing to replace.
    Assert.assertFalse(HiveDatasetVersionCleaner.shouldReplacePartition(config, replacedDb, replacedTable,
        Optional.of(replacedDb), Optional.of(replacedTable)),
        "Replaced and replacement db / table are same. " + "This should have been false. ");

    // Replacement DB name is missing.
    Assert.assertFalse(HiveDatasetVersionCleaner.shouldReplacePartition(config, replacedDb, replacedTable,
        Optional.<String>absent(), Optional.of(replacementTable)),
        "Replacement DB name is missing. " + "This should have been false. ");

    // Replacement Table name is missing.
    Assert.assertFalse(HiveDatasetVersionCleaner.shouldReplacePartition(config, replacedDb, replacedTable,
        Optional.of(replacementDb), Optional.<String>absent()),
        "Replacement table name is missing. " + "This should have been false. ");

    // Both replacement DB and Table names are missing.
    Assert.assertFalse(HiveDatasetVersionCleaner.shouldReplacePartition(config, replacedDb, replacedTable,
        Optional.<String>absent(), Optional.<String>absent()),
        "Replacement DB and table names are missing. " + "This should have been false. ");
  }
}
2,297
0
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/retention
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/retention/sql/SqlUdfs.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.data.management.retention.sql; import java.sql.Timestamp; import java.util.concurrent.TimeUnit; import lombok.extern.slf4j.Slf4j; import org.apache.commons.lang.StringUtils; /** * Holds User defined functions used by Derby db. */ @Slf4j public final class SqlUdfs { /** * Returns a value after subtracting a {@link Timestamp} from another. Value is in the {@link TimeUnit} provided in * <code>unit</code> string. * * @param timestamp1 first {@link Timestamp} * @param timestamp2 second {@link Timestamp} * @param unit for the difference. Any {@link TimeUnit#values()} are supported units. * @return */ public static long timestamp_diff(Timestamp timestamp1, Timestamp timestamp2, String unit) { return date_diff(timestamp1.getTime(), timestamp2.getTime(), unit); } private static long date_diff(long timestamp1, long timestamp2, String unitString) { try { TimeUnit unit = TimeUnit.valueOf(TimeUnit.class, StringUtils.upperCase(unitString)); return unit.convert(timestamp1 - timestamp2, TimeUnit.MILLISECONDS); } catch (IllegalArgumentException e) { log.error("Valid input for unitString is java.util.concurrent.TimeUnit", e); } return 0l; } }
2,298
0
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/retention
Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/retention/sql/SqlBasedRetentionPoc.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.data.management.retention.sql;

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Timestamp;

import com.zaxxer.hikari.HikariDataSource;

import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.fs.Path;
import org.joda.time.DateTime;
import org.joda.time.format.DateTimeFormat;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;


/**
 * A Proof of concept to represent Retention policies as SQL queries. The POC uses Apache Derby in-memory database to
 * store directory structure metadata.
 *
 * NOTE(review): PreparedStatements and ResultSets created in the tests are never closed; acceptable
 * for an in-memory POC, but they should be try-with-resources if this graduates to real code.
 */
public class SqlBasedRetentionPoc {

  private static final int TWO_YEARS_IN_DAYS = 365 * 2;
  private static final String DAILY_PARTITION_PATTERN = "yyyy/MM/dd";

  // Pooled datasource backing the embedded Derby instance; closed in cleanUp().
  private HikariDataSource dataSource;
  // Single shared connection used by all tests (auto-commit disabled).
  private Connection connection;

  /**
   * <ul>
   * <li>Create the in-memory database and connect
   * <li>Create tables for snapshots and daily_paritions
   * <li>Attach all user defined functions from {@link SqlUdfs}
   * </ul>
   *
   */
  @BeforeClass
  public void setup() throws SQLException {
    dataSource = new HikariDataSource();
    dataSource.setDriverClassName("org.apache.derby.jdbc.EmbeddedDriver");
    dataSource.setJdbcUrl("jdbc:derby:memory:derbypoc;create=true");
    // Local variable intentionally shadows the field until fully configured, then is published.
    Connection connection = dataSource.getConnection();
    connection.setAutoCommit(false);
    this.connection = connection;

    execute("CREATE TABLE Snapshots (dataset_path VARCHAR(255), name VARCHAR(255), path VARCHAR(255), ts TIMESTAMP, row_count bigint)");
    execute("CREATE TABLE Daily_Partitions (dataset_path VARCHAR(255), path VARCHAR(255), ts TIMESTAMP)");

    // Register UDFs
    execute("CREATE FUNCTION TIMESTAMP_DIFF(timestamp1 TIMESTAMP, timestamp2 TIMESTAMP, unitString VARCHAR(50)) RETURNS BIGINT PARAMETER STYLE JAVA NO SQL LANGUAGE JAVA EXTERNAL NAME 'org.apache.gobblin.data.management.retention.sql.SqlUdfs.timestamp_diff'");
  }

  @AfterClass
  public void cleanUp() throws Exception {
    dataSource.close();
  }

  /**
   *
   * The test inserts a few test snapshots. A query is issued to retrieve the two most recent snapshots
   */
  @Test
  public void testKeepLast2Snapshots() throws Exception {
    insertSnapshot(new Path("/data/databases/Forum/Comments/1453743903767-PT-440505235"));
    insertSnapshot(new Path("/data/databases/Forum/Comments/1453830569999-PT-440746131"));
    insertSnapshot(new Path("/data/databases/Forum/Comments/1453860526464-PT-440847244"));
    insertSnapshot(new Path("/data/databases/Forum/Comments/1453889323804-PT-440936752"));

    // Derby does not support LIMIT keyword. The suggested workaround is to setMaxRows in the PreparedStatement
    PreparedStatement statement = connection.prepareStatement("SELECT name FROM Snapshots ORDER BY ts desc");
    statement.setMaxRows(2);
    ResultSet rs = statement.executeQuery();

    // Snapshots to be retained (most recent first)
    rs.next();
    Assert.assertEquals(rs.getString(1), "1453889323804-PT-440936752");
    rs.next();
    Assert.assertEquals(rs.getString(1), "1453860526464-PT-440847244");
  }

  /**
   * The test inserts a few time partitioned datasets. A query is issued that retrieves the partitions older than 2
   * years.
   */
  @Test
  public void testKeepLast2YearsOfDailyPartitions() throws Exception {
    // Ages below are relative to the fixed "current" date 2016/01/25 used further down.
    insertDailyPartition(new Path("/data/tracking/MetricEvent/daily/2015/11/25")); // 61 days
    insertDailyPartition(new Path("/data/tracking/MetricEvent/daily/2015/12/01")); // 55 days
    insertDailyPartition(new Path("/data/tracking/MetricEvent/daily/2014/11/21")); // 430 days
    insertDailyPartition(new Path("/data/tracking/MetricEvent/daily/2014/01/22")); // 733 days (more than 2 years)
    insertDailyPartition(new Path("/data/tracking/MetricEvent/daily/2013/01/25")); // 1095 days (more than 2 years)

    // Use a fixed current timestamp for consistent test results.
    Timestamp currentTimestamp =
        new Timestamp(DateTimeFormat.forPattern(DAILY_PARTITION_PATTERN).parseDateTime("2016/01/25").getMillis());

    PreparedStatement statement =
        connection.prepareStatement("SELECT path FROM Daily_Partitions WHERE TIMESTAMP_DIFF(?, ts, 'Days') > ?");
    statement.setTimestamp(1, currentTimestamp);
    statement.setLong(2, TWO_YEARS_IN_DAYS);
    ResultSet rs = statement.executeQuery();

    // Daily partitions to be cleaned (older than two years)
    rs.next();
    Assert.assertEquals(rs.getString(1), "/data/tracking/MetricEvent/daily/2014/01/22");
    rs.next();
    Assert.assertEquals(rs.getString(1), "/data/tracking/MetricEvent/daily/2013/01/25");
  }

  // Parses "<dataset>/<millis>-PT-<rowCount>" snapshot paths and inserts a Snapshots row.
  private void insertSnapshot(Path snapshotPath) throws Exception {
    String datasetPath = StringUtils.substringBeforeLast(snapshotPath.toString(), Path.SEPARATOR);
    String snapshotName = StringUtils.substringAfterLast(snapshotPath.toString(), Path.SEPARATOR);
    long ts = Long.parseLong(StringUtils.substringBefore(snapshotName, "-PT-"));
    long recordCount = Long.parseLong(StringUtils.substringAfter(snapshotName, "-PT-"));
    PreparedStatement insert = connection.prepareStatement("INSERT INTO Snapshots VALUES (?, ?, ?, ?, ?)");
    insert.setString(1, datasetPath);
    insert.setString(2, snapshotName);
    insert.setString(3, snapshotPath.toString());
    insert.setTimestamp(4, new Timestamp(ts));
    insert.setLong(5, recordCount);
    insert.executeUpdate();
  }

  // Parses "<dataset>/daily/yyyy/MM/dd" paths and inserts a Daily_Partitions row.
  private void insertDailyPartition(Path dailyPartitionPath) throws Exception {
    String datasetPath = StringUtils.substringBeforeLast(dailyPartitionPath.toString(), Path.SEPARATOR + "daily");
    DateTime partition = DateTimeFormat.forPattern(DAILY_PARTITION_PATTERN).parseDateTime(
        StringUtils.substringAfter(dailyPartitionPath.toString(), "daily" + Path.SEPARATOR));
    PreparedStatement insert = connection.prepareStatement("INSERT INTO Daily_Partitions VALUES (?, ?, ?)");
    insert.setString(1, datasetPath);
    insert.setString(2, dailyPartitionPath.toString());
    insert.setTimestamp(3, new Timestamp(partition.getMillis()));
    insert.executeUpdate();
  }

  // Executes a DDL/DML statement on the shared connection.
  private void execute(String query) throws SQLException {
    PreparedStatement insertStatement = connection.prepareStatement(query);
    insertStatement.executeUpdate();
  }
}
2,299