index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules | Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/orchestration/DagManager.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.orchestration;
import com.codahale.metrics.Meter;
import com.codahale.metrics.Timer;
import com.google.common.base.Joiner;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.eventbus.Subscribe;
import com.google.common.util.concurrent.AbstractIdleService;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigException;
import com.typesafe.config.ConfigFactory;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingDeque;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.metrics.ContextAwareGauge;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.ServiceMetricNames;
import org.apache.gobblin.metrics.event.EventSubmitter;
import org.apache.gobblin.metrics.event.TimingEvent;
import org.apache.gobblin.runtime.api.DagActionStore;
import org.apache.gobblin.runtime.api.FlowSpec;
import org.apache.gobblin.runtime.api.JobSpec;
import org.apache.gobblin.runtime.api.Spec;
import org.apache.gobblin.runtime.api.SpecNotFoundException;
import org.apache.gobblin.runtime.api.SpecProducer;
import org.apache.gobblin.runtime.api.TopologySpec;
import org.apache.gobblin.runtime.spec_catalog.FlowCatalog;
import org.apache.gobblin.service.ExecutionStatus;
import org.apache.gobblin.service.FlowId;
import org.apache.gobblin.service.ServiceConfigKeys;
import org.apache.gobblin.service.modules.flow.SpecCompiler;
import org.apache.gobblin.service.modules.flowgraph.Dag;
import org.apache.gobblin.service.modules.flowgraph.Dag.DagNode;
import org.apache.gobblin.service.modules.spec.JobExecutionPlan;
import org.apache.gobblin.service.modules.utils.FlowCompilationValidationHelper;
import org.apache.gobblin.service.modules.utils.SharedFlowMetricsSingleton;
import org.apache.gobblin.service.monitoring.FlowStatusGenerator;
import org.apache.gobblin.service.monitoring.JobStatus;
import org.apache.gobblin.service.monitoring.JobStatusRetriever;
import org.apache.gobblin.service.monitoring.KillFlowEvent;
import org.apache.gobblin.service.monitoring.ResumeFlowEvent;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
import static org.apache.gobblin.service.ExecutionStatus.*;
/**
* This class implements a manager to manage the life cycle of a {@link Dag}. A {@link Dag} is submitted to the
* {@link DagManager} by the {@link Orchestrator#orchestrate(Spec)} method. On receiving a {@link Dag}, the
* {@link DagManager} first persists the {@link Dag} to the {@link DagStateStore}, and then submits it to the specific
* {@link DagManagerThread}'s {@link BlockingQueue} based on the flowExecutionId of the Flow.
* This guarantees that each {@link Dag} received by the {@link DagManager} can be recovered in case of a leadership
* change or service restart.
*
* The implementation of the {@link DagManager} is multi-threaded. Each {@link DagManagerThread} polls the
* {@link BlockingQueue} for new Dag submissions at fixed intervals. It dequeues any newly submitted Dags and coordinates
* the execution of individual jobs in the Dag. The coordination logic involves polling the {@link JobStatus}es of running
* jobs. Upon completion of a job, it will either schedule the next job in the Dag (on SUCCESS) or mark the Dag as failed
* (on FAILURE). Upon completion of a Dag execution, it will perform the required clean up actions.
*
* For deleteSpec/cancellation requests for a flow URI, {@link DagManager} finds out the flowExecutionId using
* {@link JobStatusRetriever}, and forwards the request to the {@link DagManagerThread} which handled the addSpec request
* for this flow. We need separate {@link BlockingQueue}s for each {@link DagManagerThread} because
* cancellation needs the information which is stored only in the same {@link DagManagerThread}.
*
* The {@link DagManager} is active only in the leader mode. To ensure fault tolerance, each {@link Dag} managed by a {@link DagManager} is
* checkpointed to a persistent location. On start up or leadership change,
* the {@link DagManager} loads all the checkpointed {@link Dag}s and adds them to the {@link BlockingQueue}.
*/
@Alpha
@Slf4j
@Singleton
public class DagManager extends AbstractIdleService {
public static final String DEFAULT_FLOW_FAILURE_OPTION = FailureOption.FINISH_ALL_POSSIBLE.name();
public static final String DAG_MANAGER_PREFIX = "gobblin.service.dagManager.";
private static final Integer DEFAULT_JOB_STATUS_POLLING_INTERVAL = 10;
public static final Integer DEFAULT_NUM_THREADS = 3;
private static final Integer TERMINATION_TIMEOUT = 30;
public static final String NUM_THREADS_KEY = DAG_MANAGER_PREFIX + "numThreads";
public static final String JOB_STATUS_POLLING_INTERVAL_KEY = DAG_MANAGER_PREFIX + "pollingInterval";
private static final String DAG_STATESTORE_CLASS_KEY = DAG_MANAGER_PREFIX + "dagStateStoreClass";
private static final String FAILED_DAG_STATESTORE_PREFIX = "failedDagStateStore";
private static final String FAILED_DAG_RETENTION_TIME_UNIT = FAILED_DAG_STATESTORE_PREFIX + ".retention.timeUnit";
private static final String DEFAULT_FAILED_DAG_RETENTION_TIME_UNIT = "DAYS";
private static final String FAILED_DAG_RETENTION_TIME = FAILED_DAG_STATESTORE_PREFIX + ".retention.time";
private static final long DEFAULT_FAILED_DAG_RETENTION_TIME = 7L;
// Re-emit the final flow status if not detected within 5 minutes
private static final long DAG_FLOW_STATUS_TOLERANCE_TIME_MILLIS = TimeUnit.MINUTES.toMillis(5);
public static final String FAILED_DAG_POLLING_INTERVAL = FAILED_DAG_STATESTORE_PREFIX + ".retention.pollingIntervalMinutes";
public static final Integer DEFAULT_FAILED_DAG_POLLING_INTERVAL = 60;
public static final String DAG_MANAGER_HEARTBEAT = ServiceMetricNames.GOBBLIN_SERVICE_PREFIX_WITH_DELIMITER + "dagManager.heartbeat-%s";
// Default job start SLA time if configured, measured in minutes. Default is 10 minutes
private static final String JOB_START_SLA_TIME = DAG_MANAGER_PREFIX + ConfigurationKeys.GOBBLIN_JOB_START_SLA_TIME;
private static final String JOB_START_SLA_UNITS = DAG_MANAGER_PREFIX + ConfigurationKeys.GOBBLIN_JOB_START_SLA_TIME_UNIT;
private static final int MAX_HOUSEKEEPING_THREAD_DELAY = 180;
private static final int INITIAL_HOUSEKEEPING_THREAD_DELAY = 2;
/**
* Action to be performed on a {@link Dag}, in case of a job failure. Currently, we allow 2 modes:
* <ul>
* <li> FINISH_RUNNING, which allows currently running jobs to finish.</li>
* <li> FINISH_ALL_POSSIBLE, which allows every possible job in the Dag to finish, as long as all the dependencies
* of the job are successful.</li>
* </ul>
*/
public enum FailureOption {
  FINISH_RUNNING("FINISH_RUNNING"),
  CANCEL("CANCEL"),
  FINISH_ALL_POSSIBLE("FINISH_ALL_POSSIBLE");

  // Display string for the option; identical to name() for every constant.
  private final String optionName;

  FailureOption(final String optionName) {
    this.optionName = optionName;
  }

  /** Returns the display string of this failure option. */
  @Override
  public String toString() {
    return optionName;
  }
}
@Getter
@EqualsAndHashCode
public static class DagId {
// Triple that uniquely identifies one execution of a flow.
String flowGroup;
String flowName;
String flowExecutionId;
public DagId(String flowGroup, String flowName, String flowExecutionId) {
this.flowGroup = flowGroup;
this.flowName = flowName;
this.flowExecutionId = flowExecutionId;
}
// Serialized form "<flowGroup>_<flowName>_<flowExecutionId>"; used as the string key for
// failed-dag lookups (see DagManagerThread#beginResumingDag).
@Override
public String toString() {
return Joiner.on("_").join(flowGroup, flowName, flowExecutionId);
}
}
private final BlockingQueue<Dag<JobExecutionPlan>>[] runQueue;
private final BlockingQueue<DagId>[] cancelQueue;
private final BlockingQueue<DagId>[] resumeQueue;
DagManagerThread[] dagManagerThreads;
private final ScheduledExecutorService scheduledExecutorPool;
private final boolean instrumentationEnabled;
private DagStateStore dagStateStore;
private Map<URI, TopologySpec> topologySpecMap;
private int houseKeepingThreadInitialDelay = INITIAL_HOUSEKEEPING_THREAD_DELAY;
@Getter
private ScheduledExecutorService houseKeepingThreadPool;
@Getter
private final Integer numThreads;
private final Integer pollingInterval;
private final Integer retentionPollingInterval;
protected final Long defaultJobStartSlaTimeMillis;
@Getter
private final JobStatusRetriever jobStatusRetriever;
private final FlowStatusGenerator flowStatusGenerator;
private final UserQuotaManager quotaManager;
private final SpecCompiler specCompiler;
private final boolean isFlowConcurrencyEnabled;
private final FlowCatalog flowCatalog;
private final FlowCompilationValidationHelper flowCompilationValidationHelper;
private final Config config;
private final Optional<EventSubmitter> eventSubmitter;
private final long failedDagRetentionTime;
private final DagManagerMetrics dagManagerMetrics;
@Inject(optional=true)
protected Optional<DagActionStore> dagActionStore;
private volatile boolean isActive = false;
/**
 * Fully-parameterized constructor.
 *
 * @param config service configuration; keys under {@code gobblin.service.dagManager.} tune thread
 *               count, polling intervals, state-store class and failed-dag retention
 * @param jobStatusRetriever used by DagManagerThreads to poll job statuses
 * @param sharedFlowMetricsSingleton shared flow metrics, passed to the compilation validation helper
 * @param flowStatusGenerator used for flow-level status/concurrency checks during compilation validation
 * @param flowCatalog catalog used to look up FlowSpecs when handling LAUNCH dag actions
 * @param instrumentationEnabled when true, a MetricContext and EventSubmitter are created; otherwise
 *                               eventSubmitter is absent and no timing events are emitted
 */
public DagManager(Config config, JobStatusRetriever jobStatusRetriever,
SharedFlowMetricsSingleton sharedFlowMetricsSingleton, FlowStatusGenerator flowStatusGenerator,
FlowCatalog flowCatalog, boolean instrumentationEnabled) {
this.config = config;
this.numThreads = ConfigUtils.getInt(config, NUM_THREADS_KEY, DEFAULT_NUM_THREADS);
// One run/cancel/resume queue per DagManagerThread; a dag is routed to a queue by flowExecutionId.
this.runQueue = (BlockingQueue<Dag<JobExecutionPlan>>[]) initializeDagQueue(this.numThreads);
this.cancelQueue = (BlockingQueue<DagId>[]) initializeDagQueue(this.numThreads);
this.resumeQueue = (BlockingQueue<DagId>[]) initializeDagQueue(this.numThreads);
this.scheduledExecutorPool = Executors.newScheduledThreadPool(numThreads);
this.pollingInterval = ConfigUtils.getInt(config, JOB_STATUS_POLLING_INTERVAL_KEY, DEFAULT_JOB_STATUS_POLLING_INTERVAL);
this.retentionPollingInterval = ConfigUtils.getInt(config, FAILED_DAG_POLLING_INTERVAL, DEFAULT_FAILED_DAG_POLLING_INTERVAL);
this.instrumentationEnabled = instrumentationEnabled;
MetricContext metricContext = null;
if (instrumentationEnabled) {
metricContext = Instrumented.getMetricContext(ConfigUtils.configToState(ConfigFactory.empty()), getClass());
this.eventSubmitter = Optional.of(new EventSubmitter.Builder(metricContext, "org.apache.gobblin.service").build());
} else {
// No instrumentation: downstream code must check eventSubmitter.isPresent() before emitting events.
this.eventSubmitter = Optional.absent();
}
this.dagManagerMetrics = new DagManagerMetrics();
// Job-start SLA: configured value converted to millis using the configured (or fallback) time unit.
TimeUnit jobStartTimeUnit = TimeUnit.valueOf(ConfigUtils.getString(config, JOB_START_SLA_UNITS, ConfigurationKeys.FALLBACK_GOBBLIN_JOB_START_SLA_TIME_UNIT));
this.defaultJobStartSlaTimeMillis = jobStartTimeUnit.toMillis(ConfigUtils.getLong(config, JOB_START_SLA_TIME, ConfigurationKeys.FALLBACK_GOBBLIN_JOB_START_SLA_TIME));
this.jobStatusRetriever = jobStatusRetriever;
this.flowStatusGenerator = flowStatusGenerator;
this.specCompiler = GobblinConstructorUtils.invokeConstructor(SpecCompiler.class, ConfigUtils.getString(config,
ServiceConfigKeys.GOBBLIN_SERVICE_FLOWCOMPILER_CLASS_KEY,
ServiceConfigKeys.DEFAULT_GOBBLIN_SERVICE_FLOWCOMPILER_CLASS), config);
this.isFlowConcurrencyEnabled = ConfigUtils.getBoolean(config, ServiceConfigKeys.FLOW_CONCURRENCY_ALLOWED,
ServiceConfigKeys.DEFAULT_FLOW_CONCURRENCY_ALLOWED);
this.quotaManager = GobblinConstructorUtils.invokeConstructor(UserQuotaManager.class,
ConfigUtils.getString(config, ServiceConfigKeys.QUOTA_MANAGER_CLASS, ServiceConfigKeys.DEFAULT_QUOTA_MANAGER),
config);
this.flowCatalog = flowCatalog;
this.flowCompilationValidationHelper = new FlowCompilationValidationHelper(sharedFlowMetricsSingleton, specCompiler,
quotaManager, eventSubmitter, flowStatusGenerator, isFlowConcurrencyEnabled);
// Failed-dag retention: configured value converted to millis (default 7 DAYS).
TimeUnit timeUnit = TimeUnit.valueOf(ConfigUtils.getString(config, FAILED_DAG_RETENTION_TIME_UNIT, DEFAULT_FAILED_DAG_RETENTION_TIME_UNIT));
this.failedDagRetentionTime = timeUnit.toMillis(ConfigUtils.getLong(config, FAILED_DAG_RETENTION_TIME, DEFAULT_FAILED_DAG_RETENTION_TIME));
}
/**
 * Instantiates the configured {@link DagStateStore} implementation (default: {@link FSDagStateStore})
 * via its longest matching constructor. Reflection failures are rethrown as {@link RuntimeException}.
 */
DagStateStore createDagStateStore(Config config, Map<URI, TopologySpec> topologySpecMap) {
  String dagStateStoreClassName =
      ConfigUtils.getString(config, DAG_STATESTORE_CLASS_KEY, FSDagStateStore.class.getName());
  try {
    Class<?> dagStateStoreClass = Class.forName(dagStateStoreClassName);
    return (DagStateStore) GobblinConstructorUtils.invokeLongestConstructor(dagStateStoreClass, config, topologySpecMap);
  } catch (ReflectiveOperationException e) {
    throw new RuntimeException(e);
  }
}
// Builds the per-thread queue array: one empty LinkedBlockingDeque per DagManagerThread slot.
private static LinkedBlockingDeque<?>[] initializeDagQueue(int numThreads) {
  LinkedBlockingDeque<?>[] queues = new LinkedBlockingDeque[numThreads];
  int slot = 0;
  while (slot < numThreads) {
    queues[slot] = new LinkedBlockingDeque<>();
    slot++;
  }
  return queues;
}
/**
 * Guice-injected constructor; delegates to the full constructor with instrumentation enabled.
 */
@Inject
public DagManager(Config config, JobStatusRetriever jobStatusRetriever,
SharedFlowMetricsSingleton sharedFlowMetricsSingleton, FlowStatusGenerator flowStatusGenerator,
FlowCatalog flowCatalog) {
this(config, jobStatusRetriever, sharedFlowMetricsSingleton, flowStatusGenerator, flowCatalog, true);
}
/** Do Nothing on service startup. Scheduling of {@link DagManagerThread}s and loading of any {@link Dag}s is done
 * during leadership change — see {@link #setActive(boolean)}.
 */
@Override
protected void startUp() {
//Intentionally empty: all real activation work happens in setActive(true).
}
/**
* Method to submit a {@link Dag} to the {@link DagManager}. The {@link DagManager} optionally persists the
* submitted dag to the {@link DagStateStore} and then adds the dag to a {@link BlockingQueue} to be picked up
* by one of the {@link DagManagerThread}s.
* @param dag {@link Dag} to be added
* @param persist whether to persist the dag to the {@link DagStateStore}
* @param setStatus if true, set all jobs in the dag to pending
* Note this should only be called from the {@link Orchestrator} or {@link org.apache.gobblin.service.monitoring.DagActionStoreChangeMonitor}
*/
public synchronized void addDag(Dag<JobExecutionPlan> dag, boolean persist, boolean setStatus) throws IOException {
  // TODO: Used to track missing dag issue, remove later as needed
  log.info("Add dag (persist: {}, setStatus: {}): {}", persist, setStatus, dag);
  if (!isActive) {
    // Only the leader instance accepts dags; followers drop the request.
    log.warn("Skipping add dag because this instance of DagManager is not active for dag: {}", dag);
    return;
  }
  if (persist) {
    // Checkpoint the dag so it can be recovered on restart or leadership change.
    this.dagStateStore.writeCheckpoint(dag);
  }
  int queueId = DagManagerUtils.getDagQueueId(dag, this.numThreads);
  // Add the dag to the specific queue determined by flowExecutionId.
  // Flow cancellation request has to be forwarded to the same DagManagerThread where the
  // flow create request was forwarded. This is because Azkaban Exec Id is stored in the DagNode of the
  // specific DagManagerThread queue.
  if (!this.runQueue[queueId].offer(dag)) {
    // Fix: the original message concatenated without spaces ("...dag<id>to queue").
    throw new IOException("Could not add dag " + DagManagerUtils.generateDagId(dag) + " to queue");
  }
  if (setStatus) {
    submitEventsAndSetStatus(dag);
  }
}
/**
 * Emits a JOB_PENDING timing event for every node of {@code dag} and marks each node PENDING.
 * No-op when instrumentation is disabled (eventSubmitter absent).
 */
private void submitEventsAndSetStatus(Dag<JobExecutionPlan> dag) {
  if (!this.eventSubmitter.isPresent()) {
    return;
  }
  for (DagNode<JobExecutionPlan> dagNode : dag.getNodes()) {
    JobExecutionPlan jobExecutionPlan = DagManagerUtils.getJobExecutionPlan(dagNode);
    Map<String, String> jobMetadata = TimingEventUtils.getJobMetadata(Maps.newHashMap(), jobExecutionPlan);
    this.eventSubmitter.get().getTimingEvent(TimingEvent.LauncherTimings.JOB_PENDING).stop(jobMetadata);
    jobExecutionPlan.setExecutionStatus(PENDING);
  }
}
/**
 * Submits cancellation requests for the flow identified by {@code uri}: the latest (up to 10)
 * execution ids of the flow are looked up and each is forwarded to the appropriate
 * {@link DagManagerThread}'s cancel queue.
 */
synchronized public void stopDag(URI uri) throws IOException {
  String group = FlowSpec.Utils.getFlowGroup(uri);
  String name = FlowSpec.Utils.getFlowName(uri);
  // Fetch up to the 10 most recent executions of this flow to cancel.
  List<Long> executionIds = this.jobStatusRetriever.getLatestExecutionIdsForFlow(name, group, 10);
  log.info("Found {} flows to cancel.", executionIds.size());
  for (long executionId : executionIds) {
    killFlow(group, name, executionId);
  }
}
/**
 * Enqueues a cancellation request for the given flow execution onto the cancel queue of the
 * {@link DagManagerThread} that owns this flowExecutionId.
 *
 * @throws IOException if the cancel queue rejects the request
 */
private void killFlow(String flowGroup, String flowName, long flowExecutionId) throws IOException {
  DagId dagId = DagManagerUtils.generateDagId(flowGroup, flowName, flowExecutionId);
  int queueId = DagManagerUtils.getDagQueueId(flowExecutionId, this.numThreads);
  boolean accepted = this.cancelQueue[queueId].offer(dagId);
  if (!accepted) {
    throw new IOException("Could not add dag " + dagId + " to cancellation queue.");
  }
}
// Eventbus subscriber for kill-flow events; unwraps the event and delegates to handleKillFlowRequest.
@Subscribe
public void handleKillFlowEvent(KillFlowEvent killFlowEvent) {
handleKillFlowRequest(killFlowEvent.getFlowGroup(), killFlowEvent.getFlowName(), killFlowEvent.getFlowExecutionId());
}
// Handles kill flow requests from the subscriber-event model or direct invocation.
// Silently ignored on inactive (non-leader) instances.
public void handleKillFlowRequest(String flowGroup, String flowName, long flowExecutionId) {
  if (!isActive) {
    return;
  }
  log.info("Received kill request for flow ({}, {}, {})", flowGroup, flowName, flowExecutionId);
  try {
    killFlow(flowGroup, flowName, flowExecutionId);
  } catch (IOException e) {
    log.warn("Failed to kill flow", e);
  }
}
// Method used to handle resume flow requests received from subscriber-event model or from direct invocation.
// Silently ignored on inactive (non-leader) instances; a full resume queue is logged, not thrown.
public void handleResumeFlowRequest(String flowGroup, String flowName, long flowExecutionId) {
  if (!isActive) {
    return;
  }
  log.info("Received resume request for flow ({}, {}, {})", flowGroup, flowName, flowExecutionId);
  DagId dagId = DagManagerUtils.generateDagId(flowGroup, flowName, flowExecutionId);
  int queueId = DagManagerUtils.getDagQueueId(flowExecutionId, this.numThreads);
  if (!this.resumeQueue[queueId].offer(dagId)) {
    // Fix: use parameterized SLF4J logging instead of string concatenation.
    log.warn("Could not add dag {} to resume queue", dagId);
  }
}
// Eventbus subscriber for resume-flow events; unwraps the event and delegates to handleResumeFlowRequest.
@Subscribe
public void handleResumeFlowEvent(ResumeFlowEvent resumeFlowEvent) {
handleResumeFlowRequest(resumeFlowEvent.getFlowGroup(), resumeFlowEvent.getFlowName(), resumeFlowEvent.getFlowExecutionId());
}
// Sets the topology map later passed to createDagStateStore() during activation.
// Synchronized, like setActive/addDag, to avoid racing with activation.
public synchronized void setTopologySpecMap(Map<URI, TopologySpec> topologySpecMap) {
this.topologySpecMap = topologySpecMap;
}
/**
* When a {@link DagManager} becomes active, it loads the serialized representations of the currently running {@link Dag}s
* from the checkpoint directory, deserializes the {@link Dag}s and adds them to a queue to be consumed by
* the {@link DagManagerThread}s.
* @param active a boolean to indicate if the {@link DagManager} is the leader.
*/
public synchronized void setActive(boolean active) {
  if (this.isActive == active) {
    log.info("DagManager already {}, skipping further actions.", (!active) ? "inactive" : "active");
    return;
  }
  this.isActive = active;
  try {
    if (this.isActive) {
      log.info("Activating DagManager.");
      log.info("Scheduling {} DagManager threads", numThreads);
      //Initializing state store for persisting Dags.
      this.dagStateStore = createDagStateStore(config, topologySpecMap);
      DagStateStore failedDagStateStore =
          createDagStateStore(ConfigUtils.getConfigOrEmpty(config, FAILED_DAG_STATESTORE_PREFIX).withFallback(config),
              topologySpecMap);
      // Synchronized set: shared mutably across all DagManagerThreads and the retention thread.
      Set<String> failedDagIds = Collections.synchronizedSet(failedDagStateStore.getDagIds());
      this.dagManagerMetrics.activate();
      UserQuotaManager quotaManager = GobblinConstructorUtils.invokeConstructor(UserQuotaManager.class,
          ConfigUtils.getString(config, ServiceConfigKeys.QUOTA_MANAGER_CLASS, ServiceConfigKeys.DEFAULT_QUOTA_MANAGER), config);
      quotaManager.init(dagStateStore.getDags());
      //On startup, the service creates DagManagerThreads that are scheduled at a fixed rate.
      this.dagManagerThreads = new DagManagerThread[numThreads];
      for (int i = 0; i < numThreads; i++) {
        DagManagerThread dagManagerThread = new DagManagerThread(jobStatusRetriever, dagStateStore, failedDagStateStore, dagActionStore,
            runQueue[i], cancelQueue[i], resumeQueue[i], instrumentationEnabled, failedDagIds, this.dagManagerMetrics,
            this.defaultJobStartSlaTimeMillis, quotaManager, i);
        this.dagManagerThreads[i] = dagManagerThread;
        this.scheduledExecutorPool.scheduleAtFixedRate(dagManagerThread, 0, this.pollingInterval, TimeUnit.SECONDS);
      }
      FailedDagRetentionThread failedDagRetentionThread = new FailedDagRetentionThread(failedDagStateStore, failedDagIds, failedDagRetentionTime);
      this.scheduledExecutorPool.scheduleAtFixedRate(failedDagRetentionThread, 0, retentionPollingInterval, TimeUnit.MINUTES);
      loadDagFromDagStateStore();
      // Housekeeping: re-sync the dag state store with exponential backoff delays (2, 4, 8, ... minutes).
      this.houseKeepingThreadPool = Executors.newSingleThreadScheduledExecutor();
      for (int delay = houseKeepingThreadInitialDelay; delay < MAX_HOUSEKEEPING_THREAD_DELAY; delay *= 2) {
        this.houseKeepingThreadPool.schedule(() -> {
          try {
            loadDagFromDagStateStore();
          } catch (Exception e ) {
            log.error("failed to sync dag state store due to ", e);
          }}, delay, TimeUnit.MINUTES);
      }
      // Replay any dag actions (kill/resume/launch) persisted before the leadership change.
      if (dagActionStore.isPresent()) {
        Collection<DagActionStore.DagAction> dagActions = dagActionStore.get().getDagActions();
        for (DagActionStore.DagAction action : dagActions) {
          switch (action.getFlowActionType()) {
            case KILL:
              this.handleKillFlowEvent(new KillFlowEvent(action.getFlowGroup(), action.getFlowName(), Long.parseLong(action.getFlowExecutionId())));
              break;
            case RESUME:
              this.handleResumeFlowEvent(new ResumeFlowEvent(action.getFlowGroup(), action.getFlowName(), Long.parseLong(action.getFlowExecutionId())));
              break;
            case LAUNCH:
              this.handleLaunchFlowEvent(action);
              break;
            default:
              log.warn("Unsupported dagAction: " + action.getFlowActionType().toString());
          }
        }
      }
    } else { //Mark the DagManager inactive.
      log.info("Inactivating the DagManager. Shutting down all DagManager threads");
      this.scheduledExecutorPool.shutdown();
      this.dagManagerMetrics.cleanup();
      this.houseKeepingThreadPool.shutdown();
      try {
        this.scheduledExecutorPool.awaitTermination(TERMINATION_TIMEOUT, TimeUnit.SECONDS);
      } catch (InterruptedException e) {
        // Fix: restore the interrupt flag so callers/executors can observe the interruption.
        Thread.currentThread().interrupt();
        log.error("Exception encountered when shutting down DagManager threads.", e);
      }
    }
  } catch (IOException e) {
    log.error("Exception encountered when activating the new DagManager", e);
    throw new RuntimeException(e);
  }
}
/**
* Used by the DagManager to launch a new execution for a flow action event loaded from the DagActionStore upon
* setting this instance of the DagManager to active. Because it may be a completely new DAG not contained in the
* dagStore, we compile the flow to generate the dag before calling addDag(), handling any errors that may result in
* the process.
*/
public void handleLaunchFlowEvent(DagActionStore.DagAction launchAction) {
  Preconditions.checkArgument(launchAction.getFlowActionType() == DagActionStore.FlowActionType.LAUNCH);
  log.info("Handle launch flow event for action {}", launchAction);
  FlowId flowId = launchAction.getFlowId();
  try {
    URI flowUri = FlowSpec.Utils.createFlowSpecUri(flowId);
    FlowSpec spec = (FlowSpec) flowCatalog.getSpecs(flowUri);
    Optional<Dag<JobExecutionPlan>> optionalJobExecutionPlanDag =
        this.flowCompilationValidationHelper.createExecutionPlanIfValid(spec, Optional.absent());
    if (optionalJobExecutionPlanDag.isPresent()) {
      addDag(optionalJobExecutionPlanDag.get(), true, true);
    } else {
      log.warn("Failed flow compilation of spec causing launch flow event to be skipped on startup. Flow {}", flowId);
      this.dagManagerMetrics.incrementFailedLaunchCount();
    }
    // Upon handling the action, delete it so on leadership change this is not duplicated.
    // Fix: guard the optional-injected store instead of calling get() unconditionally, which
    // threw IllegalStateException when no DagActionStore was bound.
    if (this.dagActionStore.isPresent()) {
      this.dagActionStore.get().deleteDagAction(launchAction);
    }
  } catch (URISyntaxException e) {
    log.warn(String.format("Could not create URI object for flowId %s due to exception", flowId), e);
    this.dagManagerMetrics.incrementFailedLaunchCount();
  } catch (SpecNotFoundException e) {
    log.warn(String.format("Spec not found for flowId %s due to exception", flowId), e);
    this.dagManagerMetrics.incrementFailedLaunchCount();
  } catch (IOException e) {
    log.warn(String.format("Failed to add Job Execution Plan for flowId %s OR delete dag action from dagActionStore "
        + "(check stacktrace) due to exception", flowId), e);
    this.dagManagerMetrics.incrementFailedLaunchCount();
  } catch (InterruptedException e) {
    // Fix: restore the interrupt flag before moving on.
    Thread.currentThread().interrupt();
    log.warn(String.format("SpecCompiler failed to reach healthy state before compilation of flowId %s due to "
        + "exception", flowId), e);
    this.dagManagerMetrics.incrementFailedLaunchCount();
  }
}
/**
 * Re-enqueues every checkpointed dag from the state store (without persisting or re-setting status).
 * The isActive flag is re-checked per dag so a concurrent deactivation stops further enqueues.
 */
private void loadDagFromDagStateStore() throws IOException {
  List<Dag<JobExecutionPlan>> checkpointedDags = dagStateStore.getDags();
  log.info("Loading " + checkpointedDags.size() + " dags from dag state store");
  for (Dag<JobExecutionPlan> checkpointedDag : checkpointedDags) {
    if (!this.isActive) {
      continue;
    }
    addDag(checkpointedDag, false, false);
  }
}
/**
* Each {@link DagManagerThread} performs 2 actions when scheduled:
* <ol>
* <li> Dequeues any newly submitted {@link Dag}s from the Dag queue. All the {@link JobExecutionPlan}s which
* are part of the dequeued {@link Dag} will be managed by this thread. </li>
* <li> Polls the job status store for the current job statuses of all the running jobs it manages.</li>
* </ol>
*/
public static class DagManagerThread implements Runnable {
private final Map<DagNode<JobExecutionPlan>, Dag<JobExecutionPlan>> jobToDag = new HashMap<>();
private final Map<String, Dag<JobExecutionPlan>> dags = new HashMap<>();
private final Set<String> failedDagIds;
private final Map<String, Dag<JobExecutionPlan>> resumingDags = new HashMap<>();
// dagToJobs holds a map of dagId to running jobs of that dag
final Map<String, LinkedList<DagNode<JobExecutionPlan>>> dagToJobs = new HashMap<>();
final Map<String, Long> dagToSLA = new HashMap<>();
private final MetricContext metricContext;
private final Set<String> dagIdstoClean = new HashSet<>();
private final Optional<EventSubmitter> eventSubmitter;
private final Optional<Timer> jobStatusPolledTimer;
private final AtomicLong orchestrationDelay = new AtomicLong(0);
private final DagManagerMetrics dagManagerMetrics;
private final UserQuotaManager quotaManager;
private final JobStatusRetriever jobStatusRetriever;
private final DagStateStore dagStateStore;
private final DagStateStore failedDagStateStore;
private final BlockingQueue<Dag<JobExecutionPlan>> queue;
private final BlockingQueue<DagId> cancelQueue;
private final BlockingQueue<DagId> resumeQueue;
private final Long defaultJobStartSlaTimeMillis;
private final Optional<DagActionStore> dagActionStore;
private final Optional<Meter> dagManagerThreadHeartbeat;
/**
 * Constructor.
 *
 * <p>Wires one DagManagerThread to its dedicated run/cancel/resume queues and the shared state
 * stores and metrics. When {@code instrumentationEnabled} is true, a MetricContext, EventSubmitter,
 * job-status timer, orchestration-delay gauge and heartbeat meter are created; otherwise all of
 * those are absent/null and must be presence-checked before use.
 */
DagManagerThread(JobStatusRetriever jobStatusRetriever, DagStateStore dagStateStore, DagStateStore failedDagStateStore,
Optional<DagActionStore> dagActionStore, BlockingQueue<Dag<JobExecutionPlan>> queue, BlockingQueue<DagId> cancelQueue,
BlockingQueue<DagId> resumeQueue, boolean instrumentationEnabled, Set<String> failedDagIds, DagManagerMetrics dagManagerMetrics,
Long defaultJobStartSla, UserQuotaManager quotaManager, int dagMangerThreadId) {
this.jobStatusRetriever = jobStatusRetriever;
this.dagStateStore = dagStateStore;
this.failedDagStateStore = failedDagStateStore;
// failedDagIds is shared across threads (synchronized set created in DagManager#setActive).
this.failedDagIds = failedDagIds;
this.queue = queue;
this.cancelQueue = cancelQueue;
this.resumeQueue = resumeQueue;
this.dagManagerMetrics = dagManagerMetrics;
this.defaultJobStartSlaTimeMillis = defaultJobStartSla;
this.quotaManager = quotaManager;
this.dagActionStore = dagActionStore;
if (instrumentationEnabled) {
this.metricContext = Instrumented.getMetricContext(ConfigUtils.configToState(ConfigFactory.empty()), getClass());
this.eventSubmitter = Optional.of(new EventSubmitter.Builder(this.metricContext, "org.apache.gobblin.service").build());
this.jobStatusPolledTimer = Optional.of(this.metricContext.timer(ServiceMetricNames.JOB_STATUS_POLLED_TIMER));
ContextAwareGauge<Long> orchestrationDelayMetric = metricContext.newContextAwareGauge(ServiceMetricNames.FLOW_ORCHESTRATION_DELAY,
orchestrationDelay::get);
this.metricContext.register(orchestrationDelayMetric);
// Per-thread heartbeat meter, keyed by the thread id.
this.dagManagerThreadHeartbeat = Optional.of(this.metricContext.contextAwareMeter(String.format(DAG_MANAGER_HEARTBEAT, dagMangerThreadId)));
} else {
this.metricContext = null;
this.eventSubmitter = Optional.absent();
this.jobStatusPolledTimer = Optional.absent();
this.dagManagerThreadHeartbeat = Optional.absent();
}
}
/**
* Main body of the {@link DagManagerThread}. Deque the next item from the queue and poll job statuses of currently
* running jobs.
* Because this thread runs in a regular interval, we should avoid doing repetitive work inside it.
*/
@Override
public void run() {
  try {
    // Handle at most one cancellation request per scheduled run.
    DagId nextDagToCancel = cancelQueue.poll();
    if (nextDagToCancel != null) {
      cancelDag(nextDagToCancel);
    }
    // Drain newly submitted dags and initialize each.
    while (!queue.isEmpty()) {
      Dag<JobExecutionPlan> dag = queue.poll();
      if (dag != null) {
        if (dag.isEmpty()) {
          log.warn("Empty dag; ignoring the dag");
          // Fix: previously fell through and still initialized the empty dag,
          // contradicting the log message above.
          continue;
        }
        //Initialize dag.
        initialize(dag);
      } else {
        log.warn("Null dag despite non-empty queue; ignoring the dag");
      }
    }
    // Drain resume requests; actual resume completes later in finishResumingDags().
    while (!resumeQueue.isEmpty()) {
      DagId dagId = resumeQueue.poll();
      beginResumingDag(dagId);
    }
    finishResumingDags();
    log.debug("Polling job statuses..");
    //Poll and update the job statuses of running jobs.
    pollAndAdvanceDag();
    log.debug("Poll done.");
    //Clean up any finished dags
    log.debug("Cleaning up finished dags..");
    cleanUp();
    log.debug("Clean up done");
    Instrumented.markMeter(dagManagerThreadHeartbeat);
  } catch (Exception e) {
    // Catch-all: a throwing run() would silently cancel future scheduled executions.
    log.error(String.format("Exception encountered in %s", getClass().getName()), e);
  }
}
/** Deletes the persisted dag action for {@code dagId}/{@code flowActionType}; no-op if no store is bound. */
private void removeDagActionFromStore(DagId dagId, DagActionStore.FlowActionType flowActionType) throws IOException {
  if (!this.dagActionStore.isPresent()) {
    return;
  }
  DagActionStore.DagAction action =
      new DagActionStore.DagAction(dagId.flowGroup, dagId.flowName, dagId.flowExecutionId, flowActionType);
  this.dagActionStore.get().deleteDagAction(action);
}
/**
* Begin resuming a dag by setting the status of both the dag and the failed/cancelled dag nodes to {@link ExecutionStatus#PENDING_RESUME},
* and also sending events so that this status will be reflected in the job status state store.
*/
private void beginResumingDag(DagId dagIdToResume) throws IOException {
  String dagId= dagIdToResume.toString();
  if (!this.failedDagIds.contains(dagId)) {
    log.warn("No dag found with dagId " + dagId + ", so cannot resume flow");
    removeDagActionFromStore(dagIdToResume, DagActionStore.FlowActionType.RESUME);
    return;
  }
  Dag<JobExecutionPlan> dag = this.failedDagStateStore.getDag(dagId);
  if (dag == null) {
    log.error("Dag " + dagId + " was found in memory but not found in failed dag state store");
    removeDagActionFromStore(dagIdToResume, DagActionStore.FlowActionType.RESUME);
    return;
  }
  long flowResumeTime = System.currentTimeMillis();
  // Set the flow and it's failed or cancelled nodes to PENDING_RESUME so that the flow will be resumed from the point before it failed
  DagManagerUtils.emitFlowEvent(this.eventSubmitter, dag, TimingEvent.FlowTimings.FLOW_PENDING_RESUME);
  for (DagNode<JobExecutionPlan> node : dag.getNodes()) {
    ExecutionStatus executionStatus = node.getValue().getExecutionStatus();
    if (executionStatus.equals(FAILED) || executionStatus.equals(CANCELLED)) {
      node.getValue().setExecutionStatus(PENDING_RESUME);
      // reset currentAttempts because we do not want to count previous execution's attempts in deciding whether to retry a job
      node.getValue().setCurrentAttempts(0);
      DagManagerUtils.incrementJobGeneration(node);
      // Fix: guard the optional before get(); with instrumentation disabled eventSubmitter is
      // Optional.absent() and the unconditional get() threw IllegalStateException.
      if (this.eventSubmitter.isPresent()) {
        Map<String, String> jobMetadata = TimingEventUtils.getJobMetadata(Maps.newHashMap(), node.getValue());
        this.eventSubmitter.get().getTimingEvent(TimingEvent.LauncherTimings.JOB_PENDING_RESUME).stop(jobMetadata);
      }
    }
    // Set flowStartTime so that flow SLA will be based on current time instead of original flow
    node.getValue().setFlowStartTime(flowResumeTime);
  }
  this.resumingDags.put(dagId, dag);
}
/**
 * Finish resuming dags by first verifying the status is correct (flow should be {@link ExecutionStatus#PENDING_RESUME}
 * and jobs should not be {@link ExecutionStatus#FAILED} or {@link ExecutionStatus#CANCELLED}) and then calling
 * {@link #initialize}. This is separated from {@link #beginResumingDag} because it could take some time for the
 * job status state store to reflect the updated status.
 */
private void finishResumingDags() throws IOException {
  // Iterate with an explicit Iterator so ready entries can be removed mid-iteration without risking a
  // ConcurrentModificationException (the previous for-each loop removed from the map it was iterating).
  for (Iterator<Map.Entry<String, Dag<JobExecutionPlan>>> iterator = this.resumingDags.entrySet().iterator(); iterator.hasNext(); ) {
    Map.Entry<String, Dag<JobExecutionPlan>> dag = iterator.next();
    JobStatus flowStatus = pollFlowStatus(dag.getValue());
    // Wait until the job status monitor has reflected the PENDING_RESUME flow status.
    if (flowStatus == null || !flowStatus.getEventName().equals(PENDING_RESUME.name())) {
      continue;
    }

    // The dag is ready only once no job is still reported FAILED or CANCELLED in the state store.
    boolean dagReady = true;
    for (DagNode<JobExecutionPlan> node : dag.getValue().getNodes()) {
      JobStatus jobStatus = pollJobStatus(node);
      if (jobStatus == null || jobStatus.getEventName().equals(FAILED.name()) || jobStatus.getEventName().equals(CANCELLED.name())) {
        dagReady = false;
        break;
      }
    }

    if (dagReady) {
      this.dagStateStore.writeCheckpoint(dag.getValue());
      this.failedDagStateStore.cleanUp(dag.getValue());
      removeDagActionFromStore(DagManagerUtils.generateDagId(dag.getValue()), DagActionStore.FlowActionType.RESUME);
      this.failedDagIds.remove(dag.getKey());
      iterator.remove();
      initialize(dag.getValue());
    }
  }
}
/**
 * Cancels all currently running jobs of the dag identified by the given id, records the cancellation
 * on the dag, and finally removes the corresponding KILL action from the {@link DagActionStore}.
 *
 * @param dagId id of the dag to cancel
 * @throws ExecutionException if cancelling a running job fails
 * @throws InterruptedException if interrupted while cancelling a running job
 * @throws IOException if removing the KILL action from the store fails
 */
private void cancelDag(DagId dagId) throws ExecutionException, InterruptedException, IOException {
  String dagToCancel = dagId.toString();
  log.info("Cancel flow with DagId {}", dagToCancel);
  if (!this.dagToJobs.containsKey(dagToCancel)) {
    log.warn("Did not find Dag with id {}, it might be already cancelled/finished.", dagToCancel);
  } else {
    List<DagNode<JobExecutionPlan>> dagNodesToCancel = this.dagToJobs.get(dagToCancel);
    log.info("Found {} DagNodes to cancel.", dagNodesToCancel.size());
    for (DagNode<JobExecutionPlan> dagNodeToCancel : dagNodesToCancel) {
      cancelDagNode(dagNodeToCancel);
    }
    Dag<JobExecutionPlan> cancelledDag = this.dags.get(dagToCancel);
    cancelledDag.setFlowEvent(TimingEvent.FlowTimings.FLOW_CANCELLED);
    cancelledDag.setMessage("Flow killed by request");
  }
  // Called after a KILL request is received
  removeDagActionFromStore(dagId, DagActionStore.FlowActionType.KILL);
}
/**
 * Cancels a single job on its {@link SpecProducer}. If the job has already been submitted (a job future
 * exists), the serialized future is passed to the producer so the in-flight submission can be cancelled,
 * and a cancellation tracking event is emitted.
 *
 * @param dagNodeToCancel dag node whose job should be cancelled
 * @throws ExecutionException if serializing the job future fails
 * @throws InterruptedException if interrupted while cancelling
 */
private void cancelDagNode(DagNode<JobExecutionPlan> dagNodeToCancel) throws ExecutionException, InterruptedException {
  Properties props = new Properties();
  if (dagNodeToCancel.getValue().getJobFuture().isPresent()) {
    // Use a wildcard-typed future rather than the raw type to avoid unchecked usage.
    Future<?> future = dagNodeToCancel.getValue().getJobFuture().get();
    String serializedFuture = DagManagerUtils.getSpecProducer(dagNodeToCancel).serializeAddSpecResponse(future);
    props.put(ConfigurationKeys.SPEC_PRODUCER_SERIALIZED_FUTURE, serializedFuture);
    sendCancellationEvent(dagNodeToCancel.getValue());
  }
  // Propagate the flow execution id so the producer cancels the correct execution.
  if (dagNodeToCancel.getValue().getJobSpec().getConfig().hasPath(ConfigurationKeys.FLOW_EXECUTION_ID_KEY)) {
    props.setProperty(ConfigurationKeys.FLOW_EXECUTION_ID_KEY,
        dagNodeToCancel.getValue().getJobSpec().getConfig().getString(ConfigurationKeys.FLOW_EXECUTION_ID_KEY));
  }
  DagManagerUtils.getSpecProducer(dagNodeToCancel).cancelJob(dagNodeToCancel.getValue().getJobSpec().getUri(), props);
}
/**
 * Emits a JOB_CANCEL timing event carrying the job's metadata and marks the job's
 * {@link ExecutionStatus} as CANCELLED.
 * NOTE(review): both the event emission AND the status update are skipped when no event submitter is
 * configured — presumably intentional (status transitions are driven by events downstream), but confirm
 * the status should not be set unconditionally.
 */
private void sendCancellationEvent(JobExecutionPlan jobExecutionPlan) {
  if (this.eventSubmitter.isPresent()) {
    Map<String, String> jobMetadata = TimingEventUtils.getJobMetadata(Maps.newHashMap(), jobExecutionPlan);
    this.eventSubmitter.get().getTimingEvent(TimingEvent.LauncherTimings.JOB_CANCEL).stop(jobMetadata);
    jobExecutionPlan.setExecutionStatus(CANCELLED);
  }
}
/**
 * This method determines the next set of jobs to execute from the dag and submits them for execution.
 * This method updates internal data structures tracking currently running Dags and jobs.
 *
 * <p>Ordering matters here: the dag is registered in {@code this.dags} before any job state is added,
 * because {@link #addJobState} and {@link #submitNext} look the dag up by id.
 *
 * @param dag a dag (new, or recovered after a leadership change) to start tracking
 * @throws IOException if checkpointing the dag state fails during submission
 */
private void initialize(Dag<JobExecutionPlan> dag)
    throws IOException {
  //Add Dag to the map of running dags
  String dagId = DagManagerUtils.generateDagId(dag).toString();
  log.info("Initializing Dag {}", DagManagerUtils.getFullyQualifiedDagName(dag));
  if (this.dags.containsKey(dagId)) {
    log.warn("Already tracking a dag with dagId {}, skipping.", dagId);
    return;
  }
  this.dags.put(dagId, dag);
  log.debug("Dag {} - determining if any jobs are already running.", DagManagerUtils.getFullyQualifiedDagName(dag));
  //A flag to indicate if the flow is already running.
  boolean isDagRunning = false;
  //Are there any jobs already in the running state? This check is for Dags already running
  //before a leadership change occurs.
  for (DagNode<JobExecutionPlan> dagNode : dag.getNodes()) {
    if (DagManagerUtils.getExecutionStatus(dagNode) == RUNNING) {
      addJobState(dagId, dagNode);
      //Update the running jobs counter.
      dagManagerMetrics.incrementRunningJobMetrics(dagNode);
      isDagRunning = true;
    }
  }
  FlowId flowId = DagManagerUtils.getFlowId(dag);
  this.dagManagerMetrics.registerFlowMetric(flowId, dag);
  log.debug("Dag {} submitting jobs ready for execution.", DagManagerUtils.getFullyQualifiedDagName(dag));
  //Determine the next set of jobs to run and submit them for execution
  Map<String, Set<DagNode<JobExecutionPlan>>> nextSubmitted = submitNext(dagId);
  for (DagNode<JobExecutionPlan> dagNode: nextSubmitted.get(dagId)) {
    addJobState(dagId, dagNode);
  }
  // Set flow status to running
  DagManagerUtils.emitFlowEvent(this.eventSubmitter, dag, TimingEvent.FlowTimings.FLOW_RUNNING);
  dagManagerMetrics.conditionallyMarkFlowAsState(flowId, FlowState.RUNNING);
  // Report the orchestration delay the first time the Dag is initialized. Orchestration delay is defined as
  // the time difference between the instant when a flow first transitions to the running state and the instant
  // when the flow is submitted to Gobblin service.
  if (!isDagRunning) {
    this.orchestrationDelay.set(System.currentTimeMillis() - DagManagerUtils.getFlowExecId(dag));
  }
  log.info("Dag {} Initialization complete.", DagManagerUtils.getFullyQualifiedDagName(dag));
}
/**
 * Proceed the execution of each dag node based on job status: record each job's latest
 * {@link ExecutionStatus}, advance dags whose jobs reached a terminal state, retry jobs the status
 * store flags for retry, and finally update the per-dag bookkeeping maps.
 */
private void pollAndAdvanceDag() throws IOException, ExecutionException, InterruptedException {
  Map<String, Set<DagNode<JobExecutionPlan>>> nextSubmitted = Maps.newHashMap();
  List<DagNode<JobExecutionPlan>> nodesToCleanUp = Lists.newArrayList();

  for (DagNode<JobExecutionPlan> node : this.jobToDag.keySet()) {
    try {
      boolean slaKilled = slaKillIfNeeded(node);
      JobStatus jobStatus = pollJobStatus(node);
      boolean killOrphanFlow = killJobIfOrphaned(node, jobStatus);
      ExecutionStatus status = getJobExecutionStatus(slaKilled, killOrphanFlow, jobStatus);
      JobExecutionPlan jobExecutionPlan = DagManagerUtils.getJobExecutionPlan(node);
      switch (status) {
        // The three terminal statuses share identical handling: record the status, advance the dag,
        // and schedule the node's bookkeeping for removal.
        case COMPLETE:
        case FAILED:
        case CANCELLED:
          jobExecutionPlan.setExecutionStatus(status);
          nextSubmitted.putAll(onJobFinish(node));
          nodesToCleanUp.add(node);
          break;
        case PENDING:
          jobExecutionPlan.setExecutionStatus(PENDING);
          break;
        case PENDING_RETRY:
          jobExecutionPlan.setExecutionStatus(PENDING_RETRY);
          break;
        default:
          // Any other reported status is treated as a still-running job.
          jobExecutionPlan.setExecutionStatus(RUNNING);
          break;
      }

      if (jobStatus != null && jobStatus.isShouldRetry()) {
        log.info("Retrying job: {}, current attempts: {}, max attempts: {}", DagManagerUtils.getFullyQualifiedJobName(node),
            jobStatus.getCurrentAttempts(), jobStatus.getMaxAttempts());
        // Clear any terminal flow event so the resubmitted job's outcome determines the flow's fate.
        this.jobToDag.get(node).setFlowEvent(null);
        submitJob(node);
      }
    } catch (Exception e) {
      // Error occurred while processing dag, continue processing other dags assigned to this thread
      log.error(String.format("Exception caught in DagManager while processing dag %s due to ",
          DagManagerUtils.getFullyQualifiedDagName(node)), e);
    }
  }

  // Register the newly submitted jobs under their dags.
  for (Map.Entry<String, Set<DagNode<JobExecutionPlan>>> entry : nextSubmitted.entrySet()) {
    for (DagNode<JobExecutionPlan> newNode : entry.getValue()) {
      addJobState(entry.getKey(), newNode);
    }
  }
  // Drop bookkeeping for nodes that reached a terminal state.
  for (DagNode<JobExecutionPlan> finishedNode : nodesToCleanUp) {
    deleteJobState(DagManagerUtils.generateDagId(finishedNode).toString(), finishedNode);
  }
}
/**
 * Cancel the job if the job has been "orphaned". A job is orphaned if has been in ORCHESTRATED
 * {@link ExecutionStatus} for some specific amount of time.
 * @param node {@link DagNode} representing the job
 * @param jobStatus current {@link JobStatus} of the job
 * @return true if the total time that the job remains in the ORCHESTRATED state exceeds
 * {@value ConfigurationKeys#GOBBLIN_JOB_START_SLA_TIME}.
 */
private boolean killJobIfOrphaned(DagNode<JobExecutionPlan> node, JobStatus jobStatus)
    throws ExecutionException, InterruptedException {
  if (jobStatus == null) {
    return false;
  }
  long startSlaMillis = DagManagerUtils.getJobStartSla(node, this.defaultJobStartSlaTimeMillis);
  boolean stuckInOrchestrated = valueOf(jobStatus.getEventName()) == ORCHESTRATED
      && System.currentTimeMillis() - jobStatus.getOrchestratedTime() > startSlaMillis;
  if (!stuckInOrchestrated) {
    return false;
  }
  log.info("Job {} of flow {} exceeded the job start SLA of {} ms. Killing the job now...",
      DagManagerUtils.getJobName(node),
      DagManagerUtils.getFullyQualifiedDagName(node),
      startSlaMillis);
  dagManagerMetrics.incrementCountsStartSlaExceeded(node);
  cancelDagNode(node);
  Dag<JobExecutionPlan> dag = this.dags.get(DagManagerUtils.generateDagId(node).toString());
  dag.setFlowEvent(TimingEvent.FlowTimings.FLOW_START_DEADLINE_EXCEEDED);
  dag.setMessage("Flow killed because no update received for " + startSlaMillis + " ms after orchestration");
  return true;
}
/**
 * Resolves the effective {@link ExecutionStatus} of a job: CANCELLED when the job was killed for an
 * SLA breach or for being orphaned, PENDING when no status has been reported yet, otherwise whatever
 * the job status store reports.
 */
private ExecutionStatus getJobExecutionStatus(boolean slaKilled, boolean killOrphanFlow, JobStatus jobStatus) {
  if (slaKilled || killOrphanFlow) {
    return CANCELLED;
  }
  return (jobStatus == null) ? PENDING : valueOf(jobStatus.getEventName());
}
/**
 * Check if the SLA is configured for the flow this job belongs to.
 * If it is, this method will try to cancel the job when SLA is reached.
 *
 * <p>The resolved SLA is cached per dag in {@code dagToSLA} so the (possibly malformed) config is only
 * parsed once per dag; the cache entry is removed in {@code deleteJobState}.
 *
 * @param node dag node of the job
 * @return true if the job is killed because it reached sla
 * @throws ExecutionException exception
 * @throws InterruptedException exception
 */
private boolean slaKillIfNeeded(DagNode<JobExecutionPlan> node) throws ExecutionException, InterruptedException {
  long flowStartTime = DagManagerUtils.getFlowStartTime(node);
  long currentTime = System.currentTimeMillis();
  String dagId = DagManagerUtils.generateDagId(node).toString();
  long flowSla;
  if (dagToSLA.containsKey(dagId)) {
    flowSla = dagToSLA.get(dagId);
  } else {
    try {
      flowSla = DagManagerUtils.getFlowSLA(node);
    } catch (ConfigException e) {
      // A malformed SLA config is tolerated: fall back to the default rather than failing the poll loop.
      log.warn("Flow SLA for flowGroup: {}, flowName: {} is given in invalid format, using default SLA of {}",
          node.getValue().getJobSpec().getConfig().getString(ConfigurationKeys.FLOW_GROUP_KEY),
          node.getValue().getJobSpec().getConfig().getString(ConfigurationKeys.FLOW_NAME_KEY),
          DagManagerUtils.DEFAULT_FLOW_SLA_MILLIS);
      flowSla = DagManagerUtils.DEFAULT_FLOW_SLA_MILLIS;
    }
    dagToSLA.put(dagId, flowSla);
  }
  if (currentTime > flowStartTime + flowSla) {
    log.info("Flow {} exceeded the SLA of {} ms. Killing the job {} now...",
        node.getValue().getJobSpec().getConfig().getString(ConfigurationKeys.FLOW_NAME_KEY), flowSla,
        node.getValue().getJobSpec().getConfig().getString(ConfigurationKeys.JOB_NAME_KEY));
    dagManagerMetrics.incrementExecutorSlaExceeded(node);
    cancelDagNode(node);
    this.dags.get(dagId).setFlowEvent(TimingEvent.FlowTimings.FLOW_RUN_DEADLINE_EXCEEDED);
    this.dags.get(dagId).setMessage("Flow killed due to exceeding SLA of " + flowSla + " ms");
    return true;
  }
  return false;
}
/**
 * Retrieve the {@link JobStatus} for the job held by the given dag node, identified by the
 * flow/job coordinates in its job config.
 */
private JobStatus pollJobStatus(DagNode<JobExecutionPlan> dagNode) {
  Config jobConfig = dagNode.getValue().getJobSpec().getConfig();
  return pollStatus(
      jobConfig.getString(ConfigurationKeys.FLOW_GROUP_KEY),
      jobConfig.getString(ConfigurationKeys.FLOW_NAME_KEY),
      jobConfig.getLong(ConfigurationKeys.FLOW_EXECUTION_ID_KEY),
      jobConfig.getString(ConfigurationKeys.JOB_GROUP_KEY),
      jobConfig.getString(ConfigurationKeys.JOB_NAME_KEY));
}
/**
 * Retrieve the flow's {@link JobStatus} (i.e. job status with {@link JobStatusRetriever#NA_KEY} as job
 * name/group) from a dag; returns {@code null} for a null or empty dag.
 */
private JobStatus pollFlowStatus(Dag<JobExecutionPlan> dag) {
  if (dag == null || dag.isEmpty()) {
    return null;
  }
  // The flow coordinates are identical on every node; read them off the first one.
  Config firstNodeConfig = dag.getNodes().get(0).getValue().getJobSpec().getConfig();
  return pollStatus(
      firstNodeConfig.getString(ConfigurationKeys.FLOW_GROUP_KEY),
      firstNodeConfig.getString(ConfigurationKeys.FLOW_NAME_KEY),
      firstNodeConfig.getLong(ConfigurationKeys.FLOW_EXECUTION_ID_KEY),
      JobStatusRetriever.NA_KEY, JobStatusRetriever.NA_KEY);
}
/**
 * Queries the {@link JobStatusRetriever} for the first status matching the given flow/job coordinates,
 * recording the poll latency; returns {@code null} when no status exists yet.
 */
private JobStatus pollStatus(String flowGroup, String flowName, long flowExecutionId, String jobGroup, String jobName) {
  long pollStartNanos = System.nanoTime();
  Iterator<JobStatus> statuses =
      this.jobStatusRetriever.getJobStatusesForFlowExecution(flowName, flowGroup, flowExecutionId, jobName, jobGroup);
  Instrumented.updateTimer(this.jobStatusPolledTimer, System.nanoTime() - pollStartNanos, TimeUnit.NANOSECONDS);
  return statuses.hasNext() ? statuses.next() : null;
}
/**
 * Submit next set of Dag nodes in the Dag identified by the provided dagId, then checkpoint the dag state.
 * @param dagId The dagId that should be processed.
 * @return a single-entry map from the dagId to the set of dag nodes just submitted
 * @throws IOException if checkpointing the dag fails
 */
synchronized Map<String, Set<DagNode<JobExecutionPlan>>> submitNext(String dagId) throws IOException {
  Dag<JobExecutionPlan> dag = this.dags.get(dagId);
  Set<DagNode<JobExecutionPlan>> nextNodes = DagManagerUtils.getNext(dag);
  List<String> submittedJobNames = new ArrayList<>();
  // Submit every node that is ready for execution.
  for (DagNode<JobExecutionPlan> nextNode : nextNodes) {
    submitJob(nextNode);
    submittedJobNames.add(DagManagerUtils.getJobName(nextNode));
  }
  log.info("Submitting next nodes for dagId {}, where next jobs to be submitted are {}", dagId, submittedJobNames);
  // Checkpoint the dag state so submissions survive a restart.
  this.dagStateStore.writeCheckpoint(dag);

  Map<String, Set<DagNode<JobExecutionPlan>>> dagIdToNextJobs = Maps.newHashMap();
  dagIdToNextJobs.put(dagId, nextNodes);
  return dagIdToNextJobs;
}
/**
 * Submits a {@link JobSpec} to a {@link org.apache.gobblin.runtime.api.SpecExecutor}.
 *
 * <p>Marks the job RUNNING before submission, checks quota, increments metrics, submits via the
 * {@link SpecProducer}, checkpoints the dag, and emits either a JOB_ORCHESTRATED or JOB_FAILED timing
 * event. The statement order below is load-bearing — see the inline comments.
 *
 * @param dagNode the dag node whose job should be submitted
 */
private void submitJob(DagNode<JobExecutionPlan> dagNode) {
  DagManagerUtils.incrementJobAttempt(dagNode);
  JobExecutionPlan jobExecutionPlan = DagManagerUtils.getJobExecutionPlan(dagNode);
  jobExecutionPlan.setExecutionStatus(RUNNING);
  JobSpec jobSpec = DagManagerUtils.getJobSpec(dagNode);
  Map<String, String> jobMetadata = TimingEventUtils.getJobMetadata(Maps.newHashMap(), jobExecutionPlan);
  String specExecutorUri = DagManagerUtils.getSpecExecutorUri(dagNode);
  // Run this spec on selected executor
  SpecProducer<Spec> producer;
  try {
    // Quota check throws on violation, aborting the submission before any side effects below.
    quotaManager.checkQuota(Collections.singleton(dagNode));
    producer = DagManagerUtils.getSpecProducer(dagNode);
    TimingEvent jobOrchestrationTimer = this.eventSubmitter.isPresent() ? this.eventSubmitter.get().
        getTimingEvent(TimingEvent.LauncherTimings.JOB_ORCHESTRATED) : null;
    // Increment job count before submitting the job onto the spec producer, in case that throws an exception.
    // By this point the quota is allocated, so it's imperative to increment as missing would introduce the potential to decrement below zero upon quota release.
    // Quota release is guaranteed, despite failure, because exception handling within would mark the job FAILED.
    // When the ensuing kafka message spurs DagManager processing, the quota is released and the counts decremented
    // Ensure that we do not double increment for flows that are retried
    if (dagNode.getValue().getCurrentAttempts() == 1) {
      dagManagerMetrics.incrementRunningJobMetrics(dagNode);
    }
    // Submit the job to the SpecProducer, which in turn performs the actual job submission to the SpecExecutor instance.
    // The SpecProducer implementations submit the job to the underlying executor and return when the submission is complete,
    // either successfully or unsuccessfully. To catch any exceptions in the job submission, the DagManagerThread
    // blocks (by calling Future#get()) until the submission is completed.
    Future<?> addSpecFuture = producer.addSpec(jobSpec);
    dagNode.getValue().setJobFuture(Optional.of(addSpecFuture));
    //Persist the dag
    this.dagStateStore.writeCheckpoint(this.dags.get(DagManagerUtils.generateDagId(dagNode).toString()));
    addSpecFuture.get();
    jobMetadata.put(TimingEvent.METADATA_MESSAGE, producer.getExecutionLink(addSpecFuture, specExecutorUri));
    // Add serialized job properties as part of the orchestrated job event metadata
    jobMetadata.put(JobExecutionPlan.JOB_PROPS_KEY, dagNode.getValue().toString());
    if (jobOrchestrationTimer != null) {
      jobOrchestrationTimer.stop(jobMetadata);
    }
    log.info("Orchestrated job: {} on Executor: {}", DagManagerUtils.getFullyQualifiedJobName(dagNode), specExecutorUri);
    this.dagManagerMetrics.incrementJobsSentToExecutor(dagNode);
  } catch (Exception e) {
    // Any failure (quota, producer, submission) is reported via a JOB_FAILED timing event rather than
    // rethrown, so one failed submission does not break the caller's loop.
    TimingEvent jobFailedTimer = this.eventSubmitter.isPresent() ? this.eventSubmitter.get().
        getTimingEvent(TimingEvent.LauncherTimings.JOB_FAILED) : null;
    String message = "Cannot submit job " + DagManagerUtils.getFullyQualifiedJobName(dagNode) + " on executor " + specExecutorUri;
    log.error(message, e);
    jobMetadata.put(TimingEvent.METADATA_MESSAGE, message + " due to " + e.getMessage());
    if (jobFailedTimer != null) {
      jobFailedTimer.stop(jobMetadata);
    }
  }
}
/**
 * Method that defines the actions to be performed when a job finishes either successfully or with failure.
 * This method updates the state of the dag and performs clean up actions as necessary.
 *
 * @return for a COMPLETE job, the next dag nodes submitted (keyed by dag id); an empty map otherwise
 * @throws IOException if submitting the next set of jobs fails
 */
private Map<String, Set<DagNode<JobExecutionPlan>>> onJobFinish(DagNode<JobExecutionPlan> dagNode)
    throws IOException {
  Dag<JobExecutionPlan> dag = this.jobToDag.get(dagNode);
  String dagId = DagManagerUtils.generateDagId(dag).toString();
  String jobName = DagManagerUtils.getFullyQualifiedJobName(dagNode);
  ExecutionStatus jobStatus = DagManagerUtils.getExecutionStatus(dagNode);
  log.info("Job {} of Dag {} has finished with status {}", jobName, dagId, jobStatus.name());
  // Only decrement counters and quota for jobs that actually ran on the executor, not from a GaaS side failure/skip event
  if (quotaManager.releaseQuota(dagNode)) {
    dagManagerMetrics.decrementRunningJobMetrics(dagNode);
  }

  if (jobStatus == FAILED) {
    dag.setMessage("Flow failed because job " + jobName + " failed");
    dag.setFlowEvent(TimingEvent.FlowTimings.FLOW_FAILED);
    dagManagerMetrics.incrementExecutorFailed(dagNode);
    return Maps.newHashMap();
  }
  if (jobStatus == CANCELLED) {
    dag.setFlowEvent(TimingEvent.FlowTimings.FLOW_CANCELLED);
    return Maps.newHashMap();
  }
  if (jobStatus == COMPLETE) {
    dagManagerMetrics.incrementExecutorSuccess(dagNode);
    return submitNext(dagId);
  }
  log.warn("It should not reach here. Job status is unexpected.");
  return Maps.newHashMap();
}
/**
 * Removes the bookkeeping for a finished dag node: the node-to-dag mapping, the node's entry in the
 * dag's running-job list, and the dag's cached SLA.
 */
private void deleteJobState(String dagId, DagNode<JobExecutionPlan> dagNode) {
  this.jobToDag.remove(dagNode);
  // Guard against the dag's node list having already been removed (e.g. by cleanUpDag) to avoid an NPE.
  List<DagNode<JobExecutionPlan>> dagNodes = this.dagToJobs.get(dagId);
  if (dagNodes != null) {
    dagNodes.remove(dagNode);
  }
  this.dagToSLA.remove(dagId);
}
/**
 * Records that the given dag node is now running as part of the dag identified by dagId, updating both
 * the node-to-dag and dag-to-nodes maps.
 */
private void addJobState(String dagId, DagNode<JobExecutionPlan> dagNode) {
  this.jobToDag.put(dagNode, this.dags.get(dagId));
  // computeIfAbsent replaces the previous containsKey/put dance with a single atomic-per-map-impl call.
  this.dagToJobs.computeIfAbsent(dagId, id -> Lists.newLinkedList()).add(dagNode);
}
/** Returns true iff the dag identified by dagId still has at least one running job tracked. */
private boolean hasRunningJobs(String dagId) {
  List<DagNode<JobExecutionPlan>> runningNodes = this.dagToJobs.get(dagId);
  if (runningNodes == null) {
    return false;
  }
  return !runningNodes.isEmpty();
}
/**
 * Perform clean up. Remove a dag from the dagstore if the dag is complete and update internal state.
 *
 * <p>Two-phase: first, finished dags are detected, a terminal flow event is emitted, and their ids are
 * queued in {@code dagIdstoClean}; second, queued dags are actually removed once the job status monitor
 * has reflected the terminal flow status, re-emitting the event if it is not seen within
 * {@code DAG_FLOW_STATUS_TOLERANCE_TIME_MILLIS}.
 */
private void cleanUp() {
  // Approximate the time when the flow events are emitted to account for delay when the flow event is received by the job monitor
  long cleanUpProcessingTime = System.currentTimeMillis();

  // Remove dags that are finished and emit their appropriate metrics
  for (Map.Entry<String, Dag<JobExecutionPlan>> dagIdKeyPair : this.dags.entrySet()) {
    String dagId = dagIdKeyPair.getKey();
    // On service restart, we repopulate the dags that are waiting to be cleaned up
    if (dagIdstoClean.contains(dagId)) {
      continue;
    }
    Dag<JobExecutionPlan> dag = dagIdKeyPair.getValue();
    if ((TimingEvent.FlowTimings.FLOW_FAILED.equals(dag.getFlowEvent()) || TimingEvent.FlowTimings.FLOW_CANCELLED.equals(dag.getFlowEvent())) &&
        DagManagerUtils.getFailureOption(dag) == FailureOption.FINISH_RUNNING) {
      //Skip monitoring of any other jobs of the failed dag.
      LinkedList<DagNode<JobExecutionPlan>> dagNodeList = this.dagToJobs.get(dagId);
      // Drain via poll() so each node's state is removed exactly once.
      while (!dagNodeList.isEmpty()) {
        DagNode<JobExecutionPlan> dagNode = dagNodeList.poll();
        deleteJobState(dagId, dagNode);
      }
    }
    if (!hasRunningJobs(dagId)) {
      // Collect all the dagIds that are finished
      this.dagIdstoClean.add(dagId);
      if (dag.getFlowEvent() == null) {
        // If the dag flow event is not set, then it is successful
        dag.setFlowEvent(TimingEvent.FlowTimings.FLOW_SUCCEEDED);
      } else {
        addFailedDag(dagId, dag);
      }
      // send an event before cleaning up dag
      DagManagerUtils.emitFlowEvent(this.eventSubmitter, this.dags.get(dagId), dag.getFlowEvent());
      dag.setEventEmittedTimeMillis(cleanUpProcessingTime);
    }
  }

  // Only clean up dags after the job status monitor processed the flow event
  for (Iterator<String> dagIdIterator = this.dagIdstoClean.iterator(); dagIdIterator.hasNext();) {
    String dagId = dagIdIterator.next();
    Dag<JobExecutionPlan> dag = this.dags.get(dagId);
    JobStatus flowStatus = pollFlowStatus(dag);
    if (flowStatus != null && FlowStatusGenerator.FINISHED_STATUSES.contains(flowStatus.getEventName())) {
      FlowId flowId = DagManagerUtils.getFlowId(dag);
      switch(dag.getFlowEvent()) {
        case TimingEvent.FlowTimings.FLOW_SUCCEEDED:
          this.dagManagerMetrics.emitFlowSuccessMetrics(flowId);
          this.dagManagerMetrics.conditionallyMarkFlowAsState(flowId, FlowState.SUCCESSFUL);
          break;
        case TimingEvent.FlowTimings.FLOW_FAILED:
          this.dagManagerMetrics.emitFlowFailedMetrics(flowId);
          this.dagManagerMetrics.conditionallyMarkFlowAsState(flowId, FlowState.FAILED);
          break;
        case TimingEvent.FlowTimings.FLOW_CANCELLED:
          this.dagManagerMetrics.emitFlowSlaExceededMetrics(flowId);
          this.dagManagerMetrics.conditionallyMarkFlowAsState(flowId, FlowState.FAILED);
          break;
        default:
          log.warn("Unexpected flow event {} for dag {}", dag.getFlowEvent(), dagId);
      }
      log.info("Dag {} has finished with status {}; Cleaning up dag from the state store.", dagId, dag.getFlowEvent());
      cleanUpDag(dagId);
      // Iterator.remove keeps this loop safe while mutating dagIdstoClean.
      dagIdIterator.remove();
    } else if (cleanUpProcessingTime > dag.getEventEmittedTimeMillis() + DAG_FLOW_STATUS_TOLERANCE_TIME_MILLIS) {
      // Re-emit the flow event if the flow status has not been processed within the DagFlowStatusTolerance time
      DagManagerUtils.emitFlowEvent(this.eventSubmitter, dag, dag.getFlowEvent());
    } else {
      log.info("Waiting for flow event {} to be emitted before cleaning up dag {}", dag.getFlowEvent(), dagId);
    }
  }
}
/**
 * Add a dag to failed dag state store and record its id so it can later be resumed or
 * expired by the {@link FailedDagRetentionThread}.
 *
 * @param dagId string form of the dag's id
 * @param dag the dag to checkpoint into the failed dag state store
 */
private synchronized void addFailedDag(String dagId, Dag<JobExecutionPlan> dag) {
  try {
    log.info("Adding dag " + dagId + " to failed dag state store");
    // Checkpoint the dag passed by the caller instead of re-looking it up in this.dags: the parameter
    // was previously unused, and the lookup could NPE if the entry had already been removed.
    this.failedDagStateStore.writeCheckpoint(dag);
  } catch (IOException e) {
    log.error("Failed to add dag " + dagId + " to failed dag state store", e);
  }
  this.failedDagIds.add(dagId);
}
/**
 * Note that removal of a {@link Dag} entry in {@link #dags} needs to be happen after {@link #cleanUp()}
 * since the real {@link Dag} object is required for {@link #cleanUp()},
 * and cleaning of all relevant states need to be atomic
 * @param dagId id of the dag to remove from the state store and from the in-memory maps
 */
private synchronized void cleanUpDag(String dagId) {
  log.info("Cleaning up dagId {}", dagId);
  // clears flow event after cancelled job to allow resume event status to be set
  this.dags.get(dagId).setFlowEvent(null);
  try {
    this.dagStateStore.cleanUp(dags.get(dagId));
  } catch (IOException ioe) {
    // Best-effort: a failed state-store delete is logged but does not block removing in-memory state.
    log.error(String.format("Failed to clean %s from backStore due to:", dagId), ioe);
  }
  this.dags.remove(dagId);
  this.dagToJobs.remove(dagId);
}
}
/**
 * Observable state of a flow, with a stable integer code (-1 failed, 0 running, 1 successful)
 * used by the metrics reporting in {@code conditionallyMarkFlowAsState}.
 */
public enum FlowState {
  FAILED(-1),
  RUNNING(0),
  SUCCESSFUL(1);

  /** Numeric code for this state; made final so it cannot be reassigned after construction. */
  public final int value;

  FlowState(int value) {
    this.value = value;
  }
}
/**
 * Thread that runs retention on failed dags based on their original start time (which is the flow
 * execution ID embedded in the dag id).
 */
public static class FailedDagRetentionThread implements Runnable {
  private final DagStateStore failedDagStateStore;
  private final Set<String> failedDagIds;
  private final long failedDagRetentionTime;

  FailedDagRetentionThread(DagStateStore failedDagStateStore, Set<String> failedDagIds, long failedDagRetentionTime) {
    this.failedDagStateStore = failedDagStateStore;
    this.failedDagIds = failedDagIds;
    this.failedDagRetentionTime = failedDagRetentionTime;
  }

  @Override
  public void run() {
    try {
      log.info("Cleaning failed dag state store");
      long startTime = System.currentTimeMillis();
      int numCleaned = 0;
      // Iterate a snapshot so removals from the shared set do not disturb the iteration.
      for (String dagId : new HashSet<>(this.failedDagIds)) {
        boolean retentionEnabled = this.failedDagRetentionTime > 0L;
        if (retentionEnabled && startTime > DagManagerUtils.getFlowExecId(dagId) + this.failedDagRetentionTime) {
          this.failedDagStateStore.cleanUp(dagId);
          this.failedDagIds.remove(dagId);
          numCleaned++;
        }
      }
      log.info("Cleaned " + numCleaned + " dags from the failed dag state store");
    } catch (Exception e) {
      // Retention is best-effort; never let it kill the scheduling thread.
      log.error("Failed to run retention on failed dag state store", e);
    }
  }
}
/** Stop the service. */
@Override
protected void shutDown()
    throws Exception {
  // Stop accepting new scheduled work, then wait up to TERMINATION_TIMEOUT seconds for
  // in-flight DagManagerThread iterations to finish.
  this.scheduledExecutorPool.shutdown();
  this.scheduledExecutorPool.awaitTermination(TERMINATION_TIMEOUT, TimeUnit.SECONDS);
}
}
| 3,900 |
0 | Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules | Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/orchestration/DagManagerUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.orchestration;
import java.io.IOException;
import java.util.Collections;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.metrics.event.EventSubmitter;
import org.apache.gobblin.metrics.event.TimingEvent;
import org.apache.gobblin.runtime.api.JobSpec;
import org.apache.gobblin.runtime.api.Spec;
import org.apache.gobblin.runtime.api.SpecProducer;
import org.apache.gobblin.service.ExecutionStatus;
import org.apache.gobblin.service.FlowId;
import org.apache.gobblin.service.RequesterService;
import org.apache.gobblin.service.ServiceRequester;
import org.apache.gobblin.service.modules.flowgraph.Dag;
import org.apache.gobblin.service.modules.flowgraph.Dag.DagNode;
import org.apache.gobblin.service.modules.orchestration.DagManager.FailureOption;
import org.apache.gobblin.service.modules.spec.JobExecutionPlan;
import org.apache.gobblin.util.ConfigUtils;
public class DagManagerUtils {
// Default flow SLA (24 hours) applied when a flow configures an invalid or missing SLA.
static long DEFAULT_FLOW_SLA_MILLIS = TimeUnit.HOURS.toMillis(24);
// Separator used when composing quota keys. ("SEPERATOR" spelling kept for source compatibility.)
static String QUOTA_KEY_SEPERATOR = ",";
/** Extracts the {@link FlowId} of the given dag from its first start node's job config. */
static FlowId getFlowId(Dag<JobExecutionPlan> dag) {
  DagNode<JobExecutionPlan> firstStartNode = dag.getStartNodes().get(0);
  return getFlowId(firstStartNode);
}
/** Builds a {@link FlowId} from the flow group and flow name in the node's job config. */
static FlowId getFlowId(DagNode<JobExecutionPlan> dagNode) {
  Config jobConfig = dagNode.getValue().getJobSpec().getConfig();
  FlowId flowId = new FlowId();
  flowId.setFlowGroup(jobConfig.getString(ConfigurationKeys.FLOW_GROUP_KEY));
  flowId.setFlowName(jobConfig.getString(ConfigurationKeys.FLOW_NAME_KEY));
  return flowId;
}
/** Returns the flow execution id of the given dag, read from its first start node. */
static long getFlowExecId(Dag<JobExecutionPlan> dag) {
  DagNode<JobExecutionPlan> firstStartNode = dag.getStartNodes().get(0);
  return getFlowExecId(firstStartNode);
}
/** Returns the flow execution id carried by the node's job spec. */
static long getFlowExecId(DagNode<JobExecutionPlan> dagNode) {
  JobSpec jobSpec = dagNode.getValue().getJobSpec();
  return getFlowExecId(jobSpec);
}
/** Reads the flow execution id from the job spec's config. */
static long getFlowExecId(JobSpec jobSpec) {
  Config jobConfig = jobSpec.getConfig();
  return jobConfig.getLong(ConfigurationKeys.FLOW_EXECUTION_ID_KEY);
}
/** Parses the flow execution id — the numeric suffix after the last '_' — out of a string dag id. */
static long getFlowExecId(String dagId) {
  int lastSeparator = dagId.lastIndexOf('_');
  String execIdPart = dagId.substring(lastSeparator + 1);
  return Long.parseLong(execIdPart);
}
/**
 * Generate a dagId object from the given {@link Dag} instance.
 * @param dag instance of a {@link Dag}.
 * @return a DagId object associated corresponding to the {@link Dag} instance.
 */
public static DagManager.DagId generateDagId(Dag<JobExecutionPlan> dag) {
  Config startNodeConfig = dag.getStartNodes().get(0).getValue().getJobSpec().getConfig();
  return generateDagId(startNodeConfig);
}
/** Builds a {@link DagManager.DagId} from the flow coordinates carried by a job config. */
private static DagManager.DagId generateDagId(Config jobConfig) {
  return new DagManager.DagId(
      jobConfig.getString(ConfigurationKeys.FLOW_GROUP_KEY),
      jobConfig.getString(ConfigurationKeys.FLOW_NAME_KEY),
      Long.toString(jobConfig.getLong(ConfigurationKeys.FLOW_EXECUTION_ID_KEY)));
}
/** Builds a {@link DagManager.DagId} from the flow coordinates in the given node's job config. */
static DagManager.DagId generateDagId(Dag.DagNode<JobExecutionPlan> dagNode) {
  Config nodeConfig = dagNode.getValue().getJobSpec().getConfig();
  return generateDagId(nodeConfig);
}
/** Builds a {@link DagManager.DagId} from explicit flow coordinates with a numeric execution id. */
static DagManager.DagId generateDagId(String flowGroup, String flowName, long flowExecutionId) {
  String execIdString = String.valueOf(flowExecutionId);
  return generateDagId(flowGroup, flowName, execIdString);
}
/** Builds a {@link DagManager.DagId} from explicit flow coordinates with a string execution id. */
static DagManager.DagId generateDagId(String flowGroup, String flowName, String flowExecutionId) {
  DagManager.DagId dagId = new DagManager.DagId(flowGroup, flowName, flowExecutionId);
  return dagId;
}
/**
 * Returns a fully-qualified {@link Dag} name that includes: (flowGroup, flowName, flowExecutionId).
 * @param dag
 * @return fully qualified name of the underlying {@link Dag}.
 */
static String getFullyQualifiedDagName(Dag<JobExecutionPlan> dag) {
  // Delegate to the DagNode overload so the name format is defined in exactly one place; both the
  // flow id and the flow execution id are derived from the first start node either way.
  return getFullyQualifiedDagName(dag.getStartNodes().get(0));
}
/**
* Returns a fully-qualified {@link Dag} name that includes: (flowGroup, flowName, flowExecutionId).
* @param dagNode
* @return fully qualified name of the underlying {@link Dag}.
*/
static String getFullyQualifiedDagName(DagNode<JobExecutionPlan> dagNode) {
FlowId flowid = getFlowId(dagNode);
long flowExecutionId = getFlowExecId(dagNode);
return "(flowGroup: " + flowid.getFlowGroup() + ", flowName: " + flowid.getFlowName() + ", flowExecutionId: " + flowExecutionId + ")";
}
/** Returns the job name ({@link ConfigurationKeys#JOB_NAME_KEY}) from the node's job spec config. */
static String getJobName(DagNode<JobExecutionPlan> dagNode) {
return dagNode.getValue().getJobSpec().getConfig().getString(ConfigurationKeys.JOB_NAME_KEY);
}
/**
 * Returns a fully-qualified job name that includes: (flowGroup, flowName, flowExecutionId, jobName).
 * Missing config entries fall back to empty strings (or 0 for the execution id).
 * @param dagNode
 * @return a fully qualified name of the underlying job.
 */
static String getFullyQualifiedJobName(DagNode<JobExecutionPlan> dagNode) {
  Config config = dagNode.getValue().getJobSpec().getConfig();
  return String.format("(flowGroup: %s, flowName: %s, flowExecutionId: %d, jobName: %s)",
      ConfigUtils.getString(config, ConfigurationKeys.FLOW_GROUP_KEY, ""),
      ConfigUtils.getString(config, ConfigurationKeys.FLOW_NAME_KEY, ""),
      ConfigUtils.getLong(config, ConfigurationKeys.FLOW_EXECUTION_ID_KEY, 0L),
      ConfigUtils.getString(config, ConfigurationKeys.JOB_NAME_KEY, ""));
}
/** Returns the {@link JobExecutionPlan} payload of the given dag node. */
static JobExecutionPlan getJobExecutionPlan(DagNode<JobExecutionPlan> dagNode) {
return dagNode.getValue();
}
/**
 * Returns a copy of the node's {@link JobSpec} whose config additionally carries the current
 * attempt and generation counters, without mutating the dagNode's own spec.
 */
public static JobSpec getJobSpec(DagNode<JobExecutionPlan> dagNode) {
JobSpec jobSpec = dagNode.getValue().getJobSpec();
Map<String, String> configWithCurrentAttempts = ImmutableMap.of(ConfigurationKeys.JOB_CURRENT_ATTEMPTS, String.valueOf(dagNode.getValue().getCurrentAttempts()),
ConfigurationKeys.JOB_CURRENT_GENERATION, String.valueOf(dagNode.getValue().getCurrentGeneration()));
// NOTE(review): new Properties(defaults) stores the original entries as *defaults*; they answer
// getProperty() but are invisible to keySet()/entrySet() enumeration — confirm callers only use getProperty().
Properties configAsProperties = new Properties(jobSpec.getConfigAsProperties());
configAsProperties.putAll(configWithCurrentAttempts);
// Return a new spec with the augmented config to avoid changing the spec referenced by dagNode.
return new JobSpec(jobSpec.getUri(), jobSpec.getVersion(), jobSpec.getDescription(), ConfigFactory.parseMap(configWithCurrentAttempts).withFallback(jobSpec.getConfig()),
configAsProperties, jobSpec.getTemplateURI(), jobSpec.getJobTemplate(), jobSpec.getMetadata());
}
/** Returns the raw job config of the given dag node. */
static Config getJobConfig(DagNode<JobExecutionPlan> dagNode) {
return dagNode.getValue().getJobSpec().getConfig();
}
/**
 * Resolves the {@link SpecProducer} for the node's spec executor.
 * @throws ExecutionException / InterruptedException propagated from the producer future's get().
 */
static SpecProducer<Spec> getSpecProducer(DagNode<JobExecutionPlan> dagNode)
throws ExecutionException, InterruptedException {
return dagNode.getValue().getSpecExecutor().getProducer().get();
}
/** Returns the node's current {@link ExecutionStatus}. */
static ExecutionStatus getExecutionStatus(DagNode<JobExecutionPlan> dagNode) {
return dagNode.getValue().getExecutionStatus();
}
/**
 * Traverse the dag to determine the next set of nodes to be executed. It starts with the startNodes of the dag and
 * identifies each node yet to be executed and for which each of its parent nodes is in the {@link ExecutionStatus#COMPLETE}
 * state.
 *
 * <p>On encountering a FAILED or CANCELLED node, behavior depends on the dag's {@link FailureOption}:
 * FINISH_RUNNING aborts the traversal and returns no new nodes, while FINISH_ALL_POSSIBLE continues
 * exploring the remaining branches.
 */
static Set<DagNode<JobExecutionPlan>> getNext(Dag<JobExecutionPlan> dag) {
Set<DagNode<JobExecutionPlan>> nextNodesToExecute = new HashSet<>();
LinkedList<DagNode<JobExecutionPlan>> nodesToExpand = Lists.newLinkedList(dag.getStartNodes());
FailureOption failureOption = getFailureOption(dag);
// Breadth-first traversal starting from the dag's start nodes.
while (!nodesToExpand.isEmpty()) {
DagNode<JobExecutionPlan> node = nodesToExpand.poll();
ExecutionStatus executionStatus = getExecutionStatus(node);
boolean addFlag = true;
// Any of the three PENDING variants means the node has not run yet and is a candidate.
if (executionStatus == ExecutionStatus.PENDING || executionStatus == ExecutionStatus.PENDING_RETRY
|| executionStatus == ExecutionStatus.PENDING_RESUME) {
//Add a node to be executed next, only if all of its parent nodes are COMPLETE.
List<DagNode<JobExecutionPlan>> parentNodes = dag.getParents(node);
for (DagNode<JobExecutionPlan> parentNode : parentNodes) {
if (getExecutionStatus(parentNode) != ExecutionStatus.COMPLETE) {
addFlag = false;
break;
}
}
if (addFlag) {
nextNodesToExecute.add(node);
}
} else if (executionStatus == ExecutionStatus.COMPLETE) {
//Explore the children of COMPLETED node as next candidates for execution.
nodesToExpand.addAll(dag.getChildren(node));
} else if ((executionStatus == ExecutionStatus.FAILED) || (executionStatus == ExecutionStatus.CANCELLED)) {
switch (failureOption) {
case FINISH_RUNNING:
// Do not schedule anything new once a failure is seen under FINISH_RUNNING.
return new HashSet<>();
case FINISH_ALL_POSSIBLE:
default:
break;
}
}
}
return nextNodesToExecute;
}
/**
 * Returns the {@link FailureOption} configured on the dag's first start node, falling back to
 * {@link DagManager#DEFAULT_FLOW_FAILURE_OPTION}. Returns null for an empty dag.
 * Note: {@code FailureOption.valueOf} throws {@link IllegalArgumentException} for an unrecognized value.
 */
static FailureOption getFailureOption(Dag<JobExecutionPlan> dag) {
if (dag.isEmpty()) {
return null;
}
DagNode<JobExecutionPlan> dagNode = dag.getStartNodes().get(0);
String failureOption = ConfigUtils.getString(getJobConfig(dagNode),
ConfigurationKeys.FLOW_FAILURE_OPTION, DagManager.DEFAULT_FLOW_FAILURE_OPTION);
return FailureOption.valueOf(failureOption);
}
/** Returns the URI (as a string) of the spec executor that will run this node. */
static String getSpecExecutorUri(DagNode<JobExecutionPlan> dagNode) {
return dagNode.getValue().getSpecExecutor().getUri().toString();
}
/** Returns the serialized requester list from the node's config, or null when absent. */
static String getSerializedRequesterList(DagNode<JobExecutionPlan> dagNode) {
return ConfigUtils.getString(dagNode.getValue().getJobSpec().getConfig(), RequesterService.REQUESTER_LIST, null);
}
/** Quota-tracking key for a user: "&lt;user&gt;&lt;sep&gt;&lt;specExecutorUri&gt;" — quotas are scoped per executor. */
static String getUserQuotaKey(String user, DagNode<JobExecutionPlan> dagNode) {
return user + QUOTA_KEY_SEPERATOR + getSpecExecutorUri(dagNode);
}
/** Quota-tracking key for a flow group: "&lt;flowGroup&gt;&lt;sep&gt;&lt;specExecutorUri&gt;". */
static String getFlowGroupQuotaKey(String flowGroup, DagNode<JobExecutionPlan> dagNode) {
return flowGroup + QUOTA_KEY_SEPERATOR + getSpecExecutorUri(dagNode);
}
/**
 * Increment the value of {@link JobExecutionPlan#currentAttempts} by one (used for retries).
 */
static void incrementJobAttempt(DagNode<JobExecutionPlan> dagNode) {
dagNode.getValue().setCurrentAttempts(dagNode.getValue().getCurrentAttempts() + 1);
}
/**
 * Increment the value of {@link JobExecutionPlan#currentGeneration} by one.
 * This method is not thread safe; correctness relies on the invariant that a given dag is
 * always handled by the same DagManagerThread.
 */
static void incrementJobGeneration(DagNode<JobExecutionPlan> dagNode) {
dagNode.getValue().setCurrentGeneration(dagNode.getValue().getCurrentGeneration() + 1);
}
/**
 * Flow start time is the same as the flow execution id which is the timestamp flow request was received, unless it
 * is a resumed flow, in which case it is {@link JobExecutionPlan#getFlowStartTime()}.
 * A stored start time of 0L is the "not a resumed flow" sentinel.
 * @param dagNode dag node in context
 * @return flow start time
 */
static long getFlowStartTime(DagNode<JobExecutionPlan> dagNode) {
long flowStartTime = dagNode.getValue().getFlowStartTime();
return flowStartTime == 0L ? getFlowExecId(dagNode) : flowStartTime;
}
/**
 * get the sla from the dag node config.
 * if time unit is not provided, it assumes time unit is minute.
 * Note: the configured unit must be a {@link TimeUnit} enum name (e.g. "MINUTES"); otherwise
 * {@code TimeUnit.valueOf} throws {@link IllegalArgumentException}.
 * @param dagNode dag node for which sla is to be retrieved
 * @return sla in milliseconds if it is provided, DEFAULT_FLOW_SLA_MILLIS otherwise
 */
static long getFlowSLA(DagNode<JobExecutionPlan> dagNode) {
Config jobConfig = dagNode.getValue().getJobSpec().getConfig();
TimeUnit slaTimeUnit = TimeUnit.valueOf(ConfigUtils.getString(
jobConfig, ConfigurationKeys.GOBBLIN_FLOW_SLA_TIME_UNIT, ConfigurationKeys.DEFAULT_GOBBLIN_FLOW_SLA_TIME_UNIT));
return jobConfig.hasPath(ConfigurationKeys.GOBBLIN_FLOW_SLA_TIME)
? slaTimeUnit.toMillis(jobConfig.getLong(ConfigurationKeys.GOBBLIN_FLOW_SLA_TIME))
: DEFAULT_FLOW_SLA_MILLIS;
}
/**
 * get the job start sla from the dag node config.
 * if time unit is not provided, it assumes time unit is minute.
 * @param dagNode dag node for which flow start sla is to be retrieved
 * @param defaultJobStartSla fallback value (in ms) when no start-sla is configured
 * @return job start sla in ms
 */
static long getJobStartSla(DagNode<JobExecutionPlan> dagNode, Long defaultJobStartSla) {
Config jobConfig = dagNode.getValue().getJobSpec().getConfig();
TimeUnit slaTimeUnit = TimeUnit.valueOf(ConfigUtils.getString(
jobConfig, ConfigurationKeys.GOBBLIN_JOB_START_SLA_TIME_UNIT, ConfigurationKeys.FALLBACK_GOBBLIN_JOB_START_SLA_TIME_UNIT));
return jobConfig.hasPath(ConfigurationKeys.GOBBLIN_JOB_START_SLA_TIME)
? slaTimeUnit.toMillis(jobConfig.getLong(ConfigurationKeys.GOBBLIN_JOB_START_SLA_TIME))
: defaultJobStartSla;
}
/** Maps a dag to one of {@code numThreads} queues by its flow execution id. */
static int getDagQueueId(Dag<JobExecutionPlan> dag, int numThreads) {
return getDagQueueId(DagManagerUtils.getFlowExecId(dag), numThreads);
}
/**
 * Maps a flow execution id to a queue index in [0, numThreads).
 * NOTE(review): assumes flowExecutionId is non-negative (it is a request timestamp upstream);
 * a negative id would produce a negative index — confirm.
 */
static int getDagQueueId(long flowExecutionId, int numThreads) {
return (int) (flowExecutionId % numThreads);
}
/** Returns the job config shared by all nodes of the dag (taken from the first start node). */
static Config getDagJobConfig(Dag<JobExecutionPlan> dag) {
// Every dag should have at least one node, and the job configurations are cloned among each node
return dag.getStartNodes().get(0).getValue().getJobSpec().getConfig();
}
/** Whether job-level metrics should be emitted for this dag's flow. */
static boolean shouldFlowOutputMetrics(Dag<JobExecutionPlan> dag) {
// defaults to true (so metrics are still tracked) if the dag property is not configured due to old dags
return ConfigUtils.getBoolean(getDagJobConfig(dag), ConfigurationKeys.GOBBLIN_OUTPUT_JOB_LEVEL_METRICS, true);
}
/** Returns the spec executor's URI string. NOTE(review): identical to {@link #getSpecExecutorUri} — consider consolidating. */
static String getSpecExecutorName(DagNode<JobExecutionPlan> dagNode) {
return dagNode.getValue().getSpecExecutor().getUri().toString();
}
/**
 * Emits a flow-level timing event with the dag's flow metadata. A flow event or message already
 * recorded on the dag itself overrides the caller-supplied event name / augments the metadata.
 * No-op when no event submitter is configured or the dag is empty.
 */
static void emitFlowEvent(Optional<EventSubmitter> eventSubmitter, Dag<JobExecutionPlan> dag, String flowEvent) {
if (eventSubmitter.isPresent() && !dag.isEmpty()) {
// Every dag node will contain the same flow metadata
Config config = getDagJobConfig(dag);
Map<String, String> flowMetadata = TimingEventUtils.getFlowMetadata(config);
// A flow event recorded on the dag (e.g. by a failure handler) takes precedence.
if (dag.getFlowEvent() != null) {
flowEvent = dag.getFlowEvent();
}
if (dag.getMessage() != null) {
flowMetadata.put(TimingEvent.METADATA_MESSAGE, dag.getMessage());
}
eventSubmitter.get().getTimingEvent(flowEvent).stop(flowMetadata);
}
}
/**
 * Deserializes the given requester list and returns the distinct requester names.
 * @param serializedRequesters serialized requester list, may be null
 * @return distinct requester names, or an empty list when the input is null
 * @throws RuntimeException wrapping the {@link IOException} if deserialization fails
 */
static List<String> getDistinctUniqueRequesters(String serializedRequesters) {
  if (serializedRequesters == null) {
    return Collections.emptyList();
  }
  try {
    return RequesterService.deserialize(serializedRequesters)
        .stream()
        .map(ServiceRequester::getName)
        .distinct()
        .collect(Collectors.toList());
  } catch (IOException e) {
    // Include the offending payload; the cause carries the parse failure details.
    throw new RuntimeException("Could not deserialize requesters: " + serializedRequesters, e);
  }
}
}
| 3,901 |
0 | Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules | Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/orchestration/InMemoryUserQuotaManager.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.orchestration;
import com.google.inject.Inject;
import com.typesafe.config.Config;
import java.io.IOException;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import javax.inject.Singleton;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.exception.QuotaExceededException;
import org.apache.gobblin.service.modules.flowgraph.Dag;
import org.apache.gobblin.service.modules.spec.JobExecutionPlan;
import org.apache.gobblin.util.ConfigUtils;
import static org.apache.gobblin.service.ExecutionStatus.RUNNING;
/**
* An implementation of {@link UserQuotaManager} that stores quota usage in memory.
*/
@Slf4j
@Singleton
public class InMemoryUserQuotaManager extends AbstractUserQuotaManager {
private final Map<String, Integer> proxyUserToJobCount = new ConcurrentHashMap<>();
private final Map<String, Integer> flowGroupToJobCount = new ConcurrentHashMap<>();
private final Map<String, Integer> requesterToJobCount = new ConcurrentHashMap<>();
private final Set<String> runningDagIds;
@Inject
public InMemoryUserQuotaManager(Config config) {
super(config);
this.runningDagIds = ConcurrentHashMap.newKeySet();;
}
protected QuotaCheck increaseAndCheckQuota(Dag.DagNode<JobExecutionPlan> dagNode) throws IOException {
QuotaCheck quotaCheck = new QuotaCheck(true, true, true, "");
// Dag is already being tracked, no need to double increment for retries and multihop flows
if (containsDagId(DagManagerUtils.generateDagId(dagNode).toString())) {
return quotaCheck;
} else {
addDagId(DagManagerUtils.generateDagId(dagNode).toString());
}
String proxyUser = ConfigUtils.getString(dagNode.getValue().getJobSpec().getConfig(), AzkabanProjectConfig.USER_TO_PROXY, null);
String flowGroup = ConfigUtils.getString(dagNode.getValue().getJobSpec().getConfig(),
ConfigurationKeys.FLOW_GROUP_KEY, "");
String specExecutorUri = DagManagerUtils.getSpecExecutorUri(dagNode);
StringBuilder requesterMessage = new StringBuilder();
boolean proxyUserCheck;
if (proxyUser != null && dagNode.getValue().getCurrentAttempts() <= 1) {
int proxyQuotaIncrement = incrementJobCountAndCheckQuota(
DagManagerUtils.getUserQuotaKey(proxyUser, dagNode), getQuotaForUser(proxyUser), CountType.USER_COUNT);
proxyUserCheck = proxyQuotaIncrement >= 0; // proxy user quota check succeeds
quotaCheck.setProxyUserCheck(proxyUserCheck);
if (!proxyUserCheck) {
// add 1 to proxyUserIncrement since proxyQuotaIncrement is the count before the increment
requesterMessage.append(String.format(
"Quota exceeded for proxy user %s on executor %s : quota=%s, requests above quota=%d%n",
proxyUser, specExecutorUri, getQuotaForUser(proxyUser), Math.abs(proxyQuotaIncrement) + 1 - getQuotaForUser(proxyUser)));
}
}
String serializedRequesters = DagManagerUtils.getSerializedRequesterList(dagNode);
boolean requesterCheck = true;
if (dagNode.getValue().getCurrentAttempts() <= 1) {
List<String> uniqueRequesters = DagManagerUtils.getDistinctUniqueRequesters(serializedRequesters);
for (String requester : uniqueRequesters) {
int userQuotaIncrement = incrementJobCountAndCheckQuota(
DagManagerUtils.getUserQuotaKey(requester, dagNode), getQuotaForUser(requester), CountType.REQUESTER_COUNT);
boolean thisRequesterCheck = userQuotaIncrement >= 0; // user quota check succeeds
requesterCheck = requesterCheck && thisRequesterCheck;
quotaCheck.setRequesterCheck(requesterCheck);
if (!thisRequesterCheck) {
requesterMessage.append(String.format(
"Quota exceeded for requester %s on executor %s : quota=%s, requests above quota=%d%n. ",
requester, specExecutorUri, getQuotaForUser(requester), Math.abs(userQuotaIncrement) + 1 - getQuotaForUser(requester)));
}
}
}
boolean flowGroupCheck;
if (dagNode.getValue().getCurrentAttempts() <= 1) {
int flowGroupQuotaIncrement = incrementJobCountAndCheckQuota(
DagManagerUtils.getFlowGroupQuotaKey(flowGroup, dagNode), getQuotaForFlowGroup(flowGroup), CountType.FLOWGROUP_COUNT);
flowGroupCheck = flowGroupQuotaIncrement >= 0;
quotaCheck.setFlowGroupCheck(flowGroupCheck);
if (!flowGroupCheck) {
requesterMessage.append(String.format("Quota exceeded for flowgroup %s on executor %s : quota=%s, requests above quota=%d%n",
flowGroup, specExecutorUri, getQuotaForFlowGroup(flowGroup),
Math.abs(flowGroupQuotaIncrement) + 1 - getQuotaForFlowGroup(flowGroup)));
}
}
quotaCheck.setRequesterMessage(requesterMessage.toString());
return quotaCheck;
}
protected void rollbackIncrements(Dag.DagNode<JobExecutionPlan> dagNode) throws IOException {
String proxyUser = ConfigUtils.getString(dagNode.getValue().getJobSpec().getConfig(), AzkabanProjectConfig.USER_TO_PROXY, null);
String flowGroup = ConfigUtils.getString(dagNode.getValue().getJobSpec().getConfig(), ConfigurationKeys.FLOW_GROUP_KEY, "");
List<String> usersQuotaIncrement = DagManagerUtils.getDistinctUniqueRequesters(DagManagerUtils.getSerializedRequesterList(dagNode));
decrementJobCount(DagManagerUtils.getUserQuotaKey(proxyUser, dagNode), CountType.USER_COUNT);
decrementQuotaUsageForUsers(usersQuotaIncrement);
decrementJobCount(DagManagerUtils.getFlowGroupQuotaKey(flowGroup, dagNode), CountType.FLOWGROUP_COUNT);
removeDagId(DagManagerUtils.generateDagId(dagNode).toString());
}
private int incrementJobCountAndCheckQuota(String key, int keyQuota, CountType countType) throws IOException {
int currentCount = incrementJobCount(key, countType);
if (currentCount >= keyQuota) {
return -currentCount;
} else {
return currentCount;
}
}
private void decrementQuotaUsageForUsers(List<String> requestersToDecreaseCount) throws IOException {
for (String requester : requestersToDecreaseCount) {
decrementJobCount(requester, CountType.REQUESTER_COUNT);
}
}
/**
* Decrement the quota by one for the proxy user and requesters corresponding to the provided {@link Dag.DagNode}.
* Returns true if the dag existed in the set of running dags and was removed successfully
*/
public boolean releaseQuota(Dag.DagNode<JobExecutionPlan> dagNode) throws IOException {
boolean val = removeDagId(DagManagerUtils.generateDagId(dagNode).toString());
if (!val) {
return false;
}
String proxyUser = ConfigUtils.getString(dagNode.getValue().getJobSpec().getConfig(), AzkabanProjectConfig.USER_TO_PROXY, null);
if (proxyUser != null) {
String proxyUserKey = DagManagerUtils.getUserQuotaKey(proxyUser, dagNode);
decrementJobCount(proxyUserKey, CountType.USER_COUNT);
}
String flowGroup = ConfigUtils.getString(dagNode.getValue().getJobSpec().getConfig(),
ConfigurationKeys.FLOW_GROUP_KEY, "");
decrementJobCount(DagManagerUtils.getFlowGroupQuotaKey(flowGroup, dagNode), CountType.FLOWGROUP_COUNT);
String serializedRequesters = DagManagerUtils.getSerializedRequesterList(dagNode);
try {
for (String requester : DagManagerUtils.getDistinctUniqueRequesters(serializedRequesters)) {
String requesterKey = DagManagerUtils.getUserQuotaKey(requester, dagNode);
decrementJobCount(requesterKey, CountType.REQUESTER_COUNT);
}
} catch (IOException e) {
log.error("Failed to release quota for requester list " + serializedRequesters, e);
return false;
}
return true;
}
void addDagId(String dagId) {
this.runningDagIds.add(dagId);
}
@Override
boolean containsDagId(String dagId) {
return this.runningDagIds.contains(dagId);
}
boolean removeDagId(String dagId) {
return this.runningDagIds.remove(dagId);
}
public void init(Collection<Dag<JobExecutionPlan>> dags) throws IOException {
for (Dag<JobExecutionPlan> dag : dags) {
for (Dag.DagNode<JobExecutionPlan> dagNode : dag.getNodes()) {
if (DagManagerUtils.getExecutionStatus(dagNode) == RUNNING) {
// Add all the currently running Dags to the quota limit per user
increaseAndCheckQuota(dagNode);
}
}
}
}
public void checkQuota(Collection<Dag.DagNode<JobExecutionPlan>> dagNodes) throws IOException {
for (Dag.DagNode<JobExecutionPlan> dagNode : dagNodes) {
QuotaCheck quotaCheck = increaseAndCheckQuota(dagNode);
if ((!quotaCheck.proxyUserCheck || !quotaCheck.requesterCheck || !quotaCheck.flowGroupCheck)) {
// roll back the increased counts in this block
rollbackIncrements(dagNode);
throw new QuotaExceededException(quotaCheck.requesterMessage);
}
}
}
private int incrementJobCount(String key, Map<String, Integer> quotaMap) {
Integer currentCount;
// Modifications must be thread safe since DAGs on DagManagerThreads may update the quota for the same user
do {
currentCount = quotaMap.get(key);
} while (currentCount == null ? quotaMap.putIfAbsent(key, 1) != null : !quotaMap.replace(key, currentCount, currentCount + 1));
if (currentCount == null) {
currentCount = 0;
}
return currentCount;
}
private void decrementJobCount(String key, Map<String, Integer> quotaMap) {
Integer currentCount;
if (key == null) {
return;
}
do {
currentCount = quotaMap.get(key);
} while (currentCount != null && currentCount > 0 && !quotaMap.replace(key, currentCount, currentCount - 1));
if (currentCount == null || currentCount == 0) {
log.warn("Decrement job count was called for " + key + " when the count was already zero/absent.");
}
}
int incrementJobCount(String user, CountType countType) throws IOException {
switch (countType) {
case USER_COUNT:
return incrementJobCount(user, proxyUserToJobCount);
case REQUESTER_COUNT:
return incrementJobCount(user, requesterToJobCount);
case FLOWGROUP_COUNT:
return incrementJobCount(user, flowGroupToJobCount);
default:
throw new IOException("Invalid count type " + countType);
}
}
void decrementJobCount(String user, CountType countType) throws IOException {
switch (countType) {
case USER_COUNT:
decrementJobCount(user, proxyUserToJobCount);
break;
case REQUESTER_COUNT:
decrementJobCount(user, requesterToJobCount);
break;
case FLOWGROUP_COUNT:
decrementJobCount(user, flowGroupToJobCount);
break;
default:
throw new IOException("Invalid count type " + countType);
}
}
} | 3,902 |
0 | Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules | Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/orchestration/Orchestrator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.orchestration;
import com.codahale.metrics.Counter;
import com.codahale.metrics.Meter;
import com.codahale.metrics.Timer;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.typesafe.config.Config;
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.net.URI;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.TimeUnit;
import javax.annotation.Nonnull;
import javax.inject.Inject;
import javax.inject.Singleton;
import lombok.Getter;
import lombok.Setter;
import org.apache.commons.lang3.reflect.ConstructorUtils;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.instrumented.Instrumentable;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.ServiceMetricNames;
import org.apache.gobblin.metrics.Tag;
import org.apache.gobblin.metrics.event.EventSubmitter;
import org.apache.gobblin.metrics.event.TimingEvent;
import org.apache.gobblin.runtime.api.DagActionStore;
import org.apache.gobblin.runtime.api.FlowSpec;
import org.apache.gobblin.runtime.api.JobSpec;
import org.apache.gobblin.runtime.api.Spec;
import org.apache.gobblin.runtime.api.SpecCatalogListener;
import org.apache.gobblin.runtime.api.SpecProducer;
import org.apache.gobblin.runtime.api.TopologySpec;
import org.apache.gobblin.runtime.spec_catalog.AddSpecResponse;
import org.apache.gobblin.runtime.spec_catalog.TopologyCatalog;
import org.apache.gobblin.service.ServiceConfigKeys;
import org.apache.gobblin.service.modules.flow.SpecCompiler;
import org.apache.gobblin.service.modules.flowgraph.Dag;
import org.apache.gobblin.service.modules.spec.JobExecutionPlan;
import org.apache.gobblin.service.modules.utils.FlowCompilationValidationHelper;
import org.apache.gobblin.service.modules.utils.SharedFlowMetricsSingleton;
import org.apache.gobblin.service.monitoring.FlowStatusGenerator;
import org.apache.gobblin.util.ClassAliasResolver;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Orchestrator that is a {@link SpecCatalogListener}. It listens to changes
* to {@link TopologyCatalog} and updates {@link SpecCompiler} state
* Also it listens to {@link org.apache.gobblin.runtime.spec_catalog.FlowCatalog} and use the compiler to compile the new flow spec.
*/
@Alpha
@Singleton
public class Orchestrator implements SpecCatalogListener, Instrumentable {
// Logger: injected via constructor Optional, else one is created for this class.
protected final Logger _log;
// Compiles FlowSpecs into executable job dags; implementation chosen from config.
protected final SpecCompiler specCompiler;
protected final Optional<TopologyCatalog> topologyCatalog;
protected final Optional<DagManager> dagManager;
// Instrumentation (null / absent when instrumentation is disabled).
protected final MetricContext metricContext;
protected final Optional<EventSubmitter> eventSubmitter;
private final boolean isFlowConcurrencyEnabled;
@Getter
private Optional<Meter> flowOrchestrationSuccessFulMeter;
@Getter
private Optional<Meter> flowOrchestrationFailedMeter;
@Getter
private Optional<Timer> flowOrchestrationTimer;
private Optional<Counter> flowFailedForwardToDagManagerCounter;
@Setter
private FlowStatusGenerator flowStatusGenerator;
private UserQuotaManager quotaManager;
// Validates compilation output and concurrent-execution constraints before orchestration.
private final FlowCompilationValidationHelper flowCompilationValidationHelper;
// Present only when the multi-active scheduler is enabled.
private Optional<FlowTriggerHandler> flowTriggerHandler;
@Getter
private final SharedFlowMetricsSingleton sharedFlowMetricsSingleton;
private final ClassAliasResolver<SpecCompiler> aliasResolver;
/**
 * Builds an Orchestrator: resolves and instantiates the configured {@link SpecCompiler},
 * wires its topology map into the {@link DagManager} (when present), sets up metrics when
 * {@code instrumentationEnabled}, and constructs the quota manager and compilation validator.
 *
 * @param config service configuration (spec compiler class, quota manager class, concurrency flag)
 * @param instrumentationEnabled when false, all meters/timers/event submitters are absent
 */
public Orchestrator(Config config, Optional<TopologyCatalog> topologyCatalog, Optional<DagManager> dagManager,
Optional<Logger> log, FlowStatusGenerator flowStatusGenerator, boolean instrumentationEnabled,
Optional<FlowTriggerHandler> flowTriggerHandler, SharedFlowMetricsSingleton sharedFlowMetricsSingleton) {
_log = log.isPresent() ? log.get() : LoggerFactory.getLogger(getClass());
this.aliasResolver = new ClassAliasResolver<>(SpecCompiler.class);
this.topologyCatalog = topologyCatalog;
this.dagManager = dagManager;
this.flowStatusGenerator = flowStatusGenerator;
this.flowTriggerHandler = flowTriggerHandler;
this.sharedFlowMetricsSingleton = sharedFlowMetricsSingleton;
try {
// The compiler class may be given as an alias or a fully-qualified class name.
String specCompilerClassName = ServiceConfigKeys.DEFAULT_GOBBLIN_SERVICE_FLOWCOMPILER_CLASS;
if (config.hasPath(ServiceConfigKeys.GOBBLIN_SERVICE_FLOWCOMPILER_CLASS_KEY)) {
specCompilerClassName = config.getString(ServiceConfigKeys.GOBBLIN_SERVICE_FLOWCOMPILER_CLASS_KEY);
}
_log.info("Using specCompiler class name/alias " + specCompilerClassName);
this.specCompiler = (SpecCompiler) ConstructorUtils.invokeConstructor(Class.forName(this.aliasResolver.resolve(specCompilerClassName)), config);
} catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException | InstantiationException |
ClassNotFoundException e) {
throw new RuntimeException(e);
}
//At this point, the TopologySpecMap is initialized by the SpecCompiler. Pass the TopologySpecMap to the DagManager.
if (this.dagManager.isPresent()) {
this.dagManager.get().setTopologySpecMap(getSpecCompiler().getTopologySpecMap());
}
if (instrumentationEnabled) {
this.metricContext = Instrumented.getMetricContext(ConfigUtils.configToState(config), this.specCompiler.getClass());
this.flowOrchestrationSuccessFulMeter = Optional.of(this.metricContext.meter(ServiceMetricNames.FLOW_ORCHESTRATION_SUCCESSFUL_METER));
this.flowOrchestrationFailedMeter = Optional.of(this.metricContext.meter(ServiceMetricNames.FLOW_ORCHESTRATION_FAILED_METER));
this.flowOrchestrationTimer = Optional.of(this.metricContext.timer(ServiceMetricNames.FLOW_ORCHESTRATION_TIMER));
this.flowFailedForwardToDagManagerCounter = Optional.of(this.metricContext.counter(ServiceMetricNames.FLOW_FAILED_FORWARD_TO_DAG_MANAGER_COUNT));
this.eventSubmitter = Optional.of(new EventSubmitter.Builder(this.metricContext, "org.apache.gobblin.service").build());
} else {
// Instrumentation disabled: leave all metric handles absent so call sites can skip emission.
this.metricContext = null;
this.flowOrchestrationSuccessFulMeter = Optional.absent();
this.flowOrchestrationFailedMeter = Optional.absent();
this.flowOrchestrationTimer = Optional.absent();
this.flowFailedForwardToDagManagerCounter = Optional.absent();
this.eventSubmitter = Optional.absent();
}
this.isFlowConcurrencyEnabled = ConfigUtils.getBoolean(config, ServiceConfigKeys.FLOW_CONCURRENCY_ALLOWED,
ServiceConfigKeys.DEFAULT_FLOW_CONCURRENCY_ALLOWED);
quotaManager = GobblinConstructorUtils.invokeConstructor(UserQuotaManager.class,
ConfigUtils.getString(config, ServiceConfigKeys.QUOTA_MANAGER_CLASS, ServiceConfigKeys.DEFAULT_QUOTA_MANAGER),
config);
this.flowCompilationValidationHelper = new FlowCompilationValidationHelper(sharedFlowMetricsSingleton, specCompiler,
quotaManager, eventSubmitter, flowStatusGenerator, isFlowConcurrencyEnabled);
}
/** Injection entry point: delegates to the full constructor with instrumentation enabled. */
@Inject
public Orchestrator(Config config, FlowStatusGenerator flowStatusGenerator, Optional<TopologyCatalog> topologyCatalog,
Optional<DagManager> dagManager, Optional<Logger> log, Optional<FlowTriggerHandler> flowTriggerHandler,
SharedFlowMetricsSingleton sharedFlowMetricsSingleton) {
this(config, topologyCatalog, dagManager, log, flowStatusGenerator, true, flowTriggerHandler,
sharedFlowMetricsSingleton);
}
/** Returns the {@link SpecCompiler} in use; exposed for tests. */
@VisibleForTesting
public SpecCompiler getSpecCompiler() {
return this.specCompiler;
}
/** {@inheritDoc} */
@Override
public AddSpecResponse onAddSpec(Spec addedSpec) {
  // FlowSpecs are compiled and the compiler's response is propagated to the caller.
  if (addedSpec instanceof FlowSpec) {
    _log.info("New Spec detected of type FlowSpec: " + addedSpec);
    return this.specCompiler.onAddSpec(addedSpec);
  }
  // TopologySpecs update compiler state; their compiler response is not surfaced.
  if (addedSpec instanceof TopologySpec) {
    _log.info("New Spec detected of type TopologySpec: " + addedSpec);
    this.specCompiler.onAddSpec(addedSpec);
  }
  return new AddSpecResponse(null);
}
/** Convenience overload that forwards with empty headers. */
public void onDeleteSpec(URI deletedSpecURI, String deletedSpecVersion) {
onDeleteSpec(deletedSpecURI, deletedSpecVersion, new Properties());
}
/** {@inheritDoc} */
@Override
public void onDeleteSpec(URI deletedSpecURI, String deletedSpecVersion, Properties headers) {
_log.info("Spec deletion detected: " + deletedSpecURI + "/" + deletedSpecVersion);
// NOTE(review): the guard checks that a TopologyCatalog is present, not the deleted spec's type —
// confirm deletions should be dropped entirely when no topology catalog is configured.
if (topologyCatalog.isPresent()) {
this.specCompiler.onDeleteSpec(deletedSpecURI, deletedSpecVersion, headers);
}
}
/**
 * {@inheritDoc}
 *
 * <p>A FlowSpec update is treated as an add. A TopologySpec update is handled as a
 * delete-then-add; the two steps are best-effort and not atomic — a failure in either
 * is logged and the other step still runs.
 */
@Override
public void onUpdateSpec(Spec updatedSpec) {
_log.info("Spec changed: " + updatedSpec);
if (updatedSpec instanceof FlowSpec) {
onAddSpec(updatedSpec);
}
// Only TopologySpecs need the delete-then-add cycle below.
if (!(updatedSpec instanceof TopologySpec)) {
return;
}
try {
onDeleteSpec(updatedSpec.getUri(), updatedSpec.getVersion());
} catch (Exception e) {
_log.error("Failed to update Spec: " + updatedSpec, e);
}
try {
onAddSpec(updatedSpec);
} catch (Exception e) {
_log.error("Failed to update Spec: " + updatedSpec, e);
}
}
/**
 * Compiles a {@link FlowSpec} into a job execution plan dag and either forwards it to the
 * multi-active scheduler ({@code flowTriggerHandler}), the {@code DagManager}, or schedules each
 * compiled {@link JobSpec} directly on its {@link SpecProducer}.
 *
 * @param spec must be a {@link FlowSpec}; any other type fails the orchestration meter and throws
 * @param jobProps job properties of the triggering job (used for logging and trigger handling)
 * @param triggerTimestampMillis trigger event time; sentinel "never set" value causes a skip when
 *        the multi-active scheduler is enabled
 * @param isReminderEvent whether this invocation is a reminder (vs. original) trigger event
 * @throws Exception on compilation/validation/forwarding failures
 */
public void orchestrate(Spec spec, Properties jobProps, long triggerTimestampMillis, boolean isReminderEvent)
    throws Exception {
  // Add below waiting because TopologyCatalog and FlowCatalog service can be launched at the same time
  this.topologyCatalog.get().getInitComplete().await();
  //Wait for the SpecCompiler to become healthy.
  this.getSpecCompiler().awaitHealthy();
  long startTime = System.nanoTime();
  if (spec instanceof FlowSpec) {
    Config flowConfig = ((FlowSpec) spec).getConfig();
    String flowGroup = flowConfig.getString(ConfigurationKeys.FLOW_GROUP_KEY);
    String flowName = flowConfig.getString(ConfigurationKeys.FLOW_NAME_KEY);
    sharedFlowMetricsSingleton.addFlowGauge(spec, flowConfig, flowGroup, flowName);
    // Timer is created up front so compilation time is captured; stopped only on the non-multi-active path below.
    Optional<TimingEvent> flowCompilationTimer =
        this.eventSubmitter.transform(submitter -> new TimingEvent(submitter, TimingEvent.FlowTimings.FLOW_COMPILED));
    Optional<Dag<JobExecutionPlan>> jobExecutionPlanDagOptional =
        this.flowCompilationValidationHelper.validateAndHandleConcurrentExecution(flowConfig, spec, flowGroup,
            flowName);
    // Absent dag means validation failed or a concurrent execution disallowed this run.
    if (!jobExecutionPlanDagOptional.isPresent()) {
      Instrumented.markMeter(this.flowOrchestrationFailedMeter);
      return;
    }
    Map<String, String> flowMetadata = TimingEventUtils.getFlowMetadata((FlowSpec) spec);
    FlowCompilationValidationHelper.addFlowExecutionIdIfAbsent(flowMetadata, jobExecutionPlanDagOptional.get());
    // If multi-active scheduler is enabled do not pass onto DagManager, otherwise scheduler forwards it directly
    // Skip flow compilation as well, since we recompile after receiving event from DagActionStoreChangeMonitor later
    if (flowTriggerHandler.isPresent()) {
      // If triggerTimestampMillis was not set by the job trigger handler, then we do not handle this event
      if (triggerTimestampMillis == Long.parseLong(ConfigurationKeys.ORCHESTRATOR_TRIGGER_EVENT_TIME_NEVER_SET_VAL)) {
        _log.warn("Skipping execution of spec: {} because missing trigger timestamp in job properties",
            jobProps.getProperty(ConfigurationKeys.JOB_NAME_KEY));
        flowMetadata.put(TimingEvent.METADATA_MESSAGE, "Flow orchestration skipped because no trigger timestamp "
            + "associated with flow action.");
        // Emit FLOW_FAILED so the flow is not left in a dangling state with no terminal event.
        if (this.eventSubmitter.isPresent()) {
          new TimingEvent(this.eventSubmitter.get(), TimingEvent.FlowTimings.FLOW_FAILED).stop(flowMetadata);
        }
        return;
      }
      String flowExecutionId = flowMetadata.get(TimingEvent.FlowEventConstants.FLOW_EXECUTION_ID_FIELD);
      DagActionStore.DagAction flowAction =
          new DagActionStore.DagAction(flowGroup, flowName, flowExecutionId, DagActionStore.FlowActionType.LAUNCH);
      flowTriggerHandler.get().handleTriggerEvent(jobProps, flowAction, triggerTimestampMillis, isReminderEvent);
      _log.info("Multi-active scheduler finished handling trigger event: [{}, is: {}, triggerEventTimestamp: {}]",
          flowAction, isReminderEvent ? "reminder" : "original", triggerTimestampMillis);
    } else {
      Dag<JobExecutionPlan> jobExecutionPlanDag = jobExecutionPlanDagOptional.get();
      // Empty dag indicates compilation produced no executable jobs (e.g. no eligible executor).
      if (jobExecutionPlanDag == null || jobExecutionPlanDag.isEmpty()) {
        FlowCompilationValidationHelper.populateFlowCompilationFailedEventMessage(eventSubmitter, spec, flowMetadata);
        Instrumented.markMeter(this.flowOrchestrationFailedMeter);
        sharedFlowMetricsSingleton.conditionallyUpdateFlowGaugeSpecState(spec,
            SharedFlowMetricsSingleton.CompiledState.FAILED);
        _log.warn("Cannot determine an executor to run on for Spec: " + spec);
        return;
      }
      sharedFlowMetricsSingleton.conditionallyUpdateFlowGaugeSpecState(spec,
          SharedFlowMetricsSingleton.CompiledState.SUCCESSFUL);
      FlowCompilationValidationHelper.addFlowExecutionIdIfAbsent(flowMetadata, jobExecutionPlanDag);
      if (flowCompilationTimer.isPresent()) {
        flowCompilationTimer.get().stop(flowMetadata);
      }
      // Depending on if DagManager is present, handle execution
      if (this.dagManager.isPresent()) {
        submitFlowToDagManager((FlowSpec) spec, jobExecutionPlanDag);
      } else {
        // Schedule all compiled JobSpecs on their respective Executor
        for (Dag.DagNode<JobExecutionPlan> dagNode : jobExecutionPlanDag.getNodes()) {
          DagManagerUtils.incrementJobAttempt(dagNode);
          JobExecutionPlan jobExecutionPlan = dagNode.getValue();
          // Run this spec on selected executor
          SpecProducer producer = null;
          try {
            producer = jobExecutionPlan.getSpecExecutor().getProducer().get();
            Spec jobSpec = jobExecutionPlan.getJobSpec();
            if (!((JobSpec) jobSpec).getConfig().hasPath(ConfigurationKeys.FLOW_EXECUTION_ID_KEY)) {
              _log.warn("JobSpec does not contain flowExecutionId.");
            }
            Map<String, String> jobMetadata = TimingEventUtils.getJobMetadata(flowMetadata, jobExecutionPlan);
            _log.info(String.format("Going to orchestrate JobSpec: %s on Executor: %s", jobSpec, producer));
            Optional<TimingEvent> jobOrchestrationTimer = this.eventSubmitter.transform(
                submitter -> new TimingEvent(submitter, TimingEvent.LauncherTimings.JOB_ORCHESTRATED));
            producer.addSpec(jobSpec);
            if (jobOrchestrationTimer.isPresent()) {
              jobOrchestrationTimer.get().stop(jobMetadata);
            }
          } catch (Exception e) {
            // Best-effort per job: one failed submission does not stop the remaining dag nodes.
            _log.error("Cannot successfully setup spec: " + jobExecutionPlan.getJobSpec() + " on executor: " + producer
                + " for flow: " + spec, e);
          }
        }
      }
    }
  } else {
    Instrumented.markMeter(this.flowOrchestrationFailedMeter);
    throw new RuntimeException("Spec not of type FlowSpec, cannot orchestrate: " + spec);
  }
  Instrumented.markMeter(this.flowOrchestrationSuccessFulMeter);
  Instrumented.updateTimer(this.flowOrchestrationTimer, System.nanoTime() - startTime, TimeUnit.NANOSECONDS);
}
/**
 * Compiles the flow (validating concurrent-execution rules) and, when a plan results, forwards it
 * to the DagManager; otherwise marks the orchestration-failed meter.
 */
public void submitFlowToDagManager(FlowSpec flowSpec, Optional<String> optionalFlowExecutionId) throws IOException, InterruptedException {
  Optional<Dag<JobExecutionPlan>> compiledDag =
      this.flowCompilationValidationHelper.createExecutionPlanIfValid(flowSpec, optionalFlowExecutionId);
  if (!compiledDag.isPresent()) {
    _log.warn("Flow: {} submitted to dagManager failed to compile and produce a job execution plan dag", flowSpec);
    Instrumented.markMeter(this.flowOrchestrationFailedMeter);
    return;
  }
  submitFlowToDagManager(flowSpec, compiledDag.get());
}
/**
 * Forwards an already-compiled job execution plan dag to the DagManager. On failure, bumps the
 * forwarding-failure counter, emits FLOW_FAILED (so the flow is not marooned in COMPILED state —
 * failure is likely attributable to DB connection/failover), and rethrows.
 */
public void submitFlowToDagManager(FlowSpec flowSpec, Dag<JobExecutionPlan> jobExecutionPlanDag)
    throws IOException {
  try {
    // Hand the compiled dag over to the DagManager for execution.
    this.dagManager.get().addDag(jobExecutionPlanDag, true, true);
  } catch (Exception e) {
    String failureMessage = "Failed to add Job Execution Plan due to: " + e.getMessage();
    _log.warn("Orchestrator call - " + failureMessage, e);
    if (this.flowFailedForwardToDagManagerCounter.isPresent()) {
      this.flowFailedForwardToDagManagerCounter.get().inc();
    }
    if (this.eventSubmitter.isPresent()) {
      // pronounce failed before stack unwinds, to ensure flow not marooned in `COMPILED` state; (failure likely attributable to DB connection/failover)
      Map<String, String> flowMetadata = TimingEventUtils.getFlowMetadata(flowSpec);
      flowMetadata.put(TimingEvent.METADATA_MESSAGE, failureMessage);
      new TimingEvent(this.eventSubmitter.get(), TimingEvent.FlowTimings.FLOW_FAILED).stop(flowMetadata);
    }
    throw e;
  }
}
/**
 * Cancels a running flow: asks the DagManager (when configured) to stop the dag, then recompiles
 * the flow to locate each job's SpecProducer and issue cleanup deletes.
 */
public void remove(Spec spec, Properties headers) throws IOException {
  // TODO: Evolve logic to cache and reuse previously compiled JobSpecs
  // .. this will work for Identity compiler but not always for multi-hop.
  // Note: Current logic assumes compilation is consistent between all executions
  if (!(spec instanceof FlowSpec)) {
    throw new RuntimeException("Spec not of type FlowSpec, cannot delete: " + spec);
  }
  // Send the dag to the DagManager to stop it, then to each SpecProducer for executor-side cleanup.
  if (this.dagManager.isPresent()) {
    _log.info("Forwarding cancel request for flow URI {} to DagManager.", spec.getUri());
    this.dagManager.get().stopDag(spec.getUri());
  }
  // We need to recompile the flow to find the spec producer;
  // if the compilation result differs, the remove request can go to some different spec producer.
  deleteFromExecutor(spec, headers);
}
/**
 * Recompiles the flow and issues a delete for each compiled JobSpec against the executor it was
 * compiled for. Per-job failures are logged and do not stop the remaining deletes.
 */
private void deleteFromExecutor(Spec spec, Properties headers) {
  Dag<JobExecutionPlan> compiledDag = specCompiler.compileFlow(spec);
  if (compiledDag.isEmpty()) {
    _log.warn("Cannot determine an executor to delete Spec: " + spec);
    return;
  }
  for (Dag.DagNode<JobExecutionPlan> node : compiledDag.getNodes()) {
    JobExecutionPlan plan = node.getValue();
    JobSpec jobSpec = plan.getJobSpec();
    try {
      SpecProducer<Spec> producer = plan.getSpecExecutor().getProducer().get();
      // Propagate the flow execution id (when present) so the executor can target the right run.
      if (jobSpec.getConfig().hasPath(ConfigurationKeys.FLOW_EXECUTION_ID_KEY)) {
        headers.setProperty(ConfigurationKeys.FLOW_EXECUTION_ID_KEY, jobSpec.getConfig().getString(ConfigurationKeys.FLOW_EXECUTION_ID_KEY));
      }
      _log.info(String.format("Going to delete JobSpec: %s on Executor: %s", jobSpec, producer));
      producer.deleteSpec(jobSpec.getUri(), headers);
    } catch (Exception e) {
      _log.error(String.format("Could not delete JobSpec: %s for flow: %s", jobSpec, spec), e);
    }
  }
}
/** Returns this instance's {@link MetricContext}.
 * NOTE(review): annotated @Nonnull, yet {@code isInstrumentationEnabled} guards against null — confirm which holds. */
@Nonnull
@Override
public MetricContext getMetricContext() {
return this.metricContext;
}
/** Instrumentation is considered enabled whenever a metric context was created. */
@Override
public boolean isInstrumentationEnabled() {
  return this.metricContext != null;
}
/** This class contributes no metric tags of its own. */
@Override
public List<Tag<?>> generateTags(State state) {
return Collections.emptyList();
}
/** Switching metric contexts is not supported; the context is fixed for this instance's lifetime. */
@Override
public void switchMetricContext(List<Tag<?>> tags) {
throw new UnsupportedOperationException();
}
/** Switching metric contexts is not supported; the context is fixed for this instance's lifetime. */
@Override
public void switchMetricContext(MetricContext context) {
throw new UnsupportedOperationException();
}
} | 3,903 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.orchestration;
import java.io.IOException;
import java.lang.reflect.Type;
import java.net.URI;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.metastore.MysqlDagStateStoreFactory;
import org.apache.gobblin.metastore.MysqlStateStore;
import org.apache.gobblin.metastore.MysqlStateStoreEntryManager;
import org.apache.gobblin.metastore.StateStore;
import org.apache.gobblin.metastore.predicates.StateStorePredicate;
import org.apache.gobblin.metrics.ContextAwareCounter;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.ServiceMetricNames;
import org.apache.gobblin.runtime.api.TopologySpec;
import org.apache.gobblin.runtime.spec_serde.GsonSerDe;
import org.apache.gobblin.service.ServiceConfigKeys;
import org.apache.gobblin.service.modules.flowgraph.Dag;
import org.apache.gobblin.service.modules.spec.JobExecutionPlan;
import org.apache.gobblin.service.modules.spec.JobExecutionPlanDagFactory;
import org.apache.gobblin.service.modules.spec.JobExecutionPlanListDeserializer;
import org.apache.gobblin.service.modules.spec.JobExecutionPlanListSerializer;
import org.apache.gobblin.util.ConfigUtils;
import com.google.common.base.Joiner;
import com.google.common.base.Predicates;
import com.google.gson.JsonDeserializer;
import com.google.gson.JsonSerializer;
import com.google.gson.reflect.TypeToken;
import com.typesafe.config.Config;
import static org.apache.gobblin.service.ServiceConfigKeys.GOBBLIN_SERVICE_PREFIX;
import static org.apache.gobblin.service.modules.orchestration.DagManagerUtils.generateDagId;
/**
* A implementation of {@link DagStateStore} using MySQL as a backup, leverage {@link MysqlStateStore}.
* It implements interfaces of {@link DagStateStore} but delegating responsibilities to methods provided
* in {@link MysqlStateStore}.
* It also implements conversion between {@link Dag<JobExecutionPlan>} to {@link State}.
*
* The schema of this will simply be:
* | storeName | tableName | State |
* where storeName represents FlowId, a combination of FlowGroup and FlowName, and tableName represents FlowExecutionId.
* State is a pocket for serialized {@link Dag} object.
*
*
*/
public class MysqlDagStateStore implements DagStateStore {
  public static final String CONFIG_PREFIX = GOBBLIN_SERVICE_PREFIX + "mysqlDagStateStore";
  public static final String DAG_KEY_IN_STATE = "dag";

  /**
   * The schema of {@link MysqlStateStore} is fixed but the columns are semantically projected into Dag's context:
   * - The 'storeName' is FlowId.
   * - The 'tableName' is FlowExecutionId.
   */
  private final MysqlStateStore<State> mysqlStateStore;
  // Serializes/deserializes the list of JobExecutionPlans that constitutes a Dag.
  private final GsonSerDe<List<JobExecutionPlan>> serDe;
  private final JobExecutionPlanDagFactory jobExecPlanDagFactory;
  private final MetricContext metricContext;
  // Counts dags currently checkpointed by this store (incremented on write, decremented on cleanUp).
  private final ContextAwareCounter totalDagCount;

  public MysqlDagStateStore(Config config, Map<URI, TopologySpec> topologySpecMap) {
    // Scope the config to this store's prefix when present, falling back to the full config.
    if (config.hasPath(CONFIG_PREFIX)) {
      config = config.getConfig(CONFIG_PREFIX).withFallback(config);
    }
    // NOTE(review): createStateStore is overridable and invoked from the constructor; subclasses
    // must not depend on their own fields being initialized when it runs.
    this.mysqlStateStore = (MysqlStateStore<State>) createStateStore(config);
    JsonSerializer<List<JobExecutionPlan>> serializer = new JobExecutionPlanListSerializer();
    JsonDeserializer<List<JobExecutionPlan>> deserializer = new JobExecutionPlanListDeserializer(topologySpecMap);
    // The Type must be captured here (not inside GsonSerDe) because of type erasure.
    Type typeToken = new TypeToken<List<JobExecutionPlan>>() {
    }.getType();
    this.serDe = new GsonSerDe<>(typeToken, serializer, deserializer);
    this.jobExecPlanDagFactory = new JobExecutionPlanDagFactory();
    this.metricContext = Instrumented.getMetricContext(new org.apache.gobblin.configuration.State(ConfigUtils.configToProperties(config)),
        this.getClass());
    this.totalDagCount = this.metricContext.contextAwareCounter(ServiceMetricNames.DAG_COUNT_MYSQL_DAG_STATE_COUNT);
  }

  /**
   * Creates the backing {@link StateStore}. Protected so subclasses/tests can substitute a factory.
   */
  protected StateStore<State> createStateStore(Config config) {
    try {
      // getDeclaredConstructor().newInstance() replaces the deprecated Class.newInstance(),
      // which could propagate undeclared checked exceptions.
      return MysqlDagStateStoreFactory.class.getDeclaredConstructor().newInstance().createStateStore(config, State.class);
    } catch (ReflectiveOperationException rfoe) {
      throw new RuntimeException("A MySQL StateStore cannot be correctly initialized due to:", rfoe);
    }
  }

  @Override
  public void writeCheckpoint(Dag<JobExecutionPlan> dag)
      throws IOException {
    // Compute the dag id once instead of deriving it twice.
    String dagId = generateDagId(dag).toString();
    mysqlStateStore.put(getStoreNameFromDagId(dagId), getTableNameFromDagId(dagId), convertDagIntoState(dag));
    this.totalDagCount.inc();
  }

  @Override
  public void cleanUp(Dag<JobExecutionPlan> dag)
      throws IOException {
    cleanUp(generateDagId(dag).toString());
  }

  @Override
  public void cleanUp(String dagId)
      throws IOException {
    mysqlStateStore.delete(getStoreNameFromDagId(dagId), getTableNameFromDagId(dagId));
    this.totalDagCount.dec();
  }

  /** Loads every checkpointed dag from the store. */
  @Override
  public List<Dag<JobExecutionPlan>> getDags()
      throws IOException {
    return mysqlStateStore.getAll().stream().map(this::convertStateObjIntoDag).collect(Collectors.toList());
  }

  /** Returns the dag for {@code dagId}, or {@code null} if no checkpoint exists. */
  @Override
  public Dag<JobExecutionPlan> getDag(String dagId) throws IOException {
    List<State> states = mysqlStateStore.getAll(getStoreNameFromDagId(dagId), getTableNameFromDagId(dagId));
    if (states.isEmpty()) {
      return null;
    }
    // (storeName, tableName) identifies a single dag, so at most one entry is expected.
    return convertStateObjIntoDag(states.get(0));
  }

  @Override
  public Set<String> getDagIds() throws IOException {
    List<MysqlStateStoreEntryManager> entries = (List<MysqlStateStoreEntryManager>) mysqlStateStore
        .getMetadataForTables(new StateStorePredicate(Predicates.alwaysTrue()));
    return entries.stream().map(entry -> entryToDagId(entry.getStoreName(), entry.getTableName())).collect(Collectors.toSet());
  }

  /**
   * Convert a state store entry into a dag ID
   * e.g. storeName = group1_name1, tableName = 1234 gives dagId group1_name1_1234
   */
  private String entryToDagId(String storeName, String tableName) {
    return Joiner.on(ServiceConfigKeys.DAG_STORE_KEY_SEPARATION_CHARACTER).join(storeName, tableName);
  }

  /**
   * Return a storeName given a dagId. Store name is defined as flowGroup_flowName.
   */
  private String getStoreNameFromDagId(String dagId) {
    return dagId.substring(0, dagId.lastIndexOf(ServiceConfigKeys.DAG_STORE_KEY_SEPARATION_CHARACTER));
  }

  /**
   * Return a tableName given a dagId. Table name is defined as the flowExecutionId.
   */
  private String getTableNameFromDagId(String dagId) {
    return dagId.substring(dagId.lastIndexOf(ServiceConfigKeys.DAG_STORE_KEY_SEPARATION_CHARACTER) + 1);
  }

  /**
   * For {@link Dag} to work with {@link MysqlStateStore}, it needs to be packaged into a {@link State} object.
   * The dag is serialized and stored under the single key {@link #DAG_KEY_IN_STATE}.
   * The serialization step is required for readability and portability of the serde lib.
   *
   * @param dag The dag to be converted.
   * @return A {@link State} object that contains a single k-v pair for {@link Dag}.
   */
  private State convertDagIntoState(Dag<JobExecutionPlan> dag) {
    State outputState = new State();
    // Make sure the object has been serialized.
    List<JobExecutionPlan> jobExecutionPlanList =
        dag.getNodes().stream().map(Dag.DagNode::getValue).collect(Collectors.toList());
    outputState.setProp(DAG_KEY_IN_STATE, serDe.serialize(jobExecutionPlanList));
    return outputState;
  }

  /**
   * Get the {@link Dag} out of a {@link State} pocket.
   */
  private Dag<JobExecutionPlan> convertStateObjIntoDag(State state) {
    String serializedJobExecPlanList = state.getProp(DAG_KEY_IN_STATE);
    return jobExecPlanDagFactory.createDag(serDe.deserialize(serializedJobExecPlanList));
  }
}
| 3,904 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.orchestration;
import java.util.Map;
import com.google.common.collect.Maps;
import com.typesafe.config.Config;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.metrics.event.TimingEvent;
import org.apache.gobblin.runtime.api.FlowSpec;
import org.apache.gobblin.runtime.api.JobSpec;
import org.apache.gobblin.runtime.api.SpecExecutor;
import org.apache.gobblin.service.modules.spec.JobExecutionPlan;
import org.apache.gobblin.util.ConfigUtils;
public class TimingEventUtils {
public static Map<String, String> getFlowMetadata(FlowSpec flowSpec) {
return getFlowMetadata(flowSpec.getConfig());
}
static Map<String, String> getFlowMetadata(Config flowConfig) {
Map<String, String> metadata = Maps.newHashMap();
metadata.put(TimingEvent.FlowEventConstants.FLOW_NAME_FIELD, flowConfig.getString(ConfigurationKeys.FLOW_NAME_KEY));
metadata.put(TimingEvent.FlowEventConstants.FLOW_GROUP_FIELD, flowConfig.getString(ConfigurationKeys.FLOW_GROUP_KEY));
if (flowConfig.hasPath(ConfigurationKeys.FLOW_EXECUTION_ID_KEY)) {
metadata.put(TimingEvent.FlowEventConstants.FLOW_EXECUTION_ID_FIELD, flowConfig.getString(ConfigurationKeys.FLOW_EXECUTION_ID_KEY));
}
return metadata;
}
static Map<String, String> getJobMetadata(Map<String, String> flowMetadata, JobExecutionPlan jobExecutionPlan) {
Map<String, String> jobMetadata = Maps.newHashMap();
JobSpec jobSpec = jobExecutionPlan.getJobSpec();
SpecExecutor specExecutor = jobExecutionPlan.getSpecExecutor();
jobMetadata.putAll(flowMetadata);
jobMetadata.put(TimingEvent.FlowEventConstants.FLOW_NAME_FIELD, jobSpec.getConfig().getString(ConfigurationKeys.FLOW_NAME_KEY));
jobMetadata.put(TimingEvent.FlowEventConstants.FLOW_GROUP_FIELD, jobSpec.getConfig().getString(ConfigurationKeys.FLOW_GROUP_KEY));
jobMetadata.put(TimingEvent.FlowEventConstants.FLOW_EXECUTION_ID_FIELD, jobSpec.getConfig().getString(ConfigurationKeys.FLOW_EXECUTION_ID_KEY));
jobMetadata.put(TimingEvent.FlowEventConstants.JOB_NAME_FIELD, jobSpec.getConfig().getString(ConfigurationKeys.JOB_NAME_KEY));
jobMetadata.put(TimingEvent.FlowEventConstants.JOB_GROUP_FIELD, jobSpec.getConfig().getString(ConfigurationKeys.JOB_GROUP_KEY));
jobMetadata.put(TimingEvent.FlowEventConstants.JOB_TAG_FIELD, ConfigUtils.getString(jobSpec.getConfig(), ConfigurationKeys.JOB_TAG_KEY, null));
jobMetadata.put(TimingEvent.FlowEventConstants.SPEC_EXECUTOR_FIELD, specExecutor.getUri().toString());
jobMetadata.put(TimingEvent.FlowEventConstants.MAX_ATTEMPTS_FIELD, Integer.toString(jobExecutionPlan.getMaxAttempts()));
jobMetadata.put(TimingEvent.FlowEventConstants.CURRENT_ATTEMPTS_FIELD, Integer.toString(jobExecutionPlan.getCurrentAttempts()));
jobMetadata.put(TimingEvent.FlowEventConstants.CURRENT_GENERATION_FIELD, Integer.toString(jobExecutionPlan.getCurrentGeneration()));
jobMetadata.put(TimingEvent.FlowEventConstants.SHOULD_RETRY_FIELD, Boolean.toString(false));
jobMetadata.put(TimingEvent.FlowEventConstants.FLOW_EDGE_FIELD,
ConfigUtils.getString(jobSpec.getConfig(), ConfigurationKeys.FLOW_EDGE_ID_KEY, ""));
jobMetadata.put(TimingEvent.FlowEventConstants.FLOW_MODIFICATION_TIME_FIELD, Long.toString(
ConfigUtils.getLong(jobSpec.getConfig(), FlowSpec.MODIFICATION_TIME_KEY, 0L)));
return jobMetadata;
}
}
| 3,905 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.orchestration;
import java.io.File;
import java.io.IOException;
import java.lang.reflect.Type;
import java.net.URI;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.commons.lang3.StringUtils;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Charsets;
import com.google.common.collect.Lists;
import com.google.common.io.Files;
import com.google.gson.JsonDeserializer;
import com.google.gson.JsonSerializer;
import com.google.gson.reflect.TypeToken;
import com.typesafe.config.Config;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.metrics.ContextAwareCounter;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.ServiceMetricNames;
import org.apache.gobblin.runtime.api.TopologySpec;
import org.apache.gobblin.runtime.spec_serde.GsonSerDe;
import org.apache.gobblin.service.modules.flowgraph.Dag;
import org.apache.gobblin.service.modules.spec.JobExecutionPlan;
import org.apache.gobblin.service.modules.spec.JobExecutionPlanDagFactory;
import org.apache.gobblin.service.modules.spec.JobExecutionPlanListDeserializer;
import org.apache.gobblin.service.modules.spec.JobExecutionPlanListSerializer;
import org.apache.gobblin.util.ConfigUtils;
@Alpha
@Slf4j
public class FSDagStateStore implements DagStateStore {
  public static final String DAG_FILE_EXTENSION = ".dag";
  static final String DAG_STATESTORE_DIR = DagManager.DAG_MANAGER_PREFIX + "dagStateStoreDir";

  // Directory holding one <dagId>.dag file per checkpointed dag.
  private final String dagCheckpointDir;
  private final GsonSerDe<List<JobExecutionPlan>> serDe;
  private final MetricContext metricContext;
  // Counts dags currently checkpointed (incremented on write, decremented on successful cleanUp).
  private final ContextAwareCounter totalDagCount;

  public FSDagStateStore(Config config, Map<URI, TopologySpec> topologySpecMap) throws IOException {
    this.dagCheckpointDir = config.getString(DAG_STATESTORE_DIR);
    File checkpointDir = new File(this.dagCheckpointDir);
    // Re-check existence after a failed mkdirs() so a concurrent creator does not trigger a spurious failure.
    if (!checkpointDir.exists() && !checkpointDir.mkdirs() && !checkpointDir.exists()) {
      throw new IOException("Could not create dag state store dir - " + this.dagCheckpointDir);
    }
    JsonSerializer<List<JobExecutionPlan>> serializer = new JobExecutionPlanListSerializer();
    JsonDeserializer<List<JobExecutionPlan>> deserializer = new JobExecutionPlanListDeserializer(topologySpecMap);
    /** {@link Type} object will need to strictly match with the generic arguments being used
     * to define {@link GsonSerDe}
     * Due to type erasure, the {@link Type} needs to initialized here instead of inside {@link GsonSerDe}.
     * */
    Type typeToken = new TypeToken<List<JobExecutionPlan>>(){}.getType();
    this.serDe = new GsonSerDe<>(typeToken, serializer, deserializer);
    this.metricContext = Instrumented.getMetricContext(new org.apache.gobblin.configuration.State(ConfigUtils.configToProperties(config)),
        this.getClass());
    this.totalDagCount = this.metricContext.contextAwareCounter(ServiceMetricNames.DAG_COUNT_FS_DAG_STATE_COUNT);
  }

  /**
   * {@inheritDoc}
   */
  @Override
  public synchronized void writeCheckpoint(Dag<JobExecutionPlan> dag) throws IOException {
    // write to a temporary name then rename to make the operation atomic when the file system allows a file to be
    // replaced
    String fileName = DagManagerUtils.generateDagId(dag) + DAG_FILE_EXTENSION;
    String serializedDag = serializeDag(dag);
    File tmpCheckpointFile = new File(this.dagCheckpointDir, fileName + ".tmp");
    File checkpointFile = new File(this.dagCheckpointDir, fileName);
    Files.write(serializedDag, tmpCheckpointFile, Charsets.UTF_8);
    Files.move(tmpCheckpointFile, checkpointFile);
    this.totalDagCount.inc();
  }

  /**
   * {@inheritDoc}
   */
  @Override
  public synchronized void cleanUp(Dag<JobExecutionPlan> dag) {
    cleanUp(DagManagerUtils.generateDagId(dag).toString());
  }

  /**
   * {@inheritDoc}
   */
  @Override
  public synchronized void cleanUp(String dagId) {
    String fileName = dagId + DAG_FILE_EXTENSION;
    // Delete the dag checkpoint file from the checkpoint directory; only decrement the gauge on success.
    File checkpointFile = new File(this.dagCheckpointDir, fileName);
    if (!checkpointFile.delete()) {
      log.error("Could not delete checkpoint file: {}", checkpointFile.getName());
    } else {
      this.totalDagCount.dec();
    }
  }

  /**
   * {@inheritDoc}
   */
  @Override
  public List<Dag<JobExecutionPlan>> getDags() throws IOException {
    List<Dag<JobExecutionPlan>> runningDags = Lists.newArrayList();
    for (File file : listDagFiles()) {
      runningDags.add(getDag(file));
    }
    return runningDags;
  }

  /**
   * {@inheritDoc}
   */
  @Override
  public Dag<JobExecutionPlan> getDag(String dagId) throws IOException {
    File file = new File(this.dagCheckpointDir, dagId + DAG_FILE_EXTENSION);
    if (!file.exists()) {
      return null;
    }
    return getDag(file);
  }

  /**
   * Return a {@link Dag} given a file name.
   * @param dagFile
   * @return the {@link Dag} associated with the dagFile.
   */
  @VisibleForTesting
  public Dag<JobExecutionPlan> getDag(File dagFile) throws IOException {
    String serializedDag = Files.toString(dagFile, Charsets.UTF_8);
    return deserializeDag(serializedDag);
  }

  /**
   * {@inheritDoc}
   */
  @Override
  public Set<String> getDagIds() {
    Set<String> dagIds = new HashSet<>();
    File[] dagFiles = new File(this.dagCheckpointDir).listFiles((dir, name) -> name.endsWith(DAG_FILE_EXTENSION));
    if (dagFiles == null) {
      // Previously an NPE when the dir was missing/unreadable; treat as "no dags" but record the problem.
      log.error("Could not list dag state store dir - {}", this.dagCheckpointDir);
      return dagIds;
    }
    for (File file : dagFiles) {
      dagIds.add(StringUtils.removeEnd(file.getName(), DAG_FILE_EXTENSION));
    }
    return dagIds;
  }

  /**
   * Lists all checkpoint files, failing explicitly (instead of NPE) when the directory cannot be read.
   */
  private File[] listDagFiles() throws IOException {
    File[] files = new File(this.dagCheckpointDir).listFiles((dir, name) -> name.endsWith(DAG_FILE_EXTENSION));
    if (files == null) {
      throw new IOException("Could not list dag state store dir - " + this.dagCheckpointDir);
    }
    return files;
  }

  /**
   * Serialize a {@link Dag<JobExecutionPlan>}.
   * @param dag A Dag parametrized by type {@link JobExecutionPlan}.
   * @return a JSON string representation of the Dag object.
   */
  private String serializeDag(Dag<JobExecutionPlan> dag) {
    List<JobExecutionPlan> jobExecutionPlanList = dag.getNodes().stream().map(Dag.DagNode::getValue).collect(Collectors.toList());
    return serDe.serialize(jobExecutionPlanList);
  }

  /**
   * De-serialize a Dag.
   * @param jsonDag A string representation of a Dag.
   * @return a {@link Dag} parametrized by {@link JobExecutionPlan}.
   */
  private Dag<JobExecutionPlan> deserializeDag(String jsonDag) {
    return new JobExecutionPlanDagFactory().createDag(serDe.deserialize(jsonDag));
  }
}
| 3,906 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.orchestration;
import java.io.IOException;
import java.util.Map;
import com.google.common.collect.ImmutableMap;
import com.typesafe.config.Config;
import lombok.AllArgsConstructor;
import lombok.Setter;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.util.ConfigUtils;
/**
* An abstract implementation of {@link UserQuotaManager} that has base implementation of checking quota and increasing/decreasing it.
*/
@Slf4j
abstract public class AbstractUserQuotaManager implements UserQuotaManager {
public static final String PER_USER_QUOTA = DagManager.DAG_MANAGER_PREFIX + "perUserQuota";
public static final String PER_FLOWGROUP_QUOTA = DagManager.DAG_MANAGER_PREFIX + "perFlowGroupQuota";
public static final String USER_JOB_QUOTA_KEY = DagManager.DAG_MANAGER_PREFIX + "defaultJobQuota";
public static final String QUOTA_SEPERATOR = ":";
public static final Integer DEFAULT_USER_JOB_QUOTA = Integer.MAX_VALUE;
private final Map<String, Integer> perUserQuota;
private final Map<String, Integer> perFlowGroupQuota;
protected MetricContext metricContext;
private final int defaultQuota;
public AbstractUserQuotaManager(Config config) {
this.metricContext = Instrumented.getMetricContext(new org.apache.gobblin.configuration.State(ConfigUtils.configToProperties(config)),
this.getClass());
this.defaultQuota = ConfigUtils.getInt(config, USER_JOB_QUOTA_KEY, DEFAULT_USER_JOB_QUOTA);
ImmutableMap.Builder<String, Integer> userMapBuilder = ImmutableMap.builder();
ImmutableMap.Builder<String, Integer> flowGroupMapBuilder = ImmutableMap.builder();
// Quotas will take form of user:<Quota> and flowGroup:<Quota>
for (String flowGroupQuota : ConfigUtils.getStringList(config, PER_FLOWGROUP_QUOTA)) {
flowGroupMapBuilder.put(flowGroupQuota.split(QUOTA_SEPERATOR)[0], Integer.parseInt(flowGroupQuota.split(QUOTA_SEPERATOR)[1]));
}
// Keep quotas per user as well in form user:<Quota> which apply for all flowgroups
for (String userQuota : ConfigUtils.getStringList(config, PER_USER_QUOTA)) {
userMapBuilder.put(userQuota.split(QUOTA_SEPERATOR)[0], Integer.parseInt(userQuota.split(QUOTA_SEPERATOR)[1]));
}
this.perUserQuota = userMapBuilder.build();
this.perFlowGroupQuota = flowGroupMapBuilder.build();
}
abstract boolean containsDagId(String dagId) throws IOException;
int getQuotaForUser(String user) {
return this.perUserQuota.getOrDefault(user, defaultQuota);
}
int getQuotaForFlowGroup(String flowGroup) {
return this.perFlowGroupQuota.getOrDefault(flowGroup, defaultQuota);
}
@Setter
@AllArgsConstructor
protected static class QuotaCheck {
boolean proxyUserCheck;
boolean requesterCheck;
boolean flowGroupCheck;
String requesterMessage;
}
// Identifies which running-job counter (user / requester / flow group) a count operation
// refers to.
protected enum CountType {
  USER_COUNT,
  REQUESTER_COUNT,
  FLOWGROUP_COUNT
}
} | 3,907 |
0 | Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules | Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/orchestration/UserQuotaManager.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.orchestration;
import java.io.IOException;
import java.util.Collection;
import org.apache.gobblin.exception.QuotaExceededException;
import org.apache.gobblin.service.modules.flowgraph.Dag;
import org.apache.gobblin.service.modules.spec.JobExecutionPlan;
/**
* Manages the statically configured user quotas for the proxy user in user.to.proxy configuration, the API requester(s)
* and the flow group.
* It is used by the {@link DagManager} to ensure that the number of currently running jobs do not exceed the quota, if
* the quota is exceeded, the execution will fail without running on the underlying executor.
*/
public interface UserQuotaManager {
  /**
   * Initializes the quota manager with the provided set of dags (e.g. dags already running
   * when the service starts) so their usage is counted against the configured quotas.
   * @param dags dags whose usage should seed the quota counters
   * @throws IOException if quota state cannot be read or persisted
   */
  void init(Collection<Dag<JobExecutionPlan>> dags) throws IOException;
  /**
   * Checks, for each given dag node, that the statically configured quotas for the proxy user,
   * the requester user(s) and the flowGroup are not exceeded, and increases the quota usage
   * for each of those entities by one per node.
   * @param dagNode the dag nodes to check and count against the quotas
   * @throws QuotaExceededException if the quota is exceeded
   * @throws IOException if quota state cannot be read or persisted
   */
  void checkQuota(Collection<Dag.DagNode<JobExecutionPlan>> dagNode) throws IOException;
  /**
   * Decrements the quota usage by one for the proxy user and requesters corresponding to the
   * provided {@link Dag.DagNode}.
   * @param dagNode the dag node whose quota usage should be released
   * @return true if the quota usage was successfully reduced
   * @throws IOException if quota state cannot be read or persisted
   */
  boolean releaseQuota(Dag.DagNode<JobExecutionPlan> dagNode) throws IOException;
}
| 3,908 |
0 | Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules | Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/db/ServiceDatabaseManager.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.db;
import java.util.Objects;
import org.flywaydb.core.Flyway;
import com.google.common.util.concurrent.AbstractIdleService;
import javax.inject.Inject;
import javax.inject.Singleton;
import lombok.extern.slf4j.Slf4j;
/**
* This class initializes and migrates the database schema for Gobblin service.
*
* We use Flyway to run the migrations that are defined in resources/org/apache/gobblin/service/db/migration
* */
@Singleton
@Slf4j
public class ServiceDatabaseManager extends AbstractIdleService {

  private final ServiceDatabaseProvider databaseProvider;

  @Inject
  public ServiceDatabaseManager(ServiceDatabaseProvider databaseProvider) {
    this.databaseProvider = Objects.requireNonNull(databaseProvider);
  }

  /**
   * Applies all pending Flyway migrations (from
   * resources/org/apache/gobblin/service/db/migration) to the service database on startup.
   */
  @Override
  protected void startUp()
      throws Exception {
    Flyway migrator = Flyway.configure()
        .locations("classpath:org/apache/gobblin/service/db/migration")
        .failOnMissingLocations(true)
        .dataSource(databaseProvider.getDatasource())
        // Existing GaaS DBs have state store tables.
        // Flyway will refuse to use such non-empty DBs by default. With baselineOnMigrate(true),
        // it should create new tables, while keeping old ones intact.
        .baselineOnMigrate(true)
        .baselineVersion("0")
        .load();
    log.info("Ensuring service database is migrated to latest schema");
    migrator.migrate();
  }

  /** No shutdown work required. */
  @Override
  protected void shutDown()
      throws Exception {
  }
}
| 3,909 |
0 | Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules | Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/db/ServiceDatabaseProvider.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.db;
import javax.sql.DataSource;
/**
* Provides access to Gobblin service database.
*
* DB schema is defined using Flyway migrations.
* */
public interface ServiceDatabaseProvider {
  /** Returns a {@link DataSource} for the Gobblin service database. */
  DataSource getDatasource();
}
| 3,910 |
0 | Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules | Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/db/ServiceDatabaseProviderImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.db;
import java.time.Duration;
import java.util.Objects;
import java.util.concurrent.atomic.AtomicInteger;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.typesafe.config.Config;
import com.zaxxer.hikari.HikariDataSource;
import javax.inject.Inject;
import javax.sql.DataSource;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Getter;
import lombok.NoArgsConstructor;
import org.apache.gobblin.util.jdbc.MysqlDataSourceUtils;
import org.apache.gobblin.password.PasswordManager;
import org.apache.gobblin.service.ServiceConfigKeys;
import org.apache.gobblin.util.ConfigUtils;
/**
 * Default {@link ServiceDatabaseProvider} backed by a lazily created HikariCP connection pool.
 */
public class ServiceDatabaseProviderImpl implements ServiceDatabaseProvider {

  private static final Logger LOG = LoggerFactory.getLogger(ServiceDatabaseProviderImpl.class);
  // Monotonic counter so each pool created by this class gets a unique, identifiable name.
  private static final AtomicInteger POOL_NUM = new AtomicInteger(0);
  private final Configuration configuration;
  // Lazily created on first use; creation is guarded by the synchronized ensureDataSource().
  private HikariDataSource dataSource;

  @Inject
  public ServiceDatabaseProviderImpl(Configuration configuration) {
    this.configuration = configuration;
  }

  /** Returns the shared pooled {@link DataSource}, creating it on first call. */
  public DataSource getDatasource() {
    ensureDataSource();
    return dataSource;
  }

  // Creates and configures the Hikari pool exactly once; subsequent calls are no-ops.
  private synchronized void ensureDataSource() {
    if (dataSource != null) {
      return;
    }
    dataSource = new HikariDataSource();
    dataSource.setPoolName("HikariPool-" + POOL_NUM.incrementAndGet() + "-" + getClass().getSimpleName());
    dataSource.setJdbcUrl(configuration.getUrl());
    dataSource.setUsername(configuration.getUserName());
    dataSource.setPassword(configuration.getPassword());
    // MySQL server can timeout a connection so need to validate connections before use
    final String validationQuery = MysqlDataSourceUtils.QUERY_CONNECTION_IS_VALID_AND_NOT_READONLY;
    LOG.info("setting `DataSource` validation query: '" + validationQuery + "'");
    // TODO: revisit following verification of successful connection pool migration:
    // If your driver supports JDBC4 we strongly recommend not setting this property. This is for "legacy" drivers
    // that do not support the JDBC4 Connection.isValid() API; see:
    // https://github.com/brettwooldridge/HikariCP#gear-configuration-knobs-baby
    dataSource.setConnectionTestQuery(validationQuery);
    dataSource.setValidationTimeout(Duration.ofSeconds(5).toMillis());
    // To improve performance, we set a maximum connection lifetime
    // If database goes to read-only mode, then connection would not work correctly for up to configured lifetime
    dataSource.setMaxLifetime(configuration.getMaxConnectionLifetime().toMillis());
    dataSource.setIdleTimeout(Duration.ofSeconds(10).toMillis());
    dataSource.setMinimumIdle(2);
    dataSource.setMaximumPoolSize(configuration.getMaxConnections());
  }

  /**
   * Database connection settings. Buildable programmatically (Lombok {@code @Builder}) or
   * injectable from a typesafe {@link Config} via the annotated constructor.
   */
  @Builder
  @AllArgsConstructor
  @Getter
  @NoArgsConstructor
  public static class Configuration {
    private String url;
    private String userName;
    private String password;
    // Negative lifetime means "no maximum" — see HikariCP maxLifetime semantics.
    @Builder.Default
    private Duration maxConnectionLifetime = Duration.ofMillis(-1);
    @Builder.Default
    private int maxConnections = 100;

    @Inject
    public Configuration(Config config) {
      // Delegate to the no-arg constructor first so @Builder.Default values are applied
      // before optional config keys override them below.
      this();
      Objects.requireNonNull(config, "Config cannot be null");
      url = config.getString(ServiceConfigKeys.SERVICE_DB_URL_KEY);
      // Password may be encrypted at rest; PasswordManager decrypts it if needed.
      PasswordManager passwordManager = PasswordManager.getInstance(ConfigUtils.configToProperties(config));
      userName = config.getString(ServiceConfigKeys.SERVICE_DB_USERNAME);
      password = passwordManager.readPassword(config.getString(ServiceConfigKeys.SERVICE_DB_PASSWORD));
      if(config.hasPath(ServiceConfigKeys.SERVICE_DB_MAX_CONNECTIONS)){
        maxConnections = config.getInt(ServiceConfigKeys.SERVICE_DB_MAX_CONNECTIONS);
      }
      if(config.hasPath(ServiceConfigKeys.SERVICE_DB_MAX_CONNECTION_LIFETIME)){
        maxConnectionLifetime = config.getDuration(ServiceConfigKeys.SERVICE_DB_MAX_CONNECTION_LIFETIME);
      }
    }
  }
}
| 3,911 |
0 | Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules | Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/restli/GobblinServiceFlowConfigV2ResourceHandlerWithWarmStandby.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.restli;
import com.google.common.base.Optional;
import com.linkedin.data.transform.DataProcessingException;
import com.linkedin.restli.common.ComplexResourceKey;
import com.linkedin.restli.common.EmptyRecord;
import com.linkedin.restli.common.HttpStatus;
import com.linkedin.restli.common.PatchRequest;
import com.linkedin.restli.server.CreateResponse;
import com.linkedin.restli.server.UpdateResponse;
import com.linkedin.restli.server.util.PatchApplier;
import java.util.Properties;
import javax.inject.Inject;
import javax.inject.Named;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.service.FlowConfig;
import org.apache.gobblin.service.FlowConfigLoggedException;
import org.apache.gobblin.service.FlowConfigResourceLocalHandler;
import org.apache.gobblin.service.FlowConfigV2ResourceLocalHandler;
import org.apache.gobblin.service.FlowId;
import org.apache.gobblin.service.modules.scheduler.GobblinServiceJobScheduler;
import org.apache.gobblin.runtime.util.InjectionNames;
import org.apache.helix.HelixManager;
/**
 * Warm-standby variant of the V2 flow config handler. Unlike the Helix-based handler, every
 * write is performed locally against the spec store; propagation to other hosts happens via
 * the spec store's change-data-capture (CDC) stream instead of Helix messages.
 */
@Slf4j
public class GobblinServiceFlowConfigV2ResourceHandlerWithWarmStandby extends GobblinServiceFlowConfigV2ResourceHandler {
  @Inject
  public GobblinServiceFlowConfigV2ResourceHandlerWithWarmStandby(@Named(InjectionNames.SERVICE_NAME) String serviceName,
      @Named(InjectionNames.FLOW_CATALOG_LOCAL_COMMIT) boolean flowCatalogLocalCommit,
      FlowConfigV2ResourceLocalHandler handler, Optional<HelixManager> manager, GobblinServiceJobScheduler jobScheduler,
      @Named(InjectionNames.FORCE_LEADER) boolean forceLeader) {
    super(serviceName, flowCatalogLocalCommit, handler, manager, jobScheduler, forceLeader);
  }

  /**
   * Deletes the flow config locally; other hosts learn of the deletion through the spec
   * store's CDC stream.
   */
  @Override
  public UpdateResponse deleteFlowConfig(FlowId flowId, Properties header)
      throws FlowConfigLoggedException {
    return this.localHandler.deleteFlowConfig(flowId, header);
  }

  /**
   * Applies the given patch to the current config and delegates to
   * {@link #updateFlowConfig(FlowId, FlowConfig, long)} with the current time as the
   * modification watermark.
   * @throws FlowConfigLoggedException with 400 status if the patch cannot be applied
   */
  @Override
  public UpdateResponse partialUpdateFlowConfig(FlowId flowId,
      PatchRequest<FlowConfig> flowConfigPatch) throws FlowConfigLoggedException {
    long modifiedWatermark = System.currentTimeMillis() / 1000;
    FlowConfig flowConfig = getFlowConfig(flowId);
    try {
      PatchApplier.applyPatch(flowConfig, flowConfigPatch);
    } catch (DataProcessingException e) {
      throw new FlowConfigLoggedException(HttpStatus.S_400_BAD_REQUEST, "Failed to apply partial update", e);
    }
    return updateFlowConfig(flowId, flowConfig, modifiedWatermark);
  }

  @Override
  public UpdateResponse updateFlowConfig(FlowId flowId,
      FlowConfig flowConfig) throws FlowConfigLoggedException {
    // We have modifiedWatermark here to avoid update config happens at the same time on different hosts overwrite each other
    // timestamp here will be treated as largest modifiedWatermark that we can update
    long version = System.currentTimeMillis() / 1000;
    return updateFlowConfig(flowId, flowConfig, version);
  }

  /**
   * Updates the flow config locally with the given modification watermark.
   * @throws FlowConfigLoggedException with 400 status if the id in {@code flowConfig} does not
   *         match {@code flowId} (flowName/flowGroup cannot be changed by an update)
   */
  public UpdateResponse updateFlowConfig(FlowId flowId,
      FlowConfig flowConfig, long modifiedWatermark) throws FlowConfigLoggedException {
    String flowName = flowId.getFlowName();
    String flowGroup = flowId.getFlowGroup();
    if (!flowGroup.equals(flowConfig.getId().getFlowGroup()) || !flowName.equals(flowConfig.getId().getFlowName())) {
      throw new FlowConfigLoggedException(HttpStatus.S_400_BAD_REQUEST,
          "flowName and flowGroup cannot be changed in update", null);
    }
    // We directly call localHandler to update flow config and put it in spec store;
    // instead of helix message, forwarding is done by the change stream of the spec store
    return this.localHandler.updateFlowConfig(flowId, flowConfig, true, modifiedWatermark);
  }

  /**
   * Adding {@link FlowConfig} calls {@link FlowConfigResourceLocalHandler#createFlowConfig(FlowConfig)}
   * directly. No matter whether this host is active or standby, the CDC stream of the spec store
   * forwards the change to other hosts.
   * @throws FlowConfigLoggedException with 400 status if the request tries to set the
   *         flow execution id, which is reserved for the service
   */
  @Override
  public CreateResponse createFlowConfig(FlowConfig flowConfig)
      throws FlowConfigLoggedException {
    if (flowConfig.getProperties().containsKey(ConfigurationKeys.FLOW_EXECUTION_ID_KEY)) {
      throw new FlowConfigLoggedException(HttpStatus.S_400_BAD_REQUEST,
          String.format("%s cannot be set by the user", ConfigurationKeys.FLOW_EXECUTION_ID_KEY), null);
    }
    // Persist locally and let the spec store's change stream propagate the new spec.
    // (Previously `response` was initialized to null and immediately reassigned — dead store.)
    CreateResponse response = this.localHandler.createFlowConfig(flowConfig, true);
    // Fall back to a generic 201 response if the local handler did not produce one.
    return response == null
        ? new CreateResponse(new ComplexResourceKey<>(flowConfig.getId(), new EmptyRecord()), HttpStatus.S_201_CREATED)
        : response;
  }
}
| 3,912 |
0 | Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules | Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/restli/GobblinServiceFlowConfigResourceHandler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.restli;
import java.io.IOException;
import java.util.Collection;
import java.util.Properties;
import java.util.UUID;
import org.apache.helix.HelixManager;
import org.apache.helix.InstanceType;
import com.google.common.base.Optional;
import com.linkedin.data.transform.DataProcessingException;
import com.linkedin.restli.common.ComplexResourceKey;
import com.linkedin.restli.common.EmptyRecord;
import com.linkedin.restli.common.HttpStatus;
import com.linkedin.restli.common.PatchRequest;
import com.linkedin.restli.server.CreateResponse;
import com.linkedin.restli.server.UpdateResponse;
import com.linkedin.restli.server.util.PatchApplier;
import javax.inject.Inject;
import javax.inject.Named;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.runtime.api.FlowSpecSearchObject;
import org.apache.gobblin.service.FlowConfig;
import org.apache.gobblin.service.FlowConfigLoggedException;
import org.apache.gobblin.service.FlowConfigResourceLocalHandler;
import org.apache.gobblin.service.FlowConfigsResourceHandler;
import org.apache.gobblin.service.FlowId;
import org.apache.gobblin.service.ServiceConfigKeys;
import org.apache.gobblin.service.modules.scheduler.GobblinServiceJobScheduler;
import org.apache.gobblin.service.modules.utils.HelixUtils;
import org.apache.gobblin.runtime.util.InjectionNames;
/**
* An HA (high available) aware {@link FlowConfigsResourceHandler} which consider if current node is Active or Standby.
* When a Standby mode detected, it will forward the rest-li request ({@link FlowConfig})
* to the Active. Otherwise it will handle it locally.
*/
@Slf4j
public class GobblinServiceFlowConfigResourceHandler implements FlowConfigsResourceHandler {
  @Getter
  private String serviceName;
  // When true, persist the change to the local FlowCatalog (FS I/O) before forwarding the
  // request to the active node, to spread I/O load across nodes.
  private boolean flowCatalogLocalCommit;
  protected FlowConfigResourceLocalHandler localHandler;
  // Present only when running in Helix (cluster) mode; absent in standalone mode.
  private Optional<HelixManager> helixManager;
  private GobblinServiceJobScheduler jobScheduler;
  // When true, write operations on non-leader nodes fail instead of being forwarded.
  private boolean forceLeader;

  @Inject
  public GobblinServiceFlowConfigResourceHandler(@Named(InjectionNames.SERVICE_NAME) String serviceName,
      @Named(InjectionNames.FLOW_CATALOG_LOCAL_COMMIT) boolean flowCatalogLocalCommit,
      FlowConfigResourceLocalHandler handler,
      Optional<HelixManager> manager,
      GobblinServiceJobScheduler jobScheduler,
      @Named(InjectionNames.FORCE_LEADER) boolean forceLeader) {
    this.flowCatalogLocalCommit = flowCatalogLocalCommit;
    this.serviceName = serviceName;
    this.localHandler = handler;
    this.helixManager = manager;
    this.jobScheduler = jobScheduler;
    this.forceLeader = forceLeader;
  }

  /** Reads are served locally on any node; no forwarding to the active node is needed. */
  @Override
  public FlowConfig getFlowConfig(FlowId flowId)
      throws FlowConfigLoggedException {
    return this.localHandler.getFlowConfig(flowId);
  }

  /** Returns all {@link FlowConfig}s matching the given search object; served locally. */
  @Override
  public Collection<FlowConfig> getFlowConfig(FlowSpecSearchObject flowSpecSearchObject) throws FlowConfigLoggedException {
    return this.localHandler.getFlowConfig(flowSpecSearchObject);
  }

  /** Returns all {@link FlowConfig}s; served locally. */
  @Override
  public Collection<FlowConfig> getAllFlowConfigs() {
    return this.localHandler.getAllFlowConfigs();
  }

  /** Returns a page of {@code count} {@link FlowConfig}s starting at {@code start}; served locally. */
  @Override
  public Collection<FlowConfig> getAllFlowConfigs(int start, int count) {
    return this.localHandler.getAllFlowConfigs(start, count);
  }

  /**
   * Adding {@link FlowConfig} should check if current node is active (master).
   * If current node is active, call {@link FlowConfigResourceLocalHandler#createFlowConfig(FlowConfig)} directly.
   * If current node is standby, forward {@link ServiceConfigKeys#HELIX_FLOWSPEC_ADD} to active. The remote active will
   * then call {@link FlowConfigResourceLocalHandler#createFlowConfig(FlowConfig)}.
   *
   * Please refer to {@link org.apache.gobblin.service.modules.core.ControllerUserDefinedMessageHandlerFactory} for remote handling.
   *
   * For better I/O load balance, user can enable {@link GobblinServiceFlowConfigResourceHandler#flowCatalogLocalCommit}.
   * The {@link FlowConfig} will be then persisted to {@link org.apache.gobblin.runtime.spec_catalog.FlowCatalog} first before it is
   * forwarded to active node (if current node is standby) for execution.
   */
  @Override
  public CreateResponse createFlowConfig(FlowConfig flowConfig)
      throws FlowConfigLoggedException {
    String flowName = flowConfig.getId().getFlowName();
    String flowGroup = flowConfig.getId().getFlowGroup();
    // The flow execution id is assigned by the service; users may not set it.
    if (flowConfig.getProperties().containsKey(ConfigurationKeys.FLOW_EXECUTION_ID_KEY)) {
      throw new FlowConfigLoggedException(HttpStatus.S_400_BAD_REQUEST,
          String.format("%s cannot be set by the user", ConfigurationKeys.FLOW_EXECUTION_ID_KEY),
          null);
    }
    checkHelixConnection(ServiceConfigKeys.HELIX_FLOWSPEC_ADD, flowName, flowGroup);
    if (forceLeader) {
      HelixUtils.throwErrorIfNotLeader(helixManager);
    }
    try {
      if (!jobScheduler.isActive() && helixManager.isPresent()) {
        CreateResponse response = null;
        if (this.flowCatalogLocalCommit) {
          // We will handle FS I/O locally for load balance before forwarding to remote node.
          response = this.localHandler.createFlowConfig(flowConfig, true);
        }
        if (!flowConfig.hasExplain() || !flowConfig.isExplain()) {
          //Forward the message to master only if it is not an "explain" request.
          forwardMessage(ServiceConfigKeys.HELIX_FLOWSPEC_ADD, FlowConfigUtils.serializeFlowConfig(flowConfig), flowName, flowGroup);
        }
        // Do actual work on remote node, directly return success
        return response == null ? new CreateResponse(new ComplexResourceKey<>(flowConfig.getId(), new EmptyRecord()),
            HttpStatus.S_201_CREATED) : response;
      } else {
        return this.localHandler.createFlowConfig(flowConfig);
      }
    } catch (IOException e) {
      throw new FlowConfigLoggedException(HttpStatus.S_500_INTERNAL_SERVER_ERROR,
          "Cannot create flowConfig [flowName=" + flowName + " flowGroup=" + flowGroup + "]", e);
    }
  }

  /**
   * Updating {@link FlowConfig} should check if current node is active (master).
   * If current node is active, call {@link FlowConfigResourceLocalHandler#updateFlowConfig(FlowId, FlowConfig)} directly.
   * If current node is standby, forward {@link ServiceConfigKeys#HELIX_FLOWSPEC_UPDATE} to active. The remote active will
   * then call {@link FlowConfigResourceLocalHandler#updateFlowConfig(FlowId, FlowConfig)}.
   *
   * Please refer to {@link org.apache.gobblin.service.modules.core.ControllerUserDefinedMessageHandlerFactory} for remote handling.
   *
   * For better I/O load balance, user can enable {@link GobblinServiceFlowConfigResourceHandler#flowCatalogLocalCommit}.
   * The {@link FlowConfig} will be then persisted to {@link org.apache.gobblin.runtime.spec_catalog.FlowCatalog} first before it is
   * forwarded to active node (if current node is standby) for execution.
   */
  @Override
  public UpdateResponse updateFlowConfig(FlowId flowId, FlowConfig flowConfig)
      throws FlowConfigLoggedException {
    String flowName = flowId.getFlowName();
    String flowGroup = flowId.getFlowGroup();
    // The identity of a flow is immutable; reject updates that try to rename it.
    if (!flowGroup.equals(flowConfig.getId().getFlowGroup()) || !flowName.equals(flowConfig.getId().getFlowName())) {
      throw new FlowConfigLoggedException(HttpStatus.S_400_BAD_REQUEST,
          "flowName and flowGroup cannot be changed in update", null);
    }
    checkHelixConnection(ServiceConfigKeys.HELIX_FLOWSPEC_UPDATE, flowName, flowGroup);
    if (forceLeader) {
      HelixUtils.throwErrorIfNotLeader(helixManager);
    }
    try {
      if (!jobScheduler.isActive() && helixManager.isPresent()) {
        if (this.flowCatalogLocalCommit) {
          // We will handle FS I/O locally for load balance before forwarding to remote node.
          this.localHandler.updateFlowConfig(flowId, flowConfig, false);
        }
        forwardMessage(ServiceConfigKeys.HELIX_FLOWSPEC_UPDATE, FlowConfigUtils.serializeFlowConfig(flowConfig), flowName, flowGroup);
        // Do actual work on remote node, directly return success
        log.info("Forwarding update flowConfig [flowName=" + flowName + " flowGroup=" + flowGroup + "]");
        return new UpdateResponse(HttpStatus.S_200_OK);
      } else {
        return this.localHandler.updateFlowConfig(flowId, flowConfig);
      }
    } catch (IOException e) {
      throw new FlowConfigLoggedException(HttpStatus.S_500_INTERNAL_SERVER_ERROR,
          "Cannot update flowConfig [flowName=" + flowName + " flowGroup=" + flowGroup + "]", e);
    }
  }

  /**
   * Applies the patch to the current config and delegates to
   * {@link #updateFlowConfig(FlowId, FlowConfig)}, which performs the active/standby handling.
   * @throws FlowConfigLoggedException with 400 status if the patch cannot be applied
   */
  @Override
  public UpdateResponse partialUpdateFlowConfig(FlowId flowId, PatchRequest<FlowConfig> flowConfigPatch) {
    FlowConfig flowConfig = getFlowConfig(flowId);
    try {
      PatchApplier.applyPatch(flowConfig, flowConfigPatch);
    } catch (DataProcessingException e) {
      throw new FlowConfigLoggedException(HttpStatus.S_400_BAD_REQUEST, "Failed to apply partial update", e);
    }
    return updateFlowConfig(flowId, flowConfig);
  }

  /**
   * Deleting {@link FlowConfig} should check if current node is active (master).
   * If current node is active, call {@link FlowConfigResourceLocalHandler#deleteFlowConfig(FlowId, Properties)} directly.
   * If current node is standby, forward {@link ServiceConfigKeys#HELIX_FLOWSPEC_REMOVE} to active. The remote active will
   * then call {@link FlowConfigResourceLocalHandler#deleteFlowConfig(FlowId, Properties)}.
   *
   * Please refer to {@link org.apache.gobblin.service.modules.core.ControllerUserDefinedMessageHandlerFactory} for remote handling.
   *
   * For better I/O load balance, user can enable {@link GobblinServiceFlowConfigResourceHandler#flowCatalogLocalCommit}.
   * The {@link FlowConfig} will be then persisted to {@link org.apache.gobblin.runtime.spec_catalog.FlowCatalog} first before it is
   * forwarded to active node (if current node is standby) for execution.
   */
  @Override
  public UpdateResponse deleteFlowConfig(FlowId flowId, Properties header)
      throws FlowConfigLoggedException {
    String flowName = flowId.getFlowName();
    String flowGroup = flowId.getFlowGroup();
    checkHelixConnection(ServiceConfigKeys.HELIX_FLOWSPEC_REMOVE, flowName, flowGroup);
    if (forceLeader) {
      HelixUtils.throwErrorIfNotLeader(helixManager);
    }
    try {
      if (!jobScheduler.isActive() && helixManager.isPresent()) {
        if (this.flowCatalogLocalCommit) {
          // We will handle FS I/O locally for load balance before forwarding to remote node.
          this.localHandler.deleteFlowConfig(flowId, header, false);
        }
        forwardMessage(ServiceConfigKeys.HELIX_FLOWSPEC_REMOVE, FlowConfigUtils.serializeFlowId(flowId), flowName, flowGroup);
        return new UpdateResponse(HttpStatus.S_200_OK);
      } else {
        return this.localHandler.deleteFlowConfig(flowId, header);
      }
    } catch (IOException e) {
      throw new FlowConfigLoggedException(HttpStatus.S_500_INTERNAL_SERVER_ERROR,
          "Cannot delete flowConfig [flowName=" + flowName + " flowGroup=" + flowGroup + "]", e);
    }
  }

  /**
   * Rejects the operation with a 500 error if this node runs in Helix mode but the Helix
   * connection is not yet established (system still initializing).
   */
  private void checkHelixConnection(String opr, String flowName, String flowGroup) throws FlowConfigLoggedException {
    if (this.helixManager.isPresent() && !this.helixManager.get().isConnected()) {
      // Specs in store will be notified when Scheduler is added as listener to FlowCatalog, so ignore
      // .. Specs if in cluster mode and Helix is not yet initialized
      log.warn("System not yet initialized. Skipping operation " + opr);
      throw new FlowConfigLoggedException(HttpStatus.S_500_INTERNAL_SERVER_ERROR,
          "System not yet initialized. Skipping " + opr + " flowConfig [flowName=" + flowName + " flowGroup=" + flowGroup + "]");
    }
  }

  // Sends a user-defined Helix message of the given subtype to the active controller.
  private void forwardMessage(String msgSubType, String val, String flowName, String flowGroup) {
    HelixUtils.sendUserDefinedMessage(msgSubType, val, UUID.randomUUID().toString(), InstanceType.CONTROLLER,
        helixManager.get(), log);
    // Fixed: the closing bracket was previously concatenated onto the flowGroup argument
    // instead of being part of the parameterized format string.
    log.info("{} Forwarding {} flowConfig [flowName={} flowGroup={}]", serviceName, msgSubType, flowName, flowGroup);
  }
}
| 3,913 |
0 | Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules | Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/restli/FlowConfigUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.restli;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.stream.Collectors;
import com.google.common.collect.Maps;
import com.linkedin.data.template.StringMap;
import com.typesafe.config.Config;
import org.apache.gobblin.service.FlowConfig;
import org.apache.gobblin.service.FlowId;
import org.apache.gobblin.service.Schedule;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.PropertiesUtils;
public class FlowConfigUtils {
// Short, hierarchical property keys used to flatten a FlowConfig into java.util.Properties
// for serialization (see serializeFlowConfig / deserializeFlowConfig below).
private static final String FLOWCONFIG = "fc";
private static final String FLOWCONFIG_ID = FLOWCONFIG + '-' + "id";
private static final String FLOWCONFIG_ID_NAME = FLOWCONFIG_ID + '-' + "name";
private static final String FLOWCONFIG_ID_GROUP = FLOWCONFIG_ID + '-' + "group";
private static final String FLOWCONFIG_SCHEDULE = FLOWCONFIG + '-' + "sch";
private static final String FLOWCONFIG_SCHEDULE_CRON = FLOWCONFIG_SCHEDULE + '-' + "cron";
private static final String FLOWCONFIG_SCHEDULE_RUN_IMMEDIATELY = FLOWCONFIG_SCHEDULE + '-' + "runImmediately";
private static final String FLOWCONFIG_TEMPLATEURIS = FLOWCONFIG + '-' + "templateUris";
/** Serializes a {@link FlowId} (name + group) into a properties-backed string. */
public static String serializeFlowId(FlowId id) throws IOException {
  Properties props = new Properties();
  props.setProperty(FLOWCONFIG_ID_NAME, id.getFlowName());
  props.setProperty(FLOWCONFIG_ID_GROUP, id.getFlowGroup());
  return PropertiesUtils.serialize(props);
}
/** Reconstructs a {@link FlowId} from a string produced by {@link #serializeFlowId(FlowId)}. */
public static FlowId deserializeFlowId(String serialized) throws IOException {
  Properties props = PropertiesUtils.deserialize(serialized);
  return new FlowId()
      .setFlowName(props.getProperty(FLOWCONFIG_ID_NAME))
      .setFlowGroup(props.getProperty(FLOWCONFIG_ID_GROUP));
}
public static String serializeFlowConfig(FlowConfig flowConfig) throws IOException {
Properties properties = new Properties();
properties.putAll(flowConfig.getProperties());
properties.setProperty(FLOWCONFIG_ID_NAME, flowConfig.getId().getFlowName());
properties.setProperty(FLOWCONFIG_ID_GROUP, flowConfig.getId().getFlowGroup());
if (flowConfig.hasSchedule()) {
properties.setProperty(FLOWCONFIG_SCHEDULE_CRON, flowConfig.getSchedule().getCronSchedule());
properties.setProperty(FLOWCONFIG_SCHEDULE_RUN_IMMEDIATELY, Boolean.toString(flowConfig.getSchedule().isRunImmediately()));
}
if (flowConfig.hasTemplateUris()) {
properties.setProperty(FLOWCONFIG_TEMPLATEURIS, flowConfig.getTemplateUris());
}
return PropertiesUtils.serialize(properties);
}
public static FlowConfig deserializeFlowConfig(String serialized) throws IOException {
Properties properties = PropertiesUtils.deserialize(serialized);
FlowConfig flowConfig = new FlowConfig().setId(new FlowId()
.setFlowName(properties.getProperty(FLOWCONFIG_ID_NAME))
.setFlowGroup(properties.getProperty(FLOWCONFIG_ID_GROUP)));
if (properties.containsKey(FLOWCONFIG_SCHEDULE_CRON)) {
flowConfig.setSchedule(new Schedule()
.setCronSchedule(properties.getProperty(FLOWCONFIG_SCHEDULE_CRON))
.setRunImmediately(Boolean.valueOf(properties.getProperty(FLOWCONFIG_SCHEDULE_RUN_IMMEDIATELY))));
}
if (properties.containsKey(FLOWCONFIG_TEMPLATEURIS)) {
flowConfig.setTemplateUris(properties.getProperty(FLOWCONFIG_TEMPLATEURIS));
}
properties.remove(FLOWCONFIG_ID_NAME);
properties.remove(FLOWCONFIG_ID_GROUP);
properties.remove(FLOWCONFIG_SCHEDULE_CRON);
properties.remove(FLOWCONFIG_SCHEDULE_RUN_IMMEDIATELY);
properties.remove(FLOWCONFIG_TEMPLATEURIS);
flowConfig.setProperties(new StringMap(Maps.fromProperties(properties)));
return flowConfig;
}
public static List<String> getDataNodes(Config flowConfig, String identifierKey, Map<String, String> dataNodeAliasMap) {
List<String> dataNodes = ConfigUtils.getStringList(flowConfig, identifierKey);
return dataNodes.stream().map(dataNode -> dataNodeAliasMap.getOrDefault(dataNode, dataNode)).collect(Collectors.toList());
}
public static String getDataNode(Config flowConfig, String identifierKey, Map<String, String> dataNodeAliasMap) {
String dataNode = ConfigUtils.getString(flowConfig, identifierKey, "");
return dataNodeAliasMap.getOrDefault(dataNode, dataNode);
}
}
| 3,914 |
0 | Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules | Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/restli/GobblinServiceFlowConfigV2ResourceHandler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.restli;
import org.apache.helix.HelixManager;
import com.google.common.base.Optional;
import javax.inject.Inject;
import javax.inject.Named;
import org.apache.gobblin.service.FlowConfigV2ResourceLocalHandler;
import org.apache.gobblin.service.FlowConfigsV2ResourceHandler;
import org.apache.gobblin.service.modules.scheduler.GobblinServiceJobScheduler;
import org.apache.gobblin.runtime.util.InjectionNames;
/**
 * V2 flavor of {@link GobblinServiceFlowConfigResourceHandler}. All behavior is inherited; this
 * subclass exists only to bind the V2 local handler ({@link FlowConfigV2ResourceLocalHandler})
 * and to implement the V2 resource-handler contract for injection purposes.
 */
public class GobblinServiceFlowConfigV2ResourceHandler extends GobblinServiceFlowConfigResourceHandler
    implements FlowConfigsV2ResourceHandler {

  @Inject
  public GobblinServiceFlowConfigV2ResourceHandler(
      @Named(InjectionNames.SERVICE_NAME) String serviceName,
      @Named(InjectionNames.FLOW_CATALOG_LOCAL_COMMIT) boolean flowCatalogLocalCommit,
      FlowConfigV2ResourceLocalHandler handler,
      Optional<HelixManager> manager,
      GobblinServiceJobScheduler jobScheduler,
      @Named(InjectionNames.FORCE_LEADER) boolean forceLeader) {
    super(serviceName, flowCatalogLocalCommit, handler, manager, jobScheduler, forceLeader);
  }
}
| 3,915 |
0 | Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules | Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/restli/GobblinServiceFlowExecutionResourceHandlerWithWarmStandby.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.restli;
import com.google.common.base.Optional;
import com.google.common.eventbus.EventBus;
import com.google.inject.Inject;
import com.linkedin.restli.common.ComplexResourceKey;
import com.linkedin.restli.common.EmptyRecord;
import com.linkedin.restli.common.HttpStatus;
import com.linkedin.restli.server.RestLiServiceException;
import com.linkedin.restli.server.UpdateResponse;
import java.io.IOException;
import java.sql.SQLException;
import javax.inject.Named;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.runtime.api.DagActionStore;
import org.apache.gobblin.runtime.util.InjectionNames;
import org.apache.gobblin.service.FlowExecutionResourceLocalHandler;
import org.apache.gobblin.service.modules.core.GobblinServiceManager;
import org.apache.helix.HelixManager;
@Slf4j
public class GobblinServiceFlowExecutionResourceHandlerWithWarmStandby extends GobblinServiceFlowExecutionResourceHandler {
  // Store through which RESUME/KILL actions are persisted; in the warm-standby architecture the
  // component consuming this store applies the action asynchronously.
  private final DagActionStore dagActionStore;

  @Inject
  public GobblinServiceFlowExecutionResourceHandlerWithWarmStandby(FlowExecutionResourceLocalHandler handler,
      @Named(GobblinServiceManager.SERVICE_EVENT_BUS_NAME) EventBus eventBus,
      Optional<HelixManager> manager, @Named(InjectionNames.FORCE_LEADER) boolean forceLeader, DagActionStore dagActionStore) {
    super(handler, eventBus, manager, forceLeader);
    this.dagActionStore = dagActionStore;
  }

  /**
   * Persists a RESUME action for the flow execution identified by {@code key}. Rejects the request
   * with 409 CONFLICT when an identical RESUME action is already pending, and maps store failures
   * to 500 INTERNAL SERVER ERROR.
   */
  @Override
  public void resume(ComplexResourceKey<org.apache.gobblin.service.FlowStatusId, EmptyRecord> key) {
    String flowGroup = key.getKey().getFlowGroup();
    String flowName = key.getKey().getFlowName();
    Long flowExecutionId = key.getKey().getFlowExecutionId();
    try {
      // If an existing resume request is still pending then do not accept this request
      if (this.dagActionStore.exists(flowGroup, flowName, flowExecutionId.toString(), DagActionStore.FlowActionType.RESUME)) {
        this.prepareError("There is already a pending RESUME action for this flow. Please wait to resubmit and wait "
            + "for action to be completed.", HttpStatus.S_409_CONFLICT);
        return;
      }
      this.dagActionStore.addDagAction(flowGroup, flowName, flowExecutionId.toString(), DagActionStore.FlowActionType.RESUME);
    } catch (IOException | SQLException e) {
      log.warn(
          String.format("Failed to add execution resume action for flow %s %s %s to dag action store due to", flowGroup,
              flowName, flowExecutionId), e);
      this.prepareError(e.getMessage(), HttpStatus.S_500_INTERNAL_SERVER_ERROR);
    }
  }

  /**
   * Maps a message and status to the matching {@link RestLiServiceException}. Always throws.
   *
   * <p>Bug fix: the 400 BAD_REQUEST branch previously discarded {@code exceptionMessage}
   * entirely, sending clients a bodiless error; the message is now attached for every status.
   */
  private void prepareError(String exceptionMessage, HttpStatus errorType) {
    if (errorType == HttpStatus.S_409_CONFLICT) {
      throw new RestLiServiceException(HttpStatus.S_409_CONFLICT, exceptionMessage);
    } else if (errorType == HttpStatus.S_400_BAD_REQUEST) {
      throw new RestLiServiceException(HttpStatus.S_400_BAD_REQUEST, exceptionMessage);
    }
    throw new RestLiServiceException(HttpStatus.S_500_INTERNAL_SERVER_ERROR, exceptionMessage);
  }

  /**
   * Persists a KILL action for the flow execution identified by {@code key}. Rejects the request
   * with 400 BAD_REQUEST when an identical KILL action is already pending.
   *
   * <p>NOTE(review): {@link #resume} answers 409 CONFLICT for the analogous duplicate case —
   * confirm whether the 400/409 asymmetry between resume and delete is intentional.
   */
  @Override
  public UpdateResponse delete(ComplexResourceKey<org.apache.gobblin.service.FlowStatusId, EmptyRecord> key) {
    String flowGroup = key.getKey().getFlowGroup();
    String flowName = key.getKey().getFlowName();
    Long flowExecutionId = key.getKey().getFlowExecutionId();
    try {
      // If an existing kill request is still pending then do not accept this request
      if (this.dagActionStore.exists(flowGroup, flowName, flowExecutionId.toString(), DagActionStore.FlowActionType.KILL)) {
        this.prepareError("There is already a pending KILL action for this flow. Please wait to resubmit and wait "
            + "for action to be completed.", HttpStatus.S_400_BAD_REQUEST);
      }
      this.dagActionStore.addDagAction(flowGroup, flowName, flowExecutionId.toString(), DagActionStore.FlowActionType.KILL);
      return new UpdateResponse(HttpStatus.S_200_OK);
    } catch (IOException | SQLException e) {
      // Bug fix: include the cause's message — the original format string ended at "due to" and
      // the exception was dropped, losing the failure reason entirely.
      this.prepareError(String.format("Failed to add execution delete action for flow %s %s %s to dag action store due to %s",
          flowGroup, flowName, flowExecutionId, e.getMessage()), HttpStatus.S_500_INTERNAL_SERVER_ERROR);
      // Unreachable at runtime (prepareError always throws); kept to satisfy the compiler.
      return new UpdateResponse(HttpStatus.S_500_INTERNAL_SERVER_ERROR);
    }
  }
}
| 3,916 |
0 | Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules | Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/restli/GobblinServiceFlowExecutionResourceHandler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.restli;
import java.util.List;
import org.apache.helix.HelixManager;
import com.google.common.base.Optional;
import com.google.common.eventbus.EventBus;
import com.linkedin.restli.common.ComplexResourceKey;
import com.linkedin.restli.common.EmptyRecord;
import com.linkedin.restli.common.HttpStatus;
import com.linkedin.restli.server.PagingContext;
import com.linkedin.restli.server.UpdateResponse;
import javax.inject.Inject;
import javax.inject.Named;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.service.FlowExecution;
import org.apache.gobblin.service.FlowExecutionResourceHandler;
import org.apache.gobblin.service.FlowExecutionResourceLocalHandler;
import org.apache.gobblin.service.FlowId;
import org.apache.gobblin.service.FlowStatusId;
import org.apache.gobblin.service.modules.core.GobblinServiceManager;
import org.apache.gobblin.service.modules.utils.HelixUtils;
import org.apache.gobblin.runtime.util.InjectionNames;
import org.apache.gobblin.service.monitoring.KillFlowEvent;
import org.apache.gobblin.service.monitoring.ResumeFlowEvent;
/**
* {@link FlowExecutionResourceHandler} that calls underlying resource handler, but does extra work that requires objects
* like the {@link HelixManager}. For now, that is just checking leadership and sending the kill through the eventBus
* for the delete method.
*/
@Slf4j
public class GobblinServiceFlowExecutionResourceHandler implements FlowExecutionResourceHandler {
  private final FlowExecutionResourceLocalHandler delegate;
  private final EventBus eventBus;
  private final Optional<HelixManager> helixManager;
  private final boolean forceLeader;

  @Inject
  public GobblinServiceFlowExecutionResourceHandler(FlowExecutionResourceLocalHandler handler,
      @Named(GobblinServiceManager.SERVICE_EVENT_BUS_NAME) EventBus eventBus,
      Optional<HelixManager> manager, @Named(InjectionNames.FORCE_LEADER) boolean forceLeader) {
    this.delegate = handler;
    this.eventBus = eventBus;
    this.helixManager = manager;
    this.forceLeader = forceLeader;
  }

  /** Guard for leader-only operations: throws when leadership is enforced and we are not leader. */
  private void verifyLeadershipIfRequired() {
    if (this.forceLeader) {
      HelixUtils.throwErrorIfNotLeader(this.helixManager);
    }
  }

  /** Fetches a single flow execution; pure delegation to the local handler. */
  @Override
  public FlowExecution get(ComplexResourceKey<FlowStatusId, EmptyRecord> key) {
    return this.delegate.get(key);
  }

  /** Lists the latest executions of one flow; pure delegation to the local handler. */
  @Override
  public List<FlowExecution> getLatestFlowExecution(PagingContext context, FlowId flowId, Integer count, String tag,
      String executionStatus, Boolean includeIssues) {
    return this.delegate.getLatestFlowExecution(context, flowId, count, tag, executionStatus, includeIssues);
  }

  /** Lists the latest executions across a flow group; pure delegation to the local handler. */
  @Override
  public List<FlowExecution> getLatestFlowGroupExecutions(PagingContext context, String flowGroup, Integer countPerFlow,
      String tag, Boolean includeIssues) {
    return this.delegate.getLatestFlowGroupExecutions(context, flowGroup, countPerFlow, tag, includeIssues);
  }

  /**
   * Requests a resume of the given flow execution by posting a {@link ResumeFlowEvent} on the
   * service event bus (leader-only when forceLeader is configured).
   */
  @Override
  public void resume(ComplexResourceKey<FlowStatusId, EmptyRecord> key) {
    FlowStatusId id = key.getKey();
    String flowGroup = id.getFlowGroup();
    String flowName = id.getFlowName();
    Long flowExecutionId = id.getFlowExecutionId();
    verifyLeadershipIfRequired();
    this.eventBus.post(new ResumeFlowEvent(flowGroup, flowName, flowExecutionId));
  }

  /**
   * Requests a kill of the given flow execution by posting a {@link KillFlowEvent} on the service
   * event bus (leader-only when forceLeader is configured); always reports 200 OK.
   */
  @Override
  public UpdateResponse delete(ComplexResourceKey<FlowStatusId, EmptyRecord> key) {
    FlowStatusId id = key.getKey();
    String flowGroup = id.getFlowGroup();
    String flowName = id.getFlowName();
    Long flowExecutionId = id.getFlowExecutionId();
    verifyLeadershipIfRequired();
    this.eventBus.post(new KillFlowEvent(flowGroup, flowName, flowExecutionId));
    return new UpdateResponse(HttpStatus.S_200_OK);
  }
}
| 3,917 |
0 | Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules | Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/flow/IdentityFlowToJobSpecCompiler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.flow;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import org.slf4j.Logger;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.typesafe.config.Config;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.runtime.api.FlowSpec;
import org.apache.gobblin.runtime.api.JobSpec;
import org.apache.gobblin.runtime.api.ServiceNode;
import org.apache.gobblin.runtime.api.Spec;
import org.apache.gobblin.runtime.api.SpecExecutor;
import org.apache.gobblin.runtime.api.TopologySpec;
import org.apache.gobblin.service.ServiceConfigKeys;
import org.apache.gobblin.service.modules.flowgraph.Dag;
import org.apache.gobblin.service.modules.spec.JobExecutionPlan;
import org.apache.gobblin.service.modules.spec.JobExecutionPlanDagFactory;
/***
* Take in a logical {@link Spec} ie flow and compile corresponding materialized job {@link Spec}
* and its mapping to {@link SpecExecutor}.
*/
@Alpha
public class IdentityFlowToJobSpecCompiler extends BaseFlowToJobSpecCompiler {
public IdentityFlowToJobSpecCompiler(Config config) {
super(config, true);
}
public IdentityFlowToJobSpecCompiler(Config config, boolean instrumentationEnabled) {
super(config, Optional.<Logger>absent(), instrumentationEnabled);
}
public IdentityFlowToJobSpecCompiler(Config config, Optional<Logger> log) {
super(config, log, true);
}
public IdentityFlowToJobSpecCompiler(Config config, Optional<Logger> log, boolean instrumentationEnabled) {
super(config, log, instrumentationEnabled);
}
@Override
public Dag<JobExecutionPlan> compileFlow(Spec spec) {
Preconditions.checkNotNull(spec);
Preconditions.checkArgument(spec instanceof FlowSpec, "IdentityFlowToJobSpecCompiler only converts FlowSpec to JobSpec");
long startTime = System.nanoTime();
FlowSpec flowSpec = (FlowSpec) spec;
String source = flowSpec.getConfig().getString(ServiceConfigKeys.FLOW_SOURCE_IDENTIFIER_KEY);
String destination = flowSpec.getConfig().getString(ServiceConfigKeys.FLOW_DESTINATION_IDENTIFIER_KEY);
log.info(String.format("Compiling flow for source: %s and destination: %s", source, destination));
JobSpec jobSpec = jobSpecGenerator(flowSpec);
Instrumented.markMeter(this.flowCompilationSuccessFulMeter);
Instrumented.updateTimer(this.flowCompilationTimer, System.nanoTime() - startTime, TimeUnit.NANOSECONDS);
List<JobExecutionPlan> jobExecutionPlans;
try {
jobExecutionPlans = getJobExecutionPlans(source, destination, jobSpec);
} catch (InterruptedException | ExecutionException e) {
Instrumented.markMeter(this.flowCompilationFailedMeter);
throw new RuntimeException("Cannot determine topology capabilities", e);
}
return new JobExecutionPlanDagFactory().createDag(jobExecutionPlans);
}
private List<JobExecutionPlan> getJobExecutionPlans(String source, String destination, JobSpec jobSpec)
throws ExecutionException, InterruptedException {
List<JobExecutionPlan> jobExecutionPlans = new ArrayList<>();
for (TopologySpec topologySpec : topologySpecMap.values()) {
Map<ServiceNode, ServiceNode> capabilities = topologySpec.getSpecExecutor().getCapabilities().get();
for (Map.Entry<ServiceNode, ServiceNode> capability : capabilities.entrySet()) {
log.info(String.format("Evaluating current JobSpec: %s against TopologySpec: %s with "
+ "capability of source: %s and destination: %s ", jobSpec.getUri(), topologySpec.getUri(),
capability.getKey(), capability.getValue()));
if (source.equals(capability.getKey().getNodeName()) && destination
.equals(capability.getValue().getNodeName())) {
JobExecutionPlan jobExecutionPlan = new JobExecutionPlan(jobSpec, topologySpec.getSpecExecutor());
log.info(String
.format("Current JobSpec: %s is executable on TopologySpec: %s. Added TopologySpec as candidate.",
jobSpec.getUri(), topologySpec.getUri()));
log.info("Since we found a candidate executor, we will not try to compute more. "
+ "(Intended limitation for IdentityFlowToJobSpecCompiler)");
jobExecutionPlans.add(jobExecutionPlan);
return jobExecutionPlans;
}
}
}
return jobExecutionPlans;
}
} | 3,918 |
0 | Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules | Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/flow/MockedSpecCompiler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.flow;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.runtime.api.FlowSpec;
import org.apache.gobblin.runtime.api.JobSpec;
import org.apache.gobblin.runtime.api.Spec;
import org.apache.gobblin.runtime.spec_executorInstance.InMemorySpecExecutor;
import org.apache.gobblin.service.modules.flowgraph.Dag;
import org.apache.gobblin.service.modules.spec.JobExecutionPlan;
import org.apache.gobblin.service.modules.spec.JobExecutionPlanDagFactory;
import org.apache.gobblin.util.ConfigUtils;
/**
* This mocked SpecCompiler class creates 3 dummy job specs to emulate flow spec compiler.
* It can also be used to compile in a certain way or not to compile at all to write negative test cases.
* It uses {@link InMemorySpecExecutor} for these dummy specs.
*/
public class MockedSpecCompiler extends IdentityFlowToJobSpecCompiler {
  private static final int NUMBER_OF_JOBS = 3;
  public static final String UNCOMPILABLE_FLOW = "uncompilableFlow";

  public MockedSpecCompiler(Config config) {
    super(config);
  }

  /**
   * Produces a dag of {@value #NUMBER_OF_JOBS} dummy job specs derived from the flow's name and
   * group, each backed by an {@link InMemorySpecExecutor}. Returns {@code null} when the flow is
   * named {@link #UNCOMPILABLE_FLOW}, emulating a compilation failure for negative tests.
   */
  @Override
  public Dag<JobExecutionPlan> compileFlow(Spec spec) {
    Properties flowProperties = ((FlowSpec) spec).getConfigAsProperties();
    String flowName = (String) flowProperties.get(ConfigurationKeys.FLOW_NAME_KEY);
    if (flowName.equalsIgnoreCase(UNCOMPILABLE_FLOW)) {
      return null;
    }
    List<JobExecutionPlan> jobExecutionPlans = new ArrayList<>();
    long flowExecutionId = System.currentTimeMillis();
    for (int i = 1; i <= NUMBER_OF_JOBS; i++) {
      String specUri = "/foo/bar/spec/" + i;
      Properties properties = new Properties();
      properties.put(ConfigurationKeys.FLOW_NAME_KEY, flowName);
      properties.put(ConfigurationKeys.FLOW_GROUP_KEY, flowProperties.get(ConfigurationKeys.FLOW_GROUP_KEY));
      properties.put(ConfigurationKeys.JOB_NAME_KEY, flowProperties.get(ConfigurationKeys.FLOW_NAME_KEY) + "_" + i);
      properties.put(ConfigurationKeys.JOB_GROUP_KEY, flowProperties.get(ConfigurationKeys.FLOW_GROUP_KEY) + "_" + i);
      // Bug fix: store the execution id as a String. Properties is a String-to-String table; the
      // previous code put a Long, which Properties#getProperty and #store silently ignore.
      properties.setProperty(ConfigurationKeys.FLOW_EXECUTION_ID_KEY, Long.toString(flowExecutionId));
      JobSpec jobSpec = JobSpec.builder(specUri)
          .withConfig(ConfigUtils.propertiesToConfig(properties))
          .withVersion("1")
          .withDescription("Spec Description")
          .build();
      jobExecutionPlans.add(new JobExecutionPlan(jobSpec, new InMemorySpecExecutor(ConfigFactory.empty())));
    }
    return new JobExecutionPlanDagFactory().createDag(jobExecutionPlans);
  }
}
| 3,919 |
0 | Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules | Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/flow/FlowEdgeContext.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.flow;
import com.typesafe.config.Config;
import lombok.AllArgsConstructor;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import org.apache.gobblin.runtime.api.SpecExecutor;
import org.apache.gobblin.service.modules.dataset.DatasetDescriptor;
import org.apache.gobblin.service.modules.flowgraph.FlowEdge;
/**
* A helper class used to maintain additional context associated with each {@link FlowEdge} during path
* computation while the edge is explored for its eligibility. The additional context includes the input
* {@link DatasetDescriptor} of this edge which is compatible with the previous {@link FlowEdge}'s output
* {@link DatasetDescriptor} (where "previous" means the immediately preceding {@link FlowEdge} visited before
* the current {@link FlowEdge}), and the corresponding output dataset descriptor of the current {@link FlowEdge}.
*/
@AllArgsConstructor
@EqualsAndHashCode(exclude = {"mergedConfig", "specExecutor"})
@Getter
public class FlowEdgeContext {
private FlowEdge edge;
private DatasetDescriptor inputDatasetDescriptor;
private DatasetDescriptor outputDatasetDescriptor;
private Config mergedConfig;
private SpecExecutor specExecutor;
@Override
public String toString() {
return edge == null ? "Null" : edge.toString();
}
} | 3,920 |
0 | Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules | Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/flow/BaseFlowToJobSpecCompiler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.flow;
import java.io.IOException;
import java.net.URI;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.apache.commons.lang3.StringUtils;
import org.apache.gobblin.runtime.spec_catalog.FlowCatalog;
import org.apache.gobblin.service.modules.orchestration.UserQuotaManager;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
import org.quartz.CronExpression;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.codahale.metrics.Meter;
import com.codahale.metrics.Timer;
import com.google.common.base.Optional;
import com.google.common.collect.Maps;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigValueFactory;
import javax.annotation.Nonnull;
import lombok.Getter;
import lombok.Setter;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.Tag;
import org.apache.gobblin.runtime.api.FlowSpec;
import org.apache.gobblin.runtime.api.JobSpec;
import org.apache.gobblin.runtime.api.JobTemplate;
import org.apache.gobblin.runtime.api.Spec;
import org.apache.gobblin.runtime.api.SpecNotFoundException;
import org.apache.gobblin.runtime.api.TopologySpec;
import org.apache.gobblin.runtime.job_catalog.FSJobCatalog;
import org.apache.gobblin.runtime.job_spec.ResolvedJobSpec;
import org.apache.gobblin.runtime.spec_catalog.AddSpecResponse;
import org.apache.gobblin.service.ServiceConfigKeys;
import org.apache.gobblin.metrics.ServiceMetricNames;
import org.apache.gobblin.service.modules.flowgraph.Dag;
import org.apache.gobblin.service.modules.spec.JobExecutionPlan;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.PropertiesUtils;
// Provide base implementation for constructing multi-hops route.
@Alpha
public abstract class BaseFlowToJobSpecCompiler implements SpecCompiler {
// Since {@link SpecCompiler} is an {@link SpecCatalogListener}, it is expected that any Spec change should be reflected
// to these data structures.
@Getter
@Setter
protected final Map<URI, TopologySpec> topologySpecMap;
protected final Config config;
protected final Logger log;
protected final Optional<FSJobCatalog> templateCatalog;
protected final MetricContext metricContext;
@Getter
protected Optional<Meter> flowCompilationSuccessFulMeter;
@Getter
protected Optional<Meter> flowCompilationFailedMeter;
@Getter
protected Optional<Timer> flowCompilationTimer;
@Getter
protected Optional<Timer> dataAuthorizationTimer;
@Getter
@Setter
protected boolean active;
private boolean warmStandbyEnabled;
private Optional<UserQuotaManager> userQuotaManager;
  /** Convenience constructor: no explicit logger, instrumentation enabled by default. */
  public BaseFlowToJobSpecCompiler(Config config){
    this(config,true);
  }
public BaseFlowToJobSpecCompiler(Config config, boolean instrumentationEnabled){
this(config, Optional.<Logger>absent(), true);
}
  /** Convenience constructor: custom logger, instrumentation enabled by default. */
  public BaseFlowToJobSpecCompiler(Config config, Optional<Logger> log){
    this(config, log,true);
  }
  /**
   * Primary constructor: sets up logging, optional instrumentation (meters/timers), the optional
   * per-user quota manager (warm-standby mode only), the concurrent topology-spec map, and the
   * optional FS-backed job-template catalog.
   *
   * @param config service configuration driving all of the above
   * @param log logger to use; falls back to a logger for the runtime class when absent
   * @param instrumentationEnabled when false, no metric context is created and all meters/timers
   *        are {@link Optional#absent()}
   * @throws RuntimeException if the template catalog path is configured but the
   *         {@link FSJobCatalog} cannot be initialized
   */
  public BaseFlowToJobSpecCompiler(Config config, Optional<Logger> log, boolean instrumentationEnabled){
    this.log = log.isPresent() ? log.get() : LoggerFactory.getLogger(getClass());
    if (instrumentationEnabled) {
      // NOTE(review): metrics are registered against IdentityFlowToJobSpecCompiler.class even for
      // other subclasses — confirm whether that is intentional or should be getClass().
      this.metricContext = Instrumented.getMetricContext(ConfigUtils.configToState(config), IdentityFlowToJobSpecCompiler.class);
      this.flowCompilationSuccessFulMeter = Optional.of(this.metricContext.meter(ServiceMetricNames.FLOW_COMPILATION_SUCCESSFUL_METER));
      this.flowCompilationFailedMeter = Optional.of(this.metricContext.meter(ServiceMetricNames.FLOW_COMPILATION_FAILED_METER));
      this.flowCompilationTimer = Optional.<Timer>of(this.metricContext.timer(ServiceMetricNames.FLOW_COMPILATION_TIMER));
      this.dataAuthorizationTimer = Optional.<Timer>of(this.metricContext.timer(ServiceMetricNames.DATA_AUTHORIZATION_TIMER));
    }
    else {
      // Instrumentation disabled: leave the metric context null and all instruments absent.
      this.metricContext = null;
      this.flowCompilationSuccessFulMeter = Optional.absent();
      this.flowCompilationFailedMeter = Optional.absent();
      this.flowCompilationTimer = Optional.absent();
      this.dataAuthorizationTimer = Optional.absent();
    }
    // The quota manager is only instantiated in warm-standby mode; class name is configurable.
    this.warmStandbyEnabled = ConfigUtils.getBoolean(config, ServiceConfigKeys.GOBBLIN_SERVICE_WARM_STANDBY_ENABLED_KEY, false);
    if (this.warmStandbyEnabled) {
      userQuotaManager = Optional.of(GobblinConstructorUtils.invokeConstructor(UserQuotaManager.class,
          ConfigUtils.getString(config, ServiceConfigKeys.QUOTA_MANAGER_CLASS, ServiceConfigKeys.DEFAULT_QUOTA_MANAGER), config));
    } else {
      userQuotaManager = Optional.absent();
    }
    // Concurrent map: topology specs are added/read from listener callbacks and compilation paths.
    this.topologySpecMap = Maps.newConcurrentMap();
    this.config = config;
    /***
     * ETL-5996
     * For multi-tenancy, the following needs to be added:
     * 1. Change singular templateCatalog to Map<URI, JobCatalogWithTemplates> to support multiple templateCatalogs
     * 2. Pick templateCatalog from JobCatalogWithTemplates based on URI, and try to resolve JobSpec using that
     */
    try {
      if (this.config.hasPath(ServiceConfigKeys.TEMPLATE_CATALOGS_FULLY_QUALIFIED_PATH_KEY)
          && StringUtils.isNotBlank(this.config.getString(ServiceConfigKeys.TEMPLATE_CATALOGS_FULLY_QUALIFIED_PATH_KEY))) {
        Config templateCatalogCfg = config
            .withValue(ConfigurationKeys.JOB_CONFIG_FILE_GENERAL_PATH_KEY,
                this.config.getValue(ServiceConfigKeys.TEMPLATE_CATALOGS_FULLY_QUALIFIED_PATH_KEY));
        this.templateCatalog = Optional.of(new FSJobCatalog(templateCatalogCfg));
      } else {
        this.templateCatalog = Optional.absent();
      }
    } catch (IOException e) {
      throw new RuntimeException("Could not initialize FlowCompiler because of "
          + "TemplateCatalog initialization failure", e);
    }
  }
@Override
public void awaitHealthy() throws InterruptedException {
//Do nothing
return;
}
/**
 * Callback invoked when a {@link TopologySpec} is added; logs its configuration and caches it
 * by URI so flow compilation can resolve spec executors.
 * @param spec the topology spec being loaded
 * @return an {@link AddSpecResponse} with a null payload (topology adds produce no dag)
 */
private synchronized AddSpecResponse onAddTopologySpec(TopologySpec spec) {
  log.info("Loading topology {}", spec.toLongString());
  // Properties.entrySet() yields Map.Entry<Object, Object>; avoid the raw Map.Entry type.
  for (Map.Entry<Object, Object> entry : spec.getConfigAsProperties().entrySet()) {
    log.info("topo: {} --> {}", entry.getKey(), entry.getValue());
  }
  topologySpecMap.put(spec.getUri(), spec);
  return new AddSpecResponse(null);
}
/**
 * Callback invoked when a {@link FlowSpec} is added. Validates the spec (URI must not collide
 * with a topology spec URI; any cron schedule must parse), compiles it to verify it is
 * compilable, and for adhoc (unscheduled, non-explain) flows checks the user quota.
 * @param flowSpec the flow spec being added
 * @return an {@link AddSpecResponse} wrapping the compiled dag's string form; null when
 *         validation fails (errors are recorded on the spec's compilation errors)
 */
private AddSpecResponse onAddFlowSpec(FlowSpec flowSpec) {
Properties flowSpecProperties = flowSpec.getConfigAsProperties();
// Reject a flow whose URI is identical to a known spec-executor (topology) URI.
if (topologySpecMap.containsKey(flowSpec.getUri())) {
log.error("flow spec URI: {} is the same as one of the spec executors uris, ignore the flow", flowSpec.getUri());
flowSpec.getCompilationErrors().add(new FlowSpec.CompilationError(0, "invalid flow spec uri " + flowSpec.getUri() + " because it is the same as one of the spec executors uri"));
return null;
}
// If a schedule is configured, validate the cron expression before accepting the spec.
if (flowSpecProperties.containsKey(ConfigurationKeys.JOB_SCHEDULE_KEY) && StringUtils.isNotBlank(
flowSpecProperties.getProperty(ConfigurationKeys.JOB_SCHEDULE_KEY))) {
try {
// Constructing the CronExpression is the validation; the instance itself is discarded.
new CronExpression(flowSpecProperties.getProperty(ConfigurationKeys.JOB_SCHEDULE_KEY));
} catch (Exception e) {
log.error("invalid cron schedule: {}", flowSpecProperties.getProperty(ConfigurationKeys.JOB_SCHEDULE_KEY), e);
flowSpec.getCompilationErrors().add(new FlowSpec.CompilationError(0, "invalid cron schedule: " + flowSpecProperties.getProperty(ConfigurationKeys.JOB_SCHEDULE_KEY) + e.getMessage()));
return null;
}
}
String response = null;
// always try to compile the flow to verify if it is compilable
Dag<JobExecutionPlan> dag = this.compileFlow(flowSpec);
// If dag is null then a compilation error has occurred
if (dag != null && !dag.isEmpty()) {
response = dag.toString();
}
// Quota is only checked for adhoc flows: compilation succeeded, a quota manager exists,
// the spec is not an explain request, and no schedule is configured.
if (FlowCatalog.isCompileSuccessful(response) && this.userQuotaManager.isPresent() && !flowSpec.isExplain() &&
!flowSpec.getConfigAsProperties().containsKey(ConfigurationKeys.JOB_SCHEDULE_KEY)) {
try {
// We only check quota for adhoc flow, since we don't have the execution id for run-immediately flow
userQuotaManager.get().checkQuota(dag.getStartNodes());
} catch (IOException e) {
throw new RuntimeException(e);
}
}
return new AddSpecResponse<>(response);
}
/**
 * Dispatches an added {@link Spec} to the flow- or topology-specific handler; unknown spec
 * types yield an {@link AddSpecResponse} with a null payload.
 */
@Override
public AddSpecResponse onAddSpec(Spec addedSpec) {
  if (addedSpec instanceof TopologySpec) {
    return onAddTopologySpec((TopologySpec) addedSpec);
  }
  if (addedSpec instanceof FlowSpec) {
    return onAddFlowSpec((FlowSpec) addedSpec);
  }
  return new AddSpecResponse(null);
}
/** Convenience overload of {@link #onDeleteSpec(URI, String, Properties)} with empty headers. */
public void onDeleteSpec(URI deletedSpecURI, String deletedSpecVersion) {
onDeleteSpec(deletedSpecURI, deletedSpecVersion, new Properties());
}
/**
 * Callback invoked when a spec is deleted from the catalog; drops the topology spec cached
 * under the given URI, if any. Deletions of non-topology specs are a no-op here.
 * @param deletedSpecURI URI of the deleted spec
 * @param deletedSpecVersion version of the deleted spec (unused)
 * @param headers request headers (unused)
 */
@Override
public synchronized void onDeleteSpec(URI deletedSpecURI, String deletedSpecVersion, Properties headers) {
  // Map.remove is a no-op for absent keys; the previous containsKey pre-check was redundant.
  topologySpecMap.remove(deletedSpecURI);
}
/**
 * Replaces the cached {@link TopologySpec} for the updated spec's URI.
 * NOTE(review): the argument is cast to TopologySpec unconditionally; a non-topology
 * {@link Spec} delivered here would throw ClassCastException — confirm callers only
 * route topology updates to this listener.
 */
@Override
public synchronized void onUpdateSpec(Spec updatedSpec) {
topologySpecMap.put(updatedSpec.getUri(), (TopologySpec) updatedSpec);
}
/**
 * Returns this compiler's metric context.
 * NOTE(review): annotated @Nonnull, but the constructor assigns metricContext = null when
 * instrumentation is disabled (see {@link #isInstrumentationEnabled()}), so the annotation
 * is not honored in that configuration — callers should be prepared for null.
 */
@Nonnull
@Override
public MetricContext getMetricContext() {
return this.metricContext;
}
/** Instrumentation is enabled exactly when a metric context was created in the constructor. */
@Override
public boolean isInstrumentationEnabled() {
  return this.metricContext != null;
}
/** This compiler generates no per-state metric tags; always returns an immutable empty list. */
@Override
public List<Tag<?>> generateTags(State state){
return Collections.emptyList();
}
/** Switching metric contexts is not supported by this compiler. */
@Override
public void switchMetricContext(List<Tag<?>> tags) {
throw new UnsupportedOperationException();
}
/** Switching metric contexts is not supported by this compiler. */
@Override
public void switchMetricContext(MetricContext context) {
throw new UnsupportedOperationException();
}
/**
 * Returns the live map of topology spec URI to {@link TopologySpec} known to this compiler.
 * Note: this exposes the internal concurrent map directly, so callers can mutate it.
 */
@Override
public Map<URI, TopologySpec> getTopologySpecMap() {
return this.topologySpecMap;
}
public abstract Dag<JobExecutionPlan> compileFlow(Spec spec);
/**
 * Naive implementation of generating a jobSpec, which fetches the first available template,
 * in an exemplified single-hop FlowCompiler implementation. The generated spec has its
 * schedule removed, job.name/job.group copied from flow.name/flow.group, and a flow
 * execution id attached for this compilation.
 * @param flowSpec the flow spec to convert
 * @return the generated {@link JobSpec}, template-resolved when both a template catalog and
 *         template URIs are available
 */
protected JobSpec jobSpecGenerator(FlowSpec flowSpec) {
JobSpec jobSpec;
JobSpec.Builder jobSpecBuilder = JobSpec.builder(jobSpecURIGenerator(flowSpec))
.withConfig(flowSpec.getConfig())
.withDescription(flowSpec.getDescription())
.withVersion(flowSpec.getVersion());
if (flowSpec.getTemplateURIs().isPresent() && templateCatalog.isPresent()) {
// Only first template uri will be honored for Identity
jobSpecBuilder = jobSpecBuilder.withTemplate(flowSpec.getTemplateURIs().get().iterator().next());
try {
jobSpec = new ResolvedJobSpec(jobSpecBuilder.build(), templateCatalog.get());
log.info("Resolved JobSpec properties are: " + PropertiesUtils.prettyPrintProperties(jobSpec.getConfigAsProperties()));
} catch (SpecNotFoundException | JobTemplate.TemplateException e) {
throw new RuntimeException("Could not resolve template in JobSpec from TemplateCatalog", e);
}
} else {
// No template catalog or no template URIs: use the flow config as-is, unresolved.
jobSpec = jobSpecBuilder.build();
log.info("Unresolved JobSpec properties are: " + jobSpec.getConfigAsProperties());
}
// Remove schedule
jobSpec.setConfig(jobSpec.getConfig().withoutPath(ConfigurationKeys.JOB_SCHEDULE_KEY));
// Add job.name and job.group
if (flowSpec.getConfig().hasPath(ConfigurationKeys.FLOW_NAME_KEY)) {
jobSpec.setConfig(jobSpec.getConfig()
.withValue(ConfigurationKeys.JOB_NAME_KEY, flowSpec.getConfig().getValue(ConfigurationKeys.FLOW_NAME_KEY)));
}
if (flowSpec.getConfig().hasPath(ConfigurationKeys.FLOW_GROUP_KEY)) {
jobSpec.setConfig(jobSpec.getConfig()
.withValue(ConfigurationKeys.JOB_GROUP_KEY, flowSpec.getConfig().getValue(ConfigurationKeys.FLOW_GROUP_KEY)));
}
// Add flow execution id for this compilation
long flowExecutionId = FlowUtils.getOrCreateFlowExecutionId(flowSpec);
jobSpec.setConfig(jobSpec.getConfig().withValue(ConfigurationKeys.FLOW_EXECUTION_ID_KEY,
ConfigValueFactory.fromAnyRef(flowExecutionId)));
// Reset properties in Spec from Config
jobSpec.setConfigAsProperties(ConfigUtils.configToProperties(jobSpec.getConfig()));
return jobSpec;
}
/**
 * Generates a unique URI for a {@link JobSpec}; it can receive a variable number of parameters,
 * and the implementation is flowSpecCompiler dependent. This base implementation simply returns
 * the URI of the {@link FlowSpec} passed as the first argument.
 * The returned URI should have the job name at the third place when split by "/",
 * e.g. /flowGroup/flowName
 * /flowGroup/flowName/sourceNode-targetNode
 * SafeDatasetCommit creates a state store using this name and
 * {@link org.apache.gobblin.runtime.job_monitor.KafkaJobMonitor} extracts the job name to find
 * the state store path.
 * @param objects varargs whose first element must be a {@link FlowSpec}
 * @return the URI of the flow spec
 */
public URI jobSpecURIGenerator(Object... objects) {
return ((FlowSpec)objects[0]).getUri();
}
/**
 * Returns the template URI for a job; only the first template URI is honored for Identity.
 * This method can be overridden by derived FlowToJobSpecCompiler classes.
 * NOTE(review): calls {@code getTemplateURIs().get()} without an isPresent() check — throws
 * if the flow spec has no template URIs; callers are expected to guard (as jobSpecGenerator does).
 * @param flowSpec the flow spec whose template to select
 * @return the first template URI of the flow spec
 */
protected URI jobSpecTemplateURIGenerator(FlowSpec flowSpec) {
// For now only first template uri will be honored for Identity
return flowSpec.getTemplateURIs().get().iterator().next();
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.flow;
import com.typesafe.config.Config;
import org.apache.gobblin.runtime.api.FlowSpec;
import org.apache.gobblin.service.modules.flowgraph.DataNode;
/**
* {@link DataMovementAuthorizer} that always returns true.
*/
public class NoopDataMovementAuthorizer implements DataMovementAuthorizer {
public NoopDataMovementAuthorizer(Config config) {}
public boolean isMovementAuthorized(FlowSpec flowSpec, DataNode sourceNode, DataNode destNode) {
return true;
}
} | 3,922 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.flow;
import org.apache.gobblin.runtime.api.FlowSpec;
import org.apache.gobblin.service.modules.flowgraph.DataNode;
/**
* Class that is called each time a flow is compiled to decide whether the data movement is authorized or not.
*/
public interface DataMovementAuthorizer {
/**
* Return true if the data movement is authorized given the flowspec and source/destination data node.
*/
public boolean isMovementAuthorized(FlowSpec flowSpec, DataNode sourceNode, DataNode destNode);
} | 3,923 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.flow;
import java.net.URI;
import java.util.Map;
import org.apache.gobblin.instrumented.Instrumentable;
import org.apache.gobblin.runtime.api.Spec;
import org.apache.gobblin.runtime.api.SpecCatalogListener;
import org.apache.gobblin.runtime.api.SpecExecutor;
import org.apache.gobblin.runtime.api.TopologySpec;
import org.apache.gobblin.service.modules.flowgraph.Dag;
import org.apache.gobblin.service.modules.spec.JobExecutionPlan;
/***
* Take in a logical {@link Spec} and compile corresponding materialized {@link Spec}s
* and the mapping to {@link SpecExecutor} that they can be run on.
*/
public interface SpecCompiler extends SpecCatalogListener, Instrumentable {
/***
* Take in a logical {@link Spec} and compile corresponding materialized {@link Spec}s
* and the mapping to {@link SpecExecutor} that they can be run on.
* All the specs generated from the compileFlow must have a
* {@value org.apache.gobblin.configuration.ConfigurationKeys#FLOW_EXECUTION_ID_KEY}
* @param spec {@link Spec} to compile.
* @return Map of materialized physical {@link Spec} and {@link SpecExecutor}.
*/
Dag<JobExecutionPlan> compileFlow(Spec spec);
/***
* Map of {@link Spec} URI and {@link TopologySpec} the {@link SpecCompiler}
* is aware about.
* @return Map of {@link Spec} URI and {@link TopologySpec}
*/
Map<URI, TopologySpec> getTopologySpecMap();
/**
* Mark the {@link SpecCompiler} active/inactive. Useful to trigger the initialization of {@link SpecCompiler}, if
* necessary, before it can start compiling {@link org.apache.gobblin.runtime.api.FlowSpec}s.
* @param active
*/
void setActive(boolean active);
/**
* Waits for the {@link SpecCompiler} to become healthy. A {@link SpecCompiler} is healthy when all the component
* services it depends on have been successfully initialized. For instance, the {@link MultiHopFlowCompiler} is healthy
* when the {@link org.apache.gobblin.service.modules.flowgraph.DataNode}s and {@link org.apache.gobblin.service.modules.flowgraph.FlowEdge}s
* can be added to the {@link org.apache.gobblin.service.modules.flowgraph.FlowGraph}. The {@link org.apache.gobblin.service.modules.flowgraph.FlowEdge}
* instantiation in turn depends on the successful initialization of {@link org.apache.gobblin.runtime.spec_catalog.TopologyCatalog}, which
* instantiates all the configured {@link SpecExecutor}s.
*/
public void awaitHealthy() throws InterruptedException;
} | 3,924 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.flow;
import com.google.common.collect.Maps;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.hadoop.fs.Path;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Joiner;
import com.google.common.base.Optional;
import com.google.common.io.Files;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigValueFactory;
import lombok.Getter;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.runtime.api.FlowSpec;
import org.apache.gobblin.runtime.api.JobSpec;
import org.apache.gobblin.runtime.api.JobTemplate;
import org.apache.gobblin.runtime.api.SpecExecutor;
import org.apache.gobblin.runtime.api.SpecNotFoundException;
import org.apache.gobblin.service.modules.dataset.DatasetDescriptor;
import org.apache.gobblin.service.modules.flowgraph.Dag;
import org.apache.gobblin.service.modules.flowgraph.Dag.DagNode;
import org.apache.gobblin.service.modules.flowgraph.FlowEdge;
import org.apache.gobblin.service.modules.spec.JobExecutionPlan;
import org.apache.gobblin.service.modules.spec.JobExecutionPlanDagFactory;
import org.apache.gobblin.service.modules.template.FlowTemplate;
import org.apache.gobblin.util.ConfigUtils;
/**
* A class that encapsulates a path in the {@link org.apache.gobblin.service.modules.flowgraph.FlowGraph}.
*/
public class FlowGraphPath {
  @Getter
  private List<List<FlowEdgeContext>> paths;
  private final FlowSpec flowSpec;
  private final Long flowExecutionId;

  public FlowGraphPath(FlowSpec flowSpec, Long flowExecutionId) {
    this.flowSpec = flowSpec;
    this.flowExecutionId = flowExecutionId;
  }

  /** Adds a path (an ordered list of {@link FlowEdgeContext}s), lazily creating the backing list. */
  public void addPath(List<FlowEdgeContext> path) {
    if (this.paths == null) {
      this.paths = new ArrayList<>();
    }
    this.paths.add(path);
  }

  /**
   * A method to convert a path of {@link FlowEdgeContext}s into a {@link Dag<JobExecutionPlan>}.
   * Each path is converted hop-by-hop into a dag and concatenated; dags of distinct paths are merged.
   * @param sysConfig containing environment config (e.g. metric/tracking event config) to be added to each {@link JobSpec}.
   * @return a {@link Dag<JobExecutionPlan>}
   * @throws SpecNotFoundException
   * @throws JobTemplate.TemplateException
   * @throws URISyntaxException
   */
  public Dag<JobExecutionPlan> asDag(Config sysConfig) throws SpecNotFoundException, JobTemplate.TemplateException, URISyntaxException {
    Dag<JobExecutionPlan> flowDag = new Dag<>(new ArrayList<>());
    for (List<FlowEdgeContext> path : paths) {
      Dag<JobExecutionPlan> pathDag = new Dag<>(new ArrayList<>());
      Iterator<FlowEdgeContext> pathIterator = path.iterator();
      while (pathIterator.hasNext()) {
        Dag<JobExecutionPlan> flowEdgeDag = convertHopToDag(pathIterator.next(), sysConfig);
        pathDag = concatenate(pathDag, flowEdgeDag);
      }
      flowDag = flowDag.merge(pathDag);
    }
    return flowDag;
  }

  /**
   * Concatenate two {@link Dag}s. Modify the {@link ConfigurationKeys#JOB_DEPENDENCIES} in the {@link JobSpec}s of the child
   * {@link Dag} to reflect the concatenation operation.
   * @param dagLeft The parent dag.
   * @param dagRight The child dag.
   * @return The concatenated dag with modified {@link ConfigurationKeys#JOB_DEPENDENCIES}.
   */
  @VisibleForTesting
  static Dag<JobExecutionPlan> concatenate(Dag<JobExecutionPlan> dagLeft, Dag<JobExecutionPlan> dagRight) {
    //Compute the fork nodes - set of nodes with no dependents in the concatenated dag.
    Set<DagNode<JobExecutionPlan>> forkNodes = dagLeft.getEndNodes().stream()
        .filter(FlowGraphPath::isNodeForkable).collect(Collectors.toSet());
    Set<DagNode<JobExecutionPlan>> dependencyNodes = dagLeft.getDependencyNodes(forkNodes);
    if (!dependencyNodes.isEmpty()) {
      List<String> dependenciesList = dependencyNodes.stream()
          .map(dagNode -> dagNode.getValue().getJobSpec().getConfig().getString(ConfigurationKeys.JOB_NAME_KEY))
          .collect(Collectors.toList());
      String dependencies = Joiner.on(",").join(dependenciesList);
      // Every start node of the child dag depends on all dependency nodes of the parent dag.
      for (DagNode<JobExecutionPlan> childNode : dagRight.getStartNodes()) {
        JobSpec jobSpec = childNode.getValue().getJobSpec();
        jobSpec.setConfig(jobSpec.getConfig().withValue(ConfigurationKeys.JOB_DEPENDENCIES,
            ConfigValueFactory.fromAnyRef(dependencies)));
      }
    }
    return dagLeft.concatenate(dagRight, forkNodes);
  }

  /** Returns true if the node's job config opts into forking on concatenation. */
  private static boolean isNodeForkable(DagNode<JobExecutionPlan> dagNode) {
    Config jobConfig = dagNode.getValue().getJobSpec().getConfig();
    return ConfigUtils.getBoolean(jobConfig, ConfigurationKeys.JOB_FORK_ON_CONCAT, false);
  }

  /**
   * Given an instance of {@link FlowEdge}, this method returns a {@link Dag < JobExecutionPlan >} that moves data
   * from the source of the {@link FlowEdge} to the destination of the {@link FlowEdge}.
   * @param flowEdgeContext an instance of {@link FlowEdgeContext}.
   * @param sysConfig environment config.
   * @return a {@link Dag} of {@link JobExecutionPlan}s associated with the {@link FlowEdge}.
   */
  private Dag<JobExecutionPlan> convertHopToDag(FlowEdgeContext flowEdgeContext, Config sysConfig)
      throws SpecNotFoundException, JobTemplate.TemplateException, URISyntaxException {
    FlowTemplate flowTemplate = flowEdgeContext.getEdge().getFlowTemplate();
    DatasetDescriptor inputDatasetDescriptor = flowEdgeContext.getInputDatasetDescriptor();
    DatasetDescriptor outputDatasetDescriptor = flowEdgeContext.getOutputDatasetDescriptor();
    Config mergedConfig = flowEdgeContext.getMergedConfig();
    SpecExecutor specExecutor = flowEdgeContext.getSpecExecutor();
    //Get resolved job configs from the flow template
    List<Config> resolvedJobConfigs = flowTemplate.getResolvedJobConfigs(mergedConfig, inputDatasetDescriptor, outputDatasetDescriptor);
    List<JobExecutionPlan> jobExecutionPlans = new ArrayList<>(resolvedJobConfigs.size());
    Map<String, String> templateToJobNameMap = Maps.newHashMapWithExpectedSize(resolvedJobConfigs.size());
    //Iterate over each resolved job config and convert the config to a JobSpec.
    for (Config resolvedJobConfig : resolvedJobConfigs) {
      JobExecutionPlan jobExecutionPlan = new JobExecutionPlan.Factory().createPlan(flowSpec, resolvedJobConfig, specExecutor, flowExecutionId, sysConfig);
      jobExecutionPlans.add(jobExecutionPlan);
      templateToJobNameMap.put(getJobTemplateName(jobExecutionPlan), jobExecutionPlan.getJobSpec().getConfig().getString(
          ConfigurationKeys.JOB_NAME_KEY));
    }
    updateJobDependencies(jobExecutionPlans, templateToJobNameMap);
    return new JobExecutionPlanDagFactory().createDag(jobExecutionPlans);
  }

  /**
   * The job template name is derived from the {@link org.apache.gobblin.runtime.api.JobTemplate} URI. It is the
   * simple name of the path component of the URI.
   * @param jobExecutionPlan
   * @return the simple name of the job template from the URI of its path, or null when the spec has no template URI.
   */
  private static String getJobTemplateName(JobExecutionPlan jobExecutionPlan) {
    Optional<URI> jobTemplateUri = jobExecutionPlan.getJobSpec().getTemplateURI();
    if (jobTemplateUri.isPresent()) {
      return Files.getNameWithoutExtension(new Path(jobTemplateUri.get()).getName());
    } else {
      return null;
    }
  }

  /**
   * A method to modify the {@link ConfigurationKeys#JOB_DEPENDENCIES} specified in a {@link JobTemplate} to those
   * which are usable in a {@link JobSpec}.
   * The {@link ConfigurationKeys#JOB_DEPENDENCIES} specified in a JobTemplate use the JobTemplate names
   * (i.e. the file names of the templates without the extension). However, the same {@link FlowTemplate} may be used
   * across multiple {@link FlowEdge}s. To ensure that we capture dependencies between jobs correctly as Dags from
   * successive hops are merged, we translate the {@link JobTemplate} name specified in the dependencies config to
   * {@link ConfigurationKeys#JOB_NAME_KEY} from the corresponding {@link JobSpec}, which is guaranteed to be globally unique.
   * For example, consider a {@link JobTemplate} with URI job1.job which has "job.dependencies=job2,job3" (where job2.job and job3.job are
   * URIs of other {@link JobTemplate}s). Also, let the job.name config for the three jobs (after {@link JobSpec} is compiled) be as follows:
   * "job.name=flowgrp1_flowName1_jobName1_1111", "job.name=flowgrp1_flowName1_jobName2_1121", and "job.name=flowgrp1_flowName1_jobName3_1131". Then,
   * for job1, this method will set "job.dependencies=flowgrp1_flowName1_jobName2_1121, flowgrp1_flowName1_jobName3_1131".
   * @param jobExecutionPlans a list of {@link JobExecutionPlan}s
   * @param templateToJobNameMap a HashMap that has the mapping from the {@link JobTemplate} names to job.name in corresponding
   * {@link JobSpec}
   */
  private void updateJobDependencies(List<JobExecutionPlan> jobExecutionPlans, Map<String, String> templateToJobNameMap) {
    for (JobExecutionPlan jobExecutionPlan : jobExecutionPlans) {
      JobSpec jobSpec = jobExecutionPlan.getJobSpec();
      if (jobSpec.getConfig().hasPath(ConfigurationKeys.JOB_DEPENDENCIES)) {
        List<String> jobDependencies = ConfigUtils.getStringList(jobSpec.getConfig(), ConfigurationKeys.JOB_DEPENDENCIES);
        List<String> updatedDependenciesList = new ArrayList<>(jobDependencies.size());
        for (String dependency : jobDependencies) {
          if (!templateToJobNameMap.containsKey(dependency)) {
            //We should never hit this condition. The logic here is a safety check.
            throw new RuntimeException("TemplateToJobNameMap does not contain dependency " + dependency);
          }
          updatedDependenciesList.add(templateToJobNameMap.get(dependency));
        }
        String updatedDependencies = Joiner.on(",").join(updatedDependenciesList);
        jobSpec.setConfig(jobSpec.getConfig().withValue(ConfigurationKeys.JOB_DEPENDENCIES, ConfigValueFactory.fromAnyRef(updatedDependencies)));
      }
    }
  }
}
| 3,925 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.flow;
import java.lang.reflect.InvocationTargetException;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.stream.Collectors;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.reflect.ConstructorUtils;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import com.google.common.base.Joiner;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Splitter;
import com.google.common.base.Throwables;
import com.google.common.collect.Lists;
import com.google.common.util.concurrent.ServiceManager;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigValueFactory;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.runtime.api.FlowSpec;
import org.apache.gobblin.runtime.api.JobTemplate;
import org.apache.gobblin.runtime.api.Spec;
import org.apache.gobblin.runtime.api.SpecExecutor;
import org.apache.gobblin.runtime.api.SpecNotFoundException;
import org.apache.gobblin.service.ServiceConfigKeys;
import org.apache.gobblin.service.modules.flowgraph.BaseFlowGraph;
import org.apache.gobblin.service.modules.flowgraph.Dag;
import org.apache.gobblin.service.modules.flowgraph.DataNode;
import org.apache.gobblin.service.modules.flowgraph.DatasetDescriptorConfigKeys;
import org.apache.gobblin.service.modules.flowgraph.FlowGraph;
import org.apache.gobblin.service.modules.flowgraph.FlowGraphMonitor;
import org.apache.gobblin.service.modules.flowgraph.pathfinder.PathFinder;
import org.apache.gobblin.service.modules.restli.FlowConfigUtils;
import org.apache.gobblin.service.modules.spec.JobExecutionPlan;
import org.apache.gobblin.service.modules.template_catalog.ObservingFSFlowEdgeTemplateCatalog;
import org.apache.gobblin.service.modules.template_catalog.UpdatableFSFlowTemplateCatalog;
import org.apache.gobblin.service.monitoring.GitFlowGraphMonitor;
import org.apache.gobblin.util.ClassAliasResolver;
import org.apache.gobblin.util.ConfigUtils;
/***
* Take in a logical {@link Spec} ie flow and compile corresponding materialized job {@link Spec}
* and its mapping to {@link SpecExecutor}.
*/
@Alpha
@Slf4j
public class MultiHopFlowCompiler extends BaseFlowToJobSpecCompiler {
private AtomicReference<FlowGraph> flowGraph;
@Getter
private ServiceManager serviceManager;
@Getter
private CountDownLatch initComplete = new CountDownLatch(1);
private FlowGraphMonitor flowGraphMonitor;
private ReadWriteLock rwLock = new ReentrantReadWriteLock(true);
private DataMovementAuthorizer dataMovementAuthorizer;
private Map<String, String> dataNodeAliasMap = new HashMap<>();
// a map to hold aliases of data nodes, e.g. gobblin.service.datanode.aliases.map=node1-dev:node1,node1-stg:node1,node1-prod:node1
public static final String DATA_NODE_ID_TO_ALIAS_MAP = ServiceConfigKeys.GOBBLIN_SERVICE_PREFIX + "datanode.aliases.map";
/** Creates an instrumented compiler with no explicit logger. */
public MultiHopFlowCompiler(Config config) {
this(config, true);
}
/** Creates a compiler with no explicit logger and the given instrumentation setting. */
public MultiHopFlowCompiler(Config config, boolean instrumentationEnabled) {
this(config, Optional.<Logger>absent(), instrumentationEnabled);
}
/** Creates an instrumented compiler with the supplied logger. */
public MultiHopFlowCompiler(Config config, Optional<Logger> log) {
this(config, log, true);
}
/**
 * Creates a compiler over an externally managed {@link FlowGraph} (no monitor or catalog is
 * started on this path) with a no-op data movement authorizer.
 */
public MultiHopFlowCompiler(Config config, AtomicReference<FlowGraph> flowGraph) {
super(config, Optional.absent(), true);
this.flowGraph = flowGraph;
this.dataMovementAuthorizer = new NoopDataMovementAuthorizer(config);
}
/**
 * Primary constructor: parses the data node alias map, builds an empty {@link BaseFlowGraph},
 * instantiates the flow template catalog, {@link DataMovementAuthorizer} and
 * {@link FlowGraphMonitor}, and starts them via a {@link ServiceManager}.
 * Returns early (leaving the monitor and service manager unset) when no template catalog path
 * is configured.
 * @param config service configuration
 * @param log optional logger
 * @param instrumentationEnabled whether metric instrumentation is enabled
 */
public MultiHopFlowCompiler(Config config, Optional<Logger> log, boolean instrumentationEnabled) {
  super(config, log, instrumentationEnabled);
  try {
    this.dataNodeAliasMap = config.hasPath(DATA_NODE_ID_TO_ALIAS_MAP)
        ? Splitter.on(",").withKeyValueSeparator(":").split(config.getString(DATA_NODE_ID_TO_ALIAS_MAP))
        : new HashMap<>();
  } catch (RuntimeException e) {
    // A malformed alias map must not prevent startup; keep the default empty map.
    MultiHopFlowCompiler.log.warn("Exception reading data node alias map, ignoring it.", e);
  }
  // Use atomic reference to avoid partial flowgraph upgrades during path compilation.
  this.flowGraph = new AtomicReference<>(new BaseFlowGraph(dataNodeAliasMap));
  Optional<? extends UpdatableFSFlowTemplateCatalog> flowTemplateCatalog;
  if (config.hasPath(ServiceConfigKeys.TEMPLATE_CATALOGS_FULLY_QUALIFIED_PATH_KEY)
      && StringUtils.isNotBlank(config.getString(ServiceConfigKeys.TEMPLATE_CATALOGS_FULLY_QUALIFIED_PATH_KEY))) {
    String flowTemplateCatalogClassName = ConfigUtils.getString(this.config, ServiceConfigKeys.TEMPLATE_CATALOGS_CLASS_KEY, ObservingFSFlowEdgeTemplateCatalog.class.getCanonicalName());
    try {
      flowTemplateCatalog = Optional.of(
          (UpdatableFSFlowTemplateCatalog) ConstructorUtils.invokeConstructor(Class.forName(new ClassAliasResolver<>(UpdatableFSFlowTemplateCatalog.class)
              .resolve(flowTemplateCatalogClassName)), config, rwLock));
    } catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException | InstantiationException | ClassNotFoundException e) {
      // Name the catalog class that failed (previously this reported the compiler's own class name).
      throw new RuntimeException("Cannot instantiate flow template catalog " + flowTemplateCatalogClassName, e);
    }
  } else {
    // No template catalog configured: nothing to monitor, so skip monitor/service startup.
    return;
  }
  Config gitFlowGraphConfig = this.config;
  if (this.config.hasPath(ConfigurationKeys.ENCRYPT_KEY_LOC)) {
    //Add encrypt.key.loc config to the config passed to GitFlowGraphMonitor
    gitFlowGraphConfig = this.config
        .withValue(GitFlowGraphMonitor.GIT_FLOWGRAPH_MONITOR_PREFIX + "." + ConfigurationKeys.ENCRYPT_KEY_LOC, config.getValue(ConfigurationKeys.ENCRYPT_KEY_LOC));
  }
  try {
    String dataMovementAuthorizerClassName = ConfigUtils.getString(this.config, ServiceConfigKeys.DATA_MOVEMENT_AUTHORIZER_CLASS,
        NoopDataMovementAuthorizer.class.getCanonicalName());
    this.dataMovementAuthorizer = (DataMovementAuthorizer) ConstructorUtils.invokeConstructor(Class.forName(new ClassAliasResolver<>(DataMovementAuthorizer.class).resolve(dataMovementAuthorizerClassName)), this.config);
  } catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException | InstantiationException | ClassNotFoundException e) {
    throw new RuntimeException(e);
  }
  try {
    String flowGraphMonitorClassName = ConfigUtils.getString(this.config, ServiceConfigKeys.GOBBLIN_SERVICE_FLOWGRAPH_CLASS_KEY, GitFlowGraphMonitor.class.getCanonicalName());
    this.flowGraphMonitor = (FlowGraphMonitor) ConstructorUtils.invokeConstructor(Class.forName(new ClassAliasResolver<>(FlowGraphMonitor.class).resolve(
        flowGraphMonitorClassName)), gitFlowGraphConfig, flowTemplateCatalog, this, this.topologySpecMap, this.getInitComplete(), instrumentationEnabled);
  } catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException | InstantiationException | ClassNotFoundException e) {
    throw new RuntimeException(e);
  }
  // An observing catalog is itself a service and must be managed alongside the monitor.
  this.serviceManager = (flowTemplateCatalog.isPresent() && flowTemplateCatalog.get() instanceof ObservingFSFlowEdgeTemplateCatalog) ?
      new ServiceManager(Lists.newArrayList(this.flowGraphMonitor, flowTemplateCatalog.get())) : new ServiceManager(Lists.newArrayList(this.flowGraphMonitor));
  addShutdownHook();
  //Start the git flow graph monitor
  try {
    this.serviceManager.startAsync().awaitHealthy(5, TimeUnit.SECONDS);
  } catch (TimeoutException te) {
    MultiHopFlowCompiler.log.error("Timed out while waiting for the service manager to start up", te);
    throw new RuntimeException(te);
  }
}
/**
* Mark the {@link SpecCompiler} as active. This in turn activates the {@link GitFlowGraphMonitor}, allowing to start polling
* and processing changes
* @param active
*/
@Override
public void setActive(boolean active) {
super.setActive(active);
if (this.flowGraphMonitor != null) {
this.flowGraphMonitor.setActive(active);
}
}
@Override
public void awaitHealthy() throws InterruptedException {
if (this.getInitComplete().getCount() > 0) {
log.info("Waiting for the MultiHopFlowCompiler to become healthy..");
this.getInitComplete().await();
log.info("The MultihopFlowCompiler is healthy and ready to orchestrate flows.");
}
return;
}
/**
* j
* @param spec an instance of {@link FlowSpec}.
* @return A DAG of {@link JobExecutionPlan}s, which encapsulates the compiled {@link org.apache.gobblin.runtime.api.JobSpec}s
* together with the {@link SpecExecutor} where the job can be executed.
*/
@Override
public Dag<JobExecutionPlan> compileFlow(Spec spec) {
Preconditions.checkNotNull(spec);
Preconditions.checkArgument(spec instanceof FlowSpec, "MultiHopFlowCompiler only accepts FlowSpecs");
FlowGraph graph = this.flowGraph.get();
long startTime = System.nanoTime();
FlowSpec flowSpec = (FlowSpec) spec;
String source = FlowConfigUtils.getDataNode(flowSpec.getConfig(), ServiceConfigKeys.FLOW_SOURCE_IDENTIFIER_KEY, this.dataNodeAliasMap);
String destination = FlowConfigUtils.getDataNode(flowSpec.getConfig(), ServiceConfigKeys.FLOW_DESTINATION_IDENTIFIER_KEY, this.dataNodeAliasMap);
DataNode sourceNode = graph.getNode(source);
if (sourceNode == null) {
flowSpec.addCompilationError(source, destination, String.format("Flowgraph does not have a node with id %s", source));
return null;
}
List<String> destNodeIds = FlowConfigUtils.getDataNodes(flowSpec.getConfig(), ServiceConfigKeys.FLOW_DESTINATION_IDENTIFIER_KEY, this.dataNodeAliasMap);
List<DataNode> destNodes = destNodeIds.stream().map(graph::getNode).collect(Collectors.toList());
if (destNodes.contains(null)) {
flowSpec.addCompilationError(source, destination, String.format("Flowgraph does not have a node with id %s", destNodeIds.get(destNodes.indexOf(null))));
return null;
}
log.info(String.format("Compiling flow for source: %s and destination: %s", source, destination));
List<FlowSpec> flowSpecs = splitFlowSpec(flowSpec);
Dag<JobExecutionPlan> jobExecutionPlanDag = new Dag<>(new ArrayList<>());
try {
this.rwLock.readLock().lock();
for (FlowSpec datasetFlowSpec : flowSpecs) {
for (DataNode destNode : destNodes) {
long authStartTime = System.nanoTime();
try {
boolean authorized = this.dataMovementAuthorizer.isMovementAuthorized(flowSpec, sourceNode, destNode);
Instrumented.updateTimer(dataAuthorizationTimer, System.nanoTime() - authStartTime, TimeUnit.NANOSECONDS);
if (!authorized) {
String message = String.format("Data movement is not authorized for flow: %s, source: %s, destination: %s",
flowSpec.getUri().toString(), source, destination);
log.error(message);
datasetFlowSpec.addCompilationError(source, destination, message);
return null;
}
} catch (Exception e) {
Instrumented.markMeter(flowCompilationFailedMeter);
datasetFlowSpec.addCompilationError(source, destination, Throwables.getStackTraceAsString(e));
return null;
}
}
//Compute the path from source to destination.
FlowGraphPath flowGraphPath = graph.findPath(datasetFlowSpec);
if (flowGraphPath != null) {
//Convert the path into a Dag of JobExecutionPlans.
jobExecutionPlanDag = jobExecutionPlanDag.merge(flowGraphPath.asDag(this.config));
}
}
if (jobExecutionPlanDag.isEmpty()) {
Instrumented.markMeter(flowCompilationFailedMeter);
String message = String.format("No path found from source: %s and destination: %s", source, destination);
log.info(message);
if (!flowSpec.getCompilationErrors().stream().anyMatch(compilationError -> compilationError.errorPriority == 0)) {
flowSpec.addCompilationError(source, destination, message);
}
return null;
}
} catch (PathFinder.PathFinderException | SpecNotFoundException | JobTemplate.TemplateException | URISyntaxException | ReflectiveOperationException e) {
Instrumented.markMeter(flowCompilationFailedMeter);
String message = String.format("Exception encountered while compiling flow for source: %s and destination: %s, %s", source, destination, Throwables.getStackTraceAsString(e));
log.error(message, e);
flowSpec.addCompilationError(source, destination, message);
return null;
} finally {
this.rwLock.readLock().unlock();
}
Instrumented.markMeter(flowCompilationSuccessFulMeter);
Instrumented.updateTimer(flowCompilationTimer, System.nanoTime() - startTime, TimeUnit.NANOSECONDS);
// Clear compilation errors now that compilation is successful
flowSpec.clearCompilationErrors();
return jobExecutionPlanDag;
}
  /**
   * Atomically replaces the {@link FlowGraph} used for path finding.
   * NOTE(review): presumably invoked by the {@link FlowGraphMonitor} when the flow graph definition
   * changes — confirm against callers.
   */
  public void setFlowGraph(FlowGraph flowGraph) {
    this.flowGraph.set(flowGraph);
  }
  /**
   * If {@link FlowSpec} has {@link ConfigurationKeys#DATASET_SUBPATHS_KEY}, split it into multiple flowSpecs using a
   * provided base input and base output path to generate multiple source/destination paths.
   *
   * <p>When {@link ConfigurationKeys#DATASET_COMBINE_KEY} is true, a single spec is produced whose
   * input/output dataset descriptors carry all subpaths as one glob alternation; otherwise one spec
   * is produced per subpath. A spec without subpaths is returned unchanged as a singleton list.
   */
  private static List<FlowSpec> splitFlowSpec(FlowSpec flowSpec) {
    long flowExecutionId = FlowUtils.getOrCreateFlowExecutionId(flowSpec);
    List<FlowSpec> flowSpecs = new ArrayList<>();
    Config flowConfig = flowSpec.getConfig();
    if (flowConfig.hasPath(ConfigurationKeys.DATASET_SUBPATHS_KEY)) {
      List<String> datasetSubpaths = ConfigUtils.getStringList(flowConfig, ConfigurationKeys.DATASET_SUBPATHS_KEY);
      // Base paths default to "/" when not configured.
      String baseInputPath = ConfigUtils.getString(flowConfig, ConfigurationKeys.DATASET_BASE_INPUT_PATH_KEY, "/");
      String baseOutputPath = ConfigUtils.getString(flowConfig, ConfigurationKeys.DATASET_BASE_OUTPUT_PATH_KEY, "/");
      if (ConfigUtils.getBoolean(flowConfig, ConfigurationKeys.DATASET_COMBINE_KEY, false)) {
        // Combine mode: one spec whose descriptors point at the base paths, with all subpaths
        // expressed as a single "{a,b,c}" glob on the "subPaths" key.
        // NOTE(review): flowExecutionId is only stamped onto the per-subpath specs in the else-branch,
        // not onto this combined spec — presumably the id already present in flowConfig is retained
        // here; confirm this asymmetry is intentional.
        Config newConfig = flowConfig.withoutPath(ConfigurationKeys.DATASET_SUBPATHS_KEY)
            .withValue(DatasetDescriptorConfigKeys.FLOW_INPUT_DATASET_DESCRIPTOR_PREFIX + "." + DatasetDescriptorConfigKeys.PATH_KEY,
                ConfigValueFactory.fromAnyRef(baseInputPath))
            .withValue(DatasetDescriptorConfigKeys.FLOW_OUTPUT_DATASET_DESCRIPTOR_PREFIX + "." + DatasetDescriptorConfigKeys.PATH_KEY,
                ConfigValueFactory.fromAnyRef(baseOutputPath))
            .withValue(DatasetDescriptorConfigKeys.FLOW_INPUT_DATASET_DESCRIPTOR_PREFIX + ".subPaths",
                ConfigValueFactory.fromAnyRef(convertStringListToGlobPattern(datasetSubpaths)))
            .withValue(DatasetDescriptorConfigKeys.FLOW_OUTPUT_DATASET_DESCRIPTOR_PREFIX + ".subPaths",
                ConfigValueFactory.fromAnyRef(convertStringListToGlobPattern(datasetSubpaths)));
        flowSpecs.add(copyFlowSpecWithNewConfig(flowSpec, newConfig));
      } else {
        // Per-subpath mode: one spec per subpath; all share the same flow execution id so they are
        // reported as a single flow execution.
        for (String subPath : datasetSubpaths) {
          Config newConfig = flowConfig.withoutPath(ConfigurationKeys.DATASET_SUBPATHS_KEY)
              .withValue(ConfigurationKeys.FLOW_EXECUTION_ID_KEY, ConfigValueFactory.fromAnyRef(flowExecutionId))
              .withValue(DatasetDescriptorConfigKeys.FLOW_INPUT_DATASET_DESCRIPTOR_PREFIX + "." + DatasetDescriptorConfigKeys.PATH_KEY,
                  ConfigValueFactory.fromAnyRef(new Path(baseInputPath, subPath).toString()))
              .withValue(DatasetDescriptorConfigKeys.FLOW_OUTPUT_DATASET_DESCRIPTOR_PREFIX + "." + DatasetDescriptorConfigKeys.PATH_KEY,
                  ConfigValueFactory.fromAnyRef(new Path(baseOutputPath, subPath).toString()));
          flowSpecs.add(copyFlowSpecWithNewConfig(flowSpec, newConfig));
        }
      }
    } else {
      // No subpaths configured: nothing to split.
      flowSpecs.add(flowSpec);
    }
    return flowSpecs;
  }
/**
* Convert string list to string pattern that will work for globs.
*
* e.g. ["test1", "test2", test3"] -> "{test1,test2,test}"
*/
private static String convertStringListToGlobPattern(List<String> stringList) {
return "{" + Joiner.on(",").join(stringList) + "}";
}
private static FlowSpec copyFlowSpecWithNewConfig(FlowSpec flowSpec, Config newConfig) {
FlowSpec.Builder builder = FlowSpec.builder(flowSpec.getUri()).withVersion(flowSpec.getVersion())
.withDescription(flowSpec.getDescription()).withConfig(newConfig);
if (flowSpec.getTemplateURIs().isPresent()) {
builder = builder.withTemplates(flowSpec.getTemplateURIs().get());
}
if (flowSpec.getChildSpecs().isPresent()) {
builder = builder.withTemplates(flowSpec.getChildSpecs().get());
}
return builder.build();
}
/**
* Register a shutdown hook for this thread.
*/
private void addShutdownHook() {
ServiceManager manager = this.serviceManager;
Runtime.getRuntime().addShutdownHook(new Thread() {
public void run() {
// Give the services 5 seconds to stop to ensure that we are responsive to shutdown
// requests.
try {
manager.stopAsync().awaitStopped(5, TimeUnit.SECONDS);
} catch (TimeoutException timeout) {
// stopping timed out
}
}
});
}
}
| 3,926 |
0 | Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules | Create_ds/gobblin/gobblin-service/src/main/java/org/apache/gobblin/service/modules/flow/FlowUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.flow;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.runtime.api.FlowSpec;
public class FlowUtils {
/**
* A FlowSpec contains a FlowExecutionId if it is a runOnce flow.
* Refer {@link FlowConfigResourceLocalHandler#createFlowSpecForConfig} for details.
* @param spec flow spec
* @return flow execution id
*/
public static long getOrCreateFlowExecutionId(FlowSpec spec) {
long flowExecutionId;
if (spec.getConfig().hasPath(ConfigurationKeys.FLOW_EXECUTION_ID_KEY)) {
flowExecutionId = spec.getConfig().getLong(ConfigurationKeys.FLOW_EXECUTION_ID_KEY);
} else {
flowExecutionId = System.currentTimeMillis();
}
return flowExecutionId;
}
} | 3,927 |
0 | Create_ds/gobblin/gobblin-binary-management/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-binary-management/src/test/java/org/apache/gobblin/binary_creation/OrcTestToolsTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.binary_creation;
import com.google.common.io.Files;
import java.io.File;
import org.apache.avro.Schema;
import org.apache.avro.SchemaBuilder;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.orc.TypeDescription;
import org.testng.Assert;
import org.testng.annotations.Test;
public class OrcTestToolsTest {
public DataTestTools orcTools = new OrcTestTools();;
@Test
public void test() throws Exception {
String resourceName = "orcWriterTest";
File tmpDir = Files.createTempDir();
FileSystem fs = FileSystem.get(new Configuration());
Path output = new Path(tmpDir.getAbsolutePath(), "test");
orcTools.writeJsonResourceRecordsAsBinary(resourceName, null, output, null);
Assert.assertTrue(orcTools.checkSameFilesAndRecords(orcTools.readAllRecordsInJsonResource(resourceName, null),
orcTools.readAllRecordsInBinaryDirectory(fs, output), true, null, false));
}
@Test
public void testSchemaToTypeInfoConversion() throws Exception {
// Simple non-nested case:
Schema avroSchema = SchemaBuilder.record("test")
.fields()
.name("id")
.type()
.intType()
.noDefault()
.name("timestamp")
.type()
.stringType()
.noDefault()
.endRecord();
TypeInfo orcSchema = OrcTestTools.convertAvroSchemaToOrcSchema(avroSchema);
String targetOrcSchemaString = "struct<id:int,timestamp:string>";
Assert.assertEquals(targetOrcSchemaString, orcSchema.toString());
// Nested case:
avroSchema = SchemaBuilder.record("nested")
.fields()
.name("nestedId")
.type()
.array()
.items()
.stringType()
.noDefault()
.name("timestamp")
.type()
.stringType()
.noDefault()
.endRecord();
orcSchema = OrcTestTools.convertAvroSchemaToOrcSchema(avroSchema);
TypeDescription targetTypeDescription = TypeDescription.createStruct()
.addField("nestedId", TypeDescription.createList(TypeDescription.createString()))
.addField("timestamp", TypeDescription.createString());
Assert.assertEquals(orcSchema.toString().toLowerCase(), targetTypeDescription.toString().toLowerCase());
}
} | 3,928 |
0 | Create_ds/gobblin/gobblin-binary-management/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-binary-management/src/test/java/org/apache/gobblin/binary_creation/AvroTestToolsTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.binary_creation;
import com.google.common.io.Files;
import java.io.File;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecordBuilder;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.testng.Assert;
import org.testng.annotations.Test;
import org.apache.gobblin.binary_creation.AvroTestTools.*;
public class AvroTestToolsTest {
@Test
public void test() throws Exception {
DataTestTools testTools = new AvroTestTools();
String resourceName = "avroWriterTest";
File tmpDir = Files.createTempDir();
FileSystem fs = FileSystem.getLocal(new Configuration());
Path output = new Path(tmpDir.getAbsolutePath(), "test");
testTools.writeJsonResourceRecordsAsBinary(resourceName, fs, output, null);
Assert.assertTrue(testTools.checkSameFilesAndRecords(testTools.readAllRecordsInJsonResource(resourceName, null),
testTools.readAllRecordsInBinaryDirectory(fs, output), false, null, true));
}
@Test
public void testGenericRecordDataComparisonWithoutSchema() throws Exception {
Schema avroSchema = (new Schema.Parser()).parse(
"{\n" + " \"namespace\": \"com.linkedin.compliance.test\",\n" + " \"type\": \"record\",\n"
+ " \"name\": \"SimpleTest\",\n" + " \"fields\": [\n" + " {\n" + " \"name\": \"memberId\",\n"
+ " \"type\": \"int\"\n" + " },\n" + " {\n" + " \"name\": \"name\",\n"
+ " \"type\": \"string\"\n" + " }\n" + " ]\n" + "}");
Schema avroSchemaDiffInNamespace = (new Schema.Parser()).parse(
"{\n" + " \"namespace\": \"com.linkedin.whatever\",\n" + " \"type\": \"record\",\n"
+ " \"name\": \"SimpleTest\",\n" + " \"fields\": [\n" + " {\n" + " \"name\": \"memberId\",\n"
+ " \"type\": \"int\"\n" + " },\n" + " {\n" + " \"name\": \"name\",\n"
+ " \"type\": \"string\"\n" + " }\n" + " ]\n" + "}");
Schema nullableSchema = (new Schema.Parser()).parse(
"{\n" + " \"namespace\": \"com.linkedin.compliance.test\",\n" + " \"type\": \"record\",\n"
+ " \"name\": \"SimpleTest\",\n" + " \"fields\": [\n" + " {\n" + " \"name\": \"memberId\",\n"
+ " \"type\": [\n" + " \"null\",\n" + " \"int\",\n" + " \"string\"\n"
+ " ]\n" + " },\n" + " {\n" + " \"name\": \"name\",\n" + " \"type\": \"string\"\n"
+ " }\n" + " ]\n" + "}");
GenericRecordBuilder builder_0 = new GenericRecordBuilder(avroSchema);
builder_0.set("memberId", "1");
builder_0.set("name", "alice");
GenericData.Record record_0 = builder_0.build();
GenericRecordBuilder builder_1 = new GenericRecordBuilder(avroSchemaDiffInNamespace);
builder_1.set("memberId", "1");
builder_1.set("name", "alice");
GenericData.Record record_1 = builder_1.build();
GenericRecordBuilder builder_2 = new GenericRecordBuilder(avroSchemaDiffInNamespace);
builder_2.set("memberId", "1");
builder_2.set("name", "alice");
GenericData.Record record_2 = builder_2.build();
GenericRecordBuilder builder_3 = new GenericRecordBuilder(avroSchemaDiffInNamespace);
builder_3.set("memberId", "2");
builder_3.set("name", "bob");
GenericData.Record record_3 = builder_3.build();
GenericRecordBuilder builder_4 = new GenericRecordBuilder(nullableSchema);
builder_4.set("memberId", null);
builder_4.set("name", "bob");
GenericData.Record record_4 = builder_4.build();
GenericRecordBuilder builder_5 = new GenericRecordBuilder(nullableSchema);
builder_5.set("memberId", null);
builder_5.set("name", "bob");
GenericData.Record record_5 = builder_5.build();
Assert.assertTrue(!record_0.equals(record_1));
AvroTestTools.GenericRecordWrapper wrapper_0 = new GenericRecordWrapper(record_0);
GenericRecordWrapper wrapper_1 = new GenericRecordWrapper(record_1);
GenericRecordWrapper wrapper_2 = new GenericRecordWrapper(record_2);
GenericRecordWrapper wrapper_3 = new GenericRecordWrapper(record_3);
GenericRecordWrapper wrapper_4 = new GenericRecordWrapper(record_4);
GenericRecordWrapper wrapper_5 = new GenericRecordWrapper(record_5);
Assert.assertEquals(wrapper_0, wrapper_1);
Assert.assertEquals(wrapper_1, wrapper_2);
Assert.assertNotSame(wrapper_2, wrapper_3);
Assert.assertEquals(wrapper_4, wrapper_5);
}
} | 3,929 |
0 | Create_ds/gobblin/gobblin-binary-management/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-binary-management/src/main/java/org/apache/gobblin/binary_creation/AvroTestTools.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.binary_creation;
import com.google.common.collect.AbstractIterator;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.Spliterators;
import java.util.TreeMap;
import java.util.function.Function;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;
import javax.annotation.Nullable;
import lombok.AllArgsConstructor;
import lombok.Getter;
import lombok.experimental.Delegate;
import lombok.extern.slf4j.Slf4j;
import org.apache.avro.Schema;
import org.apache.avro.file.DataFileReader;
import org.apache.avro.file.DataFileWriter;
import org.apache.avro.file.SeekableInput;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.GenericDatumWriter;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.io.Decoder;
import org.apache.avro.io.DecoderFactory;
import org.apache.avro.mapred.FsInput;
import org.apache.gobblin.util.FileListUtils;
import org.apache.gobblin.util.PathUtils;
import org.apache.gobblin.util.filters.HiddenFilter;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.reflections.Reflections;
import org.reflections.scanners.ResourcesScanner;
import org.reflections.util.ConfigurationBuilder;
/**
 * An implementation of {@link DataTestTools} for the Avro format: reads json-resource fixtures,
 * writes/reads Avro binary files, and compares record sets for tests.
 */
@Slf4j
public class AvroTestTools extends DataTestTools<AvroTestTools.RecordIterator, Schema> {

  /**
   * Checks that two maps of relative file name to record iterator contain the same files and records.
   *
   * @param expected expected records, keyed by file name relative to the base path/resource.
   * @param observed observed records, keyed the same way.
   * @param allowDifferentOrder if true, records within a file are compared as sets instead of in order.
   * @param blacklistRecordFields fields to null out on both sides before comparing (may be null).
   * @param allowDifferentSchema if true, records are compared field-by-field ignoring schema differences.
   * @return true if both maps contain the same files and records under the above rules.
   */
  public boolean checkSameFilesAndRecords(TreeMap<String, RecordIterator> expected,
      TreeMap<String, RecordIterator> observed, boolean allowDifferentOrder, Collection<String> blacklistRecordFields,
      boolean allowDifferentSchema) {
    Iterator<String> keys1 = expected.navigableKeySet().iterator();
    Iterator<String> keys2 = observed.navigableKeySet().iterator();
    return compareIterators(keys1, keys2, (key1, key2) -> {
      // File names are compared without their .avro/.json extension.
      if (!removeExtension(key1).equals(removeExtension(key2))) {
        log.error(String.format("Mismatched files: %s and %s", key1, key2));
        return false;
      }
      RecordIterator it1 = expected.get(key1);
      RecordIterator it2 = observed.get(key2);
      if (!allowDifferentSchema && !it1.getSchema().equals(it2.getSchema())) {
        log.error(String.format("Mismatched schemas: %s and %s", key1, key2));
        return false;
      }
      if (allowDifferentOrder) {
        // When schema differences are allowed, wrap records so equality ignores the schema.
        Set<?> r1 = allowDifferentSchema
            ? toSetWithBlacklistedFields(it1, blacklistRecordFields, GenericRecordWrapper::new)
            : toSetWithBlacklistedFields(it1, blacklistRecordFields, Function.identity());
        Set<?> r2 = allowDifferentSchema
            ? toSetWithBlacklistedFields(it2, blacklistRecordFields, GenericRecordWrapper::new)
            : toSetWithBlacklistedFields(it2, blacklistRecordFields, Function.identity());
        if (r1.equals(r2)) {
          return true;
        } else {
          log.info("Sets of records differ.");
          return false;
        }
      } else {
        return compareIterators(it1, it2, (r1, r2) -> {
          if (blacklistRecordFields != null) {
            for (String blacklisted : blacklistRecordFields) {
              r1.put(blacklisted, null);
              r2.put(blacklisted, null);
            }
          }
          return allowDifferentSchema ?
              GenericRecordWrapper.compareGenericRecordRegardlessOfSchema(r1, r2) : r1.equals(r2);
        });
      }
    });
  }

  /**
   * Collects the records of {@code it} into a set, nulling out blacklisted fields first and applying
   * {@code transform} to each record.
   * Fix: a null {@code blacklistRecordFields} is now tolerated (the ordered-comparison branch of
   * {@link #checkSameFilesAndRecords} already guarded against null; this helper previously NPE'd).
   */
  private static <T> Set<T> toSetWithBlacklistedFields(Iterator<GenericRecord> it,
      Collection<String> blacklistRecordFields, Function<GenericRecord, T> transform) {
    return StreamSupport.stream(Spliterators.spliteratorUnknownSize(it, 0), false).map(r -> {
      if (blacklistRecordFields != null) {
        for (String blacklisted : blacklistRecordFields) {
          r.put(blacklisted, null);
        }
      }
      return transform.apply(r);
    }).collect(Collectors.toSet());
  }

  /**
   * Read all avro records in an HDFS location into a map from file name to {@link RecordIterator}.
   * Hidden files are skipped; each file's reader is closed once its iterator is exhausted.
   */
  @Override
  public TreeMap<String, RecordIterator> readAllRecordsInBinaryDirectory(FileSystem fs, Path path)
      throws IOException {
    TreeMap<String, RecordIterator> output = new TreeMap<>();
    if (!fs.exists(path)) {
      return output;
    }
    PathFilter pathFilter = new HiddenFilter();
    for (FileStatus status : FileListUtils.listFilesRecursively(fs, path, pathFilter)) {
      SeekableInput sin = new FsInput(status.getPath(), fs);
      DataFileReader<GenericRecord> dfr = new DataFileReader<>(sin, new GenericDatumReader<>());
      // Key records by the path relative to the base directory.
      String key = PathUtils.relativizePath(status.getPath(), path).toString();
      output.put(key, new RecordIterator(dfr.getSchema(), new AbstractIterator<GenericRecord>() {
        @Override
        protected GenericRecord computeNext() {
          if (dfr.hasNext()) {
            return dfr.next();
          } else {
            try {
              dfr.close();
            } catch (IOException ioe) {
              log.error("Failed to close data file reader.", ioe);
            }
            endOfData();
            return null;
          }
        }
      }));
    }
    return output;
  }

  /**
   * Read all avro records in a json base resource in classpath into a map from file name to
   * {@link RecordIterator}.
   * A per-directory {@code schema.avsc} overrides the base-resource schema (or the explicit
   * {@code schema} argument) for the json files in that directory.
   */
  @Override
  public TreeMap<String, RecordIterator> readAllRecordsInJsonResource(String baseResource, @Nullable Schema schema)
      throws IOException {
    if (schema == null) {
      String schemaResource = new File(baseResource, "schema.avsc").toString();
      schema = readAvscSchema(schemaResource, AvroTestTools.class);
    }
    TreeMap<String, RecordIterator> output = new TreeMap<>();
    for (String file : getJsonFileSetByResourceRootName(baseResource)) {
      log.info("Reading json record from " + file);
      String name = PathUtils.relativizePath(new Path(file), new Path(baseResource)).toString();
      String schemaResourceName = new File(new File(file).getParent(), "schema.avsc").toString();
      Schema thisSchema = readAvscSchema(schemaResourceName, AvroTestTools.class);
      Schema actualSchema = thisSchema == null ? schema : thisSchema;
      try (InputStream is = AvroTestTools.class.getClassLoader().getResourceAsStream(file)) {
        output.put(name,
            readRecordsFromJsonInputStream(actualSchema, is, DecoderFactory.get().jsonDecoder(actualSchema, is)));
      }
    }
    return output;
  }

  /**
   * Wraps a json input stream as a {@link RecordIterator}; the stream is closed when the decoder
   * signals end of data (via IOException).
   */
  private static RecordIterator readRecordsFromJsonInputStream(Schema schema, InputStream is, Decoder decoder) {
    GenericDatumReader<GenericRecord> reader = new GenericDatumReader<>(schema);
    return new RecordIterator(schema, new AbstractIterator<GenericRecord>() {
      @Override
      protected GenericRecord computeNext() {
        try {
          return reader.read(null, decoder);
        } catch (IOException ioe) {
          // The json decoder signals exhaustion with an IOException; treat it as end-of-data.
          try {
            is.close();
          } catch (IOException exc) {
            log.warn("Failed to close input stream.", exc);
          }
          endOfData();
          return null;
        }
      }
    });
  }

  /**
   * Materialize records in a classpath package into HDFS avro records.
   * @param baseResource name of the package. The package should contain the following:
   * - Exactly one resource called <name>.avsc containing the schema of the records
   * (or an explicit schema passed as an argument).
   * - One or more data files called *.json containing the records.
   * @param fs the {@link FileSystem} where the records will be written.
   * @param targetPath the path where the records will be written.
   * @param schema Schema of the records, or null to read automatically from a resource.
   * @return the schema of the last file written.
   * NOTE(review): assumes the resource contains at least one json file; lastEntry() would NPE on an
   * empty resource — confirm callers guarantee this.
   * @throws IOException
   */
  public Schema writeJsonResourceRecordsAsBinary(String baseResource, FileSystem fs, Path targetPath,
      @Nullable Schema schema) throws IOException {
    TreeMap<String, RecordIterator> recordMap = readAllRecordsInJsonResource(baseResource, schema);
    Schema outputSchema = recordMap.lastEntry().getValue().getSchema();
    for (Map.Entry<String, RecordIterator> entry : recordMap.entrySet()) {
      writeAsAvroBinary(entry.getValue(), entry.getValue().getSchema(), fs, new Path(targetPath,
          removeExtension(entry.getKey()) + ".avro"));
    }
    return outputSchema;
  }

  /**
   * Read schema from an avsc resource file. Returns null when the resource does not exist.
   */
  public static Schema readAvscSchema(String resource, Class loadedClass) throws IOException {
    try (InputStream is = loadedClass.getClassLoader().getResourceAsStream(resource)) {
      return is != null ? new Schema.Parser().parse(is) : null;
    }
  }

  /**
   * Writes all records of {@code input} as a single avro binary file at {@code outputPath},
   * overwriting any existing file.
   * Fix: the writer is now properly typed and closed via try-with-resources, so the underlying
   * stream is released even when a write fails (previously it leaked on exception).
   */
  private void writeAsAvroBinary(Iterator<GenericRecord> input, Schema schema, FileSystem fs,
      Path outputPath) throws IOException {
    try (DataFileWriter<GenericRecord> writer = new DataFileWriter<>(new GenericDatumWriter<>())) {
      writer.create(schema, fs.create(outputPath, true));
      while (input.hasNext()) {
        writer.append(input.next());
      }
    }
    log.info("Successfully wrote avro file to path " + outputPath);
  }

  /**
   * An iterator over {@link GenericRecord} which is also aware of schema.
   */
  @AllArgsConstructor
  public static class RecordIterator implements Iterator<GenericRecord> {
    @Getter
    private final Schema schema;
    @Delegate
    private final Iterator<GenericRecord> it;
  }

  /**
   * A wrapper of {@link GenericRecord} when schema of record is not important in comparison.
   * Equality compares field names and values positionally, ignoring namespace/schema differences.
   */
  @AllArgsConstructor
  public static class GenericRecordWrapper {
    public GenericRecord record;

    @Override
    public boolean equals(Object o) {
      if (this == o) {
        return true;
      }
      if (o == null || getClass() != o.getClass()) {
        return false;
      }
      GenericRecordWrapper that = (GenericRecordWrapper) o;
      return compareGenericRecordRegardlessOfSchema(record, that.record);
    }

    @Override
    public int hashCode() {
      // Hash only field values (not the schema), consistent with the schema-agnostic equals above.
      int indexLen = record.getSchema().getFields().size();
      Object[] objArr = new Object[indexLen];
      for (int i = 0; i < indexLen; i++) {
        objArr[i] = record.get(i);
      }
      return Objects.hash(objArr);
    }

    /**
     * Compare two {@link GenericRecord} instance without considering their schema.
     * Useful when we want to compare two records by discarding some of fields like header.
     * Fix: the original threw NPE when a field was null in one record but non-null in the other;
     * values are now compared null-safely with {@link Objects#equals}, field names must always match,
     * and the loop exits early on the first mismatch.
     */
    static boolean compareGenericRecordRegardlessOfSchema(GenericRecord r1, GenericRecord r2) {
      List<Schema.Field> listOfFields1 = r1.getSchema().getFields();
      List<Schema.Field> listOfFields2 = r2.getSchema().getFields();
      if (listOfFields1.size() != listOfFields2.size()) {
        return false;
      }
      for (int i = 0; i < listOfFields1.size(); i++) {
        if (!listOfFields1.get(i).name().equals(listOfFields2.get(i).name())
            || !Objects.equals(r1.get(i), r2.get(i))) {
          return false;
        }
      }
      return true;
    }
  }

  // Package-private methods shared by different format's tool-kit.

  /**
   * Strips a trailing ".avro" or ".json" extension; any other extension is rejected.
   */
  static String removeExtension(String string) {
    if (string.endsWith(".avro") || string.endsWith(".json")) {
      return string.substring(0, string.length() - 5);
    }
    throw new IllegalArgumentException("Only support avro and json extensions.");
  }

  /**
   * Scans the classpath and returns all *.json resources under the given base resource package.
   */
  static Set<String> getJsonFileSetByResourceRootName(String baseResource) {
    Reflections reflections = new Reflections(new ConfigurationBuilder()
        .forPackages(baseResource)
        .filterInputsBy(name -> name.startsWith(baseResource))
        .setScanners(new ResourcesScanner()));
    return reflections.getResources(url -> url.endsWith(".json"));
  }

  /**
   * Returns true if the given classpath resource exists.
   */
  public static boolean isResourceExisted(String resource) throws IOException {
    return AvroTestTools.class.getClassLoader().getResource(resource) != null;
  }
}
| 3,930 |
0 | Create_ds/gobblin/gobblin-binary-management/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-binary-management/src/main/java/org/apache/gobblin/binary_creation/OrcTestTools.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.binary_creation;
import com.google.common.collect.AbstractIterator;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.lang.reflect.Field;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.Map;
import java.util.Properties;
import java.util.TreeMap;
import javax.annotation.Nullable;
import lombok.AllArgsConstructor;
import lombok.Getter;
import lombok.experimental.Delegate;
import lombok.extern.slf4j.Slf4j;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.io.Decoder;
import org.apache.avro.io.DecoderFactory;
import org.apache.gobblin.util.FileListUtils;
import org.apache.gobblin.util.PathUtils;
import org.apache.gobblin.util.filters.HiddenFilter;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hive.ql.io.orc.OrcFile;
import org.apache.hadoop.hive.ql.io.orc.OrcStruct;
import org.apache.hadoop.hive.ql.io.orc.Reader;
import org.apache.hadoop.hive.ql.io.orc.RecordReader;
import org.apache.hadoop.hive.ql.io.orc.Writer;
import org.apache.hadoop.hive.serde2.SerDeException;
import org.apache.hadoop.hive.serde2.avro.AvroGenericRecordWritable;
import org.apache.hadoop.hive.serde2.avro.AvroObjectInspectorGenerator;
import org.apache.hadoop.hive.serde2.avro.AvroSerDe;
import org.apache.hadoop.hive.serde2.avro.AvroSerdeUtils;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.ShortWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import static org.apache.gobblin.binary_creation.AvroTestTools.*;
// A class that examines ORC-Format file in Purger Integration test.
@Slf4j
public class OrcTestTools extends DataTestTools<OrcTestTools.OrcRowIterator, TypeInfo> {
/**
*
* @param expected
* @param observed
* @param allowDifferentOrder ORC tools will not use this parameter currently.
* @param blacklistRecordFields ORC tools will not use this parameter currently.
* @return If two sets of files are identical.
* Note that there might be an ordering issue in this comparison method. When one is drafting an ORC integration
* test, try to name all json files differently.
*/
@Override
public boolean checkSameFilesAndRecords(TreeMap<String, OrcRowIterator> expected,
TreeMap<String, OrcRowIterator> observed, boolean allowDifferentOrder, Collection<String> blacklistRecordFields,
boolean allowDifferentSchema) {
Iterator<String> keys1 = expected.navigableKeySet().iterator();
Iterator<String> keys2 = observed.navigableKeySet().iterator();
return compareIterators(keys1, keys2, (key1, key2) -> {
// ORC file doesn't have extension by Linkedin's convention.
if (!removeExtension(key1).equals(key2)) {
log.error(String.format("Mismatched files: %s and %s", key1, key2));
return false;
}
OrcRowIterator it1 = expected.get(key1);
OrcRowIterator it2 = observed.get(key2);
if (!it1.getTypeInfo().equals(it2.getTypeInfo())) {
log.error(String.format("Mismatched Typeinfo: %s and %s", key1, key2));
return false;
}
boolean result = true;
while (it1.hasNext()) {
if (!it2.hasNext() || !result) {
return false;
}
result = compareJavaRowAndOrcStruct(((AvroRow) it1.next()).getRow(), (OrcStruct) it2.next());
}
return result;
});
}
/**
* Given the fact that we couldn't access OrcStruct easily, here uses the hacky way(reflection)
* to go around access modifier for integration test purpose only.
* @param realRow A row containing a list of Java objects.
* @param struct An {@link OrcStruct} which essentially is a list of {@link Writable} objects.
*/
private boolean compareJavaRowAndOrcStruct(Object realRow, OrcStruct struct) {
boolean isIdentical = true;
ArrayList<Object> javaObjRow = (ArrayList) realRow;
try {
Field objectArr = OrcStruct.class.getDeclaredField("fields");
objectArr.setAccessible(true);
Object[] dataArr = (Object[]) objectArr.get(struct);
int index = 0;
for (Object dataField : dataArr) {
if (dataField instanceof OrcStruct) {
isIdentical = isIdentical && compareJavaRowAndOrcStruct(javaObjRow.get(index), (OrcStruct) dataField);
} else {
isIdentical = isIdentical && objCastHelper(javaObjRow.get(index), (Writable) dataField);
}
index++;
}
} catch (NoSuchFieldException | IllegalAccessException nfe) {
throw new RuntimeException("Failed in compare a java object row and orcstruct");
}
return isIdentical;
}
/**
* All Writable objects passed in here are guaranteed to be primitive writable objects.
*/
private boolean objCastHelper(Object javaObj, Writable obj) {
if (obj instanceof IntWritable) {
return ((IntWritable) obj).get() == (Integer) javaObj;
} else if (obj instanceof Text) {
return (obj).toString().equals(javaObj);
} else if (obj instanceof LongWritable) {
return ((LongWritable) obj).get() == (Long) javaObj;
} else if (obj instanceof ShortWritable) {
return ((ShortWritable) obj).get() == (Short) javaObj;
} else if (obj instanceof DoubleWritable) {
return ((DoubleWritable) obj).get() == (Double) javaObj;
} else {
throw new RuntimeException("Cannot recognize the writable type, please enrich the castHelper function");
}
}
  /**
   * Materialize records in a classpath package into HDFS ORC records.
   * @param baseResource name of the package. The package should contain the following:
   *                     - Exactly one resource called orcSchema containing the schema of the records
   *                     (or an explicit schema passed as an argument).
   *                     - One or more data files called *.json containing the records.
   *                     Note that .avsc will not be used in Orc related operation.
   * @param fs unused by this ORC implementation (writing goes through OrcFile directly).
   * @param targetPath the path where the records will be written; one output file per json file,
   *                   named after the json file with its extension removed.
   * @param schema optional ORC schema; when null it is derived from the package's schema.avsc.
   * @return the {@link TypeInfo} the records were written with.
   */
  @Override
  public TypeInfo writeJsonResourceRecordsAsBinary(String baseResource, @Nullable FileSystem fs, Path targetPath,
      @Nullable TypeInfo schema) throws IOException {
    TreeMap<String, OrcRowIterator> recordMap = readAllRecordsInJsonResource(baseResource, schema);
    // NOTE(review): assumes the resource package contains at least one json file;
    // an empty recordMap would NPE on lastEntry() — confirm callers guarantee this.
    TypeInfo outputSchema = recordMap.lastEntry().getValue().getTypeInfo();
    for (Map.Entry<String, OrcRowIterator> entry : recordMap.entrySet()) {
      writeAsOrcBinary(entry.getValue(), outputSchema, new Path(targetPath, removeExtension(entry.getKey())));
    }
    return outputSchema;
  }
/**
* AvroRow version of writeAsOrcBinary
*/
private void writeAsOrcBinary(OrcRowIterator input, TypeInfo schema, Path outputPath) throws IOException {
Configuration configuration = new Configuration();
// Note that it doesn't support schema evolution at all.
// If the schema in realRow is inconsistent with given schema, writing into disk
// would run into failure.
ObjectInspector oi = TypeInfoUtils.getStandardWritableObjectInspectorFromTypeInfo(schema);
OrcFile.WriterOptions options = OrcFile.writerOptions(configuration).inspector(oi);
Writer writer = null;
while (input.hasNext()) {
AvroRow avroRow = (AvroRow) input.next();
if (writer == null) {
options.inspector(avroRow.getInspector());
writer = OrcFile.createWriter(outputPath, options);
}
writer.addRow(avroRow.realRow);
}
if (writer != null) {
writer.close();
}
}
// ORC-File Reading related functions
// There's no GenericRecord for ORC existed(so that OrcStruct even doesn't provide readFields as
// it is responsible to transform a Writable into GenericRecord in Avro world.
@Override
public TreeMap<String, OrcRowIterator> readAllRecordsInJsonResource(String baseResource,
@Nullable TypeInfo schema) throws IOException {
TypeInfo orcSchema;
try {
if (schema == null) {
File schemaFile = new File(baseResource, "schema.avsc");
String schemaResource = schemaFile.toString();
orcSchema = convertAvroSchemaToOrcSchema(readAvscSchema(schemaResource, OrcTestTools.class));
} else {
orcSchema = schema;
}
} catch (SerDeException se) {
throw new RuntimeException("Provided Avro Schema cannot be transformed to ORC schema", se);
}
TreeMap<String, OrcRowIterator> output = new TreeMap<>();
for (String file : getJsonFileSetByResourceRootName(baseResource)) {
log.info("Reading json record from " + file);
String name = PathUtils.relativizePath(new Path(file), new Path(baseResource)).toString();
output.put(name, readRecordsFromJsonInputStream(orcSchema, file));
}
return output;
}
  /**
   * A {@link Writable} shim pairing a deserialized Hive row object with the
   * {@link ObjectInspector} that describes it, so a self-maintained ORC writer can consume it.
   * The Writable serialization methods are intentionally unsupported: this object is an
   * in-memory carrier only and never crosses the wire.
   */
  public static class AvroRow implements Writable {
    // The deserialized row (as produced by AvroSerDe.deserialize); accessed directly by writeAsOrcBinary.
    Object realRow;
    // Inspector describing realRow's layout.
    ObjectInspector inspector;
    public AvroRow(Object row, ObjectInspector inspector) {
      this.realRow = row;
      this.inspector = inspector;
    }
    @Override
    public void write(DataOutput dataOutput) throws IOException {
      throw new UnsupportedOperationException("can't write the bundle");
    }
    @Override
    public void readFields(DataInput dataInput) throws IOException {
      throw new UnsupportedOperationException("can't read the bundle");
    }
    ObjectInspector getInspector() {
      return inspector;
    }
    Object getRow() {
      return realRow;
    }
  }
  /**
   * Deserialize a json resource into a lazy stream of rows, transforming each record into its
   * {@link Writable} counterpart ({@link AvroRow}) in convenience of ORC reading and writing.
   *
   * @param typeInfo The ORC schema in {@link TypeInfo} format.
   * @param file The resource name of the json file, in String format.
   * @return a lazy {@link OrcRowIterator}; the backing stream is closed when reading ends or fails.
   */
  private OrcRowIterator readRecordsFromJsonInputStream(TypeInfo typeInfo, String file) throws IOException {
    // NOTE(review): getResourceAsStream returns null for a missing resource, which would NPE in the
    // decoder below — assumes `file` always exists on the classpath; confirm with callers.
    InputStream is = OrcTestTools.class.getClassLoader().getResourceAsStream(file);
    // This getParent.getParent is dirty due to we need to simulate multiple-partitions scenarios in iTest.
    String schemaResourceName = new File(new File(file).getParentFile().getParent(), "schema.avsc").toString();
    Schema attemptedSchema = readAvscSchema(schemaResourceName, OrcTestTools.class);
    // Fall back to a schema.avsc sitting directly next to the json file.
    final Schema avroSchema =
        attemptedSchema == null ? readAvscSchema(new File(new File(file).getParent(), "schema.avsc").toString(),
            OrcTestTools.class) : attemptedSchema;
    GenericDatumReader<GenericRecord> reader = new GenericDatumReader<>(avroSchema);
    Decoder decoder = DecoderFactory.get().jsonDecoder(avroSchema, is);
    return new OrcRowIterator(typeInfo, new AbstractIterator<Writable>() {
      @Override
      protected Writable computeNext() {
        try {
          GenericRecord record = reader.read(null, decoder);
          return getAvroWritable(record, avroSchema);
        } catch (IOException e) {
          // Normal end-of-data also surfaces as an IOException from reader.read;
          // in either case, close the stream (best effort) and terminate iteration.
          try {
            is.close();
          } catch (IOException ioec) {
            log.warn("Failed to read record from inputstream, will close it immediately", ioec);
          }
          endOfData();
          return null;
        }
      }
    });
  }
/**
* From each record, transformed to {@link AvroRow} object for writing.
* One can also choose to use OrcSerDe to obtain ORC-associated writable object.
*
* Using return object of this method would enable a self-maintained ORC writer(not from OrcOutputFormat)
* to write object.
*/
private Writable getAvroWritable(GenericRecord record, Schema avroSchema) {
try {
// Construct AvroSerDe with proper schema and deserialize into Hive object.
AvroSerDe serDe = new AvroSerDe();
Properties propertiesWithSchema = new Properties();
propertiesWithSchema.setProperty(AvroSerdeUtils.AvroTableProperties.SCHEMA_LITERAL.getPropName(),
avroSchema.toString());
serDe.initialize(null, propertiesWithSchema);
AvroGenericRecordWritable avroGenericRecordWritable = new AvroGenericRecordWritable(record);
avroGenericRecordWritable.setFileSchema(avroSchema);
Object avroDeserialized = serDe.deserialize(avroGenericRecordWritable);
ObjectInspector avroOI = new AvroObjectInspectorGenerator(avroSchema).getObjectInspector();
return new AvroRow(avroDeserialized, avroOI);
} catch (SerDeException se) {
throw new RuntimeException("Failed in SerDe exception:", se);
}
}
/**
* Reading ORC file into in-memory representation.
*/
@Override
public TreeMap<String, OrcRowIterator> readAllRecordsInBinaryDirectory(FileSystem fs, Path path) throws IOException {
TreeMap<String, OrcRowIterator> output = new TreeMap<>();
if (!fs.exists(path)) {
return output;
}
PathFilter pathFilter = new HiddenFilter();
for (FileStatus status : FileListUtils.listFilesRecursively(fs, path, pathFilter)) {
String key = PathUtils.relativizePath(status.getPath(), path).toString();
Reader orcReader = OrcFile.createReader(fs, status.getPath());
RecordReader recordReader = orcReader.rows();
output.put(key, new OrcRowIterator(TypeInfoUtils.getTypeInfoFromObjectInspector(orcReader.getObjectInspector()),
new AbstractIterator<Writable>() {
@Override
protected Writable computeNext() {
try {
if (recordReader.hasNext()) {
return (Writable) recordReader.next(null);
} else {
recordReader.close();
endOfData();
return null;
}
} catch (IOException ioe) {
log.warn("Failed to process orc record reader, will terminate reader immediately", ioe);
endOfData();
return null;
}
}
}));
}
return output;
}
  /**
   * An iterator over {@link Writable} rows (e.g. {@link OrcStruct}) which is also aware of
   * its schema (represented in {@link TypeInfo}).
   * Iteration methods are generated by Lombok's {@code @Delegate} against the wrapped iterator.
   */
  @AllArgsConstructor
  public static class OrcRowIterator implements Iterator<Writable> {
    // Schema of the rows this iterator produces.
    @Getter
    private final TypeInfo typeInfo;
    // Backing iterator; supplies hasNext()/next() via @Delegate.
    @Delegate
    private final Iterator<Writable> it;
  }
/**
* Convert Avro schema into TypeInfo.
* Current version of Hive used by Gobblin open-source(1.0.1) doesn't have {@link org.apache.orc.TypeDescription}
* and utilities associated with it. So instead {@link TypeInfo} is being used to represent Orc schema.
* Note that {@link TypeInfo} is not case preserving as it is actually the internal schema representation of Hive.
*/
public static TypeInfo convertAvroSchemaToOrcSchema(Schema avroSchema) throws SerDeException {
return TypeInfoUtils.getTypeInfoFromObjectInspector(
new AvroObjectInspectorGenerator(avroSchema).getObjectInspector());
}
} | 3,931 |
0 | Create_ds/gobblin/gobblin-binary-management/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-binary-management/src/main/java/org/apache/gobblin/binary_creation/DataTestTools.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.binary_creation;
import java.io.IOException;
import java.util.Collection;
import java.util.Iterator;
import java.util.TreeMap;
import java.util.function.BiFunction;
import lombok.extern.slf4j.Slf4j;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
/**
* A ToolKit that will be used for:
* - Creating binary-format file(Avro, ORC) using records declared in txt(.json) file, and schema defined in .avsc file.
* - Deserializing binary-format file into traversable in-memory objects.
* - Verifying if contents in two binary-format file are identical with certain constraints.
*
*
* @param <T> Iterator containing specific type of a record row,
* e.g. {@link org.apache.avro.generic.GenericRecord} for Avro.
* @param <S> Schema type of a specific data format.
*/
@Slf4j
public abstract class DataTestTools<T, S> {
/**
* Verify that the two inputs contain the same records in the same file names. Any fields listed in
* blacklistRecordFields will not be used for comparison.
* Note that this method is destructive to the input records.
* @param expected Expected records map, keyed by file name.
* @param observed Observed records map, keyed by file name
* @param allowDifferentOrder True if allowing fields arranged in different order in comparison of two records.
* @param blacklistRecordFields Configurable set of fields that won't be included for comparison of two records.
* @param allowDifferentSchema True if schema info (for avro, schema can contain attributes which is not necessary
* to be included for comparison)
* @return
*/
public abstract boolean checkSameFilesAndRecords(TreeMap<String, T> expected, TreeMap<String, T> observed,
boolean allowDifferentOrder, Collection<String> blacklistRecordFields, boolean allowDifferentSchema);
/**
* Write a resource file under a certain path as specified binary format file, like Avro, ORC.
* @param baseResource Resource folder that contain JSON files.
* @param fs
* @param targetPath Output Path.
* @param schema The schema of outputed binary file
* @return
* @throws IOException
*/
public abstract S writeJsonResourceRecordsAsBinary(String baseResource, FileSystem fs, Path targetPath, S schema)
throws IOException;
/**
* Read all records in a json base resource in classpath into a map from file name to iterator of T object.
* @param baseResource Base path of the resource directory that contains json file.
* @param schema The schema of records.
* @return A map between file name to an iterator of objects contained in path.
*/
public abstract TreeMap<String, T> readAllRecordsInJsonResource(String baseResource, S schema) throws IOException;
/**
* Read binary-format records into a map from file name to an iterator of T object.
* @param fs File system object.
* @param path File path
* @return A map between file name to an iterator of objects contained in path.
* @throws IOException
*/
public abstract TreeMap<String, T> readAllRecordsInBinaryDirectory(FileSystem fs, Path path) throws IOException;
/**
* Compare two iterators in T type.
*/
<T> boolean compareIterators(Iterator<T> expected, Iterator<T> observed, BiFunction<T, T, Boolean> comparator) {
while (expected.hasNext()) {
if (!observed.hasNext()) {
log.error("Expected has more elements than observed.");
return false;
}
T t1 = expected.next();
T t2 = observed.next();
boolean equals = comparator == null ? t1.equals(t2) : comparator.apply(t1, t2);
if (!equals) {
log.error(String.format("Mismatch: %s does not equal %s.", t1, t2));
return false;
}
}
if (observed.hasNext()) {
log.error("Observed has more elements than expected.");
return false;
}
return true;
}
}
| 3,932 |
0 | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/GobblinTemporalConfigurationKeys.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.temporal;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.temporal.workflows.helloworld.HelloWorldJobLauncher;
import org.apache.gobblin.temporal.workflows.helloworld.HelloWorldWorker;
/**
 * A central place for configuration-related constants of Gobblin Temporal.
 */
@Alpha
public interface GobblinTemporalConfigurationKeys {
  String PREFIX = "gobblin.temporal.";
  // Fully-qualified class name of the TemporalWorker implementation to launch.
  String WORKER_CLASS = PREFIX + "worker.class";
  String DEFAULT_WORKER_CLASS = HelloWorldWorker.class.getName();
  String GOBBLIN_TEMPORAL_NAMESPACE = PREFIX + "namespace";
  // NOTE(review): this default equals the *config key* ("gobblin.temporal.namespace") rather than
  // an actual namespace value — looks like a copy/paste slip; confirm intent before changing,
  // since deployments may already depend on the literal value.
  String DEFAULT_GOBBLIN_TEMPORAL_NAMESPACE = PREFIX + "namespace";
  String GOBBLIN_TEMPORAL_TASK_QUEUE = PREFIX + "task.queue.name";
  String DEFAULT_GOBBLIN_TEMPORAL_TASK_QUEUE = "GobblinTemporalTaskQueue";
  String GOBBLIN_TEMPORAL_JOB_LAUNCHER_PREFIX = PREFIX + "job.launcher.";
  String GOBBLIN_TEMPORAL_JOB_LAUNCHER_CLASS = GOBBLIN_TEMPORAL_JOB_LAUNCHER_PREFIX + "class";
  String DEFAULT_GOBBLIN_TEMPORAL_JOB_LAUNCHER_CLASS = HelloWorldJobLauncher.class.getName();
  // Prefix under which arbitrary job-launcher arguments may be supplied.
  String GOBBLIN_TEMPORAL_JOB_LAUNCHER_ARG_PREFIX = GOBBLIN_TEMPORAL_JOB_LAUNCHER_PREFIX + "arg.";
  /**
   * Number of worker processes to spin up per task runner.
   * NOTE: If this size is too large, your container can OOM and halt execution unexpectedly.
   * It's recommended not to touch this parameter.
   */
  String TEMPORAL_NUM_WORKERS_PER_CONTAINER = PREFIX + "num.workers.per.container";
  int DEFAULT_TEMPORAL_NUM_WORKERS_PER_CONTAINERS = 1;
  // Connection string used to reach the Temporal service (no default; must be configured).
  String TEMPORAL_CONNECTION_STRING = PREFIX + "connection.string";
}
| 3,933 |
0 | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/cluster/AbstractTemporalWorker.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.temporal.cluster;
import java.util.Arrays;
import com.typesafe.config.Config;
import io.temporal.client.WorkflowClient;
import io.temporal.worker.Worker;
import io.temporal.worker.WorkerFactory;
import io.temporal.worker.WorkerOptions;
import org.apache.gobblin.temporal.GobblinTemporalConfigurationKeys;
import org.apache.gobblin.util.ConfigUtils;
/** Basic boilerplate for a {@link TemporalWorker} to register its activity and workflow capabilities and listen on a particular queue */
public abstract class AbstractTemporalWorker implements TemporalWorker {
  private final Config config;
  private final WorkflowClient workflowClient;
  private final String taskQueueName;
  private final WorkerFactory factory;

  public AbstractTemporalWorker(Config cfg, WorkflowClient client) {
    this.config = cfg;
    this.workflowClient = client;
    this.taskQueueName = ConfigUtils.getString(cfg,
        GobblinTemporalConfigurationKeys.GOBBLIN_TEMPORAL_TASK_QUEUE,
        GobblinTemporalConfigurationKeys.DEFAULT_GOBBLIN_TEMPORAL_TASK_QUEUE);
    // Factory from which Workers polling specific task queues are created.
    this.factory = WorkerFactory.newInstance(this.workflowClient);
    // NOTE(review): this indirectly invokes the overridable getWorkflowImplClasses()/
    // getActivityImplInstances() from the constructor; subclasses must not rely on their own
    // constructor state inside those methods.
    stashWorkerConfig(cfg);
  }

  @Override
  public void start() {
    Worker worker = factory.newWorker(taskQueueName, createWorkerOptions());
    // Workflows are stateful, so implementation *types* are registered and instantiated per execution.
    worker.registerWorkflowImplementationTypes(getWorkflowImplClasses());
    // Activities are stateless and thread safe, so shared instances are registered.
    worker.registerActivitiesImplementations(getActivityImplInstances());
    // Begin polling the task queue.
    factory.start();
  }

  @Override
  public void shutdown() {
    factory.shutdown();
  }

  /** @return options for the worker created in {@link #start()}; {@code null} by default. */
  protected WorkerOptions createWorkerOptions() {
    return null;
  }

  /** @return workflow types for *implementation* classes (not interface) */
  protected abstract Class<?>[] getWorkflowImplClasses();

  /** @return activity instances; NOTE: activities must be stateless and thread-safe, so a shared instance is used. */
  protected abstract Object[] getActivityImplInstances();

  /** Stash {@code cfg} in association with this worker and each of its workflow/activity impl classes. */
  private void stashWorkerConfig(Config cfg) {
    WorkerConfig.forWorker(getClass(), cfg);
    for (Class<?> workflowImplClass : getWorkflowImplClasses()) {
      WorkerConfig.withImpl(workflowImplClass, cfg);
    }
    for (Object activityImpl : getActivityImplInstances()) {
      WorkerConfig.withImpl(activityImpl.getClass(), cfg);
    }
  }
}
| 3,934 |
0 | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/cluster/GobblinTemporalTaskRunner.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.temporal.cluster;
import java.io.IOException;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Properties;
import java.util.UUID;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.api.client.repackaged.com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.eventbus.EventBus;
import com.google.common.eventbus.Subscribe;
import com.google.common.util.concurrent.Service;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;
import io.temporal.client.WorkflowClient;
import io.temporal.serviceclient.WorkflowServiceStubs;
import lombok.Getter;
import lombok.Setter;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.cluster.ContainerHealthCheckException;
import org.apache.gobblin.cluster.ContainerHealthMetricsService;
import org.apache.gobblin.cluster.ContainerMetrics;
import org.apache.gobblin.cluster.GobblinClusterConfigurationKeys;
import org.apache.gobblin.cluster.GobblinClusterManager;
import org.apache.gobblin.cluster.GobblinClusterUtils;
import org.apache.gobblin.cluster.TaskRunnerSuiteBase;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.instrumented.StandardMetricsBridge;
import org.apache.gobblin.metrics.GobblinMetrics;
import org.apache.gobblin.metrics.MultiReporterException;
import org.apache.gobblin.metrics.RootMetricContext;
import org.apache.gobblin.metrics.event.EventSubmitter;
import org.apache.gobblin.metrics.event.GobblinEventBuilder;
import org.apache.gobblin.metrics.reporter.util.MetricReportUtils;
import org.apache.gobblin.runtime.api.TaskEventMetadataGenerator;
import org.apache.gobblin.temporal.GobblinTemporalConfigurationKeys;
import org.apache.gobblin.temporal.workflows.client.TemporalWorkflowClientFactory;
import org.apache.gobblin.util.ClassAliasResolver;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.FileUtils;
import org.apache.gobblin.util.HadoopUtils;
import org.apache.gobblin.util.JvmUtils;
import org.apache.gobblin.util.TaskEventMetadataUtils;
import org.apache.gobblin.util.event.ContainerHealthCheckFailureEvent;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
/**
* The main class running in the containers managing services for running Gobblin
* {@link org.apache.gobblin.source.workunit.WorkUnit}s.
*
* <p>
* If for some reason, the container exits or gets killed, the {@link GobblinClusterManager} will
* be notified for the completion of the container and will start a new container to replace this one.
* </p>
*
*/
@Alpha
public class GobblinTemporalTaskRunner implements StandardMetricsBridge {
// Working directory key for applications. This config is set dynamically.
public static final String CLUSTER_APP_WORK_DIR = GobblinClusterConfigurationKeys.GOBBLIN_CLUSTER_PREFIX + "appWorkDir";
private static final Logger logger = LoggerFactory.getLogger(GobblinTemporalTaskRunner.class);
static final java.nio.file.Path CLUSTER_CONF_PATH = Paths.get("generated-gobblin-cluster.conf");
private final Optional<ContainerMetrics> containerMetrics;
private final Path appWorkPath;
private boolean isTaskDriver;
@Getter
private volatile boolean started = false;
private volatile boolean stopInProgress = false;
private volatile boolean isStopped = false;
@Getter
@Setter
private volatile boolean healthCheckFailed = false;
protected final String taskRunnerId;
protected final EventBus eventBus = new EventBus(GobblinTemporalTaskRunner.class.getSimpleName());
protected final Config clusterConfig;
@Getter
protected final FileSystem fs;
protected final String applicationName;
protected final String applicationId;
protected final int numTemporalWorkers;
protected final String temporalQueueName;
private final boolean isMetricReportingFailureFatal;
private final boolean isEventReportingFailureFatal;
private final List<TemporalWorker> workers;
  /**
   * Builds a Temporal task runner for one container: resolves dynamic config, constructs the
   * file system and app work dir, persists the augmented config, and captures worker settings.
   *
   * @param applicationName Gobblin application name (used to derive the work dir).
   * @param applicationId Gobblin application id (used to derive the work dir).
   * @param taskRunnerId id of this container/runner instance.
   * @param config base cluster config; dynamic config is layered on top before use.
   * @param appWorkDirOptional explicit app work dir; when absent it is derived from config.
   * @throws Exception if filesystem construction or config persistence fails.
   */
  public GobblinTemporalTaskRunner(String applicationName,
      String applicationId,
      String taskRunnerId,
      Config config,
      Optional<Path> appWorkDirOptional) throws Exception {
    GobblinClusterUtils.setSystemProperties(config);
    //Add dynamic config
    config = GobblinClusterUtils.addDynamicConfig(config);
    this.isTaskDriver = ConfigUtils.getBoolean(config, GobblinClusterConfigurationKeys.TASK_DRIVER_ENABLED,false);
    this.taskRunnerId = taskRunnerId;
    this.applicationName = applicationName;
    this.applicationId = applicationId;
    Configuration conf = HadoopUtils.newConfiguration();
    this.fs = GobblinClusterUtils.buildFileSystem(config, conf);
    this.appWorkPath = initAppWorkDir(config, appWorkDirOptional);
    // Persist the augmented config so downstream consumers see the resolved work dir.
    this.clusterConfig = saveConfigToFile(config);
    logger.info("Configured GobblinTaskRunner work dir to: {}", this.appWorkPath.toString());
    this.containerMetrics = buildContainerMetrics();
    // How many Temporal workers this container hosts, and which task queue they poll.
    this.numTemporalWorkers = ConfigUtils.getInt(config, GobblinTemporalConfigurationKeys.TEMPORAL_NUM_WORKERS_PER_CONTAINER,
        GobblinTemporalConfigurationKeys.DEFAULT_TEMPORAL_NUM_WORKERS_PER_CONTAINERS);
    this.temporalQueueName = ConfigUtils.getString(config, GobblinTemporalConfigurationKeys.GOBBLIN_TEMPORAL_TASK_QUEUE,
        GobblinTemporalConfigurationKeys.DEFAULT_GOBBLIN_TEMPORAL_TASK_QUEUE);
    // Whether metric/event reporting failures should be treated as fatal for the container.
    this.isMetricReportingFailureFatal = ConfigUtils.getBoolean(this.clusterConfig,
        ConfigurationKeys.GOBBLIN_TASK_METRIC_REPORTING_FAILURE_FATAL,
        ConfigurationKeys.DEFAULT_GOBBLIN_TASK_METRIC_REPORTING_FAILURE_FATAL);
    this.isEventReportingFailureFatal = ConfigUtils.getBoolean(this.clusterConfig,
        ConfigurationKeys.GOBBLIN_TASK_EVENT_REPORTING_FAILURE_FATAL,
        ConfigurationKeys.DEFAULT_GOBBLIN_TASK_EVENT_REPORTING_FAILURE_FATAL);
    this.workers = new ArrayList<>();
    logger.info("GobblinTaskRunner({}): applicationName {}, applicationId {}, taskRunnerId {}, config {}, appWorkDir {}",
        this.isTaskDriver ? "taskDriver" : "worker",
        applicationName,
        applicationId,
        taskRunnerId,
        config,
        appWorkDirOptional);
  }
public TaskRunnerSuiteBase.Builder getTaskRunnerSuiteBuilder() throws ReflectiveOperationException {
String builderStr = ConfigUtils.getString(this.clusterConfig,
GobblinClusterConfigurationKeys.TASK_RUNNER_SUITE_BUILDER,
TaskRunnerSuiteBase.Builder.class.getName());
String hostName = "";
try {
hostName = InetAddress.getLocalHost().getHostName();
} catch (UnknownHostException e) {
logger.warn("Cannot find host name for Helix instance: {}");
}
TaskRunnerSuiteBase.Builder builder = GobblinConstructorUtils.<TaskRunnerSuiteBase.Builder>invokeLongestConstructor(
new ClassAliasResolver(TaskRunnerSuiteBase.Builder.class)
.resolveClass(builderStr), this.clusterConfig);
return builder.setAppWorkPath(this.appWorkPath)
.setContainerMetrics(this.containerMetrics)
.setFileSystem(this.fs)
.setApplicationId(applicationId)
.setApplicationName(applicationName)
.setContainerId(taskRunnerId)
.setHostName(hostName);
}
/** Resolves the application work dir: honor an explicitly supplied path, else derive from config. */
private Path initAppWorkDir(Config config, Optional<Path> appWorkDirOptional) {
  if (appWorkDirOptional.isPresent()) {
    return appWorkDirOptional.get();
  }
  return GobblinClusterUtils.getAppWorkDirPathFromConfig(config, this.fs, this.applicationName, this.applicationId);
}
/**
 * Records the resolved application work dir in the config, persists the result to
 * {@code CLUSTER_CONF_PATH}, and returns the enriched config.
 */
private Config saveConfigToFile(Config config)
    throws IOException {
  Config enrichedConf =
      config.withValue(CLUSTER_APP_WORK_DIR, ConfigValueFactory.fromAnyRef(this.appWorkPath.toString()));
  new ConfigUtils(new FileUtils()).saveConfigToFile(enrichedConf, CLUSTER_CONF_PATH);
  return enrichedConf;
}
/**
 * Start this {@link GobblinTemporalTaskRunner} instance: begin metric reporting, register a
 * shutdown hook, and create the configured number of Temporal workers.
 *
 * @throws ContainerHealthCheckException declared for compatibility with callers
 * @throws RuntimeException if any worker fails to initiate
 */
public void start()
    throws ContainerHealthCheckException {
  logger.info("Calling start method in GobblinTemporalTaskRunner");
  logger.info(String.format("Starting in container %s", this.taskRunnerId));

  // Start metric reporting
  initMetricReporter();

  // Add a shutdown hook so the task scheduler gets properly shutdown
  addShutdownHook();

  try {
    for (int i = 0; i < this.numTemporalWorkers; i++) {
      workers.add(initiateWorker());
    }
  } catch (Exception e) {
    // Fix: log at ERROR with the full stack trace instead of INFO with string concatenation.
    logger.error("Failed to initiate Temporal workers", e);
    throw new RuntimeException(e);
  }
}
/**
 * Creates and starts a single {@link TemporalWorker}: connects to the configured Temporal
 * service/namespace, then reflectively constructs the configured worker class with
 * {@code (clusterConfig, client)}.
 *
 * @return the started worker
 * @throws Exception if connection, class resolution, or construction fails
 */
private TemporalWorker initiateWorker() throws Exception {
  logger.info("Starting Temporal Worker");

  String connectionUri = clusterConfig.getString(GobblinTemporalConfigurationKeys.TEMPORAL_CONNECTION_STRING);
  WorkflowServiceStubs service = TemporalWorkflowClientFactory.createServiceInstance(connectionUri);

  String namespace = ConfigUtils.getString(clusterConfig, GobblinTemporalConfigurationKeys.GOBBLIN_TEMPORAL_NAMESPACE,
      GobblinTemporalConfigurationKeys.DEFAULT_GOBBLIN_TEMPORAL_NAMESPACE);
  WorkflowClient client = TemporalWorkflowClientFactory.createClientInstance(service, namespace);

  String workerClassName = ConfigUtils.getString(clusterConfig,
      GobblinTemporalConfigurationKeys.WORKER_CLASS, GobblinTemporalConfigurationKeys.DEFAULT_WORKER_CLASS);
  // Fix: use Class.asSubclass for a checked conversion instead of an unchecked raw cast; a
  // misconfigured class name now fails fast with ClassCastException at resolution time.
  Class<? extends TemporalWorker> workerClass = Class.forName(workerClassName).asSubclass(TemporalWorker.class);
  TemporalWorker worker = GobblinConstructorUtils.invokeLongestConstructor(workerClass, clusterConfig, client);
  worker.start();
  logger.info("A new worker is started.");
  return worker;
}
/**
 * Starts container metric reporting when enabled; a reporting failure only propagates when
 * configured as fatal.
 */
private void initMetricReporter() {
  if (!this.containerMetrics.isPresent()) {
    // Metric reporting is optional; nothing to do when container metrics are disabled.
    return;
  }
  try {
    this.containerMetrics.get()
        .startMetricReportingWithFileSuffix(ConfigUtils.configToState(this.clusterConfig), this.taskRunnerId);
  } catch (MultiReporterException ex) {
    if (MetricReportUtils.shouldThrowException(logger, ex, this.isMetricReportingFailureFatal, this.isEventReportingFailureFatal)) {
      throw new RuntimeException(ex);
    }
  }
}
/**
 * Stops this task runner: halts metric reporting and shuts down every Temporal worker.
 * Idempotent — re-entrant or repeated calls return immediately.
 */
public synchronized void stop() {
  // Guard clauses: skip if a stop has already completed or is underway.
  if (this.isStopped) {
    logger.info("Gobblin Task runner is already stopped.");
    return;
  }
  if (this.stopInProgress) {
    logger.info("Gobblin Task runner stop already in progress.");
    return;
  }
  this.stopInProgress = true;

  logger.info("Stopping the Gobblin Task runner");

  // Stop metric reporting
  if (this.containerMetrics.isPresent()) {
    this.containerMetrics.get().stopMetricsReporting();
  }

  for (TemporalWorker worker : this.workers) {
    worker.shutdown();
  }
  logger.info("All services are stopped.");

  this.isStopped = true;
}
/**
 * Creates and returns a {@link List} of additional {@link Service}s that should be run in this
 * {@link GobblinTemporalTaskRunner}. Sub-classes that need additional {@link Service}s to run
 * should override this method.
 *
 * @return a {@link List} of additional {@link Service}s to run.
 */
protected List<Service> getServices() {
  List<Service> services = new ArrayList<>();
  boolean healthMetricsEnabled = ConfigUtils.getBoolean(this.clusterConfig,
      GobblinClusterConfigurationKeys.CONTAINER_HEALTH_METRICS_SERVICE_ENABLED,
      GobblinClusterConfigurationKeys.DEFAULT_CONTAINER_HEALTH_METRICS_SERVICE_ENABLED);
  if (healthMetricsEnabled) {
    services.add(new ContainerHealthMetricsService(clusterConfig));
  }
  return services;
}
// Exposed for unit tests to observe whether stop() has fully completed.
@VisibleForTesting
boolean isStopped() {
  return this.isStopped;
}
/** Registers a JVM shutdown hook so stop() runs (releasing workers and reporters) on exit. */
private void addShutdownHook() {
  Runtime.getRuntime().addShutdownHook(new Thread(() -> {
    logger.info("Running the shutdown hook");
    GobblinTemporalTaskRunner.this.stop();
  }));
}
/**
 * Builds the optional {@link ContainerMetrics} for this runner; absent when Gobblin metrics are
 * disabled in the cluster config.
 */
private Optional<ContainerMetrics> buildContainerMetrics() {
  Properties metricProps = ConfigUtils.configToProperties(this.clusterConfig);
  if (!GobblinMetrics.isEnabled(metricProps)) {
    return Optional.absent();
  }
  logger.info("Container metrics are enabled");
  return Optional.of(
      ContainerMetrics.get(ConfigUtils.configToState(clusterConfig), this.applicationName, this.taskRunnerId));
}
/**
 * Standard metrics for this runner. Not yet implemented; previously hard-coded to return null.
 * Fix: return an empty collection instead of null so callers can iterate safely.
 */
@Override
public Collection<StandardMetrics> getStandardMetricsCollection() {
  return java.util.Collections.emptyList();
}
/**
 * EventBus handler for {@link ContainerHealthCheckFailureEvent}: reports the failure as a
 * tracking event, flags this runner as unhealthy, and stops it.
 */
@Subscribe
public void handleContainerHealthCheckFailureEvent(ContainerHealthCheckFailureEvent event) {
  logger.error("Received {} from: {}", event.getClass().getSimpleName(), event.getClassName());

  logger.error("Submitting a ContainerHealthCheckFailureEvent..");
  submitEvent(event);

  logger.error("Stopping GobblinTaskRunner...");
  setHealthCheckFailed(true);
  stop();
}
/** Emits a GTE tracking event describing the given health-check failure, enriched with task metadata. */
private void submitEvent(ContainerHealthCheckFailureEvent event) {
  String eventName = event.getClass().getSimpleName();
  EventSubmitter eventSubmitter =
      new EventSubmitter.Builder(RootMetricContext.get(), getClass().getPackage().getName()).build();
  GobblinEventBuilder eventBuilder = new GobblinEventBuilder(eventName);
  State taskState = ConfigUtils.configToState(event.getConfig());
  //Add task metadata such as taskId, containerId, and workflowId if configured
  TaskEventMetadataGenerator metadataGenerator = TaskEventMetadataUtils.getTaskEventMetadataGenerator(taskState);
  eventBuilder.addAdditionalMetadata(metadataGenerator.getMetadata(taskState, eventName));
  eventBuilder.addAdditionalMetadata(event.getMetadata());
  eventSubmitter.submit(eventBuilder);
}
// TODO: the application id is hard-coded to "1" for now; see the matching note in
// GobblinTemporalClusterManager — the cluster manager should eventually assign it.
private static String getApplicationId() {
  return "1";
}
// Generates a unique (random UUID) identifier for this task-runner instance.
private static String getTaskRunnerId() {
  return UUID.randomUUID().toString();
}
/**
 * Builds the command-line options accepted by this runner: application name/id, Helix instance
 * name, and optional Helix instance tags. All options take an argument; none are required.
 */
public static Options buildOptions() {
  Options options = new Options();
  options.addOption(Option.builder("a").longOpt(GobblinClusterConfigurationKeys.APPLICATION_NAME_OPTION_NAME)
      .hasArg(true).desc("Application name").build());
  options.addOption(Option.builder("d").longOpt(GobblinClusterConfigurationKeys.APPLICATION_ID_OPTION_NAME)
      .hasArg(true).desc("Application id").build());
  options.addOption(Option.builder("i").longOpt(GobblinClusterConfigurationKeys.HELIX_INSTANCE_NAME_OPTION_NAME)
      .hasArg(true).desc("Helix instance name").build());
  options.addOption(Option.builder("t").longOpt(GobblinClusterConfigurationKeys.HELIX_INSTANCE_TAGS_OPTION_NAME)
      .hasArg(true).required(false).desc("Helix instance tags").build());
  return options;
}
/** Prints command-line usage help for this runner's supported options. */
public static void printUsage(Options options) {
  HelpFormatter formatter = new HelpFormatter();
  // Fix: show usage under this runner's name; it previously printed GobblinClusterManager's.
  formatter.printHelp(GobblinTemporalTaskRunner.class.getSimpleName(), options);
}
/**
 * CLI entry point: parses options, requires an application name, then constructs and starts a
 * {@link GobblinTemporalTaskRunner} with a hard-coded application id and a random runner id.
 */
public static void main(String[] args)
    throws Exception {
  Options options = buildOptions();
  try {
    CommandLine cmd = new DefaultParser().parse(options, args);
    if (!cmd.hasOption(GobblinClusterConfigurationKeys.APPLICATION_NAME_OPTION_NAME)) {
      // Application name is mandatory; show usage and exit.
      printUsage(options);
      System.exit(1);
    }

    logger.info(JvmUtils.getJvmInputArguments());

    String applicationName = cmd.getOptionValue(GobblinClusterConfigurationKeys.APPLICATION_NAME_OPTION_NAME);
    GobblinTemporalTaskRunner taskRunner = new GobblinTemporalTaskRunner(
        applicationName, getApplicationId(), getTaskRunnerId(), ConfigFactory.load(), Optional.<Path>absent());
    taskRunner.start();
  } catch (ParseException pe) {
    printUsage(options);
    System.exit(1);
  }
}
}
| 3,935 |
0 | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/cluster/TemporalWorker.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.temporal.cluster;
/** Marker interface for a temporal.io "worker", with capability to `start()` and `shutdown()` */
public interface TemporalWorker {
/** Starts the worker */
void start();
/** Shuts down the worker */
void shutdown();
}
| 3,936 |
0 | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/cluster/GobblinTemporalClusterManager.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.temporal.cluster;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.eventbus.EventBus;
import com.google.common.eventbus.Subscribe;
import com.google.common.util.concurrent.Service;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;
import lombok.AccessLevel;
import lombok.Getter;
import lombok.Setter;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.cluster.ContainerHealthMetricsService;
import org.apache.gobblin.cluster.GobblinClusterConfigurationKeys;
import org.apache.gobblin.cluster.GobblinClusterMetricTagNames;
import org.apache.gobblin.cluster.GobblinClusterUtils;
import org.apache.gobblin.cluster.GobblinTaskRunner;
import org.apache.gobblin.cluster.JobConfigurationManager;
import org.apache.gobblin.cluster.LeadershipChangeAwareComponent;
import org.apache.gobblin.cluster.event.ClusterManagerShutdownRequest;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.instrumented.StandardMetricsBridge;
import org.apache.gobblin.metrics.Tag;
import org.apache.gobblin.runtime.api.MutableJobCatalog;
import org.apache.gobblin.runtime.app.ApplicationException;
import org.apache.gobblin.runtime.app.ApplicationLauncher;
import org.apache.gobblin.runtime.app.ServiceBasedAppLauncher;
import org.apache.gobblin.scheduler.SchedulerService;
import org.apache.gobblin.temporal.joblauncher.GobblinTemporalJobScheduler;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.JvmUtils;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
/**
 * The central cluster manager for Gobblin Clusters.
 *
 * <p>Owns the {@link ServiceBasedAppLauncher} and the services it hosts (scheduler, job
 * configuration manager, optional job catalog and health-metrics service), and implements the
 * active/standby lifecycle via {@link LeadershipChangeAwareComponent}.
 */
@Alpha
@Slf4j
public class GobblinTemporalClusterManager implements ApplicationLauncher, StandardMetricsBridge, LeadershipChangeAwareComponent {

  private static final Logger LOGGER = LoggerFactory.getLogger(GobblinTemporalClusterManager.class);

  private StopStatus stopStatus = new StopStatus(false);

  protected ServiceBasedAppLauncher applicationLauncher;

  // An EventBus used for communications between services running in the ApplicationMaster
  @Getter(AccessLevel.PUBLIC)
  protected final EventBus eventBus = new EventBus(GobblinTemporalClusterManager.class.getSimpleName());

  protected final Path appWorkDir;

  @Getter
  protected final FileSystem fs;

  protected final String applicationId;

  // thread used to keep process up for an idle controller
  private Thread idleProcessThread;

  // set to true to stop the idle process thread
  private volatile boolean stopIdleProcessThread = false;

  private final boolean isStandaloneMode;

  // Null when no job config path is configured; see initializeAppLauncherAndServices().
  @Getter
  private MutableJobCatalog jobCatalog;

  @Getter
  private JobConfigurationManager jobConfigurationManager;

  @Getter
  private GobblinTemporalJobScheduler gobblinTemporalJobScheduler;

  @Getter
  private volatile boolean started = false;

  protected final String clusterName;

  @Getter
  protected final Config config;

  public GobblinTemporalClusterManager(String clusterName, String applicationId, Config sysConfig,
      Optional<Path> appWorkDirOptional) throws Exception {
    // Set system properties passed in via application config.
    // overrides such as sessionTimeout. In this case, the overrides specified
    GobblinClusterUtils.setSystemProperties(sysConfig);

    //Add dynamic config
    this.config = GobblinClusterUtils.addDynamicConfig(sysConfig);

    this.clusterName = clusterName;
    this.isStandaloneMode = ConfigUtils.getBoolean(this.config, GobblinClusterConfigurationKeys.STANDALONE_CLUSTER_MODE_KEY,
        GobblinClusterConfigurationKeys.DEFAULT_STANDALONE_CLUSTER_MODE);

    this.applicationId = applicationId;

    this.fs = GobblinClusterUtils.buildFileSystem(this.config, new Configuration());
    // Honor an explicitly supplied work dir; otherwise derive one from configuration.
    this.appWorkDir = appWorkDirOptional.isPresent() ? appWorkDirOptional.get()
        : GobblinClusterUtils.getAppWorkDirPathFromConfig(this.config, this.fs, clusterName, applicationId);
    LOGGER.info("Configured GobblinTemporalClusterManager work dir to: {}", this.appWorkDir);

    initializeAppLauncherAndServices();
  }

  /**
   * Create the service based application launcher and other associated services
   * @throws Exception
   */
  private void initializeAppLauncherAndServices() throws Exception {
    // Done to preserve backwards compatibility with the previously hard-coded timeout of 5 minutes
    Properties properties = ConfigUtils.configToProperties(this.config);
    // Fix: Properties.contains(Object) tests *values* (Hashtable semantics), so the default was
    // applied even when the property was already set; containsKey is the correct membership test.
    if (!properties.containsKey(ServiceBasedAppLauncher.APP_STOP_TIME_SECONDS)) {
      properties.setProperty(ServiceBasedAppLauncher.APP_STOP_TIME_SECONDS, Long.toString(300));
    }
    this.applicationLauncher = new ServiceBasedAppLauncher(properties, this.clusterName);

    // create a job catalog for keeping track of received jobs if a job config path is specified
    if (this.config.hasPath(GobblinClusterConfigurationKeys.GOBBLIN_CLUSTER_PREFIX
        + ConfigurationKeys.JOB_CONFIG_FILE_GENERAL_PATH_KEY)) {
      String jobCatalogClassName = ConfigUtils.getString(config, GobblinClusterConfigurationKeys.JOB_CATALOG_KEY,
          GobblinClusterConfigurationKeys.DEFAULT_JOB_CATALOG);

      this.jobCatalog =
          (MutableJobCatalog) GobblinConstructorUtils.invokeFirstConstructor(Class.forName(jobCatalogClassName),
              ImmutableList.of(config
                  .getConfig(StringUtils.removeEnd(GobblinClusterConfigurationKeys.GOBBLIN_CLUSTER_PREFIX, "."))
                  .withFallback(this.config)));
    } else {
      this.jobCatalog = null;
    }

    SchedulerService schedulerService = new SchedulerService(properties);
    this.applicationLauncher.addService(schedulerService);
    this.gobblinTemporalJobScheduler = buildGobblinTemporalJobScheduler(config, this.appWorkDir, getMetadataTags(clusterName, applicationId),
        schedulerService);
    this.applicationLauncher.addService(this.gobblinTemporalJobScheduler);
    this.jobConfigurationManager = buildJobConfigurationManager(config);
    this.applicationLauncher.addService(this.jobConfigurationManager);

    if (ConfigUtils.getBoolean(this.config, GobblinClusterConfigurationKeys.CONTAINER_HEALTH_METRICS_SERVICE_ENABLED,
        GobblinClusterConfigurationKeys.DEFAULT_CONTAINER_HEALTH_METRICS_SERVICE_ENABLED)) {
      this.applicationLauncher.addService(new ContainerHealthMetricsService(config));
    }
  }

  /**
   * Start any services required by the application launcher then start the application launcher
   */
  private void startAppLauncherAndServices() {
    // other services such as the job configuration manager have a dependency on the job catalog, so it has be be
    // started first
    if (this.jobCatalog instanceof Service) {
      ((Service) this.jobCatalog).startAsync().awaitRunning();
    }
    this.applicationLauncher.start();
  }

  /**
   * Stop the application launcher then any services that were started outside of the application launcher
   */
  private void stopAppLauncherAndServices() {
    try {
      this.applicationLauncher.stop();
    } catch (ApplicationException ae) {
      LOGGER.error("Error while stopping Gobblin Cluster application launcher", ae);
    }

    if (this.jobCatalog instanceof Service) {
      ((Service) this.jobCatalog).stopAsync().awaitTerminated();
    }
  }

  /**
   * Start the Gobblin Temporal Cluster Manager.
   */
  @Override
  public void start() {
    // temporal workflow
    LOGGER.info("Starting the Gobblin Temporal Cluster Manager");

    this.eventBus.register(this);
    startAppLauncherAndServices();
    this.started = true;
  }

  /**
   * Stop the Gobblin Cluster Manager. Idempotent: concurrent/repeated calls return immediately.
   */
  @Override
  public synchronized void stop() {
    if (this.stopStatus.isStopInProgress()) {
      return;
    }

    this.stopStatus.setStopInprogress(true);

    LOGGER.info("Stopping the Gobblin Cluster Manager");

    // NOTE(review): idleProcessThread is joined without setting stopIdleProcessThread; if the
    // thread were ever started this could block indefinitely — confirm intended lifecycle.
    if (this.idleProcessThread != null) {
      try {
        this.idleProcessThread.join();
      } catch (InterruptedException ie) {
        Thread.currentThread().interrupt();
      }
    }

    stopAppLauncherAndServices();
  }

  private GobblinTemporalJobScheduler buildGobblinTemporalJobScheduler(Config sysConfig, Path appWorkDir,
      List<? extends Tag<?>> metadataTags, SchedulerService schedulerService) throws Exception {
    return new GobblinTemporalJobScheduler(sysConfig,
        this.eventBus,
        appWorkDir,
        metadataTags,
        schedulerService);
  }

  /** Metric tags identifying this application (name and id). */
  private List<? extends Tag<?>> getMetadataTags(String applicationName, String applicationId) {
    return Tag.fromMap(
        new ImmutableMap.Builder<String, Object>().put(GobblinClusterMetricTagNames.APPLICATION_NAME, applicationName)
            .put(GobblinClusterMetricTagNames.APPLICATION_ID, applicationId).build());
  }

  /**
   * Build the {@link JobConfigurationManager} for the Application Master.
   */
  private JobConfigurationManager buildJobConfigurationManager(Config config) {
    try {
      List<Object> argumentList = (this.jobCatalog != null)? ImmutableList.of(this.eventBus, config, this.jobCatalog, this.fs) :
          ImmutableList.of(this.eventBus, config, this.fs);
      if (config.hasPath(GobblinClusterConfigurationKeys.JOB_CONFIGURATION_MANAGER_KEY)) {
        return (JobConfigurationManager) GobblinConstructorUtils.invokeLongestConstructor(Class.forName(
            config.getString(GobblinClusterConfigurationKeys.JOB_CONFIGURATION_MANAGER_KEY)), argumentList.toArray(new Object[argumentList.size()]));
      } else {
        return new JobConfigurationManager(this.eventBus, config);
      }
    } catch (ReflectiveOperationException e) {
      throw new RuntimeException(e);
    }
  }

  @SuppressWarnings("unused")
  @Subscribe
  public void handleApplicationMasterShutdownRequest(ClusterManagerShutdownRequest shutdownRequest) {
    stop();
  }

  @Override
  public void close() throws IOException {
    this.applicationLauncher.close();
  }

  @Override
  public Collection<StandardMetrics> getStandardMetricsCollection() {
    // Fix: use a parameterized ArrayList (was raw-typed) and guard against a null jobCatalog,
    // which is legal when no job config path is configured.
    List<StandardMetrics> list = new ArrayList<>();
    if (this.jobCatalog != null) {
      list.addAll(this.jobCatalog.getStandardMetricsCollection());
    }
    if (this.jobConfigurationManager != null) {
      list.addAll(this.jobConfigurationManager.getStandardMetricsCollection());
    }
    return list;
  }

  /**
   * comment lifted from {@link org.apache.gobblin.cluster.GobblinClusterManager}
   * TODO for now the cluster id is hardcoded to 1 both here and in the {@link GobblinTaskRunner}. In the future, the
   * cluster id should be created by the {@link GobblinTemporalClusterManager} and passed to each {@link GobblinTaskRunner}
   */
  private static String getApplicationId() {
    return "1";
  }

  private static Options buildOptions() {
    Options options = new Options();
    options.addOption("a", GobblinClusterConfigurationKeys.APPLICATION_NAME_OPTION_NAME, true, "Gobblin application name");
    options.addOption("s", GobblinClusterConfigurationKeys.STANDALONE_CLUSTER_MODE, true, "Standalone cluster mode");
    options.addOption("i", GobblinClusterConfigurationKeys.HELIX_INSTANCE_NAME_OPTION_NAME, true, "Helix instance name");
    return options;
  }

  private static void printUsage(Options options) {
    HelpFormatter formatter = new HelpFormatter();
    formatter.printHelp(GobblinTemporalClusterManager.class.getSimpleName(), options);
  }

  public static void main(String[] args) throws Exception {
    Options options = buildOptions();
    try {
      CommandLine cmd = new DefaultParser().parse(options, args);
      if (!cmd.hasOption(GobblinClusterConfigurationKeys.APPLICATION_NAME_OPTION_NAME)) {
        printUsage(options);
        System.exit(1);
      }

      boolean isStandaloneClusterManager = false;
      if (cmd.hasOption(GobblinClusterConfigurationKeys.STANDALONE_CLUSTER_MODE)) {
        isStandaloneClusterManager = Boolean.parseBoolean(cmd.getOptionValue(GobblinClusterConfigurationKeys.STANDALONE_CLUSTER_MODE, "false"));
      }

      LOGGER.info(JvmUtils.getJvmInputArguments());

      Config config = ConfigFactory.load();

      if (cmd.hasOption(GobblinClusterConfigurationKeys.HELIX_INSTANCE_NAME_OPTION_NAME)) {
        config = config.withValue(GobblinClusterConfigurationKeys.HELIX_INSTANCE_NAME_KEY,
            ConfigValueFactory.fromAnyRef(cmd.getOptionValue(
                GobblinClusterConfigurationKeys.HELIX_INSTANCE_NAME_OPTION_NAME)));
      }

      if (isStandaloneClusterManager) {
        config = config.withValue(GobblinClusterConfigurationKeys.STANDALONE_CLUSTER_MODE_KEY,
            ConfigValueFactory.fromAnyRef(true));
      }

      // Fix: the local variable previously shadowed the class name; renamed for clarity.
      try (GobblinTemporalClusterManager clusterManager = new GobblinTemporalClusterManager(
          cmd.getOptionValue(GobblinClusterConfigurationKeys.APPLICATION_NAME_OPTION_NAME), getApplicationId(),
          config, Optional.<Path>absent())) {
        clusterManager.start();
      }
    } catch (ParseException pe) {
      printUsage(options);
      System.exit(1);
    }
  }

  @Override
  public void becomeActive() {
    startAppLauncherAndServices();
  }

  @Override
  public void becomeStandby() {
    stopAppLauncherAndServices();
    try {
      // Rebuild the launcher/services so a later becomeActive() starts from a clean state.
      initializeAppLauncherAndServices();
    } catch (Exception e) {
      throw new RuntimeException("Exception reinitializing app launcher services ", e);
    }
  }

  /** Thread-safe flag tracking whether a stop() is underway. */
  static class StopStatus {
    @Getter
    @Setter
    AtomicBoolean isStopInProgress;

    public StopStatus(boolean inProgress) {
      isStopInProgress = new AtomicBoolean(inProgress);
    }

    public void setStopInprogress (boolean inProgress) {
      isStopInProgress.set(inProgress);
    }

    public boolean isStopInProgress () {
      return isStopInProgress.get();
    }
  }
}
| 3,937 |
0 | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/cluster/WorkerConfig.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.temporal.cluster;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
import lombok.extern.slf4j.Slf4j;
import com.typesafe.config.Config;
/**
 * Static holder to stash the {@link Config} used to construct each kind of
 * {@link org.apache.gobblin.temporal.cluster.TemporalWorker} (within the current JVM). Lookup may
 * be keyed by the {@link Class} of the worker itself or by any workflow/activity implementation
 * class that worker supplies, so those implementations can share the worker's Config.
 *
 * ATTENTION: for sanity, construct multiple instances of the same worker always with the same
 * {@link Config}. When this is violated, the `Config` given to the most-recently constructed
 * worker "wins".
 *
 * NOTE: sharing is predicated entirely on {@link Config}'s immutability. Storage is indexed by
 * fully-qualified class name (not the {@link Class}) to stay classloader-independent.
 */
@Slf4j
public class WorkerConfig {
  private static final ConcurrentHashMap<String, Config> configByFQClassName = new ConcurrentHashMap<>();

  // Static utility holder; never instantiated.
  private WorkerConfig() {}

  /** @return whether initialized now (vs. being previously known) */
  public static boolean forWorker(Class<? extends TemporalWorker> workerClass, Config config) {
    return storeAs(workerClass.getName(), config);
  }

  /** @return whether initialized now (vs. being previously known) */
  public static boolean withImpl(Class<?> workflowOrActivityImplClass, Config config) {
    return storeAs(workflowOrActivityImplClass.getName(), config);
  }

  /** @return the Config registered for the given worker class, if any */
  public static Optional<Config> ofWorker(Class<? extends TemporalWorker> workerClass) {
    return lookup(workerClass.getName());
  }

  /** @return the Config registered for the given workflow/activity implementation class, if any */
  public static Optional<Config> ofImpl(Class<?> workflowOrActivityImplClass) {
    return lookup(workflowOrActivityImplClass.getName());
  }

  /** Convenience overload of {@link #ofImpl(Class)} taking an instance. */
  public static Optional<Config> of(Object workflowOrActivityImpl) {
    return ofImpl(workflowOrActivityImpl.getClass());
  }

  private static Optional<Config> lookup(String className) {
    return Optional.ofNullable(configByFQClassName.get(className));
  }

  private static boolean storeAs(String className, Config config) {
    // Most-recent registration wins; report whether this name was previously unknown.
    Config previous = configByFQClassName.put(className, config);
    boolean isNew = (previous == null);
    log.info("storing config of {} values as '{}'{}", config.entrySet().size(), className, isNew ? " (new)" : "");
    return isNew;
  }
}
| 3,938 |
0 | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/joblauncher/GobblinTemporalJobSchedulerMetrics.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.temporal.joblauncher;
import com.google.common.base.Optional;
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.instrumented.StandardMetricsBridge;
import org.apache.gobblin.metrics.ContextAwareTimer;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.runtime.api.JobExecutionLauncher;
/**
 * {@link StandardMetricsBridge.StandardMetrics} for the Temporal job scheduler: gauges for
 * schedule-cancellation progress, sliding-window timers for scheduling/launch latency, and
 * gauges over the job executor's thread pool.
 */
public class GobblinTemporalJobSchedulerMetrics extends StandardMetricsBridge.StandardMetrics {
  public static final String SCHEDULE_CANCELLATION_START = "scheduleCancellationStart";
  public static final String SCHEDULE_CANCELLATION_END = "scheduleCancellationEnd";
  public static final String TIMER_BEFORE_JOB_SCHEDULING = "timerBeforeJobScheduling";
  public static final String TIMER_BEFORE_JOB_LAUNCHING = "timerBeforeJobLaunching";
  // NOTE(review): "Betwen" typo preserved — it is the externally visible metric name.
  public static final String TIMER_BETWEEN_JOB_SCHEDULING_AND_LAUNCHING = "timerBetwenJobSchedulingAndLaunching";

  // Counts of cancellation attempts started vs. completed, exposed as gauges below.
  final AtomicLong numCancellationStart;
  final AtomicLong numCancellationComplete;
  final ContextAwareTimer timeBeforeJobScheduling;
  final ContextAwareTimer timeBeforeJobLaunching;
  final ContextAwareTimer timeBetwenJobSchedulingAndLaunching;
  final ThreadPoolExecutor threadPoolExecutor;

  /**
   * @param jobExecutor executor running scheduled jobs; assumed to be a ThreadPoolExecutor
   *                    (cast below) — TODO confirm at call sites
   * @param metricContext context to register all gauges/timers against
   * @param windowSizeInMin sliding-window size (minutes) for the latency timers
   */
  public GobblinTemporalJobSchedulerMetrics(final ExecutorService jobExecutor, final MetricContext metricContext, int windowSizeInMin) {
    this.timeBeforeJobScheduling = metricContext.contextAwareTimer(TIMER_BEFORE_JOB_SCHEDULING,
        windowSizeInMin, TimeUnit.MINUTES);
    this.timeBeforeJobLaunching = metricContext.contextAwareTimer(TIMER_BEFORE_JOB_LAUNCHING,
        windowSizeInMin, TimeUnit.MINUTES);
    this.timeBetwenJobSchedulingAndLaunching = metricContext.contextAwareTimer(TIMER_BETWEEN_JOB_SCHEDULING_AND_LAUNCHING,
        windowSizeInMin, TimeUnit.MINUTES);

    this.numCancellationStart = new AtomicLong(0);
    this.numCancellationComplete = new AtomicLong(0);

    this.contextAwareMetrics.add(metricContext.newContextAwareGauge(SCHEDULE_CANCELLATION_START, ()->this.numCancellationStart.get()));
    this.contextAwareMetrics.add(metricContext.newContextAwareGauge(SCHEDULE_CANCELLATION_END, ()->this.numCancellationComplete.get()));
    this.contextAwareMetrics.add(timeBeforeJobScheduling);
    this.contextAwareMetrics.add(timeBeforeJobLaunching);
    this.contextAwareMetrics.add(timeBetwenJobSchedulingAndLaunching);
    this.threadPoolExecutor = (ThreadPoolExecutor) jobExecutor;

    // executor metrics
    this.contextAwareMetrics.add(metricContext.newContextAwareGauge(JobExecutionLauncher.StandardMetrics.EXECUTOR_ACTIVE_COUNT, ()->this.threadPoolExecutor.getActiveCount()));
    this.contextAwareMetrics.add(metricContext.newContextAwareGauge(JobExecutionLauncher.StandardMetrics.EXECUTOR_MAX_POOL_SIZE, ()->this.threadPoolExecutor.getMaximumPoolSize()));
    this.contextAwareMetrics.add(metricContext.newContextAwareGauge(JobExecutionLauncher.StandardMetrics.EXECUTOR_POOL_SIZE, ()->this.threadPoolExecutor.getPoolSize()));
    this.contextAwareMetrics.add(metricContext.newContextAwareGauge(JobExecutionLauncher.StandardMetrics.EXECUTOR_CORE_POOL_SIZE, ()->this.threadPoolExecutor.getCorePoolSize()));
    this.contextAwareMetrics.add(metricContext.newContextAwareGauge(JobExecutionLauncher.StandardMetrics.EXECUTOR_QUEUE_SIZE, ()->this.threadPoolExecutor.getQueue().size()));
  }

  // Records elapsed time since the FLOW_EXECUTION_ID_KEY timestamp (treated as creation time;
  // defaults to 0 — i.e. epoch — when absent).
  void updateTimeBeforeJobScheduling (Properties jobProps) {
    long jobCreationTime = Long.parseLong(jobProps.getProperty(ConfigurationKeys.FLOW_EXECUTION_ID_KEY, "0"));
    Instrumented.updateTimer(Optional.of(timeBeforeJobScheduling),
        System.currentTimeMillis() - jobCreationTime,
        TimeUnit.MILLISECONDS);
  }

  // Same timestamp convention as updateTimeBeforeJobScheduling, but for the launch timer.
  void updateTimeBeforeJobLaunching (Properties jobProps) {
    long jobCreationTime = Long.parseLong(jobProps.getProperty(ConfigurationKeys.FLOW_EXECUTION_ID_KEY, "0"));
    Instrumented.updateTimer(Optional.of(timeBeforeJobLaunching),
        System.currentTimeMillis() - jobCreationTime,
        TimeUnit.MILLISECONDS);
  }

  // Records the gap between when a job was scheduled and when it actually launched.
  void updateTimeBetweenJobSchedulingAndJobLaunching (long scheduledTime, long launchingTime) {
    Instrumented.updateTimer(Optional.of(timeBetwenJobSchedulingAndLaunching),
        launchingTime - scheduledTime,
        TimeUnit.MILLISECONDS);
  }
}
| 3,939 |
0 | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/joblauncher/GobblinTemporalPlanningJobLauncherMetrics.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.temporal.joblauncher;
import java.util.concurrent.TimeUnit;
import org.apache.gobblin.cluster.HelixJobsMapping;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.instrumented.StandardMetricsBridge;
import org.apache.gobblin.metrics.ContextAwareMeter;
import org.apache.gobblin.metrics.ContextAwareTimer;
import org.apache.gobblin.metrics.MetricContext;
/**
 * Metrics for planning jobs handled by the Temporal job launcher: sliding-window
 * timers for completed and failed planning jobs, plus a meter for skipped ones.
 */
public class GobblinTemporalPlanningJobLauncherMetrics extends StandardMetricsBridge.StandardMetrics {
  private final String metricsName;
  public static final String TIMER_FOR_COMPLETED_PLANNING_JOBS = "timeForCompletedPlanningJobs";
  public static final String TIMER_FOR_FAILED_PLANNING_JOBS = "timeForFailedPlanningJobs";
  public static final String METER_FOR_SKIPPED_PLANNING_JOBS = "skippedPlanningJobs";
  final ContextAwareTimer timeForCompletedPlanningJobs;
  final ContextAwareTimer timeForFailedPlanningJobs;
  final ContextAwareMeter skippedPlanningJobs;

  /**
   * @param metricsName      name reported by {@link #getName()}
   * @param metricContext    context used to create the timers and meter
   * @param windowSizeInMin  sliding-window size of the timers, in minutes
   * @param jobsMapping      unused by this class; retained so the constructor
   *                         signature stays compatible with existing callers
   */
  public GobblinTemporalPlanningJobLauncherMetrics(String metricsName,
                                                   final MetricContext metricContext,
                                                   int windowSizeInMin,
                                                   HelixJobsMapping jobsMapping) {
    this.metricsName = metricsName;
    this.timeForCompletedPlanningJobs = metricContext.contextAwareTimer(TIMER_FOR_COMPLETED_PLANNING_JOBS, windowSizeInMin, TimeUnit.MINUTES);
    this.timeForFailedPlanningJobs = metricContext.contextAwareTimer(TIMER_FOR_FAILED_PLANNING_JOBS, windowSizeInMin, TimeUnit.MINUTES);
    this.skippedPlanningJobs = metricContext.contextAwareMeter(METER_FOR_SKIPPED_PLANNING_JOBS);
    this.contextAwareMetrics.add(timeForCompletedPlanningJobs);
    this.contextAwareMetrics.add(timeForFailedPlanningJobs);
    // Fix: the skipped-planning-jobs meter was created but never registered in
    // contextAwareMetrics, so it was silently excluded from metric reporting.
    this.contextAwareMetrics.add(skippedPlanningJobs);
  }

  /** Records the elapsed time since {@code startTime} (epoch millis) for a completed planning job. */
  public void updateTimeForCompletedPlanningJobs(long startTime) {
    Instrumented.updateTimer(
        com.google.common.base.Optional.of(this.timeForCompletedPlanningJobs),
        System.currentTimeMillis() - startTime, TimeUnit.MILLISECONDS);
  }

  /** Records the elapsed time since {@code startTime} (epoch millis) for a failed planning job. */
  public void updateTimeForFailedPlanningJobs(long startTime) {
    Instrumented.updateTimer(
        com.google.common.base.Optional.of(this.timeForFailedPlanningJobs),
        System.currentTimeMillis() - startTime, TimeUnit.MILLISECONDS);
  }

  @Override
  public String getName() {
    return this.metricsName;
  }
}
| 3,940 |
0 | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/joblauncher/GobblinTemporalJobLauncher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.temporal.joblauncher;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import io.temporal.client.WorkflowClient;
import io.temporal.serviceclient.WorkflowServiceStubs;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.cluster.GobblinClusterConfigurationKeys;
import org.apache.gobblin.metrics.Tag;
import org.apache.gobblin.runtime.JobLauncher;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.temporal.cluster.GobblinTemporalTaskRunner;
import static org.apache.gobblin.temporal.GobblinTemporalConfigurationKeys.*;
import static org.apache.gobblin.temporal.workflows.client.TemporalWorkflowClientFactory.createClientInstance;
import static org.apache.gobblin.temporal.workflows.client.TemporalWorkflowClientFactory.createServiceInstance;
/**
* An implementation of {@link JobLauncher} that launches a Gobblin job using the Temporal task framework.
*
* <p>
* Each {@link WorkUnit} of the job is persisted to the {@link FileSystem} of choice and the path to the file
* storing the serialized {@link WorkUnit} is passed to the Temporal task running the {@link WorkUnit} as a
* user-defined property {@link GobblinClusterConfigurationKeys#WORK_UNIT_FILE_PATH}. Upon startup, the gobblin
* task reads the property for the file path and de-serializes the {@link WorkUnit} from the file.
* </p>
*
* <p>
* This class is instantiated by the {@link GobblinTemporalJobScheduler} on every job submission to launch the Gobblin job.
* The actual task execution happens in the {@link GobblinTemporalTaskRunner}, usually in a different process.
* </p>
*/
@Alpha
@Slf4j
public abstract class GobblinTemporalJobLauncher extends GobblinJobLauncher {
  // Temporal client stack, built in the constructor: raw service stubs, a
  // namespaced workflow client, and the task queue name workflows are sent to.
  protected WorkflowServiceStubs workflowServiceStubs;
  protected WorkflowClient client;
  protected String queueName;

  public GobblinTemporalJobLauncher(Properties jobProps, Path appWorkDir,
      List<? extends Tag<?>> metadataTags, ConcurrentHashMap<String, Boolean> runningMap)
      throws Exception {
    super(jobProps, appWorkDir, metadataTags, runningMap);
    log.debug("GobblinTemporalJobLauncher: jobProps {}, appWorkDir {}", jobProps, appWorkDir);
    // Connect to the Temporal service, then bind a client to the configured
    // (or default) namespace and resolve the task queue to submit to.
    String temporalConnectionUri = jobProps.getProperty(TEMPORAL_CONNECTION_STRING);
    this.workflowServiceStubs = createServiceInstance(temporalConnectionUri);
    String temporalNamespace = jobProps.getProperty(GOBBLIN_TEMPORAL_NAMESPACE, DEFAULT_GOBBLIN_TEMPORAL_NAMESPACE);
    this.client = createClientInstance(this.workflowServiceStubs, temporalNamespace);
    this.queueName = jobProps.getProperty(GOBBLIN_TEMPORAL_TASK_QUEUE, DEFAULT_GOBBLIN_TEMPORAL_TASK_QUEUE);
    // Must run after the client is ready so cancellation has something to act on.
    startCancellationExecutor();
  }

  /**
   * Submit a job to run.
   */
  @Override
  abstract protected void submitJob(List<WorkUnit> workUnits) throws Exception;

  @Override
  protected void executeCancellation() {
    // NOTE(review): currently only logs; no workflow is actually cancelled here.
    log.info("Cancel temporal workflow");
  }

  @Override
  protected void removeTasksFromCurrentJob(List<String> workUnitIdsToRemove) {
    // NOTE(review): no-op beyond logging; task removal is not implemented yet.
    log.info("Temporal removeTasksFromCurrentJob");
  }

  protected void addTasksToCurrentJob(List<WorkUnit> workUnitsToAdd) {
    // NOTE(review): no-op beyond logging; task addition is not implemented yet.
    log.info("Temporal addTasksToCurrentJob");
  }
}
| 3,941 |
0 | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/joblauncher/GobblinTemporalJobLauncherMetrics.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.temporal.joblauncher;
import java.util.concurrent.TimeUnit;
import org.apache.gobblin.instrumented.StandardMetricsBridge;
import org.apache.gobblin.metrics.ContextAwareMeter;
import org.apache.gobblin.metrics.ContextAwareTimer;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.runtime.api.JobExecutionLauncher;
/**
* Metrics that relates to jobs launched by {@link GobblinTemporalJobLauncher}.
*/
public class GobblinTemporalJobLauncherMetrics extends StandardMetricsBridge.StandardMetrics {
  private final String metricsName;
  // Lifecycle meters: launched/completed are marked for every job; committed and
  // failed partition the completed jobs; cancelled is tracked separately.
  final ContextAwareMeter numJobsLaunched;
  final ContextAwareMeter numJobsCompleted;
  final ContextAwareMeter numJobsCommitted;
  final ContextAwareMeter numJobsFailed;
  final ContextAwareMeter numJobsCancelled;
  final ContextAwareTimer timeForCompletedJobs;
  final ContextAwareTimer timeForFailedJobs;
  final ContextAwareTimer timeForCommittedJobs;

  public GobblinTemporalJobLauncherMetrics(String metricsName, final MetricContext metricContext, int windowSizeInMin) {
    this.metricsName = metricsName;
    // Create every meter and timer first, then register them all, preserving
    // the historical registration order.
    this.numJobsLaunched = metricContext.contextAwareMeter(JobExecutionLauncher.StandardMetrics.NUM_JOBS_LAUNCHED);
    this.numJobsCompleted = metricContext.contextAwareMeter(JobExecutionLauncher.StandardMetrics.NUM_JOBS_COMPLETED);
    this.numJobsCommitted = metricContext.contextAwareMeter(JobExecutionLauncher.StandardMetrics.NUM_JOBS_COMMITTED);
    this.numJobsFailed = metricContext.contextAwareMeter(JobExecutionLauncher.StandardMetrics.NUM_JOBS_FAILED);
    this.numJobsCancelled = metricContext.contextAwareMeter(JobExecutionLauncher.StandardMetrics.NUM_JOBS_CANCELLED);
    this.timeForCompletedJobs = metricContext.contextAwareTimer(JobExecutionLauncher.StandardMetrics.TIMER_FOR_COMPLETED_JOBS, windowSizeInMin, TimeUnit.MINUTES);
    this.timeForFailedJobs = metricContext.contextAwareTimer(JobExecutionLauncher.StandardMetrics.TIMER_FOR_FAILED_JOBS, windowSizeInMin, TimeUnit.MINUTES);
    this.timeForCommittedJobs = metricContext.contextAwareTimer(JobExecutionLauncher.StandardMetrics.TIMER_FOR_COMMITTED_JOBS, windowSizeInMin, TimeUnit.MINUTES);

    this.contextAwareMetrics.add(this.numJobsLaunched);
    this.contextAwareMetrics.add(this.numJobsCompleted);
    this.contextAwareMetrics.add(this.numJobsCommitted);
    this.contextAwareMetrics.add(this.numJobsFailed);
    this.contextAwareMetrics.add(this.numJobsCancelled);
    // Running jobs are derived rather than counted: launched minus completed.
    this.contextAwareMetrics.add(metricContext.newContextAwareGauge(JobExecutionLauncher.StandardMetrics.NUM_JOBS_RUNNING,
        () -> (int) (this.numJobsLaunched.getCount() - this.numJobsCompleted.getCount())));
    this.contextAwareMetrics.add(this.timeForCommittedJobs);
    this.contextAwareMetrics.add(this.timeForCompletedJobs);
    this.contextAwareMetrics.add(this.timeForFailedJobs);
  }

  @Override
  public String getName() {
    return this.metricsName;
  }
}
| 3,942 |
0 | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/joblauncher/GobblinTemporalJobScheduler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.temporal.joblauncher;
import java.util.Collection;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Maps;
import com.google.common.eventbus.EventBus;
import com.google.common.eventbus.Subscribe;
import com.typesafe.config.Config;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.cluster.GobblinClusterConfigurationKeys;
import org.apache.gobblin.cluster.GobblinHelixJob;
import org.apache.gobblin.cluster.HelixJobsMapping;
import org.apache.gobblin.cluster.event.CancelJobConfigArrivalEvent;
import org.apache.gobblin.cluster.event.DeleteJobConfigArrivalEvent;
import org.apache.gobblin.cluster.event.NewJobConfigArrivalEvent;
import org.apache.gobblin.cluster.event.UpdateJobConfigArrivalEvent;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.instrumented.StandardMetricsBridge;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.Tag;
import org.apache.gobblin.runtime.JobException;
import org.apache.gobblin.runtime.JobLauncher;
import org.apache.gobblin.runtime.listeners.JobListener;
import org.apache.gobblin.scheduler.JobScheduler;
import org.apache.gobblin.scheduler.SchedulerService;
import org.apache.gobblin.temporal.GobblinTemporalConfigurationKeys;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.PathUtils;
import org.apache.gobblin.util.PropertiesUtils;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
/**
* An extension to {@link JobScheduler} that schedules and runs
* Gobblin jobs on Temporal.
*
* <p> If the job should be launched from the scheduler node,
* {@link GobblinTemporalJobLauncher} is invoked.
* TODO(yiyang): this file should be cleaned up with HelixJobScheduler.
*/
@Alpha
public class GobblinTemporalJobScheduler extends JobScheduler implements StandardMetricsBridge {
  private static final Logger LOGGER = LoggerFactory.getLogger(GobblinTemporalJobScheduler.class);
  private static final String COMMON_JOB_PROPS = "gobblin.common.job.props";
  // Properties merged into every job's config (sourced from sysConfig under COMMON_JOB_PROPS).
  private final Properties commonJobProperties;
  // Bus this scheduler registers on in startUp(); delivers the *JobConfigArrivalEvent callbacks below.
  private final EventBus eventBus;
  private final Path appWorkDir;
  private final List<? extends Tag<?>> metadataTags;
  // job name -> currently-running flag; shared with launchers via buildJobLauncher().
  private final ConcurrentHashMap<String, Boolean> jobRunningMap;
  private final MetricContext metricContext;
  final GobblinTemporalJobSchedulerMetrics jobSchedulerMetrics;
  final GobblinTemporalJobLauncherMetrics launcherMetrics;
  final GobblinTemporalPlanningJobLauncherMetrics planningJobLauncherMetrics;
  // job URI -> state mapping persisted under the app work dir (Helix-era store reused here).
  final HelixJobsMapping jobsMapping;
  // Set once startUp() finishes; scheduleJob() busy-waits on it so no job is
  // scheduled before the scheduler services are fully up.
  private boolean startServicesCompleted;

  public GobblinTemporalJobScheduler(Config sysConfig,
                                     EventBus eventBus,
                                     Path appWorkDir, List<? extends Tag<?>> metadataTags,
                                     SchedulerService schedulerService) throws Exception {
    super(ConfigUtils.configToProperties(sysConfig), schedulerService);
    this.commonJobProperties = ConfigUtils.configToProperties(ConfigUtils.getConfigOrEmpty(sysConfig, COMMON_JOB_PROPS));
    this.eventBus = eventBus;
    this.jobRunningMap = new ConcurrentHashMap<>();
    this.appWorkDir = appWorkDir;
    this.metadataTags = metadataTags;
    this.metricContext = Instrumented.getMetricContext(new org.apache.gobblin.configuration.State(properties), this.getClass());
    // All timers below share one sliding-window size, read from sysConfig.
    int metricsWindowSizeInMin = ConfigUtils.getInt(sysConfig,
        ConfigurationKeys.METRIC_TIMER_WINDOW_SIZE_IN_MINUTES,
        ConfigurationKeys.DEFAULT_METRIC_TIMER_WINDOW_SIZE_IN_MINUTES);
    this.launcherMetrics = new GobblinTemporalJobLauncherMetrics("launcherInScheduler",
        this.metricContext,
        metricsWindowSizeInMin);
    this.jobSchedulerMetrics = new GobblinTemporalJobSchedulerMetrics(this.jobExecutor,
        this.metricContext,
        metricsWindowSizeInMin);
    this.jobsMapping = new HelixJobsMapping(ConfigUtils.propertiesToConfig(properties),
        PathUtils.getRootPath(appWorkDir).toUri(),
        appWorkDir.toString());
    this.planningJobLauncherMetrics = new GobblinTemporalPlanningJobLauncherMetrics("planningLauncherInScheduler",
        this.metricContext,
        metricsWindowSizeInMin, this.jobsMapping);
    this.startServicesCompleted = false;
  }

  /** Exposes this scheduler's three metric groups to the StandardMetricsBridge reporter. */
  @Override
  public Collection<StandardMetrics> getStandardMetricsCollection() {
    return ImmutableList.of(this.launcherMetrics,
        this.jobSchedulerMetrics,
        this.planningJobLauncherMetrics);
  }

  @Override
  protected void startUp() throws Exception {
    // Register for job-config arrival events before the base scheduler starts,
    // then unblock any scheduleJob() callers waiting on startServicesCompleted.
    this.eventBus.register(this);
    super.startUp();
    this.startServicesCompleted = true;
  }

  @Override
  public void scheduleJob(Properties jobProps, JobListener jobListener) throws JobException {
    try {
      // Busy-wait (1s polls) until startUp() has completed; scheduling earlier
      // would race with service initialization.
      while (!startServicesCompleted) {
        LOGGER.info("{} service is not fully up, waiting here...", this.getClass().getName());
        Thread.sleep(1000);
      }
      scheduleJob(jobProps,
          jobListener,
          Maps.newHashMap(),
          GobblinHelixJob.class);
    } catch (Exception e) {
      throw new JobException("Failed to schedule job " + jobProps.getProperty(ConfigurationKeys.JOB_NAME_KEY), e);
    }
  }

  @Override
  protected void startServices() throws Exception {
    // Optionally purge every persisted job mapping at startup (controlled by
    // CLEAN_ALL_DIST_JOBS) so stale distributed-job state is not resumed.
    boolean cleanAllDistJobs = PropertiesUtils.getPropAsBoolean(this.properties,
        GobblinClusterConfigurationKeys.CLEAN_ALL_DIST_JOBS,
        String.valueOf(GobblinClusterConfigurationKeys.DEFAULT_CLEAN_ALL_DIST_JOBS));
    if (cleanAllDistJobs) {
      for (org.apache.gobblin.configuration.State state : this.jobsMapping.getAllStates()) {
        String jobUri = state.getId();
        LOGGER.info("Delete mapping for job " + jobUri);
        this.jobsMapping.deleteMapping(jobUri);
      }
    }
  }

  // Intentionally a no-op: actual job execution is delegated to the launcher
  // built in buildJobLauncher(), not run by the scheduler itself.
  @Override
  public void runJob(Properties jobProps, JobListener jobListener) throws JobException {
  }

  /**
   * Builds a {@link GobblinTemporalJobLauncher} for one job: merges scheduler-level
   * properties with the job's own (job props win), then reflectively instantiates
   * the configured launcher class.
   */
  @Override
  public GobblinTemporalJobLauncher buildJobLauncher(Properties jobProps)
      throws Exception {
    Properties combinedProps = new Properties();
    combinedProps.putAll(properties);
    combinedProps.putAll(jobProps);
    Class<? extends GobblinTemporalJobLauncher> jobLauncherClass =
        (Class<? extends GobblinTemporalJobLauncher>) Class.forName(combinedProps.getProperty(
            GobblinTemporalConfigurationKeys.GOBBLIN_TEMPORAL_JOB_LAUNCHER_CLASS, GobblinTemporalConfigurationKeys.DEFAULT_GOBBLIN_TEMPORAL_JOB_LAUNCHER_CLASS));
    return GobblinConstructorUtils.invokeLongestConstructor(jobLauncherClass, combinedProps,
        this.appWorkDir,
        this.metadataTags,
        this.jobRunningMap);
  }

  /**
   * EventBus callback for a newly arrived job config: schedules it if it carries a
   * schedule, otherwise launches it immediately. Exceptions are logged, not rethrown.
   */
  @Subscribe
  public void handleNewJobConfigArrival(NewJobConfigArrivalEvent newJobArrival) {
    String jobUri = newJobArrival.getJobName();
    LOGGER.info("Received new job configuration of job " + jobUri);
    try {
      Properties jobProps = new Properties();
      jobProps.putAll(this.commonJobProperties);
      jobProps.putAll(newJobArrival.getJobConfig());
      // set uri so that we can delete this job later
      jobProps.setProperty(GobblinClusterConfigurationKeys.JOB_SPEC_URI, jobUri);
      this.jobSchedulerMetrics.updateTimeBeforeJobScheduling(jobProps);
      if (jobProps.containsKey(ConfigurationKeys.JOB_SCHEDULE_KEY)) {
        LOGGER.info("Scheduling job " + jobUri);
        scheduleJob(jobProps,
            new GobblinTemporalJobLauncherListener(this.launcherMetrics));
      } else {
        LOGGER.info("No job schedule found, so running job " + jobUri);
        GobblinTemporalJobLauncherListener listener = new GobblinTemporalJobLauncherListener(this.launcherMetrics);
        // NOTE(review): launches with newJobArrival.getJobConfig() rather than the
        // merged jobProps built above — confirm whether common props / JOB_SPEC_URI
        // are intentionally dropped on the immediate-run path.
        JobLauncher launcher = buildJobLauncher(newJobArrival.getJobConfig());
        launcher.launchJob(listener);
      }
    } catch (Exception je) {
      LOGGER.error("Failed to schedule or run job " + jobUri, je);
    }
  }

  /** Treats an update as delete-then-recreate; each half logs (not rethrows) its failures. */
  @Subscribe
  public void handleUpdateJobConfigArrival(UpdateJobConfigArrivalEvent updateJobArrival) {
    LOGGER.info("Received update for job configuration of job " + updateJobArrival.getJobName());
    try {
      handleDeleteJobConfigArrival(new DeleteJobConfigArrivalEvent(updateJobArrival.getJobName(),
          updateJobArrival.getJobConfig()));
    } catch (Exception je) {
      LOGGER.error("Failed to update job " + updateJobArrival.getJobName(), je);
    }
    try {
      handleNewJobConfigArrival(new NewJobConfigArrivalEvent(updateJobArrival.getJobName(),
          updateJobArrival.getJobConfig()));
    } catch (Exception je) {
      LOGGER.error("Failed to update job " + updateJobArrival.getJobName(), je);
    }
  }

  // Polls jobRunningMap once a second until the named job is no longer marked running.
  // NOTE(review): currently has no caller within this class — possibly dead code.
  private void waitForJobCompletion(String jobName) {
    while (this.jobRunningMap.getOrDefault(jobName, false)) {
      LOGGER.info("Waiting for job {} to stop...", jobName);
      try {
        Thread.sleep(1000);
      } catch (InterruptedException e) {
        LOGGER.warn("Interrupted exception encountered: ", e);
      }
    }
  }

  /** Unschedules the job named in the event; failure to unschedule is logged only. */
  @Subscribe
  public void handleDeleteJobConfigArrival(DeleteJobConfigArrivalEvent deleteJobArrival) throws InterruptedException {
    LOGGER.info("Received delete for job configuration of job " + deleteJobArrival.getJobName());
    try {
      unscheduleJob(deleteJobArrival.getJobName());
    } catch (JobException je) {
      LOGGER.error("Failed to unschedule job " + deleteJobArrival.getJobName());
    }
  }

  // Intentionally empty: cancellation of running Temporal jobs is not handled here yet.
  @Subscribe
  public void handleCancelJobConfigArrival(CancelJobConfigArrivalEvent cancelJobArrival)
      throws InterruptedException {
  }

  /**
   * This class is responsible for running non-scheduled jobs.
   */
  class NonScheduledJobRunner implements Runnable {
    private final Properties jobProps;
    private final GobblinTemporalJobLauncherListener jobListener;
    // Captured at construction so the scheduling-to-launch latency can be measured in run().
    private final Long creationTimeInMillis;

    public NonScheduledJobRunner(Properties jobProps,
        GobblinTemporalJobLauncherListener jobListener) {
      this.jobProps = jobProps;
      this.jobListener = jobListener;
      this.creationTimeInMillis = System.currentTimeMillis();
    }

    @Override
    public void run() {
      try {
        // Update latency metrics, then delegate to the (currently no-op) runJob().
        GobblinTemporalJobScheduler.this.jobSchedulerMetrics.updateTimeBeforeJobLaunching(this.jobProps);
        GobblinTemporalJobScheduler.this.jobSchedulerMetrics.updateTimeBetweenJobSchedulingAndJobLaunching(this.creationTimeInMillis, System.currentTimeMillis());
        GobblinTemporalJobScheduler.this.runJob(this.jobProps, this.jobListener);
      } catch (JobException je) {
        LOGGER.error("Failed to run job " + this.jobProps.getProperty(ConfigurationKeys.JOB_NAME_KEY), je);
      }
    }
  }
}
| 3,943 |
0 | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/joblauncher/GobblinTemporalJobLauncherListener.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.temporal.joblauncher;
import com.google.common.base.Optional;
import java.util.concurrent.TimeUnit;
import org.apache.gobblin.cluster.GobblinHelixJobLauncher;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.runtime.JobContext;
import org.apache.gobblin.runtime.JobState;
import org.apache.gobblin.runtime.listeners.AbstractJobListener;
import org.apache.gobblin.runtime.listeners.JobListener;
/**
* A job listener used when {@link GobblinHelixJobLauncher} launches a job.
* The {@link GobblinTemporalJobLauncherMetrics} will always be passed in because
* it will be be updated accordingly.
*/
class GobblinTemporalJobLauncherListener extends AbstractJobListener {
  private static final String JOB_START_TIME = "jobStartTime";
  private final GobblinTemporalJobLauncherMetrics jobLauncherMetrics;

  GobblinTemporalJobLauncherListener(GobblinTemporalJobLauncherMetrics jobLauncherMetrics) {
    this.jobLauncherMetrics = jobLauncherMetrics;
  }

  @Override
  public void onJobPrepare(JobContext jobContext)
      throws Exception {
    super.onJobPrepare(jobContext);
    // Stamp the start time (nanos) into job state so onJobCompletion can
    // measure elapsed time, then count the launch.
    jobContext.getJobState().setProp(JOB_START_TIME, Long.toString(System.nanoTime()));
    jobLauncherMetrics.numJobsLaunched.mark();
  }

  /**
   * From {@link org.apache.gobblin.runtime.AbstractJobLauncher#launchJob(JobListener)}, the final
   * job state should only be FAILED or COMMITTED. This means the completed jobs metrics covers
   * both failed jobs and committed jobs.
   */
  @Override
  public void onJobCompletion(JobContext jobContext)
      throws Exception {
    super.onJobCompletion(jobContext);
    long startTime = jobContext.getJobState().getPropAsLong(JOB_START_TIME);
    // Every completion (success or failure) counts toward the completed metrics...
    jobLauncherMetrics.numJobsCompleted.mark();
    Instrumented.updateTimer(Optional.of(jobLauncherMetrics.timeForCompletedJobs), System.nanoTime() - startTime, TimeUnit.NANOSECONDS);
    // ...and additionally toward exactly one of committed/failed.
    if (jobContext.getJobState().getState() != JobState.RunningState.FAILED) {
      jobLauncherMetrics.numJobsCommitted.mark();
      Instrumented.updateTimer(Optional.of(jobLauncherMetrics.timeForCommittedJobs), System.nanoTime() - startTime, TimeUnit.NANOSECONDS);
    } else {
      jobLauncherMetrics.numJobsFailed.mark();
      Instrumented.updateTimer(Optional.of(jobLauncherMetrics.timeForFailedJobs), System.nanoTime() - startTime, TimeUnit.NANOSECONDS);
    }
  }

  @Override
  public void onJobCancellation(JobContext jobContext)
      throws Exception {
    super.onJobCancellation(jobContext);
    jobLauncherMetrics.numJobsCancelled.mark();
  }
}
| 3,944 |
0 | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/joblauncher/GobblinJobLauncher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.temporal.joblauncher;
import com.google.common.annotations.VisibleForTesting;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigValueFactory;
import java.io.IOException;
import java.net.URI;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.ConcurrentHashMap;
import javax.annotation.Nullable;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.cluster.GobblinClusterConfigurationKeys;
import org.apache.gobblin.cluster.GobblinClusterUtils;
import org.apache.gobblin.cluster.HelixUtils;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.metrics.Tag;
import org.apache.gobblin.metrics.event.CountEventBuilder;
import org.apache.gobblin.metrics.event.JobEvent;
import org.apache.gobblin.metrics.event.TimingEvent;
import org.apache.gobblin.rest.LauncherTypeEnum;
import org.apache.gobblin.runtime.AbstractJobLauncher;
import org.apache.gobblin.runtime.JobException;
import org.apache.gobblin.runtime.JobLauncher;
import org.apache.gobblin.runtime.JobState;
import org.apache.gobblin.runtime.TaskStateCollectorService;
import org.apache.gobblin.runtime.listeners.JobListener;
import org.apache.gobblin.runtime.util.StateStores;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.ParallelRunner;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
/**
* An implementation of {@link JobLauncher} that launches a Gobblin job using the Temporal task framework.
* Most of this code is lifted from {@link org.apache.gobblin.cluster.GobblinHelixJobLauncher} and maybe in the future
* it may make sense to converge the code once Temporal on Gobblin is in a mature state.
*
* <p>
* Each {@link WorkUnit} of the job is persisted to the {@link FileSystem} of choice and the path to the file
* storing the serialized {@link WorkUnit} is passed to the Temporal task running the {@link WorkUnit} as a
* user-defined property {@link GobblinClusterConfigurationKeys#WORK_UNIT_FILE_PATH}. Upon startup, the gobblin
* task reads the property for the file path and de-serializes the {@link WorkUnit} from the file.
* </p>
*/
@Alpha
@Slf4j
public abstract class GobblinJobLauncher extends AbstractJobLauncher {
protected static final String WORK_UNIT_FILE_EXTENSION = ".wu";
protected final FileSystem fs;
protected final Path appWorkDir;
protected final Path inputWorkUnitDir;
protected final Path outputTaskStateDir;
// Number of ParallelRunner threads to be used for state serialization/deserialization
protected final int stateSerDeRunnerThreads;
protected final TaskStateCollectorService taskStateCollectorService;
protected final ConcurrentHashMap<String, Boolean> runningMap;
@Getter
protected final StateStores stateStores;
protected JobListener jobListener;
protected volatile boolean jobSubmitted = false;
  /**
   * Sets up the launcher's working directories, state stores, filesystem handle and the
   * task-state collector for one job.
   *
   * @param jobProps     properties of the job to launch (also supplies FS URI, runner threads)
   * @param appWorkDir   application work directory under which work units / task states live
   * @param metadataTags tags merged into the base event tags for emitted events
   * @param runningMap   shared job-name -> running flag map, used to serialize launches
   */
  public GobblinJobLauncher(Properties jobProps, Path appWorkDir,
      List<? extends Tag<?>> metadataTags, ConcurrentHashMap<String, Boolean> runningMap)
      throws Exception {
    super(jobProps, HelixUtils.initBaseEventTags(jobProps, metadataTags));
    log.debug("GobblinJobLauncher: jobProps {}, appWorkDir {}", jobProps, appWorkDir);
    this.runningMap = runningMap;
    this.appWorkDir = appWorkDir;
    this.inputWorkUnitDir = new Path(appWorkDir, GobblinClusterConfigurationKeys.INPUT_WORK_UNIT_DIR_NAME);
    // Task states are written per job id so concurrent jobs do not collide.
    this.outputTaskStateDir = new Path(this.appWorkDir,
        GobblinClusterConfigurationKeys.OUTPUT_TASK_STATE_DIR_NAME + Path.SEPARATOR + this.jobContext.getJobId());
    this.jobContext.getJobState().setJobLauncherType(LauncherTypeEnum.CLUSTER);
    this.stateSerDeRunnerThreads = Integer.parseInt(jobProps.getProperty(ParallelRunner.PARALLEL_RUNNER_THREADS_KEY,
        Integer.toString(ParallelRunner.DEFAULT_PARALLEL_RUNNER_THREADS)));
    // Rebuild the state-store FS URI from appWorkDir's scheme/host/port with a root ("/")
    // path, so state stores resolve against the filesystem root rather than the work dir.
    Config stateStoreJobConfig = ConfigUtils.propertiesToConfig(jobProps)
        .withValue(ConfigurationKeys.STATE_STORE_FS_URI_KEY, ConfigValueFactory.fromAnyRef(
            new URI(appWorkDir.toUri().getScheme(), null, appWorkDir.toUri().getHost(), appWorkDir.toUri().getPort(),
                "/", null, null).toString()));
    this.stateStores =
        new StateStores(stateStoreJobConfig, appWorkDir, GobblinClusterConfigurationKeys.OUTPUT_TASK_STATE_DIR_NAME,
            appWorkDir, GobblinClusterConfigurationKeys.INPUT_WORK_UNIT_DIR_NAME, appWorkDir,
            GobblinClusterConfigurationKeys.JOB_STATE_DIR_NAME);
    URI fsUri = URI.create(jobProps.getProperty(ConfigurationKeys.FS_URI_KEY, ConfigurationKeys.LOCAL_FS_URI));
    this.fs = FileSystem.get(fsUri, new Configuration());
    // Collects task states from outputTaskStateDir as tasks finish; started/stopped in runWorkUnits().
    this.taskStateCollectorService =
        new TaskStateCollectorService(jobProps, this.jobContext.getJobState(), this.eventBus, this.eventSubmitter,
            this.stateStores.getTaskStateStore(), this.outputTaskStateDir, this.getIssueRepository());
  }
  @Override
  public void close() throws IOException {
    // Best-effort cancellation of in-flight work; super.close() always runs,
    // even if cancellation throws.
    try {
      executeCancellation();
    } finally {
      super.close();
    }
  }
public String getJobId() {
return this.jobContext.getJobId();
}
  /**
   * Runs the given work units: emits a work-units-created count event, starts the
   * task-state collector, submits the job (unless cancellation was already requested),
   * waits for it to finish, and finally stops the collector and cleans the working dir.
   *
   * @param workUnits the work units to execute; each gets a creation-time property stamped
   */
  @Override
  protected void runWorkUnits(List<WorkUnit> workUnits) throws Exception {
    try {
      CountEventBuilder countEventBuilder = new CountEventBuilder(JobEvent.WORK_UNITS_CREATED, workUnits.size());
      this.eventSubmitter.submit(countEventBuilder);
      log.info("Emitting WorkUnitsCreated Count: " + countEventBuilder.getCount());
      long workUnitStartTime = System.currentTimeMillis();
      workUnits.forEach((k) -> k.setProp(ConfigurationKeys.WORK_UNIT_CREATION_TIME_IN_MILLIS, workUnitStartTime));
      // Start the output TaskState collector service
      this.taskStateCollectorService.startAsync().awaitRunning();
      TimingEvent jobSubmissionTimer =
          this.eventSubmitter.getTimingEvent(TimingEvent.RunJobTimings.HELIX_JOB_SUBMISSION);
      // Synchronize on cancellationRequest so a concurrent cancel cannot slip in
      // between the check and the submission.
      synchronized (this.cancellationRequest) {
        if (!this.cancellationRequested) {
          submitJob(workUnits);
          jobSubmissionTimer.stop();
          log.info(String.format("Submitted job %s", this.jobContext.getJobId()));
          this.jobSubmitted = true;
        } else {
          log.warn("Job {} not submitted as it was requested to be cancelled.", this.jobContext.getJobId());
        }
      }
      TimingEvent jobRunTimer = this.eventSubmitter.getTimingEvent(TimingEvent.RunJobTimings.HELIX_JOB_RUN);
      waitJob();
      jobRunTimer.stop();
      log.info(String.format("Job %s completed", this.jobContext.getJobId()));
    } finally {
      // The last iteration of output TaskState collecting will run when the collector service gets stopped
      this.taskStateCollectorService.stopAsync().awaitTerminated();
      cleanupWorkingDirectory();
    }
  }
  /** Template hook: subclasses submit the given {@link WorkUnit}s for execution (no-op by default). */
  protected void submitJob(List<WorkUnit> workUnits) throws Exception {
  }

  /** Template hook: subclasses block until the previously submitted job completes (no-op by default). */
  protected void waitJob() throws InterruptedException {
  }

  /** Template hook: subclasses cancel any in-flight job (no-op by default). */
  @Override
  protected void executeCancellation() {
  }
  /**
   * Launch the job, guarding against concurrent runs of the same job name via {@code runningMap}.
   * If another run is in flight, this is a no-op (logged). Exceptions thrown while launching are
   * deliberately swallowed here (see {@link #launchJobImpl(JobListener)} javadoc), except when the
   * running-map bookkeeping is also inconsistent, in which case they surface from the finally block.
   *
   * @param jobListener optional listener notified of job lifecycle events
   * @throws JobException per the {@link JobLauncher} contract (though launch errors are mostly swallowed)
   */
  public void launchJob(@Nullable JobListener jobListener) throws JobException {
    this.jobListener = jobListener;
    log.info("Launching Job");
    boolean isLaunched = false;
    this.runningMap.putIfAbsent(this.jobContext.getJobName(), false);
    Throwable errorInJobLaunching = null;
    try {
      // atomically flip false->true; failure means another run of this job name is already active
      if (this.runningMap.replace(this.jobContext.getJobName(), false, true)) {
        log.info("Job {} will be executed, add into running map.", this.jobContext.getJobId());
        isLaunched = true;
        launchJobImpl(jobListener);
      } else {
        log.warn("Job {} will not be executed because other jobs are still running.", this.jobContext.getJobId());
      }
      // TODO: Better error handling. The current impl swallows exceptions for jobs that were started by this method call.
      // One potential way to improve the error handling is to make this error swallowing configurable
    } catch (Throwable t) {
      errorInJobLaunching = t;
      if (isLaunched) {
        // Attempts to cancel workflow if an error occurs during launch
        cancelJob(jobListener);
      }
    } finally {
      if (isLaunched) {
        // flip true->false; failure indicates corrupted bookkeeping, so surface the most useful error
        if (this.runningMap.replace(this.jobContext.getJobName(), true, false)) {
          log.info("Job {} is done, remove from running map.", this.jobContext.getJobId());
        } else {
          throw errorInJobLaunching == null ? new IllegalStateException(
              "A launched job should have running state equal to true in the running map.")
              : new RuntimeException("Failure in launching job:", errorInJobLaunching);
        }
      }
    }
  }
  /**
   * This method looks silly at first glance but exists for a reason.
   *
   * The method {@link GobblinJobLauncher#launchJob(JobListener)} contains boiler plate for handling exceptions and
   * mutating the runningMap to communicate state back to the {@link GobblinJobScheduler}. The boiler plate swallows
   * exceptions when launching the job because many use cases require that 1 job failure should not affect other jobs by causing the
   * entire process to fail through an uncaught exception.
   *
   * This method is useful for unit testing edge cases where we expect {@link JobException}s during the underlying launch operation.
   * It would be nice to not swallow exceptions, but the implications of doing that will require careful refactoring since
   * the class {@link GobblinJobLauncher} and {@link GobblinJobScheduler} are shared for 2 quite different cases
   * between GaaS and streaming. GaaS typically requiring many short lifetime workflows (where a failure is tolerated) and
   * streaming requiring a small number of long running workflows (where failure to submit is unexpected and is not
   * tolerated)
   *
   * @throws JobException
   */
  @VisibleForTesting
  void launchJobImpl(@Nullable JobListener jobListener) throws JobException {
    // direct pass-through to the base implementation, exposed so tests can bypass the exception-swallowing wrapper
    super.launchJob(jobListener);
  }
  /**
   * Delete persisted {@link WorkUnit}s and {@link JobState} upon job completion.
   *
   * @throws IOException if any state-store or filesystem deletion fails
   */
  protected void cleanupWorkingDirectory() throws IOException {
    log.info("Deleting persisted work units for job " + this.jobContext.getJobId());
    stateStores.getWuStateStore().delete(this.jobContext.getJobId());
    // delete the directory that stores the task state files
    stateStores.getTaskStateStore().delete(outputTaskStateDir.getName());
    log.info("Deleting job state file for job " + this.jobContext.getJobId());
    if (this.stateStores.haveJobStateStore()) {
      this.stateStores.getJobStateStore().delete(this.jobContext.getJobId());
    } else {
      // no job state store: the job state was written as a single file directly under the app work dir
      Path jobStateFilePath =
          GobblinClusterUtils.getJobStateFilePath(false, this.appWorkDir, this.jobContext.getJobId());
      this.fs.delete(jobStateFilePath, false);
    }
  }
}
| 3,945 |
0 | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/util/nesting | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/util/nesting/work/SeqBackedWorkSpan.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.temporal.util.nesting.work;
import java.util.Iterator;
import java.util.List;
import lombok.NoArgsConstructor;
import lombok.NonNull;
import lombok.RequiredArgsConstructor;
/** Logical sub-sequence of `WORK_ITEM`s, backed for simplicity's sake by an in-memory collection */
@NoArgsConstructor
@RequiredArgsConstructor
public class SeqBackedWorkSpan<WORK_ITEM> implements Workload.WorkSpan<WORK_ITEM> {
  @NonNull
  private List<WORK_ITEM> elems;
  // CAUTION: despite the "warning: @NonNull is meaningless on a primitive @lombok.RequiredArgsConstructor"...
  // if removed, no two-arg ctor is generated, so syntax error on `new CollectionBackedTaskSpan(elems, startIndex)`
  @NonNull
  private int startingIndex;
  private transient Iterator<WORK_ITEM> statefulDelegatee = null;

  @Override
  public int getNumElems() {
    return this.elems.size();
  }

  /** Lazily binds the backing iterator on first invocation, then delegates. */
  @Override
  public boolean hasNext() {
    Iterator<WORK_ITEM> delegate = this.statefulDelegatee;
    if (delegate == null) {
      delegate = this.elems.iterator();
      this.statefulDelegatee = delegate;
    }
    return delegate.hasNext();
  }

  /** Delegates to the backing iterator; requires {@link #hasNext()} to have been called at least once. */
  @Override
  public WORK_ITEM next() {
    Iterator<WORK_ITEM> delegate = this.statefulDelegatee;
    if (delegate == null) {
      throw new IllegalStateException("first call `hasNext()`!");
    }
    return delegate.next();
  }

  @Override
  public String toString() {
    StringBuilder buf = new StringBuilder(getClassNickname());
    buf.append('(').append(this.startingIndex).append("... {+").append(getNumElems()).append("})");
    return buf.toString();
  }

  /** Short display name used by {@link #toString()} (stable across impl classes). */
  protected String getClassNickname() {
    return "WorkSpan";
  }
}
| 3,946 |
0 | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/util/nesting | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/util/nesting/work/WorkflowAddr.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.temporal.util.nesting.work;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.google.common.base.Joiner;
import com.google.common.collect.Lists;
import java.util.ArrayList;
import java.util.List;
import lombok.Getter;
import lombok.NoArgsConstructor;
import lombok.NonNull;
import lombok.RequiredArgsConstructor;
/** Hierarchical address for nesting workflows (0-based). */
@NoArgsConstructor // IMPORTANT: for jackson (de)serialization
@RequiredArgsConstructor
public class WorkflowAddr {
  public static final String SEP = ".";
  /** initial, top-level address */
  public static final WorkflowAddr ROOT = new WorkflowAddr(0);
  @Getter
  @NonNull // IMPORTANT: for jackson (de)serialization (which won't permit `final`)
  private List<Integer> segments;

  /** Convenience ctor for a single-segment (top-level) address. */
  public WorkflowAddr(final int firstLevelOnly) {
    this(Lists.newArrayList(firstLevelOnly));
  }

  /** @return 0-based depth */
  @JsonIgnore // (because no-arg method resembles 'java bean property')
  public int getDepth() {
    return segments.size() - 1;
  }

  /** @return a new address one level deeper, with `childLevel` appended (this instance is unchanged) */
  public WorkflowAddr createChild(int childLevel) {
    final List<Integer> childSegments = new ArrayList<>(segments.size() + 1);
    childSegments.addAll(segments);
    childSegments.add(childLevel);
    return new WorkflowAddr(childSegments);
  }

  @Override
  public String toString() {
    return Joiner.on(SEP).join(segments);
  }
}
| 3,947 |
0 | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/util/nesting | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/util/nesting/work/Workload.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.temporal.util.nesting.work;
import com.fasterxml.jackson.annotation.JsonTypeInfo;
import java.util.Iterator;
import java.util.Optional;
/**
* {@link Workload} models a logical collection of homogenous inputs over which a "foreach" operation can asynchronously apply
* an arbitrary procedure to each element. This encapsulates "processing" the entire collection of sequential
* "work item" specifications by the uniform application of the chosen procedure(s).
*
* Given Temporal's required determinism, the work items and work spans should remain unchanged, with stable sequential
* ordering. This need not constrain `Workload`s to eager, advance elaboration: "streaming" definition is possible,
* so long as producing a deterministic result.
*
* A actual, real-world workload might correspond to datastore contents, such as records serialized into HDFS files
* or ordered DB query results.
*/
@JsonTypeInfo(use = JsonTypeInfo.Id.CLASS, include = JsonTypeInfo.As.PROPERTY, property = "@class") // to handle impls
public interface Workload<WORK_ITEM> {
  /**
   * @return a sequential sub-sequence, from `startIndex` (0-based), unless it falls beyond the underlying sequence
   * NOTE: this is a blocking call that forces elaboration: `WorkSpan.getNumElems() < numElements` signifies end of seq
   */
  Optional<WorkSpan<WORK_ITEM>> getSpan(int startIndex, int numElements);

  /** Non-blocking, best-effort advice: to support non-strict elaboration, does NOT guarantee `index` will not exceed */
  boolean isIndexKnownToExceed(int index);

  /** @return whether the workload's total size is known up front (defaults to {@code false}, i.e. open-ended) */
  default boolean isDefiniteSize() {
    return false;
  }

  /** Logical sub-sequence 'slice' of contiguous work items */
  public interface WorkSpan<T> extends Iterator<T> {
    /** @return the count of work items within this span */
    int getNumElems();
  }
}
| 3,948 |
0 | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/util/nesting | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/util/nesting/work/SeqSliceBackedWorkSpan.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.temporal.util.nesting.work;
import java.util.NoSuchElementException;
import lombok.NoArgsConstructor;
import lombok.NonNull;
import lombok.RequiredArgsConstructor;
import com.fasterxml.jackson.annotation.JsonIgnore;
/** Logical sub-sequence of `WORK_ITEM`s, backed for simplicity's sake by an in-memory collection, *SHARED* w/ other work spans */
@NoArgsConstructor
@RequiredArgsConstructor
public class SeqSliceBackedWorkSpan<WORK_ITEM> implements Workload.WorkSpan<WORK_ITEM> {
  private static final int NOT_SET_SENTINEL = -1;
  // slice view over a larger array shared among sibling spans (never copied; assumed never mutated)
  @NonNull private WORK_ITEM[] sharedElems;
  // CAUTION: despite the "warning: @NonNull is meaningless on a primitive @lombok.RequiredArgsConstructor"...
  // if removed, no two-arg ctor is generated, so syntax error on `new CollectionSliceBackedTaskSpan(elems, startIndex)`
  @NonNull private int startingIndex;
  @NonNull private int numElements;
  private transient volatile int nextElemIndex = NOT_SET_SENTINEL;

  /** @return the actual slice size: `numElements`, clipped to the shared array's end */
  @Override
  public int getNumElems() {
    return getEndingIndex() - startingIndex;
  }

  @Override
  public boolean hasNext() {
    initCursorIfNecessary();
    return nextElemIndex < this.getEndingIndex();
  }

  @Override
  public WORK_ITEM next() {
    // per the `Iterator` contract, `next()` must also work when `hasNext()` was never called first;
    // previously the cursor remained at the -1 sentinel here, indexing the array out of bounds
    initCursorIfNecessary();
    // bound by `getEndingIndex()` (not `startingIndex + numElements`) so an overlong slice throws
    // NoSuchElementException rather than running off the end of `sharedElems`
    if (nextElemIndex >= getEndingIndex()) {
      throw new NoSuchElementException("index " + nextElemIndex + " >= " + startingIndex + " + " + numElements);
    }
    return sharedElems[nextElemIndex++];
  }

  @Override
  public String toString() {
    return getClassNickname() + "(" + startingIndex + "... {+" + getNumElems() + "})";
  }

  protected String getClassNickname() {
    // return getClass().getSimpleName();
    return "WorkSpan";
  }

  /** @return the slice's (exclusive) upper bound, clipped to the shared array's length */
  @JsonIgnore // (because no-arg method resembles 'java bean property')
  protected final int getEndingIndex() {
    return Math.min(startingIndex + numElements, sharedElems.length);
  }

  /** lazily position the cursor at the slice's start (first call only) */
  private void initCursorIfNecessary() {
    if (nextElemIndex == NOT_SET_SENTINEL) {
      nextElemIndex = startingIndex; // NOTE: `startingIndex` should be effectively `final` (post-deser) and always >= 0
    }
  }
}
| 3,949 |
0 | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/util/nesting | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/util/nesting/workflow/NestingExecWorkflow.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.temporal.util.nesting.workflow;
import java.util.Optional;
import io.temporal.workflow.WorkflowInterface;
import io.temporal.workflow.WorkflowMethod;
import org.apache.gobblin.temporal.util.nesting.work.WorkflowAddr;
import org.apache.gobblin.temporal.util.nesting.work.Workload;
/**
* Process all `WORK_ITEM`s of {@link Workload}, from `startIndex` to the end by creating child workflows, where this and
* descendants should have at most `maxBranchesPerTree`, with at most `maxSubTreesPerTree` of those being child
* workflows. (Non-child-workflow (terminal) branches are the activity executions.)
*
* The underlying motivation is to create logical workflows of unbounded size, despite Temporal's event history limit
* of 50Ki events; see: https://docs.temporal.io/workflows#event-history
*
* IMPORTANT: `Math.sqrt(maxBranchesPerTree) == maxSubTreesPerTree` provides a good rule-of-thumb; `maxSubTreesPerTree
* must not exceed that. This enables consolidation, wherein continued expansion occurs only along the tree's right-most edges.
*
* @param <WORK_ITEM> the type of task for which to invoke an appropriate activity
* @param maxSubTreesForCurrentTreeOverride when the current tree should use different max sub-trees than descendants
*/
@WorkflowInterface
public interface NestingExecWorkflow<WORK_ITEM> {
  /**
   * Process `workload` from `startIndex` onward, fanning out across activities and child workflows.
   *
   * @param addr hierarchical address of this workflow within the overall tree
   * @param workload the logical sequence of `WORK_ITEM`s to process
   * @param startIndex 0-based index within `workload` where this (sub-)tree begins
   * @param maxBranchesPerTree max total branches (activity + child-workflow) per tree
   * @param maxSubTreesPerTree max child-workflow branches per tree, for descendants
   * @param maxSubTreesForCurrentTreeOverride when the current tree should use different max sub-trees than descendants
   * @return the number of workload elements processed cumulatively by this Workflow and its children
   */
  @WorkflowMethod
  int performWorkload(
      WorkflowAddr addr,
      Workload<WORK_ITEM> workload,
      int startIndex,
      int maxBranchesPerTree,
      int maxSubTreesPerTree,
      Optional<Integer> maxSubTreesForCurrentTreeOverride
  );
}
| 3,950 |
0 | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/util/nesting | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/util/nesting/workflow/AbstractNestingExecWorkflowImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.temporal.util.nesting.workflow;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Optional;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.compress.utils.Lists;
import io.temporal.api.enums.v1.ParentClosePolicy;
import io.temporal.workflow.Async;
import io.temporal.workflow.ChildWorkflowOptions;
import io.temporal.workflow.Promise;
import io.temporal.workflow.Workflow;
import org.apache.gobblin.temporal.util.nesting.work.WorkflowAddr;
import org.apache.gobblin.temporal.util.nesting.work.Workload;
/** Core skeleton of {@link NestingExecWorkflow}: realizing classes need only define {@link #launchAsyncActivity} */
@Slf4j
public abstract class AbstractNestingExecWorkflowImpl<WORK_ITEM, ACTIVITY_RESULT> implements NestingExecWorkflow<WORK_ITEM> {
  public static final int NUM_SECONDS_TO_PAUSE_BEFORE_CREATING_SUB_TREE_DEFAULT = 10;
  public static final int MAX_CHILD_SUB_TREE_LEAVES_BEFORE_SHOULD_PAUSE_DEFAULT = 100;

  /**
   * Process this tree's direct leaves as async activities, then recurse into (at most `maxSubTreesPerTree`)
   * child workflows for the remainder; blocks until the entire sub-tree completes.
   * NOTE: as Temporal workflow code, this must stay deterministic (hence `Workflow.sleep`, `Async.function`, etc.).
   */
  @Override
  public int performWorkload(
      final WorkflowAddr addr,
      final Workload<WORK_ITEM> workload,
      final int startIndex,
      final int maxBranchesPerTree,
      final int maxSubTreesPerTree,
      final Optional<Integer> maxSubTreesForCurrentTreeOverride
  ) {
    final int maxSubTreesForCurrent = maxSubTreesForCurrentTreeOverride.orElse(maxSubTreesPerTree);
    // branches not reserved for sub-trees become direct activity leaves of this tree
    final int maxLeaves = maxBranchesPerTree - maxSubTreesForCurrent;
    final Optional<Workload.WorkSpan<WORK_ITEM>> optSpan = workload.getSpan(startIndex, maxLeaves);
    log.info("[" + addr + "] " + workload + " w/ start '" + startIndex + "'"
        + "; tree (" + maxBranchesPerTree + "/" + maxSubTreesPerTree + "): " + optSpan);
    if (!optSpan.isPresent()) {
      return 0;
    } else {
      final Workload.WorkSpan<WORK_ITEM> workSpan = optSpan.get();
      final Iterable<WORK_ITEM> iterable = () -> workSpan;
      // kick off every direct-leaf activity asynchronously
      final List<Promise<ACTIVITY_RESULT>> childActivities = StreamSupport.stream(iterable.spliterator(), false)
          .map(t -> launchAsyncActivity(t))
          .collect(Collectors.toList());
      final List<Promise<Integer>> childSubTrees = new ArrayList<>();
      if (workSpan.getNumElems() == maxLeaves) { // received as many as requested (did not stop short)
        int subTreeId = 0;
        for (int subTreeChildMaxSubTreesPerTree
            : consolidateSubTreeGrandChildren(maxSubTreesForCurrent, maxBranchesPerTree, maxSubTreesPerTree)) {
          // CAUTION: calc these *before* incrementing `subTreeId`!
          final int childStartIndex = startIndex + maxLeaves + (maxBranchesPerTree * subTreeId);
          final int nextChildId = maxLeaves + subTreeId;
          final WorkflowAddr childAddr = addr.createChild(nextChildId);
          final NestingExecWorkflow<WORK_ITEM> child = createChildWorkflow(childAddr);
          if (!workload.isIndexKnownToExceed(childStartIndex)) { // best-effort short-circuiting
            // IMPORTANT: insert pause before launch of each child workflow that may have direct leaves of its own. periodic pauses spread the load on the
            // temporal server, to avoid a sustained burst from submitting potentially very many async activities over the full hierarchical elaboration
            final int numDirectLeavesChildMayHave = maxBranchesPerTree - subTreeChildMaxSubTreesPerTree;
            if (numDirectLeavesChildMayHave > 0) {
              Workflow.sleep(calcPauseDurationBeforeCreatingSubTree(numDirectLeavesChildMayHave));
            }
            childSubTrees.add(
                Async.function(child::performWorkload, childAddr, workload, childStartIndex, maxBranchesPerTree,
                    maxSubTreesPerTree, Optional.of(subTreeChildMaxSubTreesPerTree)));
            ++subTreeId;
          }
        }
      }
      final Promise<Void> allActivityChildren = Promise.allOf(childActivities);
      allActivityChildren.get(); // ensure all complete prior to counting them in `overallActivitiesRollupCount`
      // TODO: determine whether any benefit to unordered `::get` blocking for any next ready (perhaps no difference...)
      final int descendantActivitiesRollupCount = childSubTrees.stream().map(Promise::get).reduce(0, (x, y) -> x + y);
      // TODO: consider a generalized reduce op for things other than counting!
      final int overallActivitiesRollupCount = workSpan.getNumElems() + descendantActivitiesRollupCount;
      log.info("[" + addr + "] activites finished coordinating: " + overallActivitiesRollupCount);
      return overallActivitiesRollupCount;
    }
  }

  /** Factory for invoking the specific activity by providing it args via {@link Async#function} */
  protected abstract Promise<ACTIVITY_RESULT> launchAsyncActivity(WORK_ITEM task);

  /** @return a child-workflow stub at `childAddr`, whose workflow ID extends this parent's ID hierarchically */
  protected NestingExecWorkflow<WORK_ITEM> createChildWorkflow(final WorkflowAddr childAddr) {
    // preserve the current workflow ID of this parent, but add the (hierarchical) address extension specific to each child
    String thisWorkflowId = Workflow.getInfo().getWorkflowId();
    String childWorkflowId = thisWorkflowId.replaceAll("-[^-]+$", "") + "-" + childAddr;
    ChildWorkflowOptions childOpts = ChildWorkflowOptions.newBuilder()
        .setParentClosePolicy(ParentClosePolicy.PARENT_CLOSE_POLICY_ABANDON)
        .setWorkflowId(childWorkflowId)
        .build();
    return Workflow.newChildWorkflowStub(NestingExecWorkflow.class, childOpts);
  }

  /** @return how long to pause prior to creating a child workflow, based on `numDirectLeavesChildMayHave` */
  protected Duration calcPauseDurationBeforeCreatingSubTree(int numDirectLeavesChildMayHave) {
    // (only pause when an appreciable number of leaves)
    // TODO: use a configuration value, for simpler adjustment, rather than hard-code
    return numDirectLeavesChildMayHave > MAX_CHILD_SUB_TREE_LEAVES_BEFORE_SHOULD_PAUSE_DEFAULT
        ? Duration.ofSeconds(NUM_SECONDS_TO_PAUSE_BEFORE_CREATING_SUB_TREE_DEFAULT)
        : Duration.ZERO;
  }

  /**
   * "right-tilt" sub-tree's grandchildren, so final child gets all grandchildren (vs. constant grandchildren/child)
   * i.e. NOT!:
   * List<Integer> naiveUniformity = Collections.nCopies(numSubTreesPerSubTree, numSubTreeChildren);
   * @return each sub-tree's desired size, in ascending sub-tree order
   */
  protected static List<Integer> consolidateSubTreeGrandChildren(
      final int numSubTreesPerSubTree,
      final int numChildrenTotal,
      final int numSubTreeChildren
  ) {
    if (numSubTreesPerSubTree <= 0) {
      return Lists.newArrayList();
    } else if (isSqrt(numSubTreeChildren, numChildrenTotal)) {
      // redistribute all grandchild sub-trees to pack every grandchild beneath the final child sub-tree
      final List<Integer> grandChildCounts = new ArrayList<>(Collections.nCopies(numSubTreesPerSubTree - 1, 0));
      grandChildCounts.add(numChildrenTotal);
      return grandChildCounts;
    } else {
      final int totalGrandChildSubTrees = numSubTreesPerSubTree * numSubTreeChildren;
      final int numTreesWithSolelySubTreeBranches = totalGrandChildSubTrees / numChildrenTotal;
      final int numSubTreesRemaining = totalGrandChildSubTrees % numChildrenTotal;
      assert (numTreesWithSolelySubTreeBranches == 1 && numSubTreesRemaining == 0) || numTreesWithSolelySubTreeBranches == 0
          : "present limitation: at most one sub-tree may use further branching: (found: numSubTreesPerSubTree: "
          + numSubTreesPerSubTree + "; numChildrenTotal: " + numChildrenTotal + " / numSubTreeChildren: "
          + numSubTreeChildren + ")";
      // zeros first, then any remainder, then the fully-packed sub-tree(s) — ascending order
      final List<Integer> grandChildCounts = new ArrayList<>(Collections.nCopies(numSubTreesPerSubTree - (numTreesWithSolelySubTreeBranches + 1), 0));
      grandChildCounts.addAll(Collections.nCopies(Math.min(1, numSubTreesPerSubTree - numTreesWithSolelySubTreeBranches), numSubTreesRemaining));
      grandChildCounts.addAll(Collections.nCopies(Math.min(numTreesWithSolelySubTreeBranches, numSubTreesPerSubTree), numChildrenTotal));
      return grandChildCounts;
    }
  }

  /** @return whether `maxSubTrees` == `Math.sqrt(maxBranches)` */
  private static boolean isSqrt(int maxSubTrees, int maxBranches) {
    return maxSubTrees > 0 && maxSubTrees * maxSubTrees == maxBranches;
  }
}
| 3,951 |
0 | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/ddm | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/ddm/launcher/ProcessWorkUnitsJobLauncher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.temporal.ddm.launcher;
import java.net.URI;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.ConcurrentHashMap;
import lombok.extern.slf4j.Slf4j;
import com.typesafe.config.ConfigFactory;
import io.temporal.client.WorkflowOptions;
import org.apache.hadoop.fs.Path;
import org.apache.gobblin.metrics.Tag;
import org.apache.gobblin.runtime.JobLauncher;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.temporal.cluster.GobblinTemporalTaskRunner;
import org.apache.gobblin.temporal.ddm.work.WUProcessingSpec;
import org.apache.gobblin.temporal.ddm.work.assistance.Help;
import org.apache.gobblin.temporal.ddm.workflow.ProcessWorkUnitsWorkflow;
import org.apache.gobblin.temporal.joblauncher.GobblinTemporalJobLauncher;
import org.apache.gobblin.temporal.joblauncher.GobblinTemporalJobScheduler;
import org.apache.gobblin.util.PropertiesUtils;
import static org.apache.gobblin.temporal.GobblinTemporalConfigurationKeys.GOBBLIN_TEMPORAL_JOB_LAUNCHER_ARG_PREFIX;
/**
* A {@link JobLauncher} for the initial triggering of a Temporal workflow that executes {@link WorkUnit}s to fulfill
* the work they specify. see: {@link ProcessWorkUnitsWorkflow}
*
* <p>
* This class is instantiated by the {@link GobblinTemporalJobScheduler#buildJobLauncher(Properties)} on every job submission to launch the Gobblin job.
* The actual task execution happens in the {@link GobblinTemporalTaskRunner}, usually in a different process.
* </p>
*/
@Slf4j
public class ProcessWorkUnitsJobLauncher extends GobblinTemporalJobLauncher {
  public static final String GOBBLIN_TEMPORAL_JOB_LAUNCHER_ARG_NAME_NODE_URI = GOBBLIN_TEMPORAL_JOB_LAUNCHER_ARG_PREFIX + "name.node.uri";
  public static final String GOBBLIN_TEMPORAL_JOB_LAUNCHER_ARG_WORK_UNITS_DIR = GOBBLIN_TEMPORAL_JOB_LAUNCHER_ARG_PREFIX + "work.units.dir";
  public static final String GOBBLIN_TEMPORAL_JOB_LAUNCHER_ARG_WORK_MAX_BRANCHES_PER_TREE = GOBBLIN_TEMPORAL_JOB_LAUNCHER_ARG_PREFIX + "work.max.branches.per.tree";
  public static final String GOBBLIN_TEMPORAL_JOB_LAUNCHER_ARG_WORK_MAX_SUB_TREES_PER_TREE = GOBBLIN_TEMPORAL_JOB_LAUNCHER_ARG_PREFIX + "work.max.sub.trees.per.tree";
  public static final String WORKFLOW_ID_BASE = "ProcessWorkUnits";

  public ProcessWorkUnitsJobLauncher(
      Properties jobProps,
      Path appWorkDir,
      List<? extends Tag<?>> metadataTags,
      ConcurrentHashMap<String, Boolean> runningMap
  ) throws Exception {
    super(jobProps, appWorkDir, metadataTags, runningMap);
  }

  /** Build the processing spec from job props, then kick off the {@link ProcessWorkUnitsWorkflow} for it. */
  @Override
  public void submitJob(List<WorkUnit> workunits) {
    try {
      WUProcessingSpec wuSpec = buildProcessingSpec();
      WorkflowOptions options = WorkflowOptions.newBuilder()
          .setTaskQueue(this.queueName)
          .setWorkflowId(Help.qualifyNamePerExec(WORKFLOW_ID_BASE, wuSpec, ConfigFactory.parseProperties(jobProps)))
          .build();
      ProcessWorkUnitsWorkflow workflow = this.client.newWorkflowStub(ProcessWorkUnitsWorkflow.class, options);
      workflow.process(wuSpec);
    } catch (Exception e) {
      throw new RuntimeException(e);
    }
  }

  /** Assemble the {@link WUProcessingSpec} from required job props, applying tuning overrides when both are present. */
  private WUProcessingSpec buildProcessingSpec() throws Exception {
    URI nameNodeUri = new URI(PropertiesUtils.getRequiredProp(this.jobProps, GOBBLIN_TEMPORAL_JOB_LAUNCHER_ARG_NAME_NODE_URI));
    // NOTE: `Path` is challenging for temporal to ser/de, but nonetheless do pre-construct as `Path`, to pre-validate this prop string's contents
    Path workUnitsDir = new Path(PropertiesUtils.getRequiredProp(this.jobProps, GOBBLIN_TEMPORAL_JOB_LAUNCHER_ARG_WORK_UNITS_DIR));
    WUProcessingSpec wuSpec = new WUProcessingSpec(nameNodeUri, workUnitsDir.toString());
    boolean haveBothTuningProps = this.jobProps.containsKey(GOBBLIN_TEMPORAL_JOB_LAUNCHER_ARG_WORK_MAX_BRANCHES_PER_TREE)
        && this.jobProps.containsKey(GOBBLIN_TEMPORAL_JOB_LAUNCHER_ARG_WORK_MAX_SUB_TREES_PER_TREE);
    if (haveBothTuningProps) {
      int maxBranchesPerTree = PropertiesUtils.getRequiredPropAsInt(this.jobProps, GOBBLIN_TEMPORAL_JOB_LAUNCHER_ARG_WORK_MAX_BRANCHES_PER_TREE);
      int maxSubTreesPerTree = PropertiesUtils.getRequiredPropAsInt(this.jobProps, GOBBLIN_TEMPORAL_JOB_LAUNCHER_ARG_WORK_MAX_SUB_TREES_PER_TREE);
      wuSpec.setTuning(new WUProcessingSpec.Tuning(maxBranchesPerTree, maxSubTreesPerTree));
    }
    return wuSpec;
  }
}
| 3,952 |
0 | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/ddm | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/ddm/util/JobStateUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.temporal.ddm.util;
import java.util.Properties;
import java.util.concurrent.ExecutionException;
import lombok.extern.slf4j.Slf4j;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.typesafe.config.ConfigFactory;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.gobblin.broker.SharedResourcesBrokerFactory;
import org.apache.gobblin.broker.gobblin_scopes.GobblinScopeTypes;
import org.apache.gobblin.broker.gobblin_scopes.JobScopeInstance;
import org.apache.gobblin.broker.iface.SharedResourcesBroker;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.metastore.FsStateStore;
import org.apache.gobblin.metastore.StateStore;
import org.apache.gobblin.runtime.JobState;
import org.apache.gobblin.runtime.TaskState;
/**
 * Utilities for applying {@link JobState} info to various ends:
 * - creating a {@link SharedResourcesBroker}
 * - obtaining a {@link StateStore<TaskState>}
 */
@Slf4j
public class JobStateUtils {
  private static final String OUTPUT_DIR_NAME = "output"; // following MRJobLauncher.OUTPUT_DIR_NAME

  // reuse same handle among activities executed by the same worker
  // (NOTE: `transient` was dropped - it is meaningless on a static field, since only instance state is serialized)
  private static final Cache<Path, StateStore<TaskState>> taskStateStoreByPath = CacheBuilder.newBuilder().build();

  /** purely-static utility class: not to be instantiated */
  private JobStateUtils() {}

  /**
   * Open (or reuse a worker-cached) {@link StateStore} for {@link TaskState}s, keyed by the store path
   * derived from {@code jobState}.
   * @throws RuntimeException wrapping any failure while creating the store
   */
  public static StateStore<TaskState> openTaskStateStore(JobState jobState, FileSystem fs) {
    try {
      Path taskStateStorePath = JobStateUtils.getTaskStateStorePath(jobState, fs);
      return taskStateStoreByPath.get(taskStateStorePath, () ->
          openTaskStateStoreUncached(jobState, fs)
      );
    } catch (ExecutionException ee) {
      throw new RuntimeException(ee);
    }
  }

  /** Open a fresh (uncached) {@link FsStateStore} for {@link TaskState}s at the path derived from {@code jobState} */
  public static StateStore<TaskState> openTaskStateStoreUncached(JobState jobState, FileSystem fs) {
    Path taskStateStorePath = JobStateUtils.getTaskStateStorePath(jobState, fs);
    log.info("opening FS task state store at path '{}'", taskStateStorePath);
    return new FsStateStore<>(fs, taskStateStorePath.toUri().getPath(), TaskState.class);
  }

  /**
   * ATTENTION: derives path according to {@link org.apache.gobblin.runtime.mapreduce.MRJobLauncher} conventions, using same
   * {@link ConfigurationKeys#MR_JOB_ROOT_DIR_KEY}: {@code <mrJobRootDir>/<jobName>/<jobId>/output}
   * @return path to {@link FsStateStore<TaskState>} backing dir
   */
  public static Path getTaskStateStorePath(JobState jobState, FileSystem fs) {
    Properties jobProps = jobState.getProperties();
    Path jobRootDir = new Path(jobProps.getProperty(ConfigurationKeys.MR_JOB_ROOT_DIR_KEY));
    Path jobNameDir = new Path(jobRootDir, JobState.getJobNameFromProps(jobProps));
    Path jobIdDir = new Path(jobNameDir, JobState.getJobIdFromProps(jobProps));
    return fs.makeQualified(new Path(jobIdDir, OUTPUT_DIR_NAME));
  }

  /** @return a job-scoped {@link SharedResourcesBroker}, sub-scoped beneath a global broker built from {@code jobState}'s props */
  public static SharedResourcesBroker<GobblinScopeTypes> getSharedResourcesBroker(JobState jobState) {
    SharedResourcesBroker<GobblinScopeTypes> globalBroker =
        SharedResourcesBrokerFactory.createDefaultTopLevelBroker(
            ConfigFactory.parseProperties(jobState.getProperties()),
            GobblinScopeTypes.GLOBAL.defaultScopeInstance());
    return globalBroker.newSubscopedBuilder(new JobScopeInstance(jobState.getJobName(), jobState.getJobId())).build();
  }
}
| 3,953 |
0 | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/ddm | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/ddm/activity/ProcessWorkUnit.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.temporal.ddm.activity;
import io.temporal.activity.ActivityInterface;
import io.temporal.activity.ActivityMethod;
import org.apache.gobblin.temporal.ddm.work.WorkUnitClaimCheck;
/**
 * Activity for processing/executing a {@link org.apache.gobblin.source.workunit.WorkUnit}, provided by claim-check
 * (i.e. the {@link WorkUnitClaimCheck} carries only the FS location of the serialized work unit, not the work unit itself).
 */
@ActivityInterface
public interface ProcessWorkUnit {
@ActivityMethod
// CAUTION: void return type won't work, as apparently it mayn't be the return type for `io.temporal.workflow.Functions.Func1`!
// returns the number of tasks executed (0 on failure where `TaskState` is presumed already recorded - see impl)
int processWorkUnit(WorkUnitClaimCheck wu);
}
| 3,954 |
0 | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/ddm/activity | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/ddm/activity/impl/ProcessWorkUnitImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.temporal.ddm.activity.impl;
import java.io.IOException;
import java.util.List;
import java.util.Optional;
import java.util.Properties;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import lombok.extern.slf4j.Slf4j;
import com.google.common.collect.Lists;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.gobblin.broker.gobblin_scopes.GobblinScopeTypes;
import org.apache.gobblin.broker.iface.SharedResourcesBroker;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.data.management.copy.CopyEntity;
import org.apache.gobblin.data.management.copy.CopySource;
import org.apache.gobblin.data.management.copy.CopyableFile;
import org.apache.gobblin.metastore.StateStore;
import org.apache.gobblin.runtime.AbstractTaskStateTracker;
import org.apache.gobblin.runtime.GobblinMultiTaskAttempt;
import org.apache.gobblin.runtime.JobState;
import org.apache.gobblin.runtime.Task;
import org.apache.gobblin.runtime.TaskCreationException;
import org.apache.gobblin.runtime.TaskExecutor;
import org.apache.gobblin.runtime.TaskState;
import org.apache.gobblin.runtime.TaskStateTracker;
import org.apache.gobblin.runtime.troubleshooter.AutomaticTroubleshooter;
import org.apache.gobblin.runtime.troubleshooter.NoopAutomaticTroubleshooter;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.temporal.ddm.activity.ProcessWorkUnit;
import org.apache.gobblin.temporal.ddm.util.JobStateUtils;
import org.apache.gobblin.temporal.ddm.work.assistance.Help;
import org.apache.gobblin.temporal.ddm.work.WorkUnitClaimCheck;
import org.apache.gobblin.util.JobLauncherUtils;
@Slf4j
public class ProcessWorkUnitImpl implements ProcessWorkUnit {
// emit extended (verbose) logging only for every Nth work unit, to bound total log volume
private static final int LOG_EXTENDED_PROPS_EVERY_WORK_UNITS_STRIDE = 100;
/**
 * Load the claim-checked {@link WorkUnit}(s) plus the {@link JobState} from the FS, then execute the work units.
 * @return count of tasks executed (0 when execution failed but `TaskState`s are presumed already recorded)
 * @throws RuntimeException wrapping any {@link IOException}/{@link InterruptedException}
 */
@Override
public int processWorkUnit(WorkUnitClaimCheck wu) {
try (FileSystem fs = Help.loadFileSystemForce(wu)) {
List<WorkUnit> workUnits = loadFlattenedWorkUnits(wu, fs);
log.info("WU [{}] - loaded {} workUnits", wu.getCorrelator(), workUnits.size());
JobState jobState = Help.loadJobState(wu, fs);
return execute(workUnits, wu, jobState, fs);
} catch (IOException | InterruptedException e) {
throw new RuntimeException(e);
}
}
/**
 * Deserialize the work unit at {@link WorkUnitClaimCheck#getWorkUnitPath()} (with retries) and flatten it
 * (a multi-work-unit - presumably chosen by file extension; TODO confirm - expands to its constituents).
 */
protected List<WorkUnit> loadFlattenedWorkUnits(WorkUnitClaimCheck wu, FileSystem fs) throws IOException {
Path wuPath = new Path(wu.getWorkUnitPath());
WorkUnit workUnit = JobLauncherUtils.createEmptyWorkUnitPerExtension(wuPath);
Help.deserializeStateWithRetries(fs, wuPath, workUnit, wu);
return JobLauncherUtils.flattenWorkUnits(Lists.newArrayList(workUnit));
}
/**
 * NOTE: adapted from {@link org.apache.gobblin.runtime.mapreduce.MRJobLauncher.TaskRunner#run(org.apache.hadoop.mapreduce.Mapper.Context)}
 * @return count of how many tasks executed (0 if execution ultimately failed, but we *believe* TaskState should already have been recorded beforehand)
 */
protected int execute(List<WorkUnit> workUnits, WorkUnitClaimCheck wu, JobState jobState, FileSystem fs) throws IOException, InterruptedException {
// synthetic container ID, since (unlike MR/YARN execution) there is no real container here
String containerId = "container-id-for-wu-" + wu.getCorrelator();
StateStore<TaskState> taskStateStore = Help.openTaskStateStore(wu, fs);
TaskStateTracker taskStateTracker = createEssentializedTaskStateTracker(wu);
TaskExecutor taskExecutor = new TaskExecutor(new Properties());
GobblinMultiTaskAttempt.CommitPolicy multiTaskAttemptCommitPolicy = GobblinMultiTaskAttempt.CommitPolicy.IMMEDIATE; // as no speculative exec
SharedResourcesBroker<GobblinScopeTypes> resourcesBroker = JobStateUtils.getSharedResourcesBroker(jobState);
// troubleshooting currently disabled (no-op); the real factory invocation is retained below for future use:
AutomaticTroubleshooter troubleshooter = new NoopAutomaticTroubleshooter();
// AutomaticTroubleshooterFactory.createForJob(ConfigUtils.propertiesToConfig(wu.getStateConfig().getProperties()));
troubleshooter.start();
List<String> fileSourcePaths = workUnits.stream()
.map(workUnit -> describeAsCopyableFile(workUnit, wu.getWorkUnitPath()))
.collect(Collectors.toList());
log.info("WU [{}] - submitting {} workUnits for copying files: {}", wu.getCorrelator(),
workUnits.size(), fileSourcePaths);
// NOTE(review): assumes `workUnits` is non-empty - `get(0)` would throw otherwise; confirm flattening never yields zero
log.debug("WU [{}] - (first) workUnit: {}", wu.getCorrelator(), workUnits.get(0).toJsonString());
try {
GobblinMultiTaskAttempt taskAttempt = GobblinMultiTaskAttempt.runWorkUnits(
jobState.getJobId(), containerId, jobState, workUnits,
taskStateTracker, taskExecutor, taskStateStore, multiTaskAttemptCommitPolicy,
resourcesBroker, troubleshooter.getIssueRepository(), createInterruptionPredicate(fs, jobState));
return taskAttempt.getNumTasksCreated();
} catch (TaskCreationException tce) { // derived type of `IOException` that ought not be caught!
throw tce;
} catch (IOException ioe) {
// presume execution already occurred, with `TaskState` written to reflect outcome
log.warn("WU [" + wu.getCorrelator() + "] - continuing on despite IOException:", ioe);
return 0;
}
}
/** Demonstration processing, to isolate debugging of WU loading and deserialization */
protected int countSumProperties(List<WorkUnit> workUnits, WorkUnitClaimCheck wu) {
int totalNumProps = workUnits.stream().mapToInt(workUnit -> workUnit.getPropertyNames().size()).sum();
log.info("opened WU [{}] to find {} properties total at '{}'", wu.getCorrelator(), totalNumProps, wu.getWorkUnitPath());
return totalNumProps;
}
/**
 * @return a minimal {@link TaskStateTracker} that marks task completion and logs outcomes,
 * without metrics scheduling (see TODOs inline)
 */
protected TaskStateTracker createEssentializedTaskStateTracker(WorkUnitClaimCheck wu) {
return new AbstractTaskStateTracker(new Properties(), log) {
@Override
public void registerNewTask(Task task) {
// TODO: shall we schedule metrics update based on config?
}
@Override
public void onTaskRunCompletion(Task task) {
task.markTaskCompletion();
}
@Override
public void onTaskCommitCompletion(Task task) {
TaskState taskState = task.getTaskState();
// TODO: if metrics configured, report them now
log.info("WU [{} = {}] - finished commit after {}ms with state {}{}", wu.getCorrelator(), task.getTaskId(),
taskState.getTaskDuration(), taskState.getWorkingState(),
taskState.getWorkingState().equals(WorkUnitState.WorkingState.SUCCESSFUL)
? (" to: " + taskState.getProp(ConfigurationKeys.WRITER_OUTPUT_DIR)) : "");
log.debug("WU [{} = {}] - task state: {}", wu.getCorrelator(), task.getTaskId(),
taskState.toJsonString(shouldUseExtendedLogging(wu)));
// when the task carried a `CopyableFile`, log it too (extended form only for every Nth WU)
getOptCopyableFile(taskState).ifPresent(copyableFile -> {
log.info("WU [{} = {}] - completed copyableFile: {}", wu.getCorrelator(), task.getTaskId(),
copyableFile.toJsonString(shouldUseExtendedLogging(wu)));
});
}
};
}
/** @return the origin path of the work unit's {@link CopyableFile}, else a diagnostic description of what it held instead */
protected String describeAsCopyableFile(WorkUnit workUnit, String workUnitPath) {
return getOptFirstCopyableFile(Lists.newArrayList(workUnit), workUnitPath)
.map(copyableFile -> copyableFile.getOrigin().getPath().toString())
.orElse(
"<<not a CopyableFile("
+ getOptCopyEntityClass(workUnit, workUnitPath)
.map(Class::getSimpleName)
.orElse("<<not a CopyEntity!>>")
+ "): '" + workUnitPath + "'"
);
}
/** @return the {@link CopyableFile} serialized within `taskState`, when present */
protected Optional<CopyableFile> getOptCopyableFile(TaskState taskState) {
return getOptCopyableFile(taskState, "taskState '" + taskState.getTaskId() + "'");
}
/** @return the {@link CopyableFile} serialized within the first of `workUnits`, when any exist and it has one */
protected Optional<CopyableFile> getOptFirstCopyableFile(List<WorkUnit> workUnits, String workUnitPath) {
return Optional.of(workUnits).filter(wus -> wus.size() > 0).flatMap(wus ->
getOptCopyableFile(wus.get(0), "workUnit '" + workUnitPath + "'")
);
}
/**
 * @return the deserialized {@link CopyableFile} held by `state`, when its copy-entity class is a `CopyableFile`
 * (sub)type and a serialized form is present; empty otherwise
 */
protected Optional<CopyableFile> getOptCopyableFile(State state, String logDesc) {
return getOptCopyEntityClass(state, logDesc).flatMap(copyEntityClass -> {
log.debug("(state) {} got (copyEntity) {}", state.getClass().getName(), copyEntityClass.getName());
if (CopyableFile.class.isAssignableFrom(copyEntityClass)) {
String serialization = state.getProp(CopySource.SERIALIZED_COPYABLE_FILE);
if (serialization != null) {
return Optional.of((CopyableFile) CopyEntity.deserialize(serialization));
}
}
return Optional.empty();
});
}
/** @return the copy-entity class recorded in `state`, or empty (with a WARN, using `logDesc` for context) on failure */
protected Optional<Class<?>> getOptCopyEntityClass(State state, String logDesc) {
try {
return Optional.of(CopySource.getCopyEntityClass(state));
} catch (IOException ioe) {
log.warn(logDesc + " - failed getting copy entity class:", ioe);
return Optional.empty();
}
}
protected Predicate<GobblinMultiTaskAttempt> createInterruptionPredicate(FileSystem fs, JobState jobState) {
// TODO - decide whether to support... and if so, employ a useful path; otherwise, just evaluate predicate to always false
Path interruptionPath = new Path("/not/a/real/path/that/should/ever/exist!");
return createInterruptionPredicate(fs, interruptionPath);
}
/** @return predicate that signals interruption iff `interruptionPath` exists (FS errors treated as "not interrupted") */
protected Predicate<GobblinMultiTaskAttempt> createInterruptionPredicate(FileSystem fs, Path interruptionPath) {
return (gmta) -> {
try {
return fs.exists(interruptionPath);
} catch (IOException ioe) {
return false;
}
};
}
/** @return whether this WU's (numeric) correlator falls on the extended-logging stride; false for non-numeric correlators */
protected boolean shouldUseExtendedLogging(WorkUnitClaimCheck wu) {
try {
return Long.parseLong(wu.getCorrelator()) % LOG_EXTENDED_PROPS_EVERY_WORK_UNITS_STRIDE == 0;
} catch (NumberFormatException nfe) {
log.warn("unexpected, non-numeric correlator: '{}'", wu.getCorrelator());
return false;
}
}
}
| 3,955 |
0 | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/ddm | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/ddm/work/EagerFsDirBackedWorkUnitClaimCheckWorkload.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.temporal.ddm.work;
import java.net.URI;
import java.util.Comparator;
import com.fasterxml.jackson.annotation.JsonIgnore;
import org.apache.hadoop.fs.FileStatus;
/**
 * {@link AbstractEagerFsDirBackedWorkload} for {@link WorkUnitClaimCheck} `WORK_ITEM`s, totally-ordered by
 * {@link WorkUnitClaimCheck#getWorkUnitPath()}.
 */
@lombok.NoArgsConstructor // IMPORTANT: for jackson (de)serialization
@lombok.ToString(callSuper = true)
public class EagerFsDirBackedWorkUnitClaimCheckWorkload extends AbstractEagerFsDirBackedWorkload<WorkUnitClaimCheck> {

  public EagerFsDirBackedWorkUnitClaimCheckWorkload(URI fileSystemUri, String hdfsDir) {
    super(fileSystemUri, hdfsDir);
  }

  /** Wrap `fileStatus`'s path as a claim-check; every correlator begins empty, to be assigned post-sort. */
  @Override
  protected WorkUnitClaimCheck fromFileStatus(FileStatus fileStatus) {
    String workUnitPath = fileStatus.getPath().toString();
    return new WorkUnitClaimCheck("", this.getFileSystemUri(), workUnitPath);
  }

  /** Impose the total ordering: lexicographic comparison of work-unit paths. */
  @Override
  @JsonIgnore // (because no-arg method resembles 'java bean property')
  protected Comparator<WorkUnitClaimCheck> getWorkItemComparator() {
    return (left, right) -> left.getWorkUnitPath().compareTo(right.getWorkUnitPath());
  }

  /** Once the post-total-ordering indices are known, adopt each item's index as its correlator. */
  @Override
  protected void acknowledgeOrdering(int index, WorkUnitClaimCheck item) {
    item.setCorrelator(String.valueOf(index));
  }
}
| 3,956 |
0 | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/ddm | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/ddm/work/AbstractEagerFsDirBackedWorkload.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.temporal.ddm.work;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
import java.util.Arrays;
import java.util.Comparator;
import java.util.Optional;
import java.util.stream.IntStream;
import java.util.stream.Stream;
import lombok.AccessLevel;
import lombok.Getter;
import lombok.NonNull;
import lombok.Setter;
import lombok.extern.slf4j.Slf4j;
import com.fasterxml.jackson.annotation.JsonIgnore;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.temporal.ddm.work.styles.FileSystemApt;
import org.apache.gobblin.temporal.util.nesting.work.SeqSliceBackedWorkSpan;
import org.apache.gobblin.temporal.util.nesting.work.Workload;
import org.apache.gobblin.util.HadoopUtils;
/**
 * {@link Workload} of `WORK_ITEM`s (as defined by derived class) that originates from the eagerly loaded contents of
 * the directory `fsDir` within the {@link FileSystem} at `nameNodeUri`.
 *
 * IMPORTANT: to abide by Temporal's required determinism, a derived class must provide a {@link Comparator} for the
 * *total ordering* of `WORK_ITEM`s.
 */
@lombok.NoArgsConstructor // IMPORTANT: for jackson (de)serialization
@lombok.RequiredArgsConstructor
@lombok.ToString(exclude = { "stateConfig", "cachedWorkItems" })
@Slf4j
public abstract class AbstractEagerFsDirBackedWorkload<WORK_ITEM> implements Workload<WORK_ITEM>, FileSystemApt {

  @Getter
  @NonNull private URI fileSystemUri;
  // NOTE: use `String` rather than `Path` to avoid: com.fasterxml.jackson.databind.exc.MismatchedInputException:
  //   Cannot construct instance of `org.apache.hadoop.fs.Path` (although at least one Creator exists):
  //   cannot deserialize from Object value (no delegate- or property-based Creator)
  @NonNull private String fsDir;
  // lazily-populated snapshot of the dir listing; `volatile` for visibility across activity threads
  @Getter(AccessLevel.PROTECTED) @Setter(AccessLevel.PROTECTED)
  private transient volatile WORK_ITEM[] cachedWorkItems = null;

  /** @return the span of (up to) `numElements` work items starting at `startIndex`, or empty when out of range */
  @Override
  public Optional<Workload.WorkSpan<WORK_ITEM>> getSpan(final int startIndex, final int numElements) {
    WORK_ITEM[] workItems = getCachedWorkItems();
    if (startIndex >= workItems.length || startIndex < 0) {
      return Optional.empty();
    } else {
      return Optional.of(new SeqSliceBackedWorkSpan<>(workItems, startIndex, numElements));
    }
  }

  @Override
  public boolean isIndexKnownToExceed(final int index) {
    // only answerable once the listing has been cached (and this workload is always of definite size)
    return isDefiniteSize() && cachedWorkItems != null && index >= cachedWorkItems.length;
  }

  @Override
  @JsonIgnore // (because no-arg method resembles 'java bean property')
  public boolean isDefiniteSize() {
    return true;
  }

  /** @return the `WORK_ITEM` representation of one listed file */
  protected abstract WORK_ITEM fromFileStatus(FileStatus fileStatus);

  /**
   * IMPORTANT: to satisfy Temporal's required determinism, the `WORK_ITEM`s need a consistent total ordering
   * WARNING: this works so long as dir contents are unchanged in interim
   * TODO: handle case of dir contents growing (e.g. use timestamp to filter out newer paths)... how could we handle the case of shrinking/deletion?
   */
  @JsonIgnore // (because no-arg method resembles 'java bean property')
  protected abstract Comparator<WORK_ITEM> getWorkItemComparator();

  /** Hook for each `WORK_ITEM` to be associated with its final, post-sorting ordinal index */
  protected void acknowledgeOrdering(int index, WORK_ITEM workItem) {
    // no-op
  }

  /** @return filter over dir contents; default accepts everything */
  @JsonIgnore // (because no-arg method resembles 'java bean property')
  protected PathFilter getPathFilter() {
    return f -> true;
  }

  /**
   * List `fsDir`, transform each {@link FileStatus} into a `WORK_ITEM`, totally-order them, and cache the result.
   * Synchronized so concurrent callers share a single listing.
   * @throws RuntimeException (cause preserved) on any FS failure, incl. when the dir does not exist
   */
  @JsonIgnore // (because no-arg method resembles 'java bean property')
  @SuppressWarnings("unchecked") // `toArray(Object[]::new)` cast is safe: elements originate from `fromFileStatus` and, post-erasure, `WORK_ITEM[]` is `Object[]`
  protected final synchronized WORK_ITEM[] getCachedWorkItems() {
    if (cachedWorkItems != null) {
      return cachedWorkItems;
    }
    try (FileSystem fs = loadFileSystem()) {
      FileStatus[] fileStatuses = fs.listStatus(new Path(fsDir), this.getPathFilter());
      log.info("loaded {} paths from '{}'", fileStatuses.length, fsDir);
      WORK_ITEM[] workItems = (WORK_ITEM[])Stream.of(fileStatuses).map(this::fromFileStatus).toArray(Object[]::new);
      sortWorkItems(workItems);
      IntStream.range(0, workItems.length)
          .forEach(i -> this.acknowledgeOrdering(i, workItems[i]));
      cachedWorkItems = workItems;
      return cachedWorkItems;
    } catch (FileNotFoundException fnfe) {
      // preserve the cause, so the underlying FS stack trace isn't lost
      throw new RuntimeException("directory not found: '" + fsDir + "'", fnfe);
    } catch (IOException ioe) {
      throw new RuntimeException(ioe);
    }
  }

  @JsonIgnore // (because no-arg method resembles 'java bean property')
  @Override
  public State getFileSystemConfig() {
    return new State(); // TODO - figure out how to truly set!
  }

  @JsonIgnore // (because no-arg method resembles 'java bean property')
  protected FileSystem loadFileSystem() throws IOException {
    return HadoopUtils.getFileSystem(this.fileSystemUri, this.getFileSystemConfig());
  }

  private void sortWorkItems(WORK_ITEM[] workItems) {
    Arrays.sort(workItems, getWorkItemComparator());
  }
}
| 3,957 |
0 | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/ddm | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/ddm/work/WUProcessingSpec.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.temporal.ddm.work;
import java.net.URI;
import java.util.Optional;
import lombok.Data;
import lombok.NoArgsConstructor;
import lombok.NonNull;
import lombok.RequiredArgsConstructor;
import com.fasterxml.jackson.annotation.JsonIgnore;
import org.apache.hadoop.fs.Path;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.runtime.AbstractJobLauncher;
import org.apache.gobblin.temporal.ddm.work.styles.FileSystemApt;
import org.apache.gobblin.temporal.ddm.work.styles.FileSystemJobStateful;
import org.apache.gobblin.temporal.util.nesting.work.WorkflowAddr;
import org.apache.gobblin.temporal.util.nesting.work.Workload;
/**
 * Intended to reference multiple {@link org.apache.gobblin.source.workunit.WorkUnit}s to process, where `workUnitsDir`
 * is resolved against the {@link org.apache.hadoop.fs.FileSystem} given by `nameNodeUri`.
 */
@Data
@NoArgsConstructor // IMPORTANT: for jackson (de)serialization
@RequiredArgsConstructor
public class WUProcessingSpec implements FileSystemApt, FileSystemJobStateful {
@NonNull private URI fileSystemUri;
@NonNull private String workUnitsDir;
// initialized at declaration, so EXCLUDED from the lombok required-args ctor (callers construct with 2 args); adjustable via `setTuning`
@NonNull private Tuning tuning = Tuning.DEFAULT;
@JsonIgnore // (because no-arg method resembles 'java bean property')
@Override
public State getFileSystemConfig() {
return new State(); // TODO - figure out how to truly set!
}
@JsonIgnore // (because no-arg method resembles 'java bean property')
@Override
public Path getJobStatePath() {
// TODO: decide whether wise to hard-code... (per `MRJobLauncher` conventions, we expect job state file to be sibling of WU dir)
return new Path(new Path(workUnitsDir).getParent(), AbstractJobLauncher.JOB_STATE_FILE_NAME);
}
/** Configuration for {@link org.apache.gobblin.temporal.util.nesting.workflow.NestingExecWorkflow#performWorkload(WorkflowAddr, Workload, int, int, int, Optional)}*/
@Data
@NoArgsConstructor // IMPORTANT: for jackson (de)serialization
@RequiredArgsConstructor
public static class Tuning {
public static final int DEFAULT_MAX_BRANCHES_PER_TREE = 900;
// (naming: arguably `DEFAULT_MAX_SUB_TREES_PER_TREE`, for symmetry; left as-is - it's a public constant)
public static final int DEFAULT_SUB_TREES_PER_TREE = 30;
public static final Tuning DEFAULT = new Tuning(DEFAULT_MAX_BRANCHES_PER_TREE, DEFAULT_SUB_TREES_PER_TREE);
// NOTE(review): `@NonNull` on a primitive adds no null-check; it does keep these fields among lombok's required ctor args
@NonNull private int maxBranchesPerTree;
@NonNull private int maxSubTreesPerTree;
}
}
| 3,958 |
0 | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/ddm | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/ddm/work/WorkUnitClaimCheck.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.temporal.ddm.work;
import java.net.URI;
import lombok.Data;
import lombok.NoArgsConstructor;
import lombok.NonNull;
import lombok.RequiredArgsConstructor;
import com.fasterxml.jackson.annotation.JsonIgnore;
import org.apache.hadoop.fs.Path;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.runtime.AbstractJobLauncher;
import org.apache.gobblin.temporal.ddm.work.styles.FileSystemApt;
import org.apache.gobblin.temporal.ddm.work.styles.FileSystemJobStateful;
/**
 * Conveys a {@link org.apache.gobblin.source.workunit.WorkUnit} by claim-check: `workUnitPath` locates the serialized
 * work unit within the {@link org.apache.hadoop.fs.FileSystem} at `nameNodeUri`. see:
 * @see <a href="https://learn.microsoft.com/en-us/azure/architecture/patterns/claim-check">Claim-Check Pattern</a>
 */
@Data
@NoArgsConstructor // IMPORTANT: for jackson (de)serialization
@RequiredArgsConstructor
public class WorkUnitClaimCheck implements FileSystemApt, FileSystemJobStateful {
  @NonNull private String correlator;
  @NonNull private URI fileSystemUri;
  @NonNull private String workUnitPath;

  /** @return a fresh, empty FS config */
  @JsonIgnore // (because no-arg method resembles 'java bean property')
  @Override
  public State getFileSystemConfig() {
    State fsConfig = new State(); // TODO - figure out how to truly set!
    return fsConfig;
  }

  /** @return the expected job-state file location (per `MRJobLauncher` conventions: sibling of the WU dir) */
  @JsonIgnore // (because no-arg method resembles 'java bean property')
  @Override
  public Path getJobStatePath() {
    // TODO: decide whether wise to hard-code...
    Path workUnitsDir = new Path(workUnitPath).getParent();
    Path jobDir = workUnitsDir.getParent();
    return new Path(jobDir, AbstractJobLauncher.JOB_STATE_FILE_NAME);
  }
}
| 3,959 |
0 | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/ddm/work | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/ddm/work/assistance/Help.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.temporal.ddm.work.assistance;
import java.io.IOException;
import java.net.URI;
import java.util.Optional;
import java.util.UUID;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.ExecutionException;
import lombok.extern.slf4j.Slf4j;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.typesafe.config.Config;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.metastore.StateStore;
import org.apache.gobblin.runtime.JobState;
import org.apache.gobblin.runtime.TaskState;
import org.apache.gobblin.temporal.ddm.util.JobStateUtils;
import org.apache.gobblin.temporal.ddm.work.styles.FileSystemJobStateful;
import org.apache.gobblin.temporal.ddm.work.styles.FileSystemApt;
import org.apache.gobblin.temporal.ddm.work.styles.JobStateful;
import org.apache.gobblin.util.HadoopUtils;
import org.apache.gobblin.util.SerializationUtils;
/** Various capabilities useful in implementing Distributed Data Movement (DDM) */
@Slf4j
public class Help {
public static final int MAX_DESERIALIZATION_FS_LOAD_ATTEMPTS = 5;
public static final int LOG_CACHE_STATS_EVERY_N_ACCESSES = 1000;
public static final String AZKABAN_FLOW_EXEC_ID_KEY = "azkaban.flow.execid";
public static final String USER_TO_PROXY_KEY = "user.to.proxy";
// treat `JobState` as immutable and cache, for reuse among activities executed by the same worker
private static final transient Cache<Path, JobState> jobStateByPath = CacheBuilder.newBuilder().recordStats().build();
private static final transient AtomicInteger jobStateAccessCount = new AtomicInteger(0);
private Help() {}
/** @return `name`, qualified to be unique per job execution (flow exec ID when loadable, plus {@link #calcPerExecQualifier(Config)}) */
public static String qualifyNamePerExec(String name, FileSystemJobStateful f, Config workerConfig) {
  return String.join("_", name, calcPerExecQualifier(f, workerConfig));
}
/** @return `name`, qualified to be unique per job execution via {@link #calcPerExecQualifier(Config)} */
public static String qualifyNamePerExec(String name, Config workerConfig) {
  return String.join("_", name, calcPerExecQualifier(workerConfig));
}
public static String calcPerExecQualifier(FileSystemJobStateful f, Config workerConfig) {
Optional<String> optFlowExecId = Optional.empty();
try {
optFlowExecId = Optional.of(loadJobState(f).getProp(ConfigurationKeys.FLOW_EXECUTION_ID_KEY, null));
} catch (IOException e) {
log.warn("unable to loadJobState", e);
}
return optFlowExecId.map(x -> x + "_").orElse("") + calcPerExecQualifier(workerConfig);
}
public static String calcPerExecQualifier(Config workerConfig) {
String userToProxy = workerConfig.hasPath(USER_TO_PROXY_KEY)
? workerConfig.getString(USER_TO_PROXY_KEY) : "";
String azFlowExecId = workerConfig.hasPath(AZKABAN_FLOW_EXEC_ID_KEY)
? workerConfig.getString(AZKABAN_FLOW_EXEC_ID_KEY) : UUID.randomUUID().toString();
return userToProxy + "_" + azFlowExecId;
}
public static FileSystem loadFileSystem(FileSystemApt a) throws IOException {
// NOTE: `FileSystem.get` appears to implement caching, which should facilitate sharing among activities executing on the same worker
return loadFileSystemForUri(a.getFileSystemUri(), a.getFileSystemConfig());
}
public static FileSystem loadFileSystemForUri(URI fsUri, State fsConfig) throws IOException {
// TODO - determine whether this works... unclear whether it led to "FS closed", or that had another cause...
// return HadoopUtils.getFileSystem(fsUri, fsConfig);
Configuration conf = HadoopUtils.getConfFromState(fsConfig);
return FileSystem.get(fsUri, conf);
}
public static FileSystem loadFileSystemForce(FileSystemApt a) throws IOException {
return loadFileSystemForUriForce(a.getFileSystemUri(), a.getFileSystemConfig());
}
public static FileSystem loadFileSystemForUriForce(URI fsUri, State fsConfig) throws IOException {
// for reasons still not fully understood, we encountered many "FS closed" failures before disabling HDFS caching--especially as num WUs increased.
// perhaps caching-facilitated reuse of the same FS across multiple WUs caused prior WU execs to leave the FS in a problematic state for subsequent execs
// TODO - more investigation to sort out the true RC... and whether caching definitively is or is not possible for use here!
// return HadoopUtils.getFileSystem(fsUri, fsConfig);
Configuration conf = HadoopUtils.getConfFromState(fsConfig);
conf.setBoolean("fs.hdfs.impl.disable.cache", true);
return FileSystem.get(fsUri, conf);
}
public static JobState loadJobState(FileSystemJobStateful f) throws IOException {
try (FileSystem fs = loadFileSystemForce(f)) {
return loadJobState(f, fs);
}
}
public static JobState loadJobState(JobStateful js, FileSystem fs) throws IOException {
try {
incrementJobStateAccess();
return jobStateByPath.get(js.getJobStatePath(), () ->
loadJobStateUncached(js, fs)
);
} catch (ExecutionException ee) {
throw new IOException(ee);
}
}
public static JobState loadJobStateUncached(JobStateful js, FileSystem fs) throws IOException {
JobState jobState = new JobState();
SerializationUtils.deserializeState(fs, js.getJobStatePath(), jobState);
log.info("loaded jobState from '{}': {}", js.getJobStatePath(), jobState.toJsonString(true));
return jobState;
}
public static JobState loadJobStateWithRetries(FileSystemJobStateful f) throws IOException {
try (FileSystem fs = loadFileSystemForce(f)) {
return loadJobStateWithRetries(f, fs);
}
}
public static JobState loadJobStateWithRetries(FileSystemJobStateful f, FileSystem fs) throws IOException {
try {
incrementJobStateAccess();
return jobStateByPath.get(f.getJobStatePath(), () ->
loadJobStateUncachedWithRetries(f, fs, f)
);
} catch (ExecutionException ee) {
throw new IOException(ee);
}
}
public static JobState loadJobStateUncachedWithRetries(JobStateful js, FileSystem fs, FileSystemApt fsApt) throws IOException {
JobState jobState = new JobState();
deserializeStateWithRetries(fs, js.getJobStatePath(), jobState, fsApt, MAX_DESERIALIZATION_FS_LOAD_ATTEMPTS);
log.info("loaded jobState from '{}': {}", js.getJobStatePath(), jobState.toJsonString(true));
return jobState;
}
public static <T extends State> void deserializeStateWithRetries(FileSystem fs, Path path, T state, FileSystemApt fsApt)
throws IOException {
deserializeStateWithRetries(fs, path, state, fsApt, MAX_DESERIALIZATION_FS_LOAD_ATTEMPTS);
}
// TODO: decide whether actually necessary... it was added in a fit of debugging "FS closed" errors
public static <T extends State> void deserializeStateWithRetries(FileSystem fs, Path path, T state, FileSystemApt fsApt, int maxAttempts)
throws IOException {
for (int i = 0; i < maxAttempts; ++i) {
if (i > 0) {
log.info("reopening FS '{}' to retry ({}) deserialization (attempt {})", fsApt.getFileSystemUri(),
state.getClass().getSimpleName(), i);
fs = Help.loadFileSystem(fsApt);
}
try {
SerializationUtils.deserializeState(fs, path, state);
return;
} catch (IOException ioe) {
if (ioe.getMessage().equals("Filesystem closed") && i < maxAttempts - 1) {
continue;
} else {
throw ioe;
}
}
}
}
public static StateStore<TaskState> openTaskStateStore(FileSystemJobStateful f) throws IOException {
try (FileSystem fs = Help.loadFileSystem(f)) {
return JobStateUtils.openTaskStateStore(Help.loadJobState(f, fs), fs);
}
}
public static StateStore<TaskState> openTaskStateStore(FileSystemJobStateful js, FileSystem fs) throws IOException {
return JobStateUtils.openTaskStateStoreUncached(loadJobState(js), fs);
// public static StateStore<TaskState> openTaskStateStore(JobStateful js, FileSystem fs) throws IOException {
// return JobStateUtils.openTaskStateStore(loadJobState(js, fs), fs);
}
private static void incrementJobStateAccess() {
int numAccesses = jobStateAccessCount.getAndIncrement();
if (numAccesses % LOG_CACHE_STATS_EVERY_N_ACCESSES == 0) {
log.info("JobState(numAccesses: {}) - {}", numAccesses, jobStateByPath.stats());
}
}
}
| 3,960 |
0 | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/ddm/work | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/ddm/work/styles/FileSystemApt.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.temporal.ddm.work.styles;
import java.net.URI;
import com.fasterxml.jackson.annotation.JsonIgnore;
import org.apache.gobblin.configuration.State;
/**
 * Marks a type that can indicate a {@link org.apache.hadoop.fs.FileSystem} via its {@link URI} and configuration.
 * Implementations supply everything needed to open/load that {@code FileSystem}.
 */
public interface FileSystemApt {
/** @return the {@link URI} identifying the target {@code FileSystem} */
URI getFileSystemUri();
/** @return configuration {@link State} used when opening/loading the {@code FileSystem} */
@JsonIgnore // (because no-arg method resembles 'java bean property')
State getFileSystemConfig();
}
| 3,961 |
0 | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/ddm/work | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/ddm/work/styles/FileSystemJobStateful.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.temporal.ddm.work.styles;
/**
 * Marks a type that can indicate both a {@link org.apache.hadoop.fs.FileSystem} and a
 * {@link org.apache.gobblin.runtime.JobState}: simply the union of {@link JobStateful} and
 * {@link FileSystemApt}, declaring no members of its own.
 */
public interface FileSystemJobStateful extends JobStateful, FileSystemApt {
}
| 3,962 |
0 | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/ddm/work | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/ddm/work/styles/JobStateful.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.temporal.ddm.work.styles;
import org.apache.hadoop.fs.Path;
/** Marks a type that can indicate a {@link org.apache.gobblin.runtime.JobState} via its {@link Path} */
public interface JobStateful {
/** @return the filesystem {@link Path} from which the serialized {@code JobState} may be loaded */
Path getJobStatePath();
}
| 3,963 |
0 | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/ddm | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/ddm/workflow/ProcessWorkUnitsWorkflow.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.temporal.ddm.workflow;
import io.temporal.workflow.WorkflowInterface;
import io.temporal.workflow.WorkflowMethod;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.temporal.ddm.work.WUProcessingSpec;
/** Temporal workflow for executing {@link WorkUnit}s to fulfill the work they specify. */
@WorkflowInterface
public interface ProcessWorkUnitsWorkflow {
/**
 * Execute the {@link WorkUnit}s indicated by {@code wuSpec}.
 * @return the number of {@link WorkUnit}s cumulatively processed successfully
 */
@WorkflowMethod
int process(WUProcessingSpec wuSpec);
}
| 3,964 |
0 | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/ddm/workflow | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/ddm/workflow/impl/NestingExecOfProcessWorkUnitWorkflowImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.temporal.ddm.workflow.impl;
import io.temporal.activity.ActivityOptions;
import io.temporal.common.RetryOptions;
import io.temporal.workflow.Async;
import io.temporal.workflow.Promise;
import io.temporal.workflow.Workflow;
import java.time.Duration;
import org.apache.gobblin.temporal.ddm.activity.ProcessWorkUnit;
import org.apache.gobblin.temporal.ddm.work.WorkUnitClaimCheck;
import org.apache.gobblin.temporal.util.nesting.workflow.AbstractNestingExecWorkflowImpl;
/** {@link org.apache.gobblin.temporal.util.nesting.workflow.NestingExecWorkflow} specialized to launch {@link ProcessWorkUnit} activities. */
public class NestingExecOfProcessWorkUnitWorkflowImpl extends AbstractNestingExecWorkflowImpl<WorkUnitClaimCheck, Integer> {

  /** Automatic retry policy applied whenever a `ProcessWorkUnit` activity invocation fails. */
  private static final RetryOptions RETRY_POLICY = RetryOptions.newBuilder()
      .setInitialInterval(Duration.ofSeconds(3))
      .setMaximumInterval(Duration.ofSeconds(100))
      .setBackoffCoefficient(2)
      .setMaximumAttempts(4)
      .build();

  /** Per-invocation activity settings: start-to-close timeout plus the retry policy above. */
  private static final ActivityOptions PROCESS_ACTIVITY_OPTS = ActivityOptions.newBuilder()
      .setStartToCloseTimeout(Duration.ofSeconds(999))
      .setRetryOptions(RETRY_POLICY)
      .build();

  /** Stub through which all `ProcessWorkUnit` invocations are dispatched. */
  private final ProcessWorkUnit processingActivity = Workflow.newActivityStub(ProcessWorkUnit.class, PROCESS_ACTIVITY_OPTS);

  /** Kick off `processWorkUnit` asynchronously for {@code wu} and hand back its {@link Promise}. */
  @Override
  protected Promise<Integer> launchAsyncActivity(final WorkUnitClaimCheck wu) {
    return Async.function(processingActivity::processWorkUnit, wu);
  }
}
| 3,965 |
0 | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/ddm/workflow | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/ddm/workflow/impl/ProcessWorkUnitsWorkflowImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.temporal.ddm.workflow.impl;
import java.util.Optional;
import com.typesafe.config.ConfigFactory;
import io.temporal.api.enums.v1.ParentClosePolicy;
import io.temporal.workflow.ChildWorkflowOptions;
import io.temporal.workflow.Workflow;
import org.apache.gobblin.temporal.cluster.WorkerConfig;
import org.apache.gobblin.temporal.ddm.work.WUProcessingSpec;
import org.apache.gobblin.temporal.ddm.work.WorkUnitClaimCheck;
import org.apache.gobblin.temporal.ddm.work.assistance.Help;
import org.apache.gobblin.temporal.ddm.work.styles.FileSystemJobStateful;
import org.apache.gobblin.temporal.ddm.workflow.ProcessWorkUnitsWorkflow;
import org.apache.gobblin.temporal.ddm.work.EagerFsDirBackedWorkUnitClaimCheckWorkload;
import org.apache.gobblin.temporal.util.nesting.work.WorkflowAddr;
import org.apache.gobblin.temporal.util.nesting.work.Workload;
import org.apache.gobblin.temporal.util.nesting.workflow.NestingExecWorkflow;
/** Super-workflow: enumerates work units beneath a FS dir and delegates their processing to a nested child workflow. */
public class ProcessWorkUnitsWorkflowImpl implements ProcessWorkUnitsWorkflow {
  public static final String CHILD_WORKFLOW_ID_BASE = "NestingExecWorkUnits";

  /** Run the entire workload; @return the count of work units processed successfully. */
  @Override
  public int process(WUProcessingSpec workSpec) {
    NestingExecWorkflow<WorkUnitClaimCheck> executor = createProcessingWorkflow(workSpec);
    Workload<WorkUnitClaimCheck> workload = createWorkload(workSpec);
    return executor.performWorkload(
        WorkflowAddr.ROOT,
        workload,
        0,
        workSpec.getTuning().getMaxBranchesPerTree(),
        workSpec.getTuning().getMaxSubTreesPerTree(),
        Optional.empty());
  }

  /** @return the workload backed eagerly by the work-unit files beneath {@code workSpec}'s dir */
  protected Workload<WorkUnitClaimCheck> createWorkload(WUProcessingSpec workSpec) {
    return new EagerFsDirBackedWorkUnitClaimCheckWorkload(workSpec.getFileSystemUri(), workSpec.getWorkUnitsDir());
  }

  /** @return a child-workflow stub whose workflow ID is qualified per-execution (from {@code f} + worker config) */
  protected NestingExecWorkflow<WorkUnitClaimCheck> createProcessingWorkflow(FileSystemJobStateful f) {
    ChildWorkflowOptions opts = ChildWorkflowOptions.newBuilder()
        .setParentClosePolicy(ParentClosePolicy.PARENT_CLOSE_POLICY_ABANDON)
        .setWorkflowId(Help.qualifyNamePerExec(CHILD_WORKFLOW_ID_BASE, f, WorkerConfig.of(this).orElse(ConfigFactory.empty())))
        .build();
    // TODO: to incorporate multiple different concrete `NestingExecWorkflow` sub-workflows in the same super-workflow... shall we use queues?!?!?
    return Workflow.newChildWorkflowStub(NestingExecWorkflow.class, opts);
  }
}
| 3,966 |
0 | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/ddm | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/ddm/worker/WorkFulfillmentWorker.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.temporal.ddm.worker;
import java.util.concurrent.TimeUnit;
import com.typesafe.config.Config;
import io.temporal.client.WorkflowClient;
import io.temporal.worker.WorkerOptions;
import org.apache.gobblin.temporal.cluster.AbstractTemporalWorker;
import org.apache.gobblin.temporal.ddm.activity.impl.ProcessWorkUnitImpl;
import org.apache.gobblin.temporal.ddm.workflow.impl.NestingExecOfProcessWorkUnitWorkflowImpl;
import org.apache.gobblin.temporal.ddm.workflow.impl.ProcessWorkUnitsWorkflowImpl;
/** Worker hosting the {@link ProcessWorkUnitsWorkflowImpl} super-workflow, its nested child workflow, and its activities. */
public class WorkFulfillmentWorker extends AbstractTemporalWorker {
  public static final long DEADLOCK_DETECTION_TIMEOUT_SECONDS = 120;
  public static final int MAX_EXECUTION_CONCURRENCY = 3;

  public WorkFulfillmentWorker(Config config, WorkflowClient workflowClient) {
    super(config, workflowClient);
  }

  /** @return the workflow implementations this worker registers */
  @Override
  protected Class<?>[] getWorkflowImplClasses() {
    return new Class<?>[] { ProcessWorkUnitsWorkflowImpl.class, NestingExecOfProcessWorkUnitWorkflowImpl.class };
  }

  /** @return the activity implementation instances this worker registers */
  @Override
  protected Object[] getActivityImplInstances() {
    return new Object[] { new ProcessWorkUnitImpl() };
  }

  /** @return worker options tuned w/ a longer deadlock-detection timeout plus bounded concurrency */
  @Override
  protected WorkerOptions createWorkerOptions() {
    // the default deadlock-detection timeout is only 1s - WAY TOO SHORT for `o.a.hadoop.fs.FileSystem#listStatus`!
    long deadlockDetectionMillis = TimeUnit.SECONDS.toMillis(DEADLOCK_DETECTION_TIMEOUT_SECONDS);
    return WorkerOptions.newBuilder()
        .setDefaultDeadlockDetectionTimeout(deadlockDetectionMillis)
        .setMaxConcurrentActivityExecutionSize(MAX_EXECUTION_CONCURRENCY)
        .setMaxConcurrentLocalActivityExecutionSize(MAX_EXECUTION_CONCURRENCY)
        .setMaxConcurrentWorkflowTaskExecutionSize(MAX_EXECUTION_CONCURRENCY)
        .build();
  }
}
| 3,967 |
0 | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/workflows | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/workflows/helloworld/FormatActivityImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.temporal.workflows.helloworld;
/** Default {@link FormatActivity} implementation: renders the greeting text. */
public class FormatActivityImpl implements FormatActivity {
  /** @return a greeting addressed to {@code name} */
  @Override
  public String composeGreeting(String name) {
    StringBuilder greeting = new StringBuilder();
    greeting.append("Hello ").append(name).append("!");
    return greeting.toString();
  }
}
| 3,968 |
0 | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/workflows | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/workflows/helloworld/GreetingWorkflowImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.temporal.workflows.helloworld;
import java.time.Duration;
import io.temporal.activity.ActivityOptions;
import io.temporal.workflow.Workflow;
/** {@link GreetingWorkflow} implementation: delegates greeting composition to the {@link FormatActivity} stub. */
public class GreetingWorkflowImpl implements GreetingWorkflow {

  /*
   * Activity invocation settings. At least one of the following options needs to be defined:
   * - setStartToCloseTimeout
   * - setScheduleToCloseTimeout
   */
  ActivityOptions options = ActivityOptions.newBuilder().setStartToCloseTimeout(Duration.ofSeconds(60)).build();

  /*
   * Stub/proxy for {@link FormatActivity} invocations. These execute outside the workflow thread, on an
   * activity worker that may live on a different host; Temporal dispatches the activity result back to
   * the workflow and unblocks the stub as soon as the activity completes. The options defined above are
   * passed in as a parameter.
   */
  private final FormatActivity formatActivity = Workflow.newActivityStub(FormatActivity.class, options);

  /**
   * Workflow entry point. Were there other Activity methods, they would be orchestrated here or from
   * within other Activities; this call blocks until the greeting activity has completed.
   */
  @Override
  public String getGreeting(String name) {
    return formatActivity.composeGreeting(name);
  }
}
| 3,969 |
0 | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/workflows | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/workflows/helloworld/HelloWorldWorker.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.temporal.workflows.helloworld;
import com.typesafe.config.Config;
import io.temporal.client.WorkflowClient;
import org.apache.gobblin.temporal.cluster.AbstractTemporalWorker;
/** Worker hosting the hello-world {@link GreetingWorkflowImpl} workflow and its {@link FormatActivityImpl} activity. */
public class HelloWorldWorker extends AbstractTemporalWorker {

  public HelloWorldWorker(Config config, WorkflowClient workflowClient) {
    super(config, workflowClient);
  }

  /** @return the workflow implementations this worker registers */
  @Override
  protected Class<?>[] getWorkflowImplClasses() {
    return new Class<?>[] { GreetingWorkflowImpl.class };
  }

  /** @return the activity implementation instances this worker registers */
  @Override
  protected Object[] getActivityImplInstances() {
    return new Object[] { new FormatActivityImpl() };
  }
}
| 3,970 |
0 | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/workflows | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/workflows/helloworld/HelloWorldJobLauncher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.temporal.workflows.helloworld;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.hadoop.fs.Path;
import io.temporal.client.WorkflowOptions;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.metrics.Tag;
import org.apache.gobblin.runtime.JobLauncher;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.temporal.cluster.GobblinTemporalTaskRunner;
import org.apache.gobblin.temporal.joblauncher.GobblinTemporalJobLauncher;
import org.apache.gobblin.temporal.joblauncher.GobblinTemporalJobScheduler;
/**
 * A {@link JobLauncher} that submits a Gobblin job through the Temporal task framework.
 *
 * <p>
 * Instantiated by {@link GobblinTemporalJobScheduler#buildJobLauncher(Properties)} upon each job submission;
 * the actual task execution happens within the {@link GobblinTemporalTaskRunner}, usually in a different process.
 * </p>
 */
@Alpha
@Slf4j
public class HelloWorldJobLauncher extends GobblinTemporalJobLauncher {

  public HelloWorldJobLauncher(Properties jobProps, Path appWorkDir, List<? extends Tag<?>> metadataTags,
      ConcurrentHashMap<String, Boolean> runningMap)
      throws Exception {
    super(jobProps, appWorkDir, metadataTags, runningMap);
  }

  /** Fire the {@link GreetingWorkflow} on this launcher's task queue (the {@code workunits} are not consulted). */
  @Override
  public void submitJob(List<WorkUnit> workunits) {
    WorkflowOptions workflowOpts = WorkflowOptions.newBuilder().setTaskQueue(this.queueName).build();
    GreetingWorkflow workflow = this.client.newWorkflowStub(GreetingWorkflow.class, workflowOpts);
    workflow.getGreeting("Gobblin");
  }
}
| 3,971 |
0 | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/workflows | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/workflows/helloworld/FormatActivity.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.temporal.workflows.helloworld;
import io.temporal.activity.ActivityInterface;
/** Temporal activity contract for composing greeting text; its methods can be called during workflow execution. */
@ActivityInterface
public interface FormatActivity {
// Define your activity methods which can be called during workflow execution
/** @return a greeting addressed to {@code name} */
String composeGreeting(String name);
}
| 3,972 |
0 | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/workflows | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/workflows/helloworld/GreetingWorkflow.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.temporal.workflows.helloworld;
import io.temporal.workflow.WorkflowInterface;
import io.temporal.workflow.WorkflowMethod;
/** Hello-world Temporal workflow contract: produce a greeting for a given name. */
@WorkflowInterface
public interface GreetingWorkflow {
/**
 * This is the method that is executed when the Workflow Execution is started. The Workflow
 * Execution completes when this method finishes execution.
 * @return the composed greeting text
 */
@WorkflowMethod
String getGreeting(String name);
}
| 3,973 |
0 | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/workflows | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/workflows/client/TemporalWorkflowClientFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.temporal.workflows.client;
import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.security.KeyStore;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import org.apache.commons.io.FileUtils;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import io.grpc.netty.shaded.io.grpc.netty.GrpcSslContexts;
import io.grpc.netty.shaded.io.netty.handler.ssl.SslContext;
import io.temporal.client.WorkflowClient;
import io.temporal.client.WorkflowClientOptions;
import io.temporal.serviceclient.WorkflowServiceStubs;
import io.temporal.serviceclient.WorkflowServiceStubsOptions;
import javax.net.ssl.KeyManagerFactory;
import javax.net.ssl.TrustManagerFactory;
import org.apache.gobblin.cluster.GobblinClusterUtils;
/**
 * Static factory for Temporal {@link WorkflowServiceStubs} and {@link WorkflowClient} instances.
 *
 * The service stubs are created for mutual TLS, with the key and trust material sourced from the
 * shared Kafka SSL settings of the resolved cluster {@link Config} (keystore/truststore locations
 * and passwords).
 */
public class TemporalWorkflowClientFactory {

  // Config keys for the SSL material, shared with the Kafka client configuration.
  private static final String SHARED_KAFKA_CONFIG_PREFIX_WITH_DOT = "gobblin.kafka.sharedConfig.";
  private static final String SSL_KEYMANAGER_ALGORITHM = SHARED_KAFKA_CONFIG_PREFIX_WITH_DOT + "ssl.keymanager.algorithm";
  private static final String SSL_KEYSTORE_TYPE = SHARED_KAFKA_CONFIG_PREFIX_WITH_DOT + "ssl.keystore.type";
  private static final String SSL_KEYSTORE_LOCATION = SHARED_KAFKA_CONFIG_PREFIX_WITH_DOT + "ssl.keystore.location";
  private static final String SSL_KEY_PASSWORD = SHARED_KAFKA_CONFIG_PREFIX_WITH_DOT + "ssl.key.password";
  private static final String SSL_TRUSTSTORE_LOCATION = SHARED_KAFKA_CONFIG_PREFIX_WITH_DOT + "ssl.truststore.location";
  private static final String SSL_TRUSTSTORE_PASSWORD = SHARED_KAFKA_CONFIG_PREFIX_WITH_DOT + "ssl.truststore.password";

  // TLSv1.2 only, matching the HTTP/2-compatible cipher suites below.
  private static final List<String> SSL_CONFIG_DEFAULT_SSL_PROTOCOLS =
      Collections.unmodifiableList(Arrays.asList("TLSv1.2"));

  // The following list is from https://github.com/netty/netty/blob/4.1/codec-http2/src/main/java/io/netty/handler/codec/http2/Http2SecurityUtil.java#L50
  private static final List<String> SSL_CONFIG_DEFAULT_CIPHER_SUITES = Collections.unmodifiableList(Arrays.asList(
      "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
      /* REQUIRED BY HTTP/2 SPEC */
      "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
      /* REQUIRED BY HTTP/2 SPEC */
      "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
      "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
      "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256",
      "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256"
  ));

  /** Utility class; not instantiable. */
  private TemporalWorkflowClientFactory() {
  }

  /**
   * Creates gRPC service stubs for the Temporal frontend at {@code connectionUri}, configured for
   * HTTPS with a key manager and trust manager loaded from the cluster config.
   *
   * @param connectionUri target (host:port) of the Temporal frontend
   * @throws Exception if the config cannot be resolved or the SSL stores cannot be loaded
   */
  public static WorkflowServiceStubs createServiceInstance(String connectionUri) throws Exception {
    GobblinClusterUtils.setSystemProperties(ConfigFactory.load());
    Config config = GobblinClusterUtils.addDynamicConfig(ConfigFactory.load());

    // Key manager backed by the configured keystore.
    String keyStoreType = config.getString(SSL_KEYSTORE_TYPE);
    File keyStoreFile = new File(config.getString(SSL_KEYSTORE_LOCATION));
    String keyStorePassword = config.getString(SSL_KEY_PASSWORD);
    KeyStore keyStore = KeyStore.getInstance(keyStoreType);
    keyStore.load(toInputStream(keyStoreFile), keyStorePassword.toCharArray());
    String sslKeyManagerAlgorithm = config.getString(SSL_KEYMANAGER_ALGORITHM);
    KeyManagerFactory keyManagerFactory = KeyManagerFactory.getInstance(sslKeyManagerAlgorithm);
    keyManagerFactory.init(keyStore, keyStorePassword.toCharArray());

    // Trust manager backed by the configured truststore (JKS).
    KeyStore trustStore = KeyStore.getInstance("JKS");
    File trustStoreFile = new File(config.getString(SSL_TRUSTSTORE_LOCATION));
    String trustStorePassword = config.getString(SSL_TRUSTSTORE_PASSWORD);
    trustStore.load(toInputStream(trustStoreFile), trustStorePassword.toCharArray());
    TrustManagerFactory trustManagerFactory = TrustManagerFactory.getInstance("SunX509");
    trustManagerFactory.init(trustStore);

    SslContext sslContext = GrpcSslContexts.forClient()
        .keyManager(keyManagerFactory)
        .trustManager(trustManagerFactory)
        .protocols(SSL_CONFIG_DEFAULT_SSL_PROTOCOLS)
        .ciphers(SSL_CONFIG_DEFAULT_CIPHER_SUITES)
        .build();

    WorkflowServiceStubsOptions options = WorkflowServiceStubsOptions.newBuilder()
        .setTarget(connectionUri)
        .setEnableHttps(true)
        .setSslContext(sslContext)
        .build();
    return WorkflowServiceStubs.newServiceStubs(options);
  }

  /**
   * Creates a {@link WorkflowClient} over the given service stubs, bound to {@code namespace}.
   */
  public static WorkflowClient createClientInstance(WorkflowServiceStubs service, String namespace) {
    WorkflowClientOptions options = WorkflowClientOptions.newBuilder().setNamespace(namespace).build();
    return WorkflowClient.newInstance(service, options);
  }

  /**
   * Reads the store file fully into memory and returns it as a stream.
   * A ByteArrayInputStream needs no explicit close, so callers may pass it directly to KeyStore.load.
   */
  private static InputStream toInputStream(File storeFile)
      throws IOException {
    byte[] data = FileUtils.readFileToByteArray(storeFile);
    return new ByteArrayInputStream(data);
  }
}
| 3,974 |
0 | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/yarn/YarnTemporalAppMasterSecurityManager.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.temporal.yarn;
import com.google.common.base.Throwables;
import com.google.common.eventbus.EventBus;
import com.typesafe.config.Config;
import java.io.IOException;
import org.apache.gobblin.util.logs.LogCopier;
import org.apache.gobblin.yarn.YarnContainerSecurityManager;
import org.apache.gobblin.yarn.event.DelegationTokenUpdatedEvent;
import org.apache.hadoop.fs.FileSystem;
/**
* Copied from {@link org.apache.gobblin.yarn.YarnAppMasterSecurityManager} that uses the {@link YarnService}
*
* This class was created for a fast way to start building out Gobblin on temporal without affecting mainline Yarn/Helix code
*/
public class YarnTemporalAppMasterSecurityManager extends YarnContainerSecurityManager {

  // The Yarn service whose AMRM token must be refreshed whenever the delegation token file changes.
  private final YarnService yarnService;

  public YarnTemporalAppMasterSecurityManager(Config config, FileSystem fs, EventBus eventBus, LogCopier logCopier, YarnService yarnService) {
    super(config, fs, eventBus, logCopier);
    this.yarnService = yarnService;
  }

  /**
   * Handles a delegation-token-file update by first running the base-class handling, then asking the
   * {@link YarnService} to re-read its security tokens.
   */
  @Override
  public void handleTokenFileUpdatedEvent(DelegationTokenUpdatedEvent delegationTokenUpdatedEvent) {
    super.handleTokenFileUpdatedEvent(delegationTokenUpdatedEvent);
    try {
      this.yarnService.updateToken();
    } catch (IOException ioe) {
      // Rethrow unchecked (Throwables.propagate is deprecated): a failed token refresh leaves the
      // application unable to authenticate, so it must not be swallowed.
      throw new RuntimeException(ioe);
    }
  }
}
| 3,975 |
0 | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/yarn/YarnService.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.temporal.yarn;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
import com.google.common.base.Throwables;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
import com.google.common.eventbus.EventBus;
import com.google.common.eventbus.Subscribe;
import com.google.common.io.Closer;
import com.google.common.util.concurrent.AbstractIdleService;
import com.typesafe.config.Config;
import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.time.Duration;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.IntStream;
import lombok.AccessLevel;
import lombok.Getter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.gobblin.cluster.GobblinClusterConfigurationKeys;
import org.apache.gobblin.cluster.GobblinClusterMetricTagNames;
import org.apache.gobblin.cluster.GobblinClusterUtils;
import org.apache.gobblin.cluster.event.ClusterManagerShutdownRequest;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.metrics.GobblinMetrics;
import org.apache.gobblin.metrics.MetricReporterException;
import org.apache.gobblin.metrics.MultiReporterException;
import org.apache.gobblin.metrics.Tag;
import org.apache.gobblin.metrics.event.EventSubmitter;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.ExecutorsUtils;
import org.apache.gobblin.util.JvmUtils;
import org.apache.gobblin.util.executors.ScalingThreadPoolExecutor;
import org.apache.gobblin.yarn.GobblinYarnConfigurationKeys;
import org.apache.gobblin.yarn.GobblinYarnEventConstants;
import org.apache.gobblin.yarn.GobblinYarnMetricTagNames;
import org.apache.gobblin.yarn.YarnHelixUtils;
import org.apache.gobblin.yarn.event.ContainerReleaseRequest;
import org.apache.gobblin.yarn.event.ContainerShutdownRequest;
import org.apache.gobblin.yarn.event.NewContainerRequest;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.LocalResourceType;
import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.client.api.AMRMClient;
import org.apache.hadoop.yarn.client.api.async.AMRMClientAsync;
import org.apache.hadoop.yarn.client.api.async.NMClientAsync;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
import org.apache.hadoop.yarn.util.Records;
/**
* This class is responsible for all Yarn-related stuffs including ApplicationMaster registration,
* ApplicationMaster un-registration, Yarn container management, etc.
*
* NOTE: This is a stripped down version of {@link org.apache.gobblin.yarn.YarnService} that is used for temporal testing
* without any dependency on Helix. There are some references to helix concepts, but they are left in for the sake of
* keeping some features in-tact. They don't have an actual dependency on helix anymore.
*
*/
class YarnService extends AbstractIdleService {
  private static final Logger LOGGER = LoggerFactory.getLogger(YarnService.class);
  // Placeholder participant id used when a container cannot be mapped back to a Helix instance.
  private static final String UNKNOWN_HELIX_INSTANCE = "UNKNOWN";
  private final String applicationName;
  private final String applicationId;
  // ACL string granting view access to the application UI/logs (applied in newContainerLaunchContext).
  private final String appViewAcl;
  //Default helix instance tag derived from cluster level config
  private final String helixInstanceTags;
  private final Config config;
  private final EventBus eventBus;
  private final Configuration yarnConfiguration;
  private final FileSystem fs;
  // Both are present iff metrics are enabled in the config (see constructor).
  private final Optional<GobblinMetrics> gobblinMetrics;
  private final Optional<EventSubmitter> eventSubmitter;
  @VisibleForTesting
  @Getter(AccessLevel.PROTECTED)
  private final AMRMClientAsync<AMRMClient.ContainerRequest> amrmClientAsync;
  private final NMClientAsync nmClientAsync;
  private final ExecutorService containerLaunchExecutor;
  private final int initialContainers;
  private final int requestedContainerMemoryMbs;
  private final int requestedContainerCores;
  // JVM heap sizing: Xmx = containerMemory * jvmMemoryXmxRatio - jvmMemoryOverheadMbs
  // (relationship validated in the constructor).
  private final int jvmMemoryOverheadMbs;
  private final double jvmMemoryXmxRatio;
  private final boolean containerHostAffinityEnabled;
  private final int helixInstanceMaxRetries;
  private final Optional<String> containerJvmArgs;
  private final String containerTimezone;
  // Populated from the RM's RegisterApplicationMasterResponse during startUp().
  @Getter(AccessLevel.PROTECTED)
  private volatile Optional<Resource> maxResourceCapacity = Optional.absent();
  // Security tokens for accessing HDFS; refreshed via updateToken().
  private ByteBuffer tokens;
  private final Closer closer = Closer.create();
  // Monitor object used by shutDown() to wait (bounded) for all containers to stop.
  private final Object allContainersStopped = new Object();
  // A map from container IDs to Container instances, Helix participant IDs of the containers and Helix Tag
  @VisibleForTesting
  @Getter(AccessLevel.PROTECTED)
  private final ConcurrentMap<ContainerId, ContainerInfo> containerMap = Maps.newConcurrentMap();
  // A cache of the containers with an outstanding container release request.
  // This is a cache instead of a set to get the automatic cleanup in case a container completes before the requested
  // release.
  @VisibleForTesting
  @Getter(AccessLevel.PROTECTED)
  private final Cache<ContainerId, String> releasedContainerCache;
  // A map from Helix instance names to the number times the instances are retried to be started
  private final ConcurrentMap<String, AtomicInteger> helixInstanceRetryCount = Maps.newConcurrentMap();
  // A concurrent HashSet of unused Helix instance names. An unused Helix instance name gets put
  // into the set if the container running the instance completes. Unused Helix
  // instance names get picked up when replacement containers get allocated.
  private final Set<String> unusedHelixInstanceNames = ConcurrentHashMap.newKeySet();
  // The map from helix tag to allocated container count
  private final ConcurrentMap<String, AtomicInteger> allocatedContainerCountMap = Maps.newConcurrentMap();
  private final ConcurrentMap<ContainerId, String> removedContainerID = Maps.newConcurrentMap();
  // Generates a distinct priority per distinct resource requirement (YARN-314 workaround; see requestContainer).
  private final AtomicInteger priorityNumGenerator = new AtomicInteger(0);
  private final Map<String, Integer> resourcePriorityMap = new HashMap<>();
  // Set when shutDown() begins so asynchronous callbacks can avoid spawning replacement containers.
  private volatile boolean shutdownInProgress = false;
  /**
   * Creates the Yarn service: wires up the AMRM and NM async clients, reads container sizing,
   * retry and JVM-memory settings from {@code config}, and captures the current security tokens.
   *
   * @throws Exception if required config keys are missing or the security tokens cannot be read
   */
  public YarnService(Config config, String applicationName, String applicationId, YarnConfiguration yarnConfiguration,
      FileSystem fs, EventBus eventBus) throws Exception {
    this.applicationName = applicationName;
    this.applicationId = applicationId;
    this.config = config;
    this.eventBus = eventBus;
    // Metrics and event submission are both gated on the same metrics-enabled flag.
    this.gobblinMetrics = config.getBoolean(ConfigurationKeys.METRICS_ENABLED_KEY) ?
        Optional.of(buildGobblinMetrics()) : Optional.<GobblinMetrics>absent();
    this.eventSubmitter = config.getBoolean(ConfigurationKeys.METRICS_ENABLED_KEY) ?
        Optional.of(buildEventSubmitter()) : Optional.<EventSubmitter>absent();
    this.yarnConfiguration = yarnConfiguration;
    this.fs = fs;
    // Heartbeat interval is configured in seconds but the AMRM client expects milliseconds.
    int amRmHeartbeatIntervalMillis = Long.valueOf(TimeUnit.SECONDS.toMillis(
        ConfigUtils.getInt(config, GobblinYarnConfigurationKeys.AMRM_HEARTBEAT_INTERVAL_SECS,
            GobblinYarnConfigurationKeys.DEFAULT_AMRM_HEARTBEAT_INTERVAL_SECS))).intValue();
    this.amrmClientAsync = closer.register(
        AMRMClientAsync.createAMRMClientAsync(amRmHeartbeatIntervalMillis, new AMRMClientCallbackHandler()));
    this.amrmClientAsync.init(this.yarnConfiguration);
    this.nmClientAsync = closer.register(NMClientAsync.createNMClientAsync(getNMClientCallbackHandler()));
    this.nmClientAsync.init(this.yarnConfiguration);
    this.initialContainers = config.getInt(GobblinYarnConfigurationKeys.INITIAL_CONTAINERS_KEY);
    this.requestedContainerMemoryMbs = config.getInt(GobblinYarnConfigurationKeys.CONTAINER_MEMORY_MBS_KEY);
    this.requestedContainerCores = config.getInt(GobblinYarnConfigurationKeys.CONTAINER_CORES_KEY);
    this.containerHostAffinityEnabled = config.getBoolean(GobblinYarnConfigurationKeys.CONTAINER_HOST_AFFINITY_ENABLED);
    this.helixInstanceMaxRetries = config.getInt(GobblinYarnConfigurationKeys.HELIX_INSTANCE_MAX_RETRIES);
    this.helixInstanceTags = ConfigUtils.getString(config,
        GobblinClusterConfigurationKeys.HELIX_INSTANCE_TAGS_KEY, GobblinClusterConfigurationKeys.HELIX_DEFAULT_TAG);
    this.containerJvmArgs = config.hasPath(GobblinYarnConfigurationKeys.CONTAINER_JVM_ARGS_KEY) ?
        Optional.of(config.getString(GobblinYarnConfigurationKeys.CONTAINER_JVM_ARGS_KEY)) :
        Optional.<String>absent();
    int numContainerLaunchThreads =
        ConfigUtils.getInt(config, GobblinYarnConfigurationKeys.MAX_CONTAINER_LAUNCH_THREADS_KEY,
            GobblinYarnConfigurationKeys.DEFAULT_MAX_CONTAINER_LAUNCH_THREADS);
    this.containerLaunchExecutor = ScalingThreadPoolExecutor.newScalingThreadPool(5, numContainerLaunchThreads, 0L,
        ExecutorsUtils.newThreadFactory(Optional.of(LOGGER), Optional.of("ContainerLaunchExecutor")));
    // Snapshot of the current user's HDFS delegation tokens; refreshed later via updateToken().
    this.tokens = getSecurityTokens();
    // Expiry gives automatic cleanup when a container completes before the requested release.
    this.releasedContainerCache = CacheBuilder.newBuilder().expireAfterAccess(ConfigUtils.getInt(config,
        GobblinYarnConfigurationKeys.RELEASED_CONTAINERS_CACHE_EXPIRY_SECS,
        GobblinYarnConfigurationKeys.DEFAULT_RELEASED_CONTAINERS_CACHE_EXPIRY_SECS), TimeUnit.SECONDS).build();
    this.jvmMemoryXmxRatio = ConfigUtils.getDouble(this.config,
        GobblinYarnConfigurationKeys.CONTAINER_JVM_MEMORY_XMX_RATIO_KEY,
        GobblinYarnConfigurationKeys.DEFAULT_CONTAINER_JVM_MEMORY_XMX_RATIO);
    Preconditions.checkArgument(this.jvmMemoryXmxRatio >= 0 && this.jvmMemoryXmxRatio <= 1,
        GobblinYarnConfigurationKeys.CONTAINER_JVM_MEMORY_XMX_RATIO_KEY + " must be between 0 and 1 inclusive");
    this.jvmMemoryOverheadMbs = ConfigUtils.getInt(this.config,
        GobblinYarnConfigurationKeys.CONTAINER_JVM_MEMORY_OVERHEAD_MBS_KEY,
        GobblinYarnConfigurationKeys.DEFAULT_CONTAINER_JVM_MEMORY_OVERHEAD_MBS);
    // Overhead must leave a positive Xmx: overhead < containerMemory * xmxRatio.
    Preconditions.checkArgument(this.jvmMemoryOverheadMbs < this.requestedContainerMemoryMbs * this.jvmMemoryXmxRatio,
        GobblinYarnConfigurationKeys.CONTAINER_JVM_MEMORY_OVERHEAD_MBS_KEY + " cannot be more than "
            + GobblinYarnConfigurationKeys.CONTAINER_MEMORY_MBS_KEY + " * "
            + GobblinYarnConfigurationKeys.CONTAINER_JVM_MEMORY_XMX_RATIO_KEY);
    this.appViewAcl = ConfigUtils.getString(this.config, GobblinYarnConfigurationKeys.APP_VIEW_ACL,
        GobblinYarnConfigurationKeys.DEFAULT_APP_VIEW_ACL);
    this.containerTimezone = ConfigUtils.getString(this.config, GobblinYarnConfigurationKeys.GOBBLIN_YARN_CONTAINER_TIMEZONE,
        GobblinYarnConfigurationKeys.DEFAULT_GOBBLIN_YARN_CONTAINER_TIMEZONE);
  }
@SuppressWarnings("unused")
@Subscribe
public void handleNewContainerRequest(NewContainerRequest newContainerRequest) {
if (!this.maxResourceCapacity.isPresent()) {
LOGGER.error(String.format(
"Unable to handle new container request as maximum resource capacity is not available: "
+ "[memory (MBs) requested = %d, vcores requested = %d]", this.requestedContainerMemoryMbs,
this.requestedContainerCores));
return;
}
requestContainer(newContainerRequest.getReplacedContainer().transform(container -> container.getNodeId().getHost()),
newContainerRequest.getResource());
}
  /** Factory hook so subclasses and tests can supply a custom NodeManager callback handler. */
  protected NMClientCallbackHandler getNMClientCallbackHandler() {
    return new NMClientCallbackHandler();
  }
@SuppressWarnings("unused")
@Subscribe
public void handleContainerShutdownRequest(ContainerShutdownRequest containerShutdownRequest) {
for (Container container : containerShutdownRequest.getContainers()) {
LOGGER.info(String.format("Stopping container %s running on %s", container.getId(), container.getNodeId()));
this.nmClientAsync.stopContainerAsync(container.getId(), container.getNodeId());
}
}
/**
* Request the Resource Manager to release the container
* @param containerReleaseRequest containers to release
*/
@Subscribe
public void handleContainerReleaseRequest(ContainerReleaseRequest containerReleaseRequest) {
for (Container container : containerReleaseRequest.getContainers()) {
LOGGER.info(String.format("Releasing container %s running on %s", container.getId(), container.getNodeId()));
// Record that this container was explicitly released so that a new one is not spawned to replace it
// Put the container id in the releasedContainerCache before releasing it so that handleContainerCompletion()
// can check for the container id and skip spawning a replacement container.
// Note that this is the best effort since these are asynchronous operations and a container may abort concurrently
// with the release call. So in some cases a replacement container may have already been spawned before
// the container is put into the black list.
this.releasedContainerCache.put(container.getId(), "");
this.amrmClientAsync.releaseAssignedContainer(container.getId());
}
}
@Override
protected synchronized void startUp() throws Exception {
LOGGER.info("Starting the TemporalYarnService");
// Register itself with the EventBus for container-related requests
this.eventBus.register(this);
this.amrmClientAsync.start();
this.nmClientAsync.start();
// The ApplicationMaster registration response is used to determine the maximum resource capacity of the cluster
RegisterApplicationMasterResponse response = this.amrmClientAsync.registerApplicationMaster(
GobblinClusterUtils.getHostname(), -1, "");
LOGGER.info("ApplicationMaster registration response: " + response);
this.maxResourceCapacity = Optional.of(response.getMaximumResourceCapability());
LOGGER.info("Requesting initial containers");
requestInitialContainers(this.initialContainers);
}
  /**
   * Stops the service: marks shutdown in progress, shuts down the container-launch executor, asks
   * the NodeManager to stop every tracked container, waits (bounded, 5 minutes) for them to stop,
   * then unregisters the ApplicationMaster and closes resources / metrics reporting.
   */
  @Override
  protected void shutDown() throws IOException {
    LOGGER.info("Stopping the TemporalYarnService");
    // Signals asynchronous callbacks not to spawn replacement containers during teardown.
    this.shutdownInProgress = true;
    try {
      ExecutorsUtils.shutdownExecutorService(this.containerLaunchExecutor, Optional.of(LOGGER));
      // Stop the running containers
      for (ContainerInfo containerInfo : this.containerMap.values()) {
        LOGGER.info("Stopping container {} running participant {}", containerInfo.getContainer().getId(),
            containerInfo.getHelixParticipantId());
        this.nmClientAsync.stopContainerAsync(containerInfo.getContainer().getId(), containerInfo.getContainer().getNodeId());
      }
      if (!this.containerMap.isEmpty()) {
        synchronized (this.allContainersStopped) {
          try {
            // Wait 5 minutes for the containers to stop
            Duration waitTimeout = Duration.ofMinutes(5);
            this.allContainersStopped.wait(waitTimeout.toMillis());
            // NOTE(review): wait() may also return on timeout or spurious wakeup, so this log line
            // does not guarantee all containers actually stopped.
            LOGGER.info("All of the containers have been stopped");
          } catch (InterruptedException ie) {
            // Re-assert the interrupt and proceed with unregistration.
            Thread.currentThread().interrupt();
          }
        }
      }
      this.amrmClientAsync.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED, null, null);
    } catch (IOException | YarnException e) {
      // Best effort: failure to unregister should not prevent resource cleanup below.
      LOGGER.error("Failed to unregister the ApplicationMaster", e);
    } finally {
      try {
        this.closer.close();
      } finally {
        if (this.gobblinMetrics.isPresent()) {
          this.gobblinMetrics.get().stopMetricsReporting();
        }
      }
    }
  }
  /** Re-reads the current user's security tokens; invoked when the delegation token file is refreshed. */
  public void updateToken() throws IOException{
    this.tokens = getSecurityTokens();
  }
  /**
   * Builds and starts Gobblin metrics reporting, tagged with this application's id and name.
   * Reporter start-up failures are logged per sink but do not abort construction.
   */
  private GobblinMetrics buildGobblinMetrics() {
    // Create tags list
    ImmutableList.Builder<Tag<?>> tags = new ImmutableList.Builder<>();
    tags.add(new Tag<>(GobblinClusterMetricTagNames.APPLICATION_ID, this.applicationId));
    tags.add(new Tag<>(GobblinClusterMetricTagNames.APPLICATION_NAME, this.applicationName));
    // Initialize Gobblin metrics and start reporters
    GobblinMetrics gobblinMetrics = GobblinMetrics.get(this.applicationId, null, tags.build());
    try {
      gobblinMetrics.startMetricReporting(ConfigUtils.configToProperties(config));
    } catch (MultiReporterException ex) {
      // Best effort: a failed reporter should not prevent the Yarn service from coming up.
      for (MetricReporterException e: ex.getExceptions()) {
        LOGGER.error("Failed to start {} {} reporter.", e.getSinkType().name(), e.getReporterType().name(), e);
      }
    }
    return gobblinMetrics;
  }
  /**
   * Builds the event submitter under the Gobblin Yarn event namespace.
   * Only called when metrics are enabled, so {@code gobblinMetrics} is present.
   */
  private EventSubmitter buildEventSubmitter() {
    return new EventSubmitter.Builder(this.gobblinMetrics.get().getMetricContext(),
        GobblinYarnEventConstants.EVENT_NAMESPACE)
        .build();
  }
/**
* Request an allocation of containers. If numTargetContainers is larger than the max of current and expected number
* of containers then additional containers are requested.
* <p>
* If numTargetContainers is less than the current number of allocated containers then release free containers.
* Shrinking is relative to the number of currently allocated containers since it takes time for containers
* to be allocated and assigned work and we want to avoid releasing a container prematurely before it is assigned
* work. This means that a container may not be released even though numTargetContainers is less than the requested
* number of containers. The intended usage is for the caller of this method to make periodic calls to attempt to
* adjust the cluster towards the desired number of containers.
*
* @param inUseInstances a set of in use instances
* @return whether successfully requested the target number of containers
*/
public synchronized boolean requestTargetNumberOfContainers(int numContainers, Set<String> inUseInstances) {
int defaultContainerMemoryMbs = config.getInt(GobblinYarnConfigurationKeys.CONTAINER_MEMORY_MBS_KEY);
int defaultContainerCores = config.getInt(GobblinYarnConfigurationKeys. CONTAINER_CORES_KEY);
LOGGER.info("Trying to set numTargetContainers={}, in-use helix instances count is {}, container map size is {}",
numContainers, inUseInstances.size(), this.containerMap.size());
requestContainers(numContainers, Resource.newInstance(defaultContainerMemoryMbs, defaultContainerCores));
LOGGER.info("Current tag-container desired count:{}, tag-container allocated: {}", numContainers, this.allocatedContainerCountMap);
return true;
}
// Request initial containers with default resource and helix tag
private void requestInitialContainers(int containersRequested) {
requestTargetNumberOfContainers(containersRequested, Collections.EMPTY_SET);
}
private void requestContainer(Optional<String> preferredNode, Optional<Resource> resourceOptional) {
Resource desiredResource = resourceOptional.or(Resource.newInstance(
this.requestedContainerMemoryMbs, this.requestedContainerCores));
requestContainer(preferredNode, desiredResource);
}
/**
* Request {@param numContainers} from yarn with the specified resource. Resources will be allocated without a preferred
* node
* @param numContainers
* @param resource
*/
private void requestContainers(int numContainers, Resource resource) {
LOGGER.info("Requesting {} containers with resource={}", numContainers, resource);
IntStream.range(0, numContainers)
.forEach(i -> requestContainer(Optional.absent(), resource));
}
// Request containers with specific resource requirement
private void requestContainer(Optional<String> preferredNode, Resource resource) {
// Fail if Yarn cannot meet container resource requirements
Preconditions.checkArgument(resource.getMemory() <= this.maxResourceCapacity.get().getMemory() &&
resource.getVirtualCores() <= this.maxResourceCapacity.get().getVirtualCores(),
"Resource requirement must less than the max resource capacity. Requested resource" + resource.toString()
+ " exceed the max resource limit " + this.maxResourceCapacity.get().toString());
// Due to YARN-314, different resource capacity needs different priority, otherwise Yarn will not allocate container
Priority priority = Records.newRecord(Priority.class);
if(!resourcePriorityMap.containsKey(resource.toString())) {
resourcePriorityMap.put(resource.toString(), priorityNumGenerator.getAndIncrement());
}
int priorityNum = resourcePriorityMap.get(resource.toString());
priority.setPriority(priorityNum);
String[] preferredNodes = preferredNode.isPresent() ? new String[] {preferredNode.get()} : null;
this.amrmClientAsync.addContainerRequest(
new AMRMClient.ContainerRequest(resource, preferredNodes, null, priority));
}
  /**
   * Builds the {@link ContainerLaunchContext} for a new container: localizes the shared lib jars
   * and per-container app jars/files (plus any configured remote files/zips), then sets the
   * environment, startup command, view ACLs, and — when Hadoop security is enabled — a duplicate
   * of the serialized security tokens.
   */
  protected ContainerLaunchContext newContainerLaunchContext(ContainerInfo containerInfo)
      throws IOException {
    Path appWorkDir = GobblinClusterUtils.getAppWorkDirPathFromConfig(this.config, this.fs, this.applicationName, this.applicationId);
    Path containerWorkDir = new Path(appWorkDir, GobblinYarnConfigurationKeys.CONTAINER_WORK_DIR_NAME);
    Map<String, LocalResource> resourceMap = Maps.newHashMap();
    // Shared library jars live under the app work dir; app jars/files under the container work dir.
    addContainerLocalResources(new Path(appWorkDir, GobblinYarnConfigurationKeys.LIB_JARS_DIR_NAME), resourceMap);
    addContainerLocalResources(new Path(containerWorkDir, GobblinYarnConfigurationKeys.APP_JARS_DIR_NAME), resourceMap);
    addContainerLocalResources(
        new Path(containerWorkDir, GobblinYarnConfigurationKeys.APP_FILES_DIR_NAME), resourceMap);
    if (this.config.hasPath(GobblinYarnConfigurationKeys.CONTAINER_FILES_REMOTE_KEY)) {
      YarnHelixUtils.addRemoteFilesToLocalResources(this.config.getString(GobblinYarnConfigurationKeys.CONTAINER_FILES_REMOTE_KEY),
          resourceMap, yarnConfiguration);
    }
    if (this.config.hasPath(GobblinYarnConfigurationKeys.CONTAINER_ZIPS_REMOTE_KEY)) {
      YarnHelixUtils.addRemoteZipsToLocalResources(this.config.getString(GobblinYarnConfigurationKeys.CONTAINER_ZIPS_REMOTE_KEY),
          resourceMap, yarnConfiguration);
    }
    ContainerLaunchContext containerLaunchContext = Records.newRecord(ContainerLaunchContext.class);
    containerLaunchContext.setLocalResources(resourceMap);
    containerLaunchContext.setEnvironment(YarnHelixUtils.getEnvironmentVariables(this.yarnConfiguration));
    containerLaunchContext.setCommands(Arrays.asList(containerInfo.getStartupCommand()));
    Map<ApplicationAccessType, String> acls = new HashMap<>(1);
    acls.put(ApplicationAccessType.VIEW_APP, this.appViewAcl);
    containerLaunchContext.setApplicationACLs(acls);
    if (UserGroupInformation.isSecurityEnabled()) {
      // duplicate() gives each container an independent read position over the shared token buffer.
      containerLaunchContext.setTokens(this.tokens.duplicate());
    }
    return containerLaunchContext;
  }
private void addContainerLocalResources(Path destDir, Map<String, LocalResource> resourceMap) throws IOException {
if (!this.fs.exists(destDir)) {
LOGGER.warn(String.format("Path %s does not exist so no container LocalResource to add", destDir));
return;
}
FileStatus[] statuses = this.fs.listStatus(destDir);
if (statuses != null) {
for (FileStatus status : statuses) {
YarnHelixUtils.addFileAsLocalResource(this.fs, status.getPath(), LocalResourceType.FILE, resourceMap);
}
}
}
protected ByteBuffer getSecurityTokens() throws IOException {
Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
Closer closer = Closer.create();
try {
DataOutputBuffer dataOutputBuffer = closer.register(new DataOutputBuffer());
credentials.writeTokenStorageToStream(dataOutputBuffer);
// Remove the AM->RM token so that containers cannot access it
Iterator<Token<?>> tokenIterator = credentials.getAllTokens().iterator();
while (tokenIterator.hasNext()) {
Token<?> token = tokenIterator.next();
if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
tokenIterator.remove();
}
}
return ByteBuffer.wrap(dataOutputBuffer.getData(), 0, dataOutputBuffer.getLength());
} catch (Throwable t) {
throw closer.rethrow(t);
} finally {
closer.close();
}
}
/**
 * Assembles the shell command that launches {@link GobblinTemporalYarnTaskRunner} inside the
 * given Yarn container: JVM heap sizing, log-related system properties, extra JVM args, the
 * application identity flags, and stdout/stderr redirection into the container log directory.
 *
 * @param helixParticipantId the Helix participant id passed to the runner's
 *                           {@code --helix_instance_name} option
 * @param helixInstanceTag   optional Helix instance tag; omitted from the command when
 *                           null or empty
 * @return the fully-rendered launch command
 */
@VisibleForTesting
protected String buildContainerCommand(Container container, String helixParticipantId, String helixInstanceTag) {
  String containerProcessName = GobblinTemporalYarnTaskRunner.class.getSimpleName();
  // Heap is a configured fraction of the container's memory, minus a fixed overhead allowance.
  int xmxMb = (int) (container.getResource().getMemory() * this.jvmMemoryXmxRatio) - this.jvmMemoryOverheadMbs;
  StringBuilder containerCommand = new StringBuilder();
  containerCommand.append(ApplicationConstants.Environment.JAVA_HOME.$()).append("/bin/java");
  containerCommand.append(" -Xmx").append(xmxMb).append("M");
  containerCommand.append(" -D").append(GobblinYarnConfigurationKeys.JVM_USER_TIMEZONE_CONFIG).append("=").append(this.containerTimezone);
  containerCommand.append(" -D").append(GobblinYarnConfigurationKeys.GOBBLIN_YARN_CONTAINER_LOG_DIR_NAME).append("=").append(ApplicationConstants.LOG_DIR_EXPANSION_VAR);
  containerCommand.append(" -D").append(GobblinYarnConfigurationKeys.GOBBLIN_YARN_CONTAINER_LOG_FILE_NAME).append("=").append(containerProcessName).append(".").append(ApplicationConstants.STDOUT);
  containerCommand.append(" ").append(JvmUtils.formatJvmArguments(this.containerJvmArgs));
  containerCommand.append(" ").append(GobblinTemporalYarnTaskRunner.class.getName());
  containerCommand.append(" --").append(GobblinClusterConfigurationKeys.APPLICATION_NAME_OPTION_NAME);
  containerCommand.append(" ").append(this.applicationName);
  containerCommand.append(" --").append(GobblinClusterConfigurationKeys.APPLICATION_ID_OPTION_NAME);
  containerCommand.append(" ").append(this.applicationId);
  containerCommand.append(" --").append(GobblinClusterConfigurationKeys.HELIX_INSTANCE_NAME_OPTION_NAME);
  containerCommand.append(" ").append(helixParticipantId);
  if (!Strings.isNullOrEmpty(helixInstanceTag)) {
    containerCommand.append(" --").append(GobblinClusterConfigurationKeys.HELIX_INSTANCE_TAGS_OPTION_NAME);
    containerCommand.append(" ").append(helixInstanceTag);
  }
  LOGGER.info("Building " + containerProcessName);
  // Redirect stdout/stderr into the container's Yarn log directory.
  containerCommand.append(" 1>").append(ApplicationConstants.LOG_DIR_EXPANSION_VAR).append(File.separator)
      .append(containerProcessName).append(".").append(ApplicationConstants.STDOUT);
  containerCommand.append(" 2>").append(ApplicationConstants.LOG_DIR_EXPANSION_VAR).append(File.separator)
      .append(containerProcessName).append(".").append(ApplicationConstants.STDERR);
  return containerCommand.toString();
}
/**
 * Decides whether the replacement for a completed container should be requested on the same
 * node. Exit statuses that indicate a disk or node failure always force a different node;
 * otherwise the answer follows the configured host-affinity setting.
 *
 * @param containerExitStatus the Yarn exit status of the completed container
 * @return true to request the replacement on the same node, false otherwise
 */
private boolean shouldStickToTheSameNode(int containerExitStatus) {
  if (containerExitStatus == ContainerExitStatus.DISKS_FAILED) {
    // Disk failure: retrying on the same node is pointless.
    return false;
  }
  if (containerExitStatus == ContainerExitStatus.ABORTED) {
    // Most likely due to a node failure, because the application itself
    // does not release containers.
    return false;
  }
  // For every other exit status, stick to the node only if host affinity is enabled.
  return this.containerHostAffinityEnabled;
}
/**
 * Handle the completion of a container. A new container will be requested to replace the one
 * that just exited. Depending on the exit status and if container host affinity is enabled,
 * the new container may or may not try to be started on the same node.
 * <p>
 * A container completes in either of the following conditions: 1) some error happens in the
 * container and caused the container to exit, 2) the container gets killed due to some reason,
 * for example, if it runs over the allowed amount of virtual or physical memory, 3) the container
 * gets preempted by the ResourceManager, or 4) the container gets stopped by the ApplicationMaster.
 * A replacement container is needed in all but the last case.
 */
protected void handleContainerCompletion(ContainerStatus containerStatus) {
// Remove the container from the live map; null means we never saw its allocation callback.
ContainerInfo completedContainerInfo = this.containerMap.remove(containerStatus.getContainerId());
//Get the Helix instance name for the completed container. Because callbacks are processed asynchronously, we might
//encounter situations where handleContainerCompletion() is called before onContainersAllocated(), resulting in the
//containerId missing from the containersMap.
// We use removedContainerID to remember these containers and remove them from containerMap later when we call requestTargetNumberOfContainers method
if (completedContainerInfo == null) {
removedContainerID.putIfAbsent(containerStatus.getContainerId(), "");
}
// NOTE(review): completedInstanceName is never reassigned below, so retry counting, event
// metadata, and the unused-instance pool are all keyed on UNKNOWN_HELIX_INSTANCE even when
// completedContainerInfo is non-null. Confirm whether the participant id from
// completedContainerInfo was meant to be used here.
String completedInstanceName = UNKNOWN_HELIX_INSTANCE;
// Fall back to the default tag when the allocation callback has not run yet.
String helixTag = completedContainerInfo == null ? helixInstanceTags : completedContainerInfo.getHelixTag();
if (completedContainerInfo != null) {
// NOTE(review): assumes the tag was registered in allocatedContainerCountMap during
// allocation; a missing entry would NPE here — confirm that invariant holds.
allocatedContainerCountMap.get(helixTag).decrementAndGet();
}
LOGGER.info(String.format("Container %s running Helix instance %s with tag %s has completed with exit status %d",
containerStatus.getContainerId(), completedInstanceName, helixTag, containerStatus.getExitStatus()));
if (!Strings.isNullOrEmpty(containerStatus.getDiagnostics())) {
LOGGER.info(String.format("Received the following diagnostics information for container %s: %s",
containerStatus.getContainerId(), containerStatus.getDiagnostics()));
}
switch(containerStatus.getExitStatus()) {
case(ContainerExitStatus.ABORTED):
// If the abort was a deliberate release, no replacement is needed and we return early.
if (handleAbortedContainer(containerStatus, completedContainerInfo, completedInstanceName)) {
return;
}
break;
case(1): // Same as linux exit status 1 Often occurs when launch_container.sh failed
LOGGER.info("Exit status 1. CompletedContainerInfo={}", completedContainerInfo);
break;
default:
break;
}
// During shutdown we deliberately stop requesting replacement containers.
if (this.shutdownInProgress) {
return;
}
if(completedContainerInfo != null) {
// Track per-instance restart attempts so a persistently failing instance is eventually
// abandoned rather than replaced forever.
this.helixInstanceRetryCount.putIfAbsent(completedInstanceName, new AtomicInteger(0));
int retryCount = this.helixInstanceRetryCount.get(completedInstanceName).incrementAndGet();
// Populate event metadata
Optional<ImmutableMap.Builder<String, String>> eventMetadataBuilder = Optional.absent();
if (this.eventSubmitter.isPresent()) {
eventMetadataBuilder = Optional.of(buildContainerStatusEventMetadata(containerStatus));
eventMetadataBuilder.get().put(GobblinYarnEventConstants.EventMetadata.HELIX_INSTANCE_ID, completedInstanceName);
eventMetadataBuilder.get().put(GobblinYarnEventConstants.EventMetadata.CONTAINER_STATUS_RETRY_ATTEMPT, retryCount + "");
}
// A max-retries value <= 0 means unlimited retries.
if (this.helixInstanceMaxRetries > 0 && retryCount > this.helixInstanceMaxRetries) {
if (this.eventSubmitter.isPresent()) {
this.eventSubmitter.get()
.submit(GobblinYarnEventConstants.EventNames.HELIX_INSTANCE_COMPLETION, eventMetadataBuilder.get().build());
}
LOGGER.warn("Maximum number of retries has been achieved for Helix instance " + completedInstanceName);
// Give up on this instance: no replacement container is requested.
return;
}
// Add the Helix instance name of the completed container to the set of unused
// instance names so they can be reused by a replacement container.
LOGGER.info("Adding instance {} to the pool of unused instances", completedInstanceName);
this.unusedHelixInstanceNames.add(completedInstanceName);
if (this.eventSubmitter.isPresent()) {
// eventMetadataBuilder is guaranteed present here: it was populated above under the
// same eventSubmitter.isPresent() condition.
this.eventSubmitter.get()
.submit(GobblinYarnEventConstants.EventNames.HELIX_INSTANCE_COMPLETION, eventMetadataBuilder.get().build());
}
}
// Ask for a replacement sized like the old container when we know its resource profile.
Optional<Resource> newContainerResource = completedContainerInfo != null ?
Optional.of(completedContainerInfo.getContainer().getResource()) : Optional.absent();
LOGGER.info("Requesting a new container to replace {} to run Helix instance {} with helix tag {} and resource {}",
containerStatus.getContainerId(), completedInstanceName, helixTag, newContainerResource.orNull());
// Pass the old container only when host affinity applies, so the request targets the same node.
this.eventBus.post(new NewContainerRequest(
shouldStickToTheSameNode(containerStatus.getExitStatus()) && completedContainerInfo != null ?
Optional.of(completedContainerInfo.getContainer()) : Optional.absent(), newContainerResource));
}
/**
 * Handles an ABORTED container exit. If this AM itself asked for the release (the container id
 * is present in {@code releasedContainerCache}), the completion is expected: the instance name
 * is returned to the unused pool (when known) and no replacement should be spawned. Otherwise
 * the abort is attributed to a lost NodeManager.
 *
 * @return true if the abort was an expected release and the caller should skip replacement,
 *         false if a replacement container is still warranted
 */
private boolean handleAbortedContainer(ContainerStatus containerStatus, ContainerInfo completedContainerInfo,
    String completedInstanceName) {
  boolean releaseWasRequested = this.releasedContainerCache.getIfPresent(containerStatus.getContainerId()) != null;
  if (!releaseWasRequested) {
    LOGGER.info("Container {} aborted due to lost NM", containerStatus.getContainerId());
    return false;
  }
  LOGGER.info("Container release requested, so not spawning a replacement for containerId {}", containerStatus.getContainerId());
  if (completedContainerInfo != null) {
    LOGGER.info("Adding instance {} to the pool of unused instances", completedInstanceName);
    this.unusedHelixInstanceNames.add(completedInstanceName);
  }
  return true;
}
/**
 * Builds the common event-metadata entries for a container status: container id, state, and —
 * when meaningful — the exit status and diagnostics text. Returned as a builder so callers can
 * append further entries before building.
 */
private ImmutableMap.Builder<String, String> buildContainerStatusEventMetadata(ContainerStatus containerStatus) {
  ImmutableMap.Builder<String, String> metadata = new ImmutableMap.Builder<>();
  metadata.put(GobblinYarnMetricTagNames.CONTAINER_ID, containerStatus.getContainerId().toString());
  metadata.put(GobblinYarnEventConstants.EventMetadata.CONTAINER_STATUS_CONTAINER_STATE,
      containerStatus.getState().toString());
  int exitStatus = containerStatus.getExitStatus();
  if (exitStatus != ContainerExitStatus.INVALID) {
    // INVALID means the container has not exited yet, so no exit status is recorded.
    metadata.put(GobblinYarnEventConstants.EventMetadata.CONTAINER_STATUS_EXIT_STATUS,
        exitStatus + "");
  }
  String diagnostics = containerStatus.getDiagnostics();
  if (!Strings.isNullOrEmpty(diagnostics)) {
    metadata.put(GobblinYarnEventConstants.EventMetadata.CONTAINER_STATUS_EXIT_DIAGNOSTICS,
        diagnostics);
  }
  return metadata;
}
/**
 * A custom implementation of {@link AMRMClientAsync.CallbackHandler}: reacts to ResourceManager
 * callbacks by launching newly allocated containers, replacing completed ones, and shutting the
 * cluster manager down on RM request or error.
 */
private class AMRMClientCallbackHandler implements AMRMClientAsync.CallbackHandler {
// Set once a shutdown request or fatal error is observed; reported to the RM via getProgress().
private volatile boolean done = false;
@Override
public void onContainersCompleted(List<ContainerStatus> statuses) {
for (ContainerStatus containerStatus : statuses) {
handleContainerCompletion(containerStatus);
}
}
@Override
public void onContainersAllocated(List<Container> containers) {
for (final Container container : containers) {
String containerId = container.getId().toString();
String containerHelixTag = helixInstanceTags;
if (eventSubmitter.isPresent()) {
eventSubmitter.get().submit(GobblinYarnEventConstants.EventNames.CONTAINER_ALLOCATION,
GobblinYarnMetricTagNames.CONTAINER_ID, containerId);
}
LOGGER.info("Container {} has been allocated with resource {} for helix tag {}",
container.getId(), container.getResource(), containerHelixTag);
//Iterate over the (thread-safe) set of unused instances to find the first instance that is not currently live.
//Once we find a candidate instance, it is removed from the set.
String instanceName = null;
//Ensure that updates to unusedHelixInstanceNames are visible to other threads that might concurrently
//invoke the callback on container allocation.
synchronized (this) {
// NOTE(review): this loop walks to the LAST element and never calls iterator.remove(),
// so — contrary to the comment above — no liveness check is performed and the chosen
// name stays in the set. instanceName also remains null when the set is empty. Confirm
// whether this is intentional in the Temporal fork.
Iterator<String> iterator = unusedHelixInstanceNames.iterator();
while (iterator.hasNext()) {
instanceName = iterator.next();
}
}
ContainerInfo containerInfo = new ContainerInfo(container, instanceName, containerHelixTag);
containerMap.put(container.getId(), containerInfo);
allocatedContainerCountMap.putIfAbsent(containerHelixTag, new AtomicInteger(0));
allocatedContainerCountMap.get(containerHelixTag).incrementAndGet();
// Find matching requests and remove the request (YARN-660). We the scheduler are responsible
// for cleaning up requests after allocation based on the design in the described ticket.
// YARN does not have a delta request API and the requests are not cleaned up automatically.
// Try finding a match first with the host as the resource name then fall back to any resource match.
// Also see YARN-1902. Container count will explode without this logic for removing container requests.
List<? extends Collection<AMRMClient.ContainerRequest>> matchingRequests = amrmClientAsync
.getMatchingRequests(container.getPriority(), container.getNodeHttpAddress(), container.getResource());
if (matchingRequests.isEmpty()) {
LOGGER.debug("Matching request by host {} not found", container.getNodeHttpAddress());
matchingRequests = amrmClientAsync
.getMatchingRequests(container.getPriority(), ResourceRequest.ANY, container.getResource());
}
if (!matchingRequests.isEmpty()) {
AMRMClient.ContainerRequest firstMatchingContainerRequest = matchingRequests.get(0).iterator().next();
LOGGER.debug("Found matching requests {}, removing first matching request {}",
matchingRequests, firstMatchingContainerRequest);
amrmClientAsync.removeContainerRequest(firstMatchingContainerRequest);
}
// Container startup is handed off to an executor so this RM callback thread is not blocked
// on building the launch context (which may touch the file system).
containerLaunchExecutor.submit(new Runnable() {
@Override
public void run() {
try {
LOGGER.info("Starting container " + containerId);
nmClientAsync.startContainerAsync(container, newContainerLaunchContext(containerInfo));
} catch (IOException ioe) {
LOGGER.error("Failed to start container " + containerId, ioe);
}
}
});
}
}
@Override
public void onShutdownRequest() {
if (eventSubmitter.isPresent()) {
eventSubmitter.get().submit(GobblinYarnEventConstants.EventNames.SHUTDOWN_REQUEST);
}
LOGGER.info("Received shutdown request from the ResourceManager");
this.done = true;
// Propagate the RM's request so the whole cluster manager shuts down.
eventBus.post(new ClusterManagerShutdownRequest());
}
@Override
public void onNodesUpdated(List<NodeReport> updatedNodes) {
for (NodeReport nodeReport : updatedNodes) {
LOGGER.info("Received node update report: " + nodeReport);
}
}
@Override
public float getProgress() {
// Progress is binary: 0 while running, 1 once shutdown/error has been observed.
return this.done ? 1.0f : 0.0f;
}
@Override
public void onError(Throwable t) {
if (eventSubmitter.isPresent()) {
eventSubmitter.get().submit(GobblinYarnEventConstants.EventNames.ERROR,
GobblinYarnEventConstants.EventMetadata.ERROR_EXCEPTION, Throwables.getStackTraceAsString(t));
}
LOGGER.error("Received error: " + t, t);
this.done = true;
// Treat AMRM client errors as fatal: request a full shutdown.
eventBus.post(new ClusterManagerShutdownRequest());
}
}
/**
* A custom implementation of {@link NMClientAsync.CallbackHandler}.
*/
class NMClientCallbackHandler implements NMClientAsync.CallbackHandler {
@Override
public void onContainerStarted(ContainerId containerId, Map<String, ByteBuffer> allServiceResponse) {
if (eventSubmitter.isPresent()) {
eventSubmitter.get().submit(GobblinYarnEventConstants.EventNames.CONTAINER_STARTED,
GobblinYarnMetricTagNames.CONTAINER_ID, containerId.toString());
}
LOGGER.info(String.format("Container %s has been started", containerId));
}
@Override
public void onContainerStatusReceived(ContainerId containerId, ContainerStatus containerStatus) {
if (eventSubmitter.isPresent()) {
eventSubmitter.get().submit(GobblinYarnEventConstants.EventNames.CONTAINER_STATUS_RECEIVED,
buildContainerStatusEventMetadata(containerStatus).build());
}
LOGGER.info(String.format("Received container status for container %s: %s", containerId, containerStatus));
}
@Override
public void onContainerStopped(ContainerId containerId) {
if (eventSubmitter.isPresent()) {
eventSubmitter.get().submit(GobblinYarnEventConstants.EventNames.CONTAINER_STOPPED,
GobblinYarnMetricTagNames.CONTAINER_ID, containerId.toString());
}
LOGGER.info(String.format("Container %s has been stopped", containerId));
if (containerMap.isEmpty()) {
synchronized (allContainersStopped) {
allContainersStopped.notify();
}
}
}
@Override
public void onStartContainerError(ContainerId containerId, Throwable t) {
if (eventSubmitter.isPresent()) {
eventSubmitter.get().submit(GobblinYarnEventConstants.EventNames.CONTAINER_START_ERROR,
GobblinYarnMetricTagNames.CONTAINER_ID, containerId.toString(),
GobblinYarnEventConstants.EventMetadata.ERROR_EXCEPTION, Throwables.getStackTraceAsString(t));
}
LOGGER.error(String.format("Failed to start container %s due to error %s", containerId, t));
}
@Override
public void onGetContainerStatusError(ContainerId containerId, Throwable t) {
if (eventSubmitter.isPresent()) {
eventSubmitter.get().submit(GobblinYarnEventConstants.EventNames.CONTAINER_GET_STATUS_ERROR,
GobblinYarnMetricTagNames.CONTAINER_ID, containerId.toString(),
GobblinYarnEventConstants.EventMetadata.ERROR_EXCEPTION, Throwables.getStackTraceAsString(t));
}
LOGGER.error(String.format("Failed to get status for container %s due to error %s", containerId, t));
}
@Override
public void onStopContainerError(ContainerId containerId, Throwable t) {
if (eventSubmitter.isPresent()) {
eventSubmitter.get().submit(GobblinYarnEventConstants.EventNames.CONTAINER_STOP_ERROR,
GobblinYarnMetricTagNames.CONTAINER_ID, containerId.toString(),
GobblinYarnEventConstants.EventMetadata.ERROR_EXCEPTION, Throwables.getStackTraceAsString(t));
}
LOGGER.error(String.format("Failed to stop container %s due to error %s", containerId, t));
}
}
/**
 * Value holder pairing a Yarn {@link Container} with the Helix participant id and tag it was
 * allocated for, plus the fully-rendered startup command used to launch it. Accessors are
 * generated by Lombok's {@code @Getter}.
 */
@Getter
class ContainerInfo {
  private final Container container;
  private final String helixParticipantId;
  private final String helixTag;
  private final String startupCommand;

  public ContainerInfo(Container container, String participantId, String tag) {
    this.container = container;
    this.helixParticipantId = participantId;
    this.helixTag = tag;
    // The command is rendered eagerly so it is fixed at allocation time.
    this.startupCommand = YarnService.this.buildContainerCommand(container, participantId, tag);
  }

  @Override
  public String toString() {
    return String.format("ContainerInfo{ container=%s, helixParticipantId=%s, helixTag=%s, startupCommand=%s }",
        container.getId(), helixParticipantId, helixTag, startupCommand);
  }
}
}
| 3,976 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.temporal.yarn;
import com.google.common.base.Optional;
import com.google.common.util.concurrent.Service;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;
import java.util.List;
import lombok.Getter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.cluster.GobblinClusterConfigurationKeys;
import org.apache.gobblin.cluster.GobblinClusterUtils;
import org.apache.gobblin.temporal.cluster.GobblinTemporalClusterManager;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.JvmUtils;
import org.apache.gobblin.util.PathUtils;
import org.apache.gobblin.util.logs.Log4jConfigurationHelper;
import org.apache.gobblin.util.logs.LogCopier;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
import org.apache.gobblin.yarn.GobblinYarnConfigurationKeys;
import org.apache.gobblin.yarn.GobblinYarnLogSource;
import org.apache.gobblin.yarn.YarnContainerSecurityManager;
import org.apache.gobblin.yarn.YarnHelixUtils;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.util.ConverterUtils;
/**
 * The Yarn ApplicationMaster class for Gobblin using Temporal.
 *
 * <p>
 * This class runs the {@link YarnService} for all Yarn-related tasks like ApplicationMaster registration
 * and un-registration and Yarn container provisioning.
 * </p>
 *
 */
@Alpha
public class GobblinTemporalApplicationMaster extends GobblinTemporalClusterManager {
private static final Logger LOGGER = LoggerFactory.getLogger(GobblinTemporalApplicationMaster.class);
@Getter
private final YarnService _yarnService;
// Ships AM logs to the configured sink; null when no log source is present on this container.
private LogCopier logCopier;
/**
 * Wires up the AM: derives the container number from the Yarn container id, optionally attaches
 * a {@link LogCopier}, starts the {@link YarnService}, adds a security manager under Kerberos,
 * and instantiates any extra services listed in the configuration.
 */
public GobblinTemporalApplicationMaster(String applicationName, String applicationId, ContainerId containerId, Config config,
YarnConfiguration yarnConfiguration) throws Exception {
super(applicationName, applicationId, config.withValue(GobblinYarnConfigurationKeys.CONTAINER_NUM_KEY,
ConfigValueFactory.fromAnyRef(YarnHelixUtils.getContainerNum(containerId.toString()))),
Optional.<Path>absent());
String containerLogDir = config.getString(GobblinYarnConfigurationKeys.LOGS_SINK_ROOT_DIR_KEY);
GobblinYarnLogSource gobblinYarnLogSource = new GobblinYarnLogSource();
if (gobblinYarnLogSource.isLogSourcePresent()) {
// AM logs land under <sink root>/<app work dir>/AppMaster.
Path appWorkDir = PathUtils.combinePaths(containerLogDir, GobblinClusterUtils.getAppWorkDirPath(this.clusterName, this.applicationId), "AppMaster");
logCopier = gobblinYarnLogSource.buildLogCopier(this.config, containerId.toString(), this.fs, appWorkDir);
this.applicationLauncher
.addService(logCopier);
}
YarnHelixUtils.setYarnClassPath(config, yarnConfiguration);
YarnHelixUtils.setAdditionalYarnClassPath(config, yarnConfiguration);
this._yarnService = buildTemporalYarnService(this.config, applicationName, this.applicationId, yarnConfiguration, this.fs);
this.applicationLauncher.addService(this._yarnService);
if (UserGroupInformation.isSecurityEnabled()) {
LOGGER.info("Adding YarnContainerSecurityManager since security is enabled");
this.applicationLauncher.addService(buildYarnContainerSecurityManager(this.config, this.fs));
}
// Add additional services
List<String> serviceClassNames = ConfigUtils.getStringList(this.config,
GobblinYarnConfigurationKeys.APP_MASTER_SERVICE_CLASSES);
for (String serviceClassName : serviceClassNames) {
// Each extra service is constructed reflectively via its longest matching constructor,
// passing this AM instance as the candidate argument.
Class<?> serviceClass = Class.forName(serviceClassName);
this.applicationLauncher.addService((Service) GobblinConstructorUtils.invokeLongestConstructor(serviceClass, this));
}
}
/**
 * Build the {@link YarnService} for the Application Master.
 */
protected YarnService buildTemporalYarnService(Config config, String applicationName, String applicationId,
YarnConfiguration yarnConfiguration, FileSystem fs)
throws Exception {
return new YarnService(config, applicationName, applicationId, yarnConfiguration, fs, this.eventBus);
}
/**
 * Build the {@link YarnTemporalAppMasterSecurityManager} for the Application Master.
 */
private YarnContainerSecurityManager buildYarnContainerSecurityManager(Config config, FileSystem fs) {
return new YarnTemporalAppMasterSecurityManager(config, fs, this.eventBus, this.logCopier, this._yarnService);
}
// Command-line options recognized by main(): application name and id are both required.
private static Options buildOptions() {
Options options = new Options();
options.addOption("a", GobblinClusterConfigurationKeys.APPLICATION_NAME_OPTION_NAME, true, "Yarn application name");
options.addOption("d", GobblinClusterConfigurationKeys.APPLICATION_ID_OPTION_NAME, true, "Yarn application id");
return options;
}
private static void printUsage(Options options) {
HelpFormatter formatter = new HelpFormatter();
formatter.printHelp(GobblinTemporalApplicationMaster.class.getSimpleName(), options);
}
/**
 * AM entry point: parses the required options, refreshes delegation tokens, reconfigures log4j,
 * then constructs and starts the ApplicationMaster (closed automatically via try-with-resources).
 */
public static void main(String[] args) throws Exception {
Options options = buildOptions();
try {
CommandLine cmd = new DefaultParser().parse(options, args);
if (!cmd.hasOption(GobblinClusterConfigurationKeys.APPLICATION_NAME_OPTION_NAME) ||
(!cmd.hasOption(GobblinClusterConfigurationKeys.APPLICATION_ID_OPTION_NAME))) {
printUsage(options);
System.exit(1);
}
//Because AM is restarted with the original AppSubmissionContext, it may have outdated delegation tokens.
//So the refreshed tokens should be added into the container's UGI before any HDFS/Hive/RM access is performed.
YarnHelixUtils.updateToken(GobblinYarnConfigurationKeys.TOKEN_FILE_NAME);
Log4jConfigurationHelper.updateLog4jConfiguration(GobblinTemporalApplicationMaster.class,
GobblinYarnConfigurationKeys.GOBBLIN_YARN_LOG4J_CONFIGURATION_FILE,
GobblinYarnConfigurationKeys.GOBBLIN_YARN_LOG4J_CONFIGURATION_FILE);
LOGGER.info(JvmUtils.getJvmInputArguments());
// The container id is injected by Yarn through the CONTAINER_ID environment variable.
ContainerId containerId =
ConverterUtils.toContainerId(System.getenv().get(ApplicationConstants.Environment.CONTAINER_ID.key()));
try (GobblinTemporalApplicationMaster applicationMaster = new GobblinTemporalApplicationMaster(
cmd.getOptionValue(GobblinClusterConfigurationKeys.APPLICATION_NAME_OPTION_NAME),
cmd.getOptionValue(GobblinClusterConfigurationKeys.APPLICATION_ID_OPTION_NAME), containerId,
ConfigFactory.load(), new YarnConfiguration())) {
applicationMaster.start();
}
} catch (ParseException pe) {
printUsage(options);
System.exit(1);
}
}
}
| 3,977 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.temporal.yarn;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.gobblin.yarn.GobblinYarnConfigurationKeys;
import org.apache.gobblin.yarn.GobblinYarnLogSource;
import org.apache.gobblin.yarn.YarnContainerSecurityManager;
import org.apache.gobblin.yarn.YarnHelixUtils;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Optional;
import com.google.common.util.concurrent.Service;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;
import org.apache.gobblin.cluster.GobblinClusterConfigurationKeys;
import org.apache.gobblin.cluster.GobblinClusterUtils;
import org.apache.gobblin.cluster.GobblinTaskRunner;
import org.apache.gobblin.temporal.cluster.GobblinTemporalTaskRunner;
import org.apache.gobblin.util.JvmUtils;
import org.apache.gobblin.util.logs.Log4jConfigurationHelper;
import org.apache.gobblin.util.logs.LogCopier;
/**
 * The task runner for Gobblin-on-Temporal Yarn containers. Extends
 * {@link GobblinTemporalTaskRunner} with Yarn-specific services: container log copying and,
 * when Hadoop security is enabled, a {@link YarnContainerSecurityManager} for token handling.
 */
public class GobblinTemporalYarnTaskRunner extends GobblinTemporalTaskRunner {
  // Fixed: the logger was previously created for GobblinTaskRunner.class, which published this
  // class's log lines under the wrong logger name (and log4j category).
  private static final Logger LOGGER = LoggerFactory.getLogger(GobblinTemporalYarnTaskRunner.class);

  public GobblinTemporalYarnTaskRunner(String applicationName, String applicationId, ContainerId containerId, Config config,
      Optional<Path> appWorkDirOptional) throws Exception {
    // The container number derived from the Yarn container id is injected into the config so
    // downstream components can identify this container.
    super(applicationName, applicationId, getTaskRunnerId(containerId), config
        .withValue(GobblinYarnConfigurationKeys.CONTAINER_NUM_KEY,
            ConfigValueFactory.fromAnyRef(YarnHelixUtils.getContainerNum(containerId.toString()))), appWorkDirOptional);
  }

  /**
   * Returns the services to run in this container: the base services from the parent class,
   * plus a {@link LogCopier} when a log sink is configured and a log source is present, plus a
   * {@link YarnContainerSecurityManager} when Hadoop security is enabled.
   */
  @Override
  public List<Service> getServices() {
    List<Service> services = new ArrayList<>();
    services.addAll(super.getServices());
    LogCopier logCopier = null;
    if (clusterConfig.hasPath(GobblinYarnConfigurationKeys.LOGS_SINK_ROOT_DIR_KEY)) {
      GobblinYarnLogSource gobblinYarnLogSource = new GobblinYarnLogSource();
      String containerLogDir = clusterConfig.getString(GobblinYarnConfigurationKeys.LOGS_SINK_ROOT_DIR_KEY);
      if (gobblinYarnLogSource.isLogSourcePresent()) {
        try {
          logCopier = gobblinYarnLogSource.buildLogCopier(this.clusterConfig, this.taskRunnerId, this.fs,
              new Path(containerLogDir, GobblinClusterUtils.getAppWorkDirPath(this.applicationName, this.applicationId)));
          services.add(logCopier);
        } catch (Exception e) {
          // Log copying is best-effort; the task runner still functions without it.
          LOGGER.warn("Cannot add LogCopier service to the service manager due to", e);
        }
      }
    }
    if (UserGroupInformation.isSecurityEnabled()) {
      LOGGER.info("Adding YarnContainerSecurityManager since security is enabled");
      // NOTE(review): logCopier may still be null here (no sink configured or build failure);
      // it is passed through unchanged — confirm YarnContainerSecurityManager tolerates null.
      services.add(new YarnContainerSecurityManager(this.clusterConfig, this.fs, this.eventBus, logCopier));
    }
    return services;
  }

  // Currently unused within this class; kept for parity with sibling runners.
  private static String getApplicationId(ContainerId containerId) {
    return containerId.getApplicationAttemptId().getApplicationId().toString();
  }

  // The Yarn container id doubles as the task runner id.
  private static String getTaskRunnerId(ContainerId containerId) {
    return containerId.toString();
  }

  /**
   * Container entry point: parses the required application name/id options, reconfigures log4j,
   * reads the container id from the environment, and starts the task runner. Any throwable
   * leads to System.exit(1) so lingering non-daemon threads cannot keep the container alive.
   */
  public static void main(String[] args) {
    LOGGER.info("Starting GobblinTemporalYarnTaskRunner");
    Options options = GobblinTemporalTaskRunner.buildOptions();
    try {
      CommandLine cmd = new DefaultParser().parse(options, args);
      if (!cmd.hasOption(GobblinClusterConfigurationKeys.APPLICATION_NAME_OPTION_NAME) || !cmd
          .hasOption(GobblinClusterConfigurationKeys.APPLICATION_ID_OPTION_NAME)) {
        GobblinTemporalTaskRunner.printUsage(options);
        System.exit(1);
      }
      Log4jConfigurationHelper.updateLog4jConfiguration(GobblinTaskRunner.class,
          GobblinYarnConfigurationKeys.GOBBLIN_YARN_LOG4J_CONFIGURATION_FILE,
          GobblinYarnConfigurationKeys.GOBBLIN_YARN_LOG4J_CONFIGURATION_FILE);
      LOGGER.info(JvmUtils.getJvmInputArguments());
      // The container id is injected by Yarn through the CONTAINER_ID environment variable.
      ContainerId containerId =
          ConverterUtils.toContainerId(System.getenv().get(ApplicationConstants.Environment.CONTAINER_ID.key()));
      String applicationName = cmd.getOptionValue(GobblinClusterConfigurationKeys.APPLICATION_NAME_OPTION_NAME);
      String applicationId = cmd.getOptionValue(GobblinClusterConfigurationKeys.APPLICATION_ID_OPTION_NAME);
      Config config = ConfigFactory.load();
      GobblinTemporalTaskRunner gobblinTemporalTaskRunner =
          new GobblinTemporalYarnTaskRunner(applicationName, applicationId, containerId, config,
              Optional.<Path>absent());
      gobblinTemporalTaskRunner.start();
    } catch (ParseException pe) {
      GobblinTemporalTaskRunner.printUsage(options);
      System.exit(1);
    } catch (Throwable t) {
      // Ideally, we should not be catching non-recoverable exceptions and errors. However,
      // simply propagating the exception may prevent the container exit due to the presence of non-daemon threads present
      // in the application. Hence, we catch this exception to invoke System.exit() which in turn ensures that all non-daemon threads are killed.
      // Fixed: the previous "Exception encountered: {}" placeholder consumed the throwable as a
      // format argument, so the stack trace was never logged; passing it as the dedicated
      // throwable argument logs the full trace.
      LOGGER.error("Exception encountered", t);
      System.exit(1);
    }
  }
}
| 3,978 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.temporal.loadgen.launcher;
import java.util.List;
import java.util.Optional;
import java.util.Properties;
import java.util.concurrent.ConcurrentHashMap;
import lombok.extern.slf4j.Slf4j;
import io.temporal.client.WorkflowOptions;
import org.apache.hadoop.fs.Path;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.metrics.Tag;
import org.apache.gobblin.runtime.JobLauncher;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.temporal.cluster.GobblinTemporalTaskRunner;
import org.apache.gobblin.temporal.joblauncher.GobblinTemporalJobLauncher;
import org.apache.gobblin.temporal.joblauncher.GobblinTemporalJobScheduler;
import org.apache.gobblin.temporal.loadgen.work.IllustrationItem;
import org.apache.gobblin.temporal.loadgen.work.SimpleGeneratedWorkload;
import org.apache.gobblin.temporal.util.nesting.work.WorkflowAddr;
import org.apache.gobblin.temporal.util.nesting.work.Workload;
import org.apache.gobblin.temporal.util.nesting.workflow.NestingExecWorkflow;
import org.apache.gobblin.util.PropertiesUtils;
import static org.apache.gobblin.temporal.GobblinTemporalConfigurationKeys.GOBBLIN_TEMPORAL_JOB_LAUNCHER_ARG_PREFIX;
/**
 * A {@link JobLauncher} that merely kicks off a Temporal super-workflow, which then fans out an arbitrary
 * volume of nested child activities; see: {@link NestingExecWorkflow}
 *
 * <p>
 * This class is instantiated by the {@link GobblinTemporalJobScheduler#buildJobLauncher(Properties)} on every job submission to launch the Gobblin job.
 * The actual task execution happens in the {@link GobblinTemporalTaskRunner}, usually in a different process.
 * </p>
 */
@Alpha
@Slf4j
public class GenArbitraryLoadJobLauncher extends GobblinTemporalJobLauncher {
  public static final String GOBBLIN_TEMPORAL_JOB_LAUNCHER_ARG_NUM_ACTIVITIES = GOBBLIN_TEMPORAL_JOB_LAUNCHER_ARG_PREFIX + "num.activities";
  public static final String GOBBLIN_TEMPORAL_JOB_LAUNCHER_ARG_MAX_BRANCHES_PER_TREE = GOBBLIN_TEMPORAL_JOB_LAUNCHER_ARG_PREFIX + "max.branches.per.tree";
  public static final String GOBBLIN_TEMPORAL_JOB_LAUNCHER_ARG_MAX_SUB_TREES_PER_TREE = GOBBLIN_TEMPORAL_JOB_LAUNCHER_ARG_PREFIX + "max.sub.trees.per.tree";

  public GenArbitraryLoadJobLauncher(
      Properties jobProps,
      Path appWorkDir,
      List<? extends Tag<?>> metadataTags,
      ConcurrentHashMap<String, Boolean> runningMap
  ) throws Exception {
    super(jobProps, appWorkDir, metadataTags, runningMap);
  }

  /**
   * Launches the load-generating super-workflow.  Ignores {@code workunits}: the load shape is taken
   * entirely from the three required job properties above.
   */
  @Override
  public void submitJob(List<WorkUnit> workunits) {
    // Each lookup throws if its job property is absent (checked in declaration order).
    int activityCount = PropertiesUtils.getRequiredPropAsInt(this.jobProps, GOBBLIN_TEMPORAL_JOB_LAUNCHER_ARG_NUM_ACTIVITIES);
    int branchCeiling = PropertiesUtils.getRequiredPropAsInt(this.jobProps, GOBBLIN_TEMPORAL_JOB_LAUNCHER_ARG_MAX_BRANCHES_PER_TREE);
    int subTreeCeiling = PropertiesUtils.getRequiredPropAsInt(this.jobProps, GOBBLIN_TEMPORAL_JOB_LAUNCHER_ARG_MAX_SUB_TREES_PER_TREE);

    Workload<IllustrationItem> syntheticWorkload = SimpleGeneratedWorkload.createAs(activityCount);
    WorkflowOptions workflowOpts = WorkflowOptions.newBuilder().setTaskQueue(this.queueName).build();
    // WARNING: although the type param must agree w/ that of `syntheticWorkload`, it's entirely unverified by
    // the type checker!  ...and more to the point, a mismatch would occur only at runtime (in `performWorkload`
    // on the workflow type given to the stub)!
    NestingExecWorkflow<IllustrationItem> rootWorkflow =
        this.client.newWorkflowStub(NestingExecWorkflow.class, workflowOpts);
    rootWorkflow.performWorkload(WorkflowAddr.ROOT, syntheticWorkload, 0, branchCeiling, subTreeCeiling, Optional.empty());
  }
}
| 3,979 |
0 | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/loadgen | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/loadgen/activity/IllustrationItemActivity.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.temporal.loadgen.activity;
import io.temporal.activity.ActivityInterface;
import io.temporal.activity.ActivityMethod;
import org.apache.gobblin.temporal.loadgen.work.IllustrationItem;
/**
 * Activity for processing {@link IllustrationItem}s
 *
 * CAUTION/FINDING: an `@ActivityInterface` must not be parameterized (e.g. here, by WORK_ITEM), as doing so results in:
 *   io.temporal.failure.ApplicationFailure: message='class java.util.LinkedHashMap cannot be cast to class
 *   org.apache.gobblin.temporal.loadgen.work.IllustrationItem', type='java.lang.ClassCastException'
 */
@ActivityInterface
public interface IllustrationItemActivity {
  /**
   * "Handle" (i.e. process) the given work item.
   *
   * @param item the work item to process
   * @return some string result for the caller's promise (the reference impl simply echoes the item's name)
   */
  @ActivityMethod
  String handleItem(IllustrationItem item);
}
| 3,980 |
0 | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/loadgen/activity | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/loadgen/activity/impl/IllustrationItemActivityImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.temporal.loadgen.activity.impl;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.temporal.loadgen.activity.IllustrationItemActivity;
import org.apache.gobblin.temporal.loadgen.work.IllustrationItem;
/** Reference {@link IllustrationItemActivity} impl: logs the item's name, then echoes it back as the result. */
@Slf4j
public class IllustrationItemActivityImpl implements IllustrationItemActivity {
  @Override
  public String handleItem(final IllustrationItem item) {
    // Parameterized (placeholder) logging: defers message assembly to SLF4J rather than eagerly
    // concatenating the string even when INFO is disabled; emitted text is unchanged.
    log.info("Now illustrating - '{}'", item.getName());
    return item.getName();
  }
}
| 3,981 |
0 | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/loadgen | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/loadgen/work/IllustrationItem.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.temporal.loadgen.work;
import lombok.Data;
import lombok.NoArgsConstructor;
import lombok.NonNull;
import lombok.RequiredArgsConstructor;
/** Generally, this would specify what "work" needs performing plus how to perform, but for now merely a unique name (to log) */
@Data
@NoArgsConstructor // IMPORTANT: for jackson (de)serialization
@RequiredArgsConstructor
public class IllustrationItem {
  // Sole payload: the item's display name; @NonNull makes the lombok-generated required-args ctor
  // reject null at runtime (the no-arg ctor, for jackson, still leaves it initially null)
  @NonNull
  private String name;
}
| 3,982 |
0 | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/loadgen | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/loadgen/work/SimpleGeneratedWorkload.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.temporal.loadgen.work;
import com.fasterxml.jackson.annotation.JsonIgnore;
import java.util.List;
import java.util.Optional;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import lombok.AccessLevel;
import org.apache.gobblin.temporal.util.nesting.work.SeqBackedWorkSpan;
import org.apache.gobblin.temporal.util.nesting.work.Workload;
/** Example, illustration workload that synthesizes its work items; genuine {@link Workload}s generally arise from query/calc */
@lombok.AllArgsConstructor(access = AccessLevel.PRIVATE)
@lombok.NoArgsConstructor // IMPORTANT: for jackson (de)serialization
@lombok.ToString
public class SimpleGeneratedWorkload implements Workload<IllustrationItem> {
  private int numItems;

  /** Factory method: a workload of exactly {@code numItems} synthesized items */
  public static SimpleGeneratedWorkload createAs(final int numItems) {
    return new SimpleGeneratedWorkload(numItems);
  }

  @Override
  public Optional<Workload.WorkSpan<IllustrationItem>> getSpan(final int startIndex, final int numElements) {
    // Guard clause: any out-of-range start yields no span at all.
    if (startIndex < 0 || startIndex >= numItems) {
      return Optional.empty();
    }
    // Clamp the span's end so the final batch may be short.
    final int endExclusive = Math.min(startIndex + numElements, numItems);
    final List<IllustrationItem> batch = IntStream.range(startIndex, endExclusive)
        .mapToObj(i -> new IllustrationItem("item-" + i + "-of-" + numItems))
        .collect(Collectors.toList());
    return Optional.of(new SeqBackedWorkSpan<>(batch, startIndex));
  }

  @Override
  public boolean isIndexKnownToExceed(final int index) {
    // Size is definite here, so exceeding is simply index >= numItems.
    return isDefiniteSize() && index >= numItems;
  }

  @Override
  @JsonIgnore // (because no-arg method resembles 'java bean property')
  public boolean isDefiniteSize() {
    return true;
  }
}
| 3,983 |
0 | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/loadgen/workflow | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/loadgen/workflow/impl/NestingExecOfIllustrationItemActivityWorkflowImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.temporal.loadgen.workflow.impl;
import io.temporal.activity.ActivityOptions;
import io.temporal.common.RetryOptions;
import io.temporal.workflow.Async;
import io.temporal.workflow.Promise;
import io.temporal.workflow.Workflow;
import java.time.Duration;
import org.apache.gobblin.temporal.loadgen.activity.IllustrationItemActivity;
import org.apache.gobblin.temporal.loadgen.work.IllustrationItem;
import org.apache.gobblin.temporal.util.nesting.workflow.AbstractNestingExecWorkflowImpl;
/** {@link org.apache.gobblin.temporal.util.nesting.workflow.NestingExecWorkflow} for {@link IllustrationItem} */
public class NestingExecOfIllustrationItemActivityWorkflowImpl
    extends AbstractNestingExecWorkflowImpl<IllustrationItem, String> {
  // RetryOptions specify how to automatically handle retries when Activities fail:
  // starting at 1s between attempts, doubling each time (backoff coefficient 2) up to a 100s cap,
  // for at most 3 attempts total.
  private static final RetryOptions ACTIVITY_RETRY_OPTS = RetryOptions.newBuilder()
      .setInitialInterval(Duration.ofSeconds(1))
      .setMaximumInterval(Duration.ofSeconds(100))
      .setBackoffCoefficient(2)
      .setMaximumAttempts(3)
      .build();

  // Each activity invocation must complete within 10s (start-to-close); on timeout/failure the
  // retry options above govern re-attempts.
  private static final ActivityOptions ACTIVITY_OPTS = ActivityOptions.newBuilder()
      .setStartToCloseTimeout(Duration.ofSeconds(10))
      .setRetryOptions(ACTIVITY_RETRY_OPTS)
      .build();

  // One stub per workflow instance, shared by every activity launch below.
  private final IllustrationItemActivity activityStub =
      Workflow.newActivityStub(IllustrationItemActivity.class, ACTIVITY_OPTS);

  /** Launch {@link IllustrationItemActivity#handleItem} asynchronously, yielding a {@link Promise} of its result. */
  @Override
  protected Promise<String> launchAsyncActivity(final IllustrationItem item) {
    return Async.function(activityStub::handleItem, item);
  }
}
| 3,984 |
0 | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/loadgen | Create_ds/gobblin/gobblin-temporal/src/main/java/org/apache/gobblin/temporal/loadgen/worker/ArbitraryLoadWorker.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.temporal.loadgen.worker;
import com.typesafe.config.Config;
import io.temporal.client.WorkflowClient;
import org.apache.gobblin.temporal.cluster.AbstractTemporalWorker;
import org.apache.gobblin.temporal.loadgen.activity.impl.IllustrationItemActivityImpl;
import org.apache.gobblin.temporal.loadgen.workflow.impl.NestingExecOfIllustrationItemActivityWorkflowImpl;
/** Worker for {@link NestingExecOfIllustrationItemActivityWorkflowImpl} and said activity impl */
public class ArbitraryLoadWorker extends AbstractTemporalWorker {

  public ArbitraryLoadWorker(Config config, WorkflowClient workflowClient) {
    super(config, workflowClient);
  }

  /** @return the single workflow implementation class this worker registers */
  @Override
  protected Class<?>[] getWorkflowImplClasses() {
    Class<?>[] workflowImpls = { NestingExecOfIllustrationItemActivityWorkflowImpl.class };
    return workflowImpls;
  }

  /** @return the single activity implementation instance this worker registers */
  @Override
  protected Object[] getActivityImplInstances() {
    Object[] activityImpls = { new IllustrationItemActivityImpl() };
    return activityImpls;
  }
}
| 3,985 |
0 | Create_ds/gobblin/gobblin-audit/src/test/java/org/apache/gobblin/audit | Create_ds/gobblin/gobblin-audit/src/test/java/org/apache/gobblin/audit/values/MockSink.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.audit.values;
import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.audit.values.auditor.ValueAuditRuntimeMetadata;
import org.apache.gobblin.audit.values.sink.AuditSink;
import java.io.IOException;
import org.apache.avro.generic.GenericRecord;
import com.typesafe.config.Config;
/** A do-nothing {@link AuditSink} for tests: accepts and silently discards every record. */
@Alias(value = "MockSink")
public class MockSink implements AuditSink {
  // Ctor signature mirrors the (Config, ValueAuditRuntimeMetadata) shape of real sinks (e.g. FsAuditSink);
  // presumably invoked reflectively by the sink factory — confirm against DefaultAuditSinkFactory.
  public MockSink(Config config, ValueAuditRuntimeMetadata auditMetadata) {}

  // No resources held, so close is a no-op.
  @Override
  public void close() throws IOException {}

  // Intentionally drops the record.
  @Override
  public void write(GenericRecord record) throws IOException {}
}
| 3,986 |
0 | Create_ds/gobblin/gobblin-audit/src/test/java/org/apache/gobblin/audit | Create_ds/gobblin/gobblin-audit/src/test/java/org/apache/gobblin/audit/values/FsAuditSinkTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.audit.values;
import org.apache.gobblin.audit.values.auditor.ValueAuditRuntimeMetadata;
import org.apache.gobblin.audit.values.sink.FsAuditSink;
import java.io.File;
import org.apache.avro.Schema;
import org.apache.avro.SchemaBuilder;
import org.apache.avro.file.DataFileReader;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.generic.GenericRecordBuilder;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.typesafe.config.ConfigFactory;
@Test(groups = { "gobblin.audit.values" })
public class FsAuditSinkTest {

  /**
   * Round-trip test: writes a single record through {@link FsAuditSink}, then reads the produced
   * Avro audit file back and verifies it contains exactly that record.  Cleans up the audit dir
   * whether the test passes or fails.
   */
  @Test
  public void testWrite() throws Exception {
    Schema testSchema = SchemaBuilder.record("test").fields().name("f1").type().stringType().noDefault().endRecord();
    GenericRecord r = new GenericRecordBuilder(testSchema).set("f1", "v1").build();
    ValueAuditRuntimeMetadata auditMetadata =
        ValueAuditRuntimeMetadata.builder("db", "tb", testSchema).snapshotId(RandomStringUtils.randomAlphanumeric(5))
            .partFileName("part-1.avro").build();
    File auditFile = null;
    Path auditDir = null;
    try (FsAuditSink auditSink = new FsAuditSink(ConfigFactory.empty(), auditMetadata)) {
      auditFile = new File(auditSink.getAuditFilePath().toString());
      auditDir = auditSink.getAuditDirPath();
      auditSink.write(r);
    } catch (Exception e) {
      // BUGFIX: if the sink's constructor itself threw, auditDir is still null; attempting the
      // delete would NPE and mask the original failure.  Only clean up what was actually created.
      if (auditDir != null) {
        FileSystem.get(new Configuration()).delete(auditDir, true);
      }
      throw e;
    }
    GenericDatumReader<GenericRecord> reader = new GenericDatumReader<>(testSchema);
    try (DataFileReader<GenericRecord> dataFileReader = new DataFileReader<>(auditFile, reader)) {
      // Exactly one record expected: the one written above.
      Assert.assertTrue(dataFileReader.hasNext(), "audit file should contain the written record");
      Assert.assertEquals(dataFileReader.next(), r);
      Assert.assertFalse(dataFileReader.hasNext(), "audit file should contain exactly one record");
    } finally {
      FileSystem.get(new Configuration()).delete(auditDir, true);
    }
  }
}
| 3,987 |
0 | Create_ds/gobblin/gobblin-audit/src/test/java/org/apache/gobblin/audit | Create_ds/gobblin/gobblin-audit/src/test/java/org/apache/gobblin/audit/values/ValueAuditGeneratorTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.audit.values;
import static org.mockito.Mockito.mock;
import org.apache.gobblin.audit.values.auditor.ValueAuditGenerator;
import org.apache.gobblin.audit.values.auditor.ValueAuditRuntimeMetadata;
import org.apache.gobblin.audit.values.policy.column.ProjectAllColumnProjectionPolicy;
import org.apache.gobblin.audit.values.policy.row.SelectAllRowSelectionPolicy;
import org.apache.avro.generic.GenericRecord;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.collect.ImmutableMap;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
@Test(groups = {"gobblin.audit.values"})
public class ValueAuditGeneratorTest {

  /** Policies/sink specified by fully-qualified class name. */
  @Test
  public void testConstructor() throws Exception {
    Config config = ConfigFactory.parseMap(ImmutableMap.of(
        "columnProjection.class", ProjectAllColumnProjectionPolicy.class.getCanonicalName(),
        "rowSelection.class", SelectAllRowSelectionPolicy.class.getCanonicalName(),
        "auditSink.class", MockSink.class.getCanonicalName()));
    assertResolvedPoliciesAndSink(config);
  }

  /** Policies/sink specified by their short {@code @Alias} names; must resolve to the same classes. */
  @Test
  public void testConstructorWithAlias() throws Exception {
    Config config = ConfigFactory.parseMap(ImmutableMap.of(
        "columnProjection.class", "ProjectAll",
        "rowSelection.class", "SelectAll",
        "auditSink.class", "MockSink"));
    assertResolvedPoliciesAndSink(config);
  }

  /**
   * Shared verification: builds a {@link ValueAuditGenerator} from {@code config}, audits a mock
   * record, and asserts the resolved row-selection, column-projection, and sink classes.
   */
  private static void assertResolvedPoliciesAndSink(Config config) throws Exception {
    ValueAuditRuntimeMetadata runtimeMetadata = mock(ValueAuditRuntimeMetadata.class, Mockito.RETURNS_SMART_NULLS);
    ValueAuditGenerator auditGenerator = ValueAuditGenerator.create(config, runtimeMetadata);
    auditGenerator.audit(mock(GenericRecord.class, Mockito.RETURNS_SMART_NULLS));
    Assert.assertEquals(auditGenerator.getRowSelectionPolicy().getClass().getCanonicalName(), SelectAllRowSelectionPolicy.class.getCanonicalName());
    Assert.assertEquals(auditGenerator.getColumnProjectionPolicy().getClass().getCanonicalName(), ProjectAllColumnProjectionPolicy.class.getCanonicalName());
    Assert.assertEquals(auditGenerator.getAuditSink().getClass().getCanonicalName(), MockSink.class.getCanonicalName());
  }
}
| 3,988 |
0 | Create_ds/gobblin/gobblin-audit/src/test/java/org/apache/gobblin/audit | Create_ds/gobblin/gobblin-audit/src/test/java/org/apache/gobblin/audit/values/ValueAuditRuntimeMetadataTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.audit.values;
import static org.mockito.Mockito.mock;
import org.apache.gobblin.audit.values.auditor.ValueAuditRuntimeMetadata;
import org.apache.gobblin.audit.values.auditor.ValueAuditRuntimeMetadata.Phase;
import org.apache.avro.Schema;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.Test;
@Test(groups = { "gobblin.audit.values" })
public class ValueAuditRuntimeMetadataTest {
  // Builder with only the three required args: every optional field should fall back to "NA"
  // (and the phase to Phase.NA).
  @Test
  public void testBuilderWithDefaults() throws Exception {
    Schema mockSchema = mock(Schema.class, Mockito.RETURNS_SMART_NULLS);
    ValueAuditRuntimeMetadata runtimeMetadata = ValueAuditRuntimeMetadata.builder("db", "t", mockSchema).build();
    Assert.assertEquals(runtimeMetadata.getTableMetadata().getDatabase(), "db");
    Assert.assertEquals(runtimeMetadata.getTableMetadata().getTable(), "t");
    Assert.assertEquals(runtimeMetadata.getTableMetadata().getTableSchema(), mockSchema);
    Assert.assertEquals(runtimeMetadata.getCluster(), "NA");
    Assert.assertEquals(runtimeMetadata.getDeltaId(), "NA");
    Assert.assertEquals(runtimeMetadata.getExtractId(), "NA");
    Assert.assertEquals(runtimeMetadata.getPartFileName(), "NA");
    Assert.assertEquals(runtimeMetadata.getSnapshotId(), "NA");
    Assert.assertEquals(runtimeMetadata.getPhase(), Phase.NA);
  }

  // Builder with every optional field supplied: each getter should echo the value given.
  @Test
  public void testBuilder() throws Exception {
    Schema mockSchema = mock(Schema.class, Mockito.RETURNS_SMART_NULLS);
    ValueAuditRuntimeMetadata runtimeMetadata =
        ValueAuditRuntimeMetadata.builder("db", "t", mockSchema).cluster("c").deltaId("d").extractId("e")
            .partFileName("p").phase(Phase.AVRO_CONV).snapshotId("s").build();
    Assert.assertEquals(runtimeMetadata.getTableMetadata().getDatabase(), "db");
    Assert.assertEquals(runtimeMetadata.getTableMetadata().getTable(), "t");
    Assert.assertEquals(runtimeMetadata.getTableMetadata().getTableSchema(), mockSchema);
    Assert.assertEquals(runtimeMetadata.getCluster(), "c");
    Assert.assertEquals(runtimeMetadata.getDeltaId(), "d");
    Assert.assertEquals(runtimeMetadata.getExtractId(), "e");
    Assert.assertEquals(runtimeMetadata.getPartFileName(), "p");
    Assert.assertEquals(runtimeMetadata.getSnapshotId(), "s");
    Assert.assertEquals(runtimeMetadata.getPhase(), Phase.AVRO_CONV);
  }
}
}
| 3,989 |
0 | Create_ds/gobblin/gobblin-audit/src/main/java/org/apache/gobblin/audit/values | Create_ds/gobblin/gobblin-audit/src/main/java/org/apache/gobblin/audit/values/sink/FsAuditSink.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.audit.values.sink;
import java.io.IOException;
import java.io.OutputStream;
import java.net.URI;
import lombok.Getter;
import org.apache.avro.file.DataFileWriter;
import org.apache.avro.generic.GenericDatumWriter;
import org.apache.avro.generic.GenericRecord;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import com.google.common.io.Closer;
import com.typesafe.config.Config;
import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.audit.values.auditor.ValueAuditRuntimeMetadata;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.PathUtils;
/**
 * A Hadoop {@link FileSystem} based {@link AuditSink} that writes audit {@link GenericRecord}s to a file on {@link FileSystem}.
 * <ul>
 * <li> The {@link FileSystem} {@link URI} can be set using key {@link ConfigurationKeys#FS_URI_KEY}, {@link ConfigurationKeys#LOCAL_FS_URI} is used by default.
 * <li> All audit files are written under the base path. The base path can be set using key {@link #FS_SINK_AUDIT_OUTPUT_PATH_KEY}.
 * The default path is,
 * <pre>
 * <code>System.getProperty("user.dir") + "/lumos_value_audit/local_audit";</code>
 * </pre>
 * <li> It uses <code>auditMetadata</code> to build the audit file name and path.<br>
 * <b>The layout on {@link FileSystem} - </b>
 * <pre>
 * |-- <Database>
 *    |-- <Table>
 *       |-- P=<PHASE>.C=<CLUSTER>.E=<EXTRACT_ID>.S=<SNAPSHOT_ID>.D=<DELTA_ID>
 *          |-- *.avro
 * </pre>
 * </ul>
 */
@Alias(value = "FsAuditSink")
public class FsAuditSink implements AuditSink {

  private static final String FS_SINK_AUDIT_OUTPUT_PATH_KEY = "fs.outputDirPath";
  private static final String FS_SINK_AUDIT_OUTPUT_DEFAULT_PATH = System.getProperty("user.dir") + "/lumos_value_audit/local_audit";
  private static final String FILE_NAME_DELIMITER = "_";

  private final FileSystem fs;
  private final OutputStream auditFileOutputStream;
  private final DataFileWriter<GenericRecord> writer;
  private final Closer closer = Closer.create();
  private final ValueAuditRuntimeMetadata auditMetadata;

  @Getter
  private final Path auditDirPath;

  /**
   * Opens the audit file for {@code auditMetadata} under the configured output dir and prepares an
   * Avro {@link DataFileWriter} over it, using the table schema from the metadata.
   *
   * @param config may supply {@value #FS_SINK_AUDIT_OUTPUT_PATH_KEY} to override the default output dir
   * @param auditMetadata runtime metadata used to derive the audit file path and record schema
   * @throws IOException if the output stream or writer cannot be created; any resources opened so far
   *         are closed before rethrowing
   */
  public FsAuditSink(Config config, ValueAuditRuntimeMetadata auditMetadata) throws IOException {
    this.auditDirPath = new Path(ConfigUtils.getString(config, FS_SINK_AUDIT_OUTPUT_PATH_KEY, FS_SINK_AUDIT_OUTPUT_DEFAULT_PATH));
    this.fs = this.auditDirPath.getFileSystem(new Configuration());
    this.auditMetadata = auditMetadata;
    try {
      this.auditFileOutputStream = this.closer.register(this.fs.create(getAuditFilePath()));
      DataFileWriter<GenericRecord> dataFileWriter =
          this.closer.register(new DataFileWriter<GenericRecord>(new GenericDatumWriter<GenericRecord>()));
      this.writer = this.closer.register(
          dataFileWriter.create(this.auditMetadata.getTableMetadata().getTableSchema(), this.auditFileOutputStream));
    } catch (IOException | RuntimeException e) {
      // BUGFIX: previously a failure mid-construction (e.g. in DataFileWriter.create) leaked the
      // already-opened output stream, since close() can never be called on an object whose
      // constructor threw.  Release whatever was registered, then rethrow the original exception.
      this.closer.close();
      throw e;
    }
  }

  /**
   * Returns the complete path of the audit file. Generate the audit file path with format
   *
   * <pre>
   * |-- <Database>
   *    |-- <Table>
   *       |-- P=<PHASE>.C=<CLUSTER>.E=<EXTRACT_ID>.S=<SNAPSHOT_ID>.D=<DELTA_ID>
   *          |-- *.avro
   * </pre>
   *
   */
  public Path getAuditFilePath() {
    StringBuilder auditFileNameBuilder = new StringBuilder();
    auditFileNameBuilder.append("P=").append(auditMetadata.getPhase()).append(FILE_NAME_DELIMITER).append("C=")
        .append(auditMetadata.getCluster()).append(FILE_NAME_DELIMITER).append("E=")
        .append(auditMetadata.getExtractId()).append(FILE_NAME_DELIMITER).append("S=")
        .append(auditMetadata.getSnapshotId()).append(FILE_NAME_DELIMITER).append("D=")
        .append(auditMetadata.getDeltaId());
    return new Path(auditDirPath, PathUtils.combinePaths(auditMetadata.getTableMetadata().getDatabase(), auditMetadata
        .getTableMetadata().getTable(), auditFileNameBuilder.toString(), auditMetadata.getPartFileName()));
  }

  /**
   * Append this record to the {@link DataFileWriter}
   *
   * {@inheritDoc}
   * @see org.apache.gobblin.audit.values.sink.AuditSink#write(org.apache.avro.generic.GenericRecord)
   */
  @Override
  public void write(GenericRecord record) throws IOException {
    this.writer.append(record);
  }

  /** Closes the writer and the underlying output stream (registration order handled by {@link Closer}). */
  @Override
  public final void close() throws IOException {
    this.closer.close();
  }
}
| 3,990 |
0 | Create_ds/gobblin/gobblin-audit/src/main/java/org/apache/gobblin/audit/values | Create_ds/gobblin/gobblin-audit/src/main/java/org/apache/gobblin/audit/values/sink/AuditSink.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.audit.values.sink;
import java.io.Closeable;
import java.io.IOException;
import org.apache.avro.generic.GenericRecord;
/**
 * An interface for persisting value audits. Implementations own the underlying
 * resource and release it via {@link java.io.Closeable#close()}.
 */
public interface AuditSink extends Closeable {

  /**
   * Write the <code>record</code> to sink
   *
   * @param record to be written
   * @throws IOException if writing this record failed
   */
  void write(GenericRecord record) throws IOException;
}
| 3,991 |
0 | Create_ds/gobblin/gobblin-audit/src/main/java/org/apache/gobblin/audit/values | Create_ds/gobblin/gobblin-audit/src/main/java/org/apache/gobblin/audit/values/sink/DefaultAuditSinkFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.audit.values.sink;
import java.lang.reflect.InvocationTargetException;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.reflect.ConstructorUtils;
import com.typesafe.config.Config;
import org.apache.gobblin.audit.values.auditor.ValueAuditRuntimeMetadata;
import org.apache.gobblin.util.ClassAliasResolver;
/**
 * Default factory class to create new {@link AuditSink}s
 */
@Slf4j
public class DefaultAuditSinkFactory {

  private static final String AUDIT_SINK_CLASS_NAME_KEY = "class";
  private static final String DEFAULT_AUDIT_SINK_CLASS = FsAuditSink.class.getCanonicalName();

  private final ClassAliasResolver<AuditSink> aliasResolver;

  private DefaultAuditSinkFactory() {
    this.aliasResolver = new ClassAliasResolver<>(AuditSink.class);
  }

  /**
   * Create a new {@link AuditSink} using the alias or canonical classname specified at {@value #AUDIT_SINK_CLASS_NAME_KEY} in the <code>config</code>
   * The {@link AuditSink} class MUST have an accessible constructor <code>abc(Config config, ValueAuditRuntimeMetadata auditRuntimeMetadata)</code>
   * <br>
   * If {@value #AUDIT_SINK_CLASS_NAME_KEY} is not set in <code>config</code>, a default {@link #DEFAULT_AUDIT_SINK_CLASS} is used
   *
   * @param config job configs
   * @param auditRuntimeMetadata runtime table metadata
   *
   * @return a new instance of {@link AuditSink}
   * @throws RuntimeException if the sink class cannot be resolved, instantiated or accessed
   */
  public AuditSink create(Config config, ValueAuditRuntimeMetadata auditRuntimeMetadata) {
    String sinkClassName = DEFAULT_AUDIT_SINK_CLASS;
    if (config.hasPath(AUDIT_SINK_CLASS_NAME_KEY)) {
      sinkClassName = config.getString(AUDIT_SINK_CLASS_NAME_KEY);
    }
    log.info("Using audit sink class name/alias {}", sinkClassName);
    try {
      return (AuditSink) ConstructorUtils.invokeConstructor(
          Class.forName(this.aliasResolver.resolve(sinkClassName)), config, auditRuntimeMetadata);
    } catch (ReflectiveOperationException e) {
      // Collapses ClassNotFoundException, NoSuchMethodException, InstantiationException,
      // IllegalAccessException and InvocationTargetException — all subclasses of this.
      throw new RuntimeException("Failed to instantiate AuditSink " + sinkClassName, e);
    }
  }

  /** Initialization-on-demand holder: lazy and thread-safe without explicit locking. */
  private static class DefaultAuditSinkFactoryHolder {
    private static final DefaultAuditSinkFactory INSTANCE = new DefaultAuditSinkFactory();
  }

  /** @return the singleton factory instance */
  public static DefaultAuditSinkFactory getInstance() {
    return DefaultAuditSinkFactoryHolder.INSTANCE;
  }
}
| 3,992 |
0 | Create_ds/gobblin/gobblin-audit/src/main/java/org/apache/gobblin/audit/values | Create_ds/gobblin/gobblin-audit/src/main/java/org/apache/gobblin/audit/values/auditor/ValueAuditGenerator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.audit.values.auditor;
import java.io.Closeable;
import java.io.IOException;
import lombok.AllArgsConstructor;
import lombok.Getter;
import org.apache.avro.generic.GenericRecord;
import com.typesafe.config.Config;
import org.apache.gobblin.audit.values.policy.column.ColumnProjectionPolicy;
import org.apache.gobblin.audit.values.policy.column.DefaultColumnProjectionPolicyFactory;
import org.apache.gobblin.audit.values.policy.row.DefaultRowSelectionPolicyFactory;
import org.apache.gobblin.audit.values.policy.row.RowSelectionPolicy;
import org.apache.gobblin.audit.values.sink.AuditSink;
import org.apache.gobblin.audit.values.sink.DefaultAuditSinkFactory;
/**
 * The class that implements value based auditing. The class captures the values of certain
 * columns from the rows in the dataset using the {@link ColumnProjectionPolicy}.
 * This is done for every row or for a sample of the rows as defined by the {@link RowSelectionPolicy}.
 * The selected rows are then written to the {@link AuditSink}
 *
 * {@link ValueAuditGenerator#audit(GenericRecord)} is the method that audits an inputRecord.
 */
@AllArgsConstructor
@Getter
public class ValueAuditGenerator implements Closeable {

  // Config sub-scopes; each policy/sink is built from its own subtree of the job config.
  public static final String COLUMN_PROJECTION_CONFIG_SCOPE = "columnProjection";
  public static final String ROW_SELECTION_CONFIG_SCOPE = "rowSelection";
  public static final String AUDIT_SINK_CONFIG_SCOPE = "auditSink";

  private final ColumnProjectionPolicy columnProjectionPolicy;
  private final RowSelectionPolicy rowSelectionPolicy;
  private final AuditSink auditSink;

  /**
   * Factory method to create a new {@link ValueAuditGenerator}
   * @param config job configs
   * @param runtimeAuditMetadata is used to pass the table specific runtime information like tablename, databaseName, snapshotName etc.
   * See {@link ValueAuditRuntimeMetadata}
   * @return a new {@link ValueAuditGenerator}
   */
  public static ValueAuditGenerator create(Config config, ValueAuditRuntimeMetadata runtimeAuditMetadata) {
    ColumnProjectionPolicy columnProjectionPolicy = DefaultColumnProjectionPolicyFactory.getInstance().create(
        config.getConfig(COLUMN_PROJECTION_CONFIG_SCOPE), runtimeAuditMetadata.getTableMetadata());
    RowSelectionPolicy rowSelectionPolicy = DefaultRowSelectionPolicyFactory.getInstance().create(
        config.getConfig(ROW_SELECTION_CONFIG_SCOPE), runtimeAuditMetadata.getTableMetadata(), columnProjectionPolicy);
    AuditSink auditSink = DefaultAuditSinkFactory.getInstance().create(
        config.getConfig(AUDIT_SINK_CONFIG_SCOPE), runtimeAuditMetadata);
    return new ValueAuditGenerator(columnProjectionPolicy, rowSelectionPolicy, auditSink);
  }

  /**
   * Write an audit record for the <code>inputRecord</code> to the {@link AuditSink}.
   * An audit record is generated for every <code>inputRecord</code> that satisfies the {@link RowSelectionPolicy}.
   * An audit record is created by projecting <code>inputRecord</code> using the {@link ColumnProjectionPolicy}
   *
   * @param inputRecord to be audited
   * @throws IOException if auditing failed for this record
   */
  public void audit(GenericRecord inputRecord) throws IOException {
    // Rows that fail the selection policy are silently skipped by design (sampling).
    if (this.rowSelectionPolicy.shouldSelectRow(inputRecord)) {
      this.auditSink.write(this.columnProjectionPolicy.project(inputRecord));
    }
  }

  @Override
  public void close() throws IOException {
    this.auditSink.close();
  }
}
| 3,993 |
0 | Create_ds/gobblin/gobblin-audit/src/main/java/org/apache/gobblin/audit/values | Create_ds/gobblin/gobblin-audit/src/main/java/org/apache/gobblin/audit/values/auditor/ValueAuditRuntimeMetadata.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.audit.values.auditor;
import java.util.List;
import lombok.AccessLevel;
import lombok.AllArgsConstructor;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.NonNull;
import lombok.RequiredArgsConstructor;
import lombok.ToString;
import org.apache.avro.Schema;
/**
 * A container for table specific runtime Metadata required for auditing a table.
 * Use {@link ValueAuditRuntimeMetadataBuilder} to instantiate a new {@link ValueAuditRuntimeMetadata}.
 * <code>database, table and tableSchema</code> are required fields in the {@link ValueAuditRuntimeMetadata}.
 * All other fields <code>phase, cluster, extractId, snapshotId, deltaId, partFileName</code> are marked as {@value #DEFAULT_VALUE}
 * if not present.
 */
@AllArgsConstructor(access = AccessLevel.PRIVATE)
@Getter
@ToString
@EqualsAndHashCode
public class ValueAuditRuntimeMetadata {

  private static final String DEFAULT_VALUE = "NA";

  /**
   * <i>Required - </i>Table specific metadata like <code>database, table and tableSchema</code>
   */
  private TableMetadata tableMetadata;
  /**
   * <i>Optional - </i>The snapshot generation phase being audited
   */
  private Phase phase;
  /**
   * <i>Optional - </i>Audited snapshot data's cluster
   */
  private String cluster;
  /**
   * <i>Optional - </i>Extract Id of the snapshot
   */
  private String extractId;
  /**
   * <i>Optional - </i>Snapshot Id being audited
   */
  private String snapshotId;
  /**
   * <i>Optional - </i>Delta Id of the snapshot
   */
  private String deltaId;
  /**
   * <i>Optional - </i>Part file in the snapshot being audited
   */
  private String partFileName;

  public static ValueAuditRuntimeMetadataBuilder builder(String databaseName, String tableName, Schema tableSchema) {
    return new ValueAuditRuntimeMetadataBuilder(databaseName, tableName, tableSchema);
  }

  /**
   * Container for table specific metadata
   */
  @Getter
  @RequiredArgsConstructor(access = AccessLevel.PRIVATE)
  public static class TableMetadata {
    /**
     * <i>Required - </i> database name
     */
    @NonNull private String database;
    /**
     * <i>Required - </i> table name
     */
    @NonNull private String table;
    /**
     * <i>Required - </i> table schema
     */
    @NonNull private Schema tableSchema;
    /**
     * <i>Optional - </i> list of key fields in the table that uniquely identify a row.
     * Each entry in the list is an ordered string specifying the location of the nested key field
     * For example, field1.nestedField1 refers to the field "nestedField1" inside of field "field1" of the record.
     */
    private List<String> keyFieldLocations;
    /**
     * <i>Optional - </i> list of delta fields in the table that are used to track changes in the row over time.
     * Each entry in the list is an ordered string specifying the location of the nested delta field
     * For example, field1.nestedField1 refers to the field "nestedField1" inside of field "field1" of the record.
     */
    private List<String> deltaFieldLocations;
  }

  /**
   * An enum for all phases snapshot generation
   */
  public static enum Phase {
    PULL("Pull from extract"),
    AVRO_CONV("Convert to avro"),
    SS_GEN("Snapshot Generation, LSB"),
    SS_UPD("Snapshot update, VSB"),
    SS_MAT("Snapshot materialization, QSB"),
    SS_PUB("Publish Snapshot"),
    NA("Not Applicable");

    private String description;

    Phase(String description) {
      this.description = description;
    }

    public String getDescription() {
      return this.description;
    }
  }

  /**
   * Builder to build A {@link ValueAuditRuntimeMetadata}, <code>databaseName, tableName and tableSchema</code> are required
   */
  public static class ValueAuditRuntimeMetadataBuilder {

    private TableMetadata tableMetadata;
    private Phase phase = Phase.NA;
    private String cluster = DEFAULT_VALUE;
    private String extractId = DEFAULT_VALUE;
    private String snapshotId = DEFAULT_VALUE;
    private String deltaId = DEFAULT_VALUE;
    private String partFileName = DEFAULT_VALUE;

    public ValueAuditRuntimeMetadataBuilder(String databaseName, String tableName, Schema tableSchema) {
      this.tableMetadata = new TableMetadata(databaseName, tableName, tableSchema);
    }

    public ValueAuditRuntimeMetadataBuilder phase(final Phase phase) {
      this.phase = phase;
      return this;
    }

    public ValueAuditRuntimeMetadataBuilder cluster(final String cluster) {
      this.cluster = cluster;
      return this;
    }

    public ValueAuditRuntimeMetadataBuilder extractId(final String extractId) {
      this.extractId = extractId;
      return this;
    }

    public ValueAuditRuntimeMetadataBuilder snapshotId(final String snapshotId) {
      this.snapshotId = snapshotId;
      return this;
    }

    public ValueAuditRuntimeMetadataBuilder deltaId(final String deltaId) {
      this.deltaId = deltaId;
      return this;
    }

    public ValueAuditRuntimeMetadataBuilder partFileName(final String partFileName) {
      this.partFileName = partFileName;
      return this;
    }

    public ValueAuditRuntimeMetadataBuilder tableMetadataKeyFieldLocations(List<String> keyFieldLocations) {
      this.tableMetadata.keyFieldLocations = keyFieldLocations;
      return this;
    }

    public ValueAuditRuntimeMetadataBuilder tableMetadataDeltaFieldLocations(List<String> deltaFieldLocations) {
      // Bug fix: previously assigned to keyFieldLocations (copy-paste error), which
      // both dropped the delta fields and clobbered any key fields already set.
      this.tableMetadata.deltaFieldLocations = deltaFieldLocations;
      return this;
    }

    public ValueAuditRuntimeMetadata build() {
      return new ValueAuditRuntimeMetadata(tableMetadata, phase, cluster, extractId, snapshotId, deltaId, partFileName);
    }
  }
}
| 3,994 |
0 | Create_ds/gobblin/gobblin-audit/src/main/java/org/apache/gobblin/audit/values/policy | Create_ds/gobblin/gobblin-audit/src/main/java/org/apache/gobblin/audit/values/policy/column/ColumnProjectionPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.audit.values.policy.column;
import java.util.List;
import org.apache.avro.generic.GenericRecord;
/**
 * An interface that projects certain columns/fields of an input {@link GenericRecord} to generate a new {@link GenericRecord} that can be audited.
 * The field locations to project are an ordered string specifying the location of the nested field to retrieve.
 * For example, field1.nestedField1 takes the value of the field "field1" of the record, and retrieves the field "nestedField1" from it
 */
public interface ColumnProjectionPolicy {

  /**
   * Get the key aka unique identifier fields to project in a {@link GenericRecord}
   */
  List<String> getKeyColumnsToProject();

  /**
   * Get the delta fields to project in a {@link GenericRecord}. These are the fields used to determine changes over time.
   */
  List<String> getDeltaColumnsToProject();

  /**
   * A union of {@link #getKeyColumnsToProject()} and {@link #getDeltaColumnsToProject()}
   */
  List<String> getAllColumnsToProject();

  /**
   * Project key and delta columns/fields of the <code>inputRecord</code> and return a new {@link GenericRecord} with only the projected columns/fields
   * @param inputRecord the original record with all columns/fields
   * @return a new {@link GenericRecord} with only {@link #getAllColumnsToProject()}
   */
  GenericRecord project(GenericRecord inputRecord);
}
| 3,995 |
0 | Create_ds/gobblin/gobblin-audit/src/main/java/org/apache/gobblin/audit/values/policy | Create_ds/gobblin/gobblin-audit/src/main/java/org/apache/gobblin/audit/values/policy/column/DefaultColumnProjectionPolicyFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.audit.values.policy.column;
import java.lang.reflect.InvocationTargetException;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.reflect.ConstructorUtils;
import com.google.common.base.Preconditions;
import com.typesafe.config.Config;
import org.apache.gobblin.audit.values.auditor.ValueAuditRuntimeMetadata;
import org.apache.gobblin.util.ClassAliasResolver;
/**
 * Default factory class to create new {@link ColumnProjectionPolicy}s
 */
@Slf4j
public class DefaultColumnProjectionPolicyFactory {

  private static final String COLUMN_PROJECTION_POLICY_CLASS_NAME_KEY = "class";

  private final ClassAliasResolver<ColumnProjectionPolicy> aliasResolver;

  private DefaultColumnProjectionPolicyFactory() {
    this.aliasResolver = new ClassAliasResolver<>(ColumnProjectionPolicy.class);
  }

  /**
   * Create a new {@link ColumnProjectionPolicy} using the alias or canonical classname specified at {@value #COLUMN_PROJECTION_POLICY_CLASS_NAME_KEY} in the <code>config</code>
   * The {@link ColumnProjectionPolicy} class MUST have an accessible constructor <code>abc(Config config, TableMetadata tableMetadata)</code>
   * <b>Note : Must have the key {@value #COLUMN_PROJECTION_POLICY_CLASS_NAME_KEY} set in <code>config</code> to create the {@link ColumnProjectionPolicy}</b>
   *
   * @param config job configs, Must have the key {@value #COLUMN_PROJECTION_POLICY_CLASS_NAME_KEY} set to create the {@link ColumnProjectionPolicy}
   * @param tableMetadata runtime table metadata
   *
   * @return a new instance of {@link ColumnProjectionPolicy}
   * @throws RuntimeException if the policy class cannot be resolved, instantiated or accessed
   */
  public ColumnProjectionPolicy create(Config config, ValueAuditRuntimeMetadata.TableMetadata tableMetadata) {
    Preconditions.checkArgument(config.hasPath(COLUMN_PROJECTION_POLICY_CLASS_NAME_KEY),
        "Missing required config key " + COLUMN_PROJECTION_POLICY_CLASS_NAME_KEY);
    String policyClassName = config.getString(COLUMN_PROJECTION_POLICY_CLASS_NAME_KEY);
    log.info("Using column projection class name/alias {}", policyClassName);
    try {
      return (ColumnProjectionPolicy) ConstructorUtils.invokeConstructor(
          Class.forName(this.aliasResolver.resolve(policyClassName)), config, tableMetadata);
    } catch (ReflectiveOperationException e) {
      // Collapses ClassNotFoundException, NoSuchMethodException, InstantiationException,
      // IllegalAccessException and InvocationTargetException — all subclasses of this.
      throw new RuntimeException("Failed to instantiate ColumnProjectionPolicy " + policyClassName, e);
    }
  }

  /** Initialization-on-demand holder: lazy and thread-safe without explicit locking. */
  private static class DefaultColumnProjectionPolicyFactoryHolder {
    private static final DefaultColumnProjectionPolicyFactory INSTANCE = new DefaultColumnProjectionPolicyFactory();
  }

  /** @return the singleton factory instance */
  public static DefaultColumnProjectionPolicyFactory getInstance() {
    return DefaultColumnProjectionPolicyFactoryHolder.INSTANCE;
  }
}
| 3,996 |
0 | Create_ds/gobblin/gobblin-audit/src/main/java/org/apache/gobblin/audit/values/policy | Create_ds/gobblin/gobblin-audit/src/main/java/org/apache/gobblin/audit/values/policy/column/ProjectAllColumnProjectionPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.audit.values.policy.column;
import java.util.List;
import org.apache.avro.generic.GenericRecord;
import com.typesafe.config.Config;
import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.audit.values.auditor.ValueAuditRuntimeMetadata;
/**
 * An {@link AbstractColumnProjectionPolicy} that projects all columns/fields of the <code>inputRecord</code>
 */
@Alias(value = "ProjectAll")
public class ProjectAllColumnProjectionPolicy extends AbstractColumnProjectionPolicy {

  public ProjectAllColumnProjectionPolicy(Config config, ValueAuditRuntimeMetadata.TableMetadata tableMetadata) {
    super(config, tableMetadata);
  }

  /** The key columns are whatever the table metadata declares as key field locations. */
  @Override
  public List<String> getKeyColumnsToProject() {
    return this.tableMetadata.getKeyFieldLocations();
  }

  /** The delta columns are whatever the table metadata declares as delta field locations. */
  @Override
  public List<String> getDeltaColumnsToProject() {
    return this.tableMetadata.getDeltaFieldLocations();
  }

  /**
   * Returns the <code>inputRecord</code> unchanged — every field is projected.
   *
   * {@inheritDoc}
   * @see org.apache.gobblin.audit.values.policy.column.ColumnProjectionPolicy#project(org.apache.avro.generic.GenericRecord)
   */
  @Override
  public GenericRecord project(GenericRecord inputRecord) {
    return inputRecord;
  }
}
| 3,997 |
0 | Create_ds/gobblin/gobblin-audit/src/main/java/org/apache/gobblin/audit/values/policy | Create_ds/gobblin/gobblin-audit/src/main/java/org/apache/gobblin/audit/values/policy/column/AbstractColumnProjectionPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.audit.values.policy.column;
import java.util.List;
import org.apache.avro.Schema;
import com.google.common.collect.ImmutableList;
import com.typesafe.config.Config;
import org.apache.gobblin.audit.values.auditor.ValueAuditRuntimeMetadata;
/**
* A base {@link ColumnProjectionPolicy} that reads <code>config</code> to initialize the key and delta columns to project for a table.
* <ul>
* <li> Key and delta fields/column locations to project should be provided by concrete subclasses by implementing {@link #getDeltaColumnsToProject()}
* and {@link #getKeyColumnsToProject()}
* <li> The protected member <code>tableMetadata</code> contains the {@link Schema}, tableName and databaseName to derive the projection columns
* </ul>
*/
public abstract class AbstractColumnProjectionPolicy implements ColumnProjectionPolicy {
protected final ValueAuditRuntimeMetadata.TableMetadata tableMetadata;
public AbstractColumnProjectionPolicy(Config config, ValueAuditRuntimeMetadata.TableMetadata tableMetadata) {
this.tableMetadata = tableMetadata;
}
/**
* Combine both key columns and delta columns to project
* {@inheritDoc}
* @see org.apache.gobblin.audit.values.policy.column.ColumnProjectionPolicy#getAllColumnsToProject()
*/
public List<String> getAllColumnsToProject() {
return ImmutableList.<String> builder().addAll(getKeyColumnsToProject()).addAll(getDeltaColumnsToProject()).build();
}
}
| 3,998 |
0 | Create_ds/gobblin/gobblin-audit/src/main/java/org/apache/gobblin/audit/values/policy | Create_ds/gobblin/gobblin-audit/src/main/java/org/apache/gobblin/audit/values/policy/row/RowSelectionPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.audit.values.policy.row;
import org.apache.avro.generic.GenericRecord;
/**
 * An interface to decide if a row needs to be audited
 */
public interface RowSelectionPolicy {

  /**
   * Finds if this <code>inputRecord</code> needs to be audited
   *
   * @param inputRecord to be audited
   * @return <code>true</code> if <code>inputRecord</code> needs to be audited <code>false</code> otherwise
   */
  boolean shouldSelectRow(GenericRecord inputRecord);
}
| 3,999 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.