index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/mapreduce/CliMRJobLauncher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.mapreduce;
import java.io.IOException;
import java.util.Properties;
import java.util.UUID;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import com.google.common.io.Closer;
import javax.annotation.Nullable;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.runtime.JobException;
import org.apache.gobblin.runtime.JobLauncher;
import org.apache.gobblin.runtime.app.ApplicationException;
import org.apache.gobblin.runtime.app.ApplicationLauncher;
import org.apache.gobblin.runtime.app.ServiceBasedAppLauncher;
import org.apache.gobblin.runtime.cli.CliOptions;
import org.apache.gobblin.runtime.listeners.JobListener;
/**
* A utility class for launching a Gobblin Hadoop MR job through the command line.
*
* @author Yinan Li
*/
@Slf4j
public class CliMRJobLauncher extends Configured implements ApplicationLauncher, JobLauncher, Tool {

  // Single Closer so the app launcher and the MR launcher are released together in close().
  private final Closer closer = Closer.create();
  private final ApplicationLauncher applicationLauncher;
  private final MRJobLauncher mrJobLauncher;

  /**
   * Wires up a {@link ServiceBasedAppLauncher} (named from the job properties, or a random
   * "CliMRJob-&lt;uuid&gt;" fallback) and an {@link MRJobLauncher}, registering both with the
   * shared {@link Closer}.
   */
  public CliMRJobLauncher(Configuration conf, Properties jobProperties) throws Exception {
    log.debug("Configuration: {}", conf);
    log.debug("Job properties: {}", jobProperties);
    setConf(conf);
    String appName = jobProperties.getProperty(ServiceBasedAppLauncher.APP_NAME, "CliMRJob-" + UUID.randomUUID());
    this.applicationLauncher = this.closer.register(new ServiceBasedAppLauncher(jobProperties, appName));
    this.mrJobLauncher = this.closer.register(new MRJobLauncher(jobProperties, getConf(), null));
  }

  /**
   * Runs the full lifecycle: start services, launch the MR job, then stop and close —
   * the nested finally blocks guarantee stop() and close() are both attempted even on failure.
   */
  @Override
  public int run(String[] args) throws Exception {
    try {
      start();
      launchJob(null);
    } finally {
      try {
        stop();
      } finally {
        close();
      }
    }
    return 0;
  }

  /** Starts the underlying application services. */
  @Override
  public void start() throws ApplicationException {
    this.applicationLauncher.start();
  }

  /** Stops the underlying application services. */
  @Override
  public void stop() throws ApplicationException {
    this.applicationLauncher.stop();
  }

  /** Delegates job launch to the wrapped {@link MRJobLauncher}. */
  @Override
  public void launchJob(@Nullable JobListener jobListener) throws JobException {
    this.mrJobLauncher.launchJob(jobListener);
  }

  /** Delegates job cancellation to the wrapped {@link MRJobLauncher}. */
  @Override
  public void cancelJob(@Nullable JobListener jobListener) throws JobException {
    this.mrJobLauncher.cancelJob(jobListener);
  }

  /** Closes both registered launchers via the shared {@link Closer}. */
  @Override
  public void close() throws IOException {
    this.closer.close();
  }

  public static void main(String[] args) throws Exception {
    Configuration configuration = new Configuration();
    // Strip Hadoop generic options first; what remains are the Gobblin CLI arguments.
    String[] remainingArgs = new GenericOptionsParser(configuration, args).getCommandLine().getArgs();
    Properties jobProps = CliOptions.parseArgs(CliMRJobLauncher.class, remainingArgs);
    // Launch and run the job; exit with the tool's return code.
    System.exit(ToolRunner.run(new CliMRJobLauncher(configuration, jobProps), args));
  }
}
| 1,600 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/mapreduce/CustomizedProgresser.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.mapreduce;
import org.apache.hadoop.mapreduce.Mapper;
/**
 * Supplies a job-specific notion of mapper progress, for cases where Hadoop's default
 * progress reporting is not meaningful.
 */
public interface CustomizedProgresser {

  /**
   * Creates a {@link CustomizedProgresser} from the Hadoop mapper's context,
   * which contains both the MR-related configuration needed to measure real progress
   * and the Gobblin job configuration.
   */
  @FunctionalInterface
  interface Factory {
    // Interface members are implicitly public, so the redundant modifier is omitted.
    CustomizedProgresser createCustomizedProgresser(Mapper.Context mapperContext);
  }

  /**
   * Returns the application's own measure of progress (presumably a fraction in [0, 1],
   * per Hadoop's progress convention — confirm at call sites).
   * e.g. for Gobblin-Kafka in batch mode, the number of records written divided by the
   * offsets computed during workunit planning is one good metric for a mapper's progress.
   */
  float getCustomizedProgress();
}
| 1,601 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/mapreduce/GobblinOutputCommitter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.mapreduce;
import java.io.DataInputStream;
import java.io.IOException;
import java.net.URI;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.JobStatus;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.io.Closer;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.runtime.GobblinMultiTaskAttempt;
import org.apache.gobblin.runtime.listeners.JobListener;
import org.apache.gobblin.source.workunit.MultiWorkUnit;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.util.JobLauncherUtils;
import lombok.Getter;
/**
* Hadoop {@link OutputCommitter} implementation that overrides the default
* {@link #abortJob(JobContext, org.apache.hadoop.mapreduce.JobStatus.State)} behavior.
* This is necessary to add functionality for cleaning up staging data when the
* {@link org.apache.gobblin.runtime.JobLauncher#cancelJob(JobListener)} method is
 * called via Azkaban. Azkaban only allows the cancel method to run for 5 ms before it
* does a hard kill on the process. In order to make sure the staging data still gets
* cleaned-up, the cleanup will take place in the AM.
*/
public class GobblinOutputCommitter extends OutputCommitter {

  // Fixed: the logger was previously created with GobblinOutputFormat.class, mislabeling
  // every log line emitted by this class.
  private static final Logger LOG = LoggerFactory.getLogger(GobblinOutputCommitter.class);

  /** Maps a mapper's task-attempt id to the {@link GobblinMultiTaskAttempt} running its workunits. */
  @Getter
  private final Map<String, GobblinMultiTaskAttempt> attemptIdToMultiTaskAttempt = new ConcurrentHashMap<>();

  /**
   * Cleans up task staging data for every serialized workunit / multi-workunit found under
   * the job's input directory, then deletes the MR working directory, and finally delegates
   * to the default abort behavior. The nested finally blocks ensure each step is attempted
   * even if an earlier one throws.
   */
  @Override
  public void abortJob(JobContext jobContext, JobStatus.State state) throws IOException {
    LOG.info("Aborting Job: " + jobContext.getJobID() + " with state: " + state);
    Configuration conf = jobContext.getConfiguration();
    URI fsUri = URI.create(conf.get(ConfigurationKeys.FS_URI_KEY, ConfigurationKeys.LOCAL_FS_URI));
    FileSystem fs = FileSystem.get(fsUri, conf);
    Path mrJobDir = new Path(conf.get(ConfigurationKeys.MR_JOB_ROOT_DIR_KEY), conf.get(ConfigurationKeys.JOB_NAME_KEY));
    Path jobInputDir = new Path(mrJobDir, MRJobLauncher.INPUT_DIR_NAME);
    if (!fs.exists(jobInputDir) || !fs.isDirectory(jobInputDir)) {
      LOG.warn(String.format("%s either does not exist or is not a directory. No data to cleanup.", jobInputDir));
      return;
    }
    // Iterate through all files in the jobInputDir, each file should correspond to a serialized wu or mwu
    try {
      for (FileStatus status : fs.listStatus(jobInputDir, new WorkUnitFilter())) {
        WorkUnit wu = JobLauncherUtils.createEmptyWorkUnitPerExtension(status.getPath());
        // try-with-resources replaces the previous manual Closer and guarantees the stream closes.
        try (DataInputStream in = new DataInputStream(fs.open(status.getPath()))) {
          wu.readFields(in);
        }
        if (wu instanceof MultiWorkUnit) {
          for (WorkUnit eachWU : ((MultiWorkUnit) wu).getWorkUnits()) {
            JobLauncherUtils.cleanTaskStagingData(new WorkUnitState(eachWU), LOG);
          }
        } else {
          JobLauncherUtils.cleanTaskStagingData(new WorkUnitState(wu), LOG);
        }
      }
    } finally {
      try {
        cleanUpWorkingDirectory(mrJobDir, fs);
      } finally {
        super.abortJob(jobContext, state);
      }
    }
  }

  @Override
  public void abortTask(TaskAttemptContext arg0) throws IOException {}

  /**
   * Commits the {@link GobblinMultiTaskAttempt} registered for this task attempt.
   * Assumes {@link #needsTaskCommit(TaskAttemptContext)} returned true (i.e. the id is present).
   */
  @Override
  public void commitTask(TaskAttemptContext arg0) throws IOException {
    String taskAttemptId = arg0.getTaskAttemptID().toString();
    LOG.info("Committing task attempt: " + taskAttemptId);
    this.attemptIdToMultiTaskAttempt.get(taskAttemptId).commit();
  }

  /** A task needs commit only if a multi-task attempt was registered for its attempt id. */
  @Override
  public boolean needsTaskCommit(TaskAttemptContext arg0) throws IOException {
    return this.attemptIdToMultiTaskAttempt.containsKey(arg0.getTaskAttemptID().toString());
  }

  @Override
  public void setupJob(JobContext arg0) throws IOException {}

  @Override
  public void setupTask(TaskAttemptContext arg0) throws IOException {}

  /**
   * Replicates the default behavior of the {@link OutputCommitter} used by
   * {@link org.apache.hadoop.mapreduce.lib.output.NullOutputFormat}.
   * @return true
   */
  @Override
  public boolean isRecoverySupported() {
    return true;
  }

  /**
   * Replicates the default behavior of the {@link OutputCommitter} used by
   * {@link org.apache.hadoop.mapreduce.lib.output.NullOutputFormat}.
   */
  @Override
  public void recoverTask(TaskAttemptContext taskContext) throws IOException {}

  /**
   * Cleanup the Hadoop MR working directory.
   */
  private static void cleanUpWorkingDirectory(Path mrJobDir, FileSystem fs) throws IOException {
    if (fs.exists(mrJobDir)) {
      fs.delete(mrJobDir, true);
      LOG.info("Deleted working directory " + mrJobDir);
    }
  }

  /** Accepts only paths carrying a recognized workunit file extension. */
  private static class WorkUnitFilter implements PathFilter {
    @Override
    public boolean accept(Path path) {
      return JobLauncherUtils.hasAnyWorkUnitExtension(path);
    }
  }
}
| 1,602 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/mapreduce/MRJobLauncher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.mapreduce;
import java.io.DataOutputStream;
import java.io.FileInputStream;
import java.io.IOException;
import java.net.URI;
import java.text.SimpleDateFormat;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Properties;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.stream.Collectors;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobContext;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.CounterGroup;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobStatus;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.mapreduce.filecache.DistributedCache;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.map.WrappedMapper;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.task.MapContextImpl;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Splitter;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.io.Closer;
import com.google.common.util.concurrent.ServiceManager;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValue;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.broker.SharedResourcesBrokerFactory;
import org.apache.gobblin.broker.gobblin_scopes.GobblinScopeTypes;
import org.apache.gobblin.broker.gobblin_scopes.JobScopeInstance;
import org.apache.gobblin.broker.iface.SharedResourcesBroker;
import org.apache.gobblin.commit.CommitStep;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.DynamicConfigGenerator;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.fsm.FiniteStateMachine;
import org.apache.gobblin.metastore.FsStateStore;
import org.apache.gobblin.metastore.StateStore;
import org.apache.gobblin.metrics.GobblinMetrics;
import org.apache.gobblin.metrics.MultiReporterException;
import org.apache.gobblin.metrics.Tag;
import org.apache.gobblin.metrics.event.CountEventBuilder;
import org.apache.gobblin.metrics.event.JobEvent;
import org.apache.gobblin.metrics.event.JobStateEventBuilder;
import org.apache.gobblin.metrics.event.TimingEvent;
import org.apache.gobblin.metrics.reporter.util.MetricReportUtils;
import org.apache.gobblin.password.PasswordManager;
import org.apache.gobblin.runtime.AbstractJobLauncher;
import org.apache.gobblin.runtime.DynamicConfigGeneratorFactory;
import org.apache.gobblin.runtime.GobblinMultiTaskAttempt;
import org.apache.gobblin.runtime.JobLauncher;
import org.apache.gobblin.runtime.JobState;
import org.apache.gobblin.runtime.Task;
import org.apache.gobblin.runtime.TaskExecutor;
import org.apache.gobblin.runtime.TaskState;
import org.apache.gobblin.runtime.TaskStateCollectorService;
import org.apache.gobblin.runtime.TaskStateTracker;
import org.apache.gobblin.runtime.job.GobblinJobFiniteStateMachine;
import org.apache.gobblin.runtime.job.GobblinJobFiniteStateMachine.JobFSMState;
import org.apache.gobblin.runtime.job.GobblinJobFiniteStateMachine.StateType;
import org.apache.gobblin.runtime.troubleshooter.AutomaticTroubleshooter;
import org.apache.gobblin.runtime.troubleshooter.AutomaticTroubleshooterFactory;
import org.apache.gobblin.runtime.util.JobMetrics;
import org.apache.gobblin.runtime.util.MetricGroup;
import org.apache.gobblin.source.workunit.MultiWorkUnit;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.HadoopUtils;
import org.apache.gobblin.util.JobConfigurationUtils;
import org.apache.gobblin.util.JobLauncherUtils;
import org.apache.gobblin.util.ParallelRunner;
import org.apache.gobblin.util.SerializationUtils;
import org.apache.gobblin.util.reflection.RestrictedFieldAccessingUtils;
/**
* An implementation of {@link JobLauncher} that launches a Gobblin job as a Hadoop MR job.
*
* <p>
* The basic idea of this implementation is to use mappers as containers to run tasks.
 * In the Hadoop MR job, each mapper is responsible for executing one or more tasks.
* A mapper uses its input to get the paths of the files storing serialized work units,
* deserializes the work units and creates tasks, and executes the tasks in a thread
* pool. {@link TaskExecutor} and {@link Task} remain the same as in local single-node
* mode. Each mapper writes out task states upon task completion.
* </p>
*
* @author Yinan Li
*/
@Slf4j
public class MRJobLauncher extends AbstractJobLauncher {
// Marker file dropped into the MR working dir to request a graceful job interruption;
// mappers learn its location via GOBBLIN_JOB_INTERRUPT_PATH_KEY below.
private static final String INTERRUPT_JOB_FILE_NAME = "_INTERRUPT_JOB";
private static final String GOBBLIN_JOB_INTERRUPT_PATH_KEY = "gobblin.jobInterruptPath";
private static final Logger LOG = LoggerFactory.getLogger(MRJobLauncher.class);
// Prefix applied to the Hadoop job name (see the Job.getInstance call in the constructor).
private static final String JOB_NAME_PREFIX = "Gobblin-";
// Sub-directory names under the per-job MR working directory.
private static final String JARS_DIR_NAME = "_jars";
private static final String FILES_DIR_NAME = "_files";
static final String INPUT_DIR_NAME = "input";
private static final String OUTPUT_DIR_NAME = "output";
private static final String SERIALIZE_PREVIOUS_WORKUNIT_STATES_KEY = "MRJobLauncher.serializePreviousWorkunitStates";
private static final boolean DEFAULT_SERIALIZE_PREVIOUS_WORKUNIT_STATES = true;
/**
 * In MR-mode, it is necessary to enable customized progress if speculative execution is required.
 */
private static final String ENABLED_CUSTOMIZED_PROGRESS = "MRJobLauncher.enabledCustomizedProgress";
// Configurations that make uploading of jar files more reliable,
// since multiple Gobblin jobs share the same jar directory.
private static final int MAXIMUM_JAR_COPY_RETRY_TIMES_DEFAULT = 5;
// NOTE(review): "IMCOMPLETE" is a typo for "INCOMPLETE"; the constant is private but may be
// referenced further down this file (outside this view), so the name is intentionally unchanged.
private static final int WAITING_TIME_ON_IMCOMPLETE_UPLOAD = 3000;
// Keys for emitting MR task/attempt counts through the metrics reporting configuration.
public static final String MR_TYPE_KEY = ConfigurationKeys.METRICS_CONFIGURATIONS_PREFIX + "mr.type";
public static final String MAPPER_TASK_NUM_KEY = ConfigurationKeys.METRICS_CONFIGURATIONS_PREFIX + "reporting.mapper.task.num";
public static final String MAPPER_TASK_ATTEMPT_NUM_KEY = ConfigurationKeys.METRICS_CONFIGURATIONS_PREFIX + "reporting.mapper.task.attempt.num";
public static final String REDUCER_TASK_NUM_KEY = ConfigurationKeys.METRICS_CONFIGURATIONS_PREFIX + "reporting.reducer.task.num";
public static final String REDUCER_TASK_ATTEMPT_NUM_KEY = ConfigurationKeys.METRICS_CONFIGURATIONS_PREFIX + "reporting.reducer.task.attempt.num";
private static final Splitter SPLITTER = Splitter.on(',').omitEmptyStrings().trimResults();
private final Configuration conf;
private final FileSystem fs;
// The underlying Hadoop MR job; created in the constructor, submitted in runWorkUnits.
private final Job job;
// Per-job working directory: <MR_JOB_ROOT_DIR>/<jobName>/<jobId>.
private final Path mrJobDir;
private final Path jarsDir;
/** A location to store jars that should not be shared between different jobs. */
private final Path unsharedJarsDir;
// Where serialized workunits (input) and task states (output) live under mrJobDir.
private final Path jobInputPath;
private final Path jobOutputPath;
private final boolean shouldPersistWorkUnitsThenCancel;
private final int parallelRunnerThreads;
// Collects mapper-written TaskStates from the output dir while the MR job runs.
private final TaskStateCollectorService taskStateCollectorService;
// Set to true once job.submit() succeeds; guards kill/close paths.
private volatile boolean hadoopJobSubmitted = false;
private final StateStore<TaskState> taskStateStore;
private final int jarFileMaximumRetry;
// Full path of the interrupt marker file (INTERRUPT_JOB_FILE_NAME under mrJobDir).
private final Path interruptPath;
// Finite state machine coordinating RUNNING/CANCELLED/FAILED/SUCCESS transitions of this launch.
private final GobblinJobFiniteStateMachine fsm;
/** Launches with a fresh Hadoop {@link Configuration}, no shared-resources broker, and no metadata tags. */
public MRJobLauncher(Properties jobProps) throws Exception {
  this(jobProps, null);
}
/** Launches with a fresh Hadoop {@link Configuration} and the given instance broker. */
public MRJobLauncher(Properties jobProps, SharedResourcesBroker<GobblinScopeTypes> instanceBroker) throws Exception {
  this(jobProps, new Configuration(), instanceBroker);
}
/** Launches with the given {@link Configuration} and broker, and no metadata tags. */
public MRJobLauncher(Properties jobProps, Configuration conf, SharedResourcesBroker<GobblinScopeTypes> instanceBroker) throws Exception {
  this(jobProps, conf, instanceBroker, ImmutableList.of());
}
/** Launches with a fresh Hadoop {@link Configuration}, the given broker, and metadata tags. */
public MRJobLauncher(Properties jobProps, SharedResourcesBroker<GobblinScopeTypes> instanceBroker, List<? extends Tag<?>> metadataTags) throws Exception {
  this(jobProps, new Configuration(), instanceBroker, metadataTags);
}
/**
 * Primary constructor: prepares the Hadoop configuration, the per-job working directory
 * layout, the jar staging directory, the Hadoop {@link Job} instance, and the task-state
 * collector. Statement order matters: conf must be fully populated before the FileSystem
 * and Job are created from it.
 */
public MRJobLauncher(Properties jobProps, Configuration conf, SharedResourcesBroker<GobblinScopeTypes> instanceBroker,List<? extends Tag<?>> metadataTags)
    throws Exception {
  super(jobProps, metadataTags);
  // FSM callbacks are bound to this instance's graceful-interrupt and kill handlers.
  this.fsm = GobblinJobFiniteStateMachine.builder().jobState(jobContext.getJobState())
      .interruptGracefully(this::interruptGracefully).killJob(this::killJob).build();
  this.conf = conf;
  // Put job configuration properties into the Hadoop configuration so they are available in the mappers
  JobConfigurationUtils.putPropertiesIntoConfiguration(this.jobProps, this.conf);
  // Let the job and all mappers finish even if some mappers fail
  this.conf.set("mapreduce.map.failures.maxpercent", isMapperFailureFatalEnabled(this.jobProps) ? "0" : "100"); // For Hadoop 2.x
  // Do not cancel delegation tokens after job has completed (HADOOP-7002)
  this.conf.setBoolean("mapreduce.job.complete.cancel.delegation.tokens", false);
  this.fs = buildFileSystem(jobProps, this.conf);
  // Working dir layout: <MR_JOB_ROOT_DIR>/<jobName>/<jobId>
  this.mrJobDir = new Path(
      new Path(this.jobProps.getProperty(ConfigurationKeys.MR_JOB_ROOT_DIR_KEY), this.jobContext.getJobName()),
      this.jobContext.getJobId());
  this.interruptPath = new Path(this.mrJobDir, INTERRUPT_JOB_FILE_NAME);
  // A pre-existing working dir indicates a stale previous run; wipe it before starting.
  if (this.fs.exists(this.mrJobDir)) {
    LOG.warn("Job working directory already exists for job " + this.jobContext.getJobName());
    this.fs.delete(this.mrJobDir, true);
  }
  this.unsharedJarsDir = new Path(this.mrJobDir, JARS_DIR_NAME);
  // Jar dir resolution priority: monthly dir under MR_JARS_BASE_DIR > MR_JARS_DIR > per-job dir.
  if (this.jobProps.containsKey(ConfigurationKeys.MR_JARS_BASE_DIR)) {
    Path jarsBaseDir = new Path(this.jobProps.getProperty(ConfigurationKeys.MR_JARS_BASE_DIR));
    String monthSuffix = new SimpleDateFormat("yyyy-MM").format(System.currentTimeMillis());
    cleanUpOldJarsDirIfRequired(this.fs, jarsBaseDir);
    this.jarsDir = new Path(jarsBaseDir, monthSuffix);
  } else if (this.jobProps.containsKey(ConfigurationKeys.MR_JARS_DIR)) {
    this.jarsDir = new Path(this.jobProps.getProperty(ConfigurationKeys.MR_JARS_DIR));
  } else {
    this.jarsDir = this.unsharedJarsDir;
  }
  this.fs.mkdirs(this.mrJobDir);
  this.jobInputPath = new Path(this.mrJobDir, INPUT_DIR_NAME);
  this.jobOutputPath = new Path(this.mrJobDir, OUTPUT_DIR_NAME);
  Path outputTaskStateDir = new Path(this.jobOutputPath, this.jobContext.getJobId());
  this.shouldPersistWorkUnitsThenCancel = isPersistWorkUnitsThenCancelEnabled(this.jobProps);
  // Finally create the Hadoop job after all updates to conf are already made (including
  // adding dependent jars/files to the DistributedCache that also updates the conf)
  this.job = Job.getInstance(this.conf, JOB_NAME_PREFIX + this.jobContext.getJobName());
  this.parallelRunnerThreads = Integer.parseInt(jobProps.getProperty(ParallelRunner.PARALLEL_RUNNER_THREADS_KEY,
      Integer.toString(ParallelRunner.DEFAULT_PARALLEL_RUNNER_THREADS)));
  // StateStore interface uses the following key (rootDir, storeName, tableName)
  // The state store base is the root directory and the last two elements of the path are used as the storeName and
  // tableName. Create the state store with the root at jobOutputPath. The task state will be stored at
  // jobOutputPath/output/taskState.tst, so output will be the storeName.
  taskStateStore = new FsStateStore<>(this.fs, jobOutputPath.toString(), TaskState.class);
  this.taskStateCollectorService =
      new TaskStateCollectorService(jobProps, this.jobContext.getJobState(), this.eventBus, this.eventSubmitter,
          taskStateStore, outputTaskStateDir, getIssueRepository());
  this.jarFileMaximumRetry =
      jobProps.containsKey(ConfigurationKeys.MAXIMUM_JAR_COPY_RETRY_TIMES_KEY) ? Integer.parseInt(
          jobProps.getProperty(ConfigurationKeys.MAXIMUM_JAR_COPY_RETRY_TIMES_KEY))
          : MAXIMUM_JAR_COPY_RETRY_TIMES_DEFAULT;
  // One of the most common user mistakes is mis-configuring the FileSystem scheme (e.g. file versus hdfs)
  log.info("Configured fs:{}", fs);
  log.debug("Configuration: {}", conf);
  startCancellationExecutor();
}
/**
 * Bounds growth of the shared jar base directory: when more than two month-stamped
 * sub-directories exist, the oldest (first in natural path order) is deleted.
 * No-op if the base directory does not exist yet.
 */
static void cleanUpOldJarsDirIfRequired(FileSystem fs, Path jarsBaseDir) throws IOException {
  FileStatus[] monthDirs = fs.exists(jarsBaseDir) ? fs.listStatus(jarsBaseDir) : new FileStatus[0];
  // FileStatus sorts naturally by path, so the oldest month directory comes first.
  Arrays.sort(monthDirs);
  if (monthDirs.length > 2) {
    fs.delete(monthDirs[0].getPath(), true);
  }
}
/**
 * Kills the Hadoop job if it was submitted and is still running, then cleans up the
 * working directory, closes the parent launcher, and closes the FileSystem handle.
 * The nested finally blocks ensure each later step runs even if an earlier one throws.
 */
@Override
public void close() throws IOException {
  try {
    if (this.hadoopJobSubmitted && !this.job.isComplete()) {
      LOG.info("Killing the Hadoop MR job for job " + this.jobContext.getJobId());
      this.job.killJob();
    }
  } finally {
    try {
      cleanUpWorkingDirectory();
    } finally {
      super.close();
      // NOTE(review): closes the (possibly cached/shared) FileSystem instance — presumably
      // intentional for launcher shutdown; confirm no other component still uses this fs.
      fs.close();
    }
  }
}
/**
 * Runs the given workunits as a Hadoop MR job: prepares the job (staging workunits and
 * dependencies), submits it under FSM control, waits for completion, and converts Hadoop
 * counters into Gobblin metrics. In persist-then-cancel mode, returns right after staging
 * with the job state set to CANCELLED. Always emits a final job-state event, stops the
 * task-state collector, and cleans the working directory in the finally block.
 */
@Override
protected void runWorkUnits(List<WorkUnit> workUnits) throws Exception {
  String jobName = this.jobContext.getJobName();
  JobState jobState = this.jobContext.getJobState();
  try {
    // Report how many workunits this run created.
    CountEventBuilder countEventBuilder = new CountEventBuilder(JobEvent.WORK_UNITS_CREATED, workUnits.size());
    this.eventSubmitter.submit(countEventBuilder);
    LOG.info("Emitting WorkUnitsCreated Count: " + countEventBuilder.getCount());
    prepareHadoopJob(workUnits);
    if (this.shouldPersistWorkUnitsThenCancel) {
      // NOTE: `warn` level is hack for including path among automatic troubleshooter 'issues'
      LOG.warn("Cancelling job after persisting workunits beneath: " + this.jobInputPath);
      jobState.setState(JobState.RunningState.CANCELLED);
      return;
    }
    // Start the output TaskState collector service
    this.taskStateCollectorService.startAsync().awaitRunning();
    LOG.info("Launching Hadoop MR job " + this.job.getJobName());
    // Submit under an FSM transition to RUNNING; a submit failure flips the end state to FAILED.
    try (FiniteStateMachine<JobFSMState>.Transition t = this.fsm.startTransition(this.fsm.getEndStateForType(StateType.RUNNING))) {
      try {
        this.job.submit();
      } catch (Throwable exc) {
        t.changeEndState(this.fsm.getEndStateForType(StateType.FAILED));
        throw exc;
      }
      this.hadoopJobSubmitted = true;
      // Set job tracking URL to the Hadoop job tracking URL if it is not set yet
      if (!jobState.contains(ConfigurationKeys.JOB_TRACKING_URL_KEY)) {
        jobState.setProp(ConfigurationKeys.JOB_TRACKING_URL_KEY, this.job.getTrackingURL());
      }
      /**
       * Catch {@link UnallowedTransitionException} only, leaving other failure while submitting MR jobs to catch
       * block afterwards.
       */
    } catch (FiniteStateMachine.UnallowedTransitionException unallowed) {
      LOG.error("Cannot start MR job.", unallowed);
    }
    // Only wait for completion if the FSM actually reached RUNNING (e.g. not cancelled meanwhile).
    if (this.fsm.getCurrentState().getStateType().equals(StateType.RUNNING)) {
      TimingEvent mrJobRunTimer = this.eventSubmitter.getTimingEvent(TimingEvent.RunJobTimings.MR_JOB_RUN);
      LOG.info(String.format("Waiting for Hadoop MR job %s to complete", this.job.getJobID()));
      this.job.waitForCompletion(true);
      this.fsm.transitionIfAllowed(fsm.getEndStateForType(StateType.SUCCESS));
      mrJobRunTimer.stop(ImmutableMap.of("hadoopMRJobId", this.job.getJobID().toString()));
    }
    if (this.fsm.getCurrentState().getStateType().equals(StateType.CANCELLED)) {
      return;
    }
    // Create a metrics set for this job run from the Hadoop counters.
    // The metrics set is to be persisted to the metrics store later.
    countersToMetrics(JobMetrics.get(jobName, this.jobProps.getProperty(ConfigurationKeys.JOB_ID_KEY)));
  } catch (Throwable t) {
    throw new RuntimeException("The MR job cannot be submitted due to:", t);
  } finally {
    // Always emit a terminal MR-job-state event, even on failure before submission.
    JobStateEventBuilder eventBuilder = new JobStateEventBuilder(JobStateEventBuilder.MRJobState.MR_JOB_STATE);
    if (!hadoopJobSubmitted) {
      eventBuilder.jobTrackingURL = "";
      eventBuilder.status = JobStateEventBuilder.Status.FAILED;
    } else {
      eventBuilder.jobTrackingURL = this.job.getTrackingURL();
      eventBuilder.status = JobStateEventBuilder.Status.SUCCEEDED;
      if (this.job.getJobState() != JobStatus.State.SUCCEEDED) {
        eventBuilder.status = JobStateEventBuilder.Status.FAILED;
      }
    }
    this.eventSubmitter.submit(eventBuilder);
    // The last iteration of output TaskState collecting will run when the collector service gets stopped
    this.taskStateCollectorService.stopAsync().awaitTerminated();
    cleanUpWorkingDirectory();
  }
}
/**
 * Transitions the job FSM to CANCELLED; if the job was RUNNING, kills the underlying
 * Hadoop job first. A failed kill flips the transition end state to FAILED; failed
 * transition callbacks are downgraded to an error end state and closed without callbacks.
 */
@Override
protected void executeCancellation() {
  try (FiniteStateMachine<JobFSMState>.Transition transition =
      this.fsm.startTransition(this.fsm.getEndStateForType(StateType.CANCELLED))) {
    if (transition.getStartState().getStateType().equals(StateType.RUNNING)) {
      try {
        killJob();
      } catch (IOException ioe) {
        // Fixed: the caught exception is now logged (previously the stack trace was dropped).
        LOG.error("Failed to kill the Hadoop MR job for job " + this.jobContext.getJobId(), ioe);
        transition.changeEndState(this.fsm.getEndStateForType(StateType.FAILED));
      }
    }
  } catch (GobblinJobFiniteStateMachine.FailedTransitionCallbackException exc) {
    exc.getTransition().switchEndStateToErrorState();
    exc.getTransition().closeWithoutCallbacks();
  } catch (FiniteStateMachine.UnallowedTransitionException | InterruptedException exc) {
    LOG.error("Failed to cancel job " + this.jobContext.getJobId(), exc);
  }
}
/**
 * Attempts a graceful interruption of the running job by creating the interrupt marker
 * file (which the mappers watch), then waiting up to 30 seconds for the MR job to shut
 * itself down before falling back to a hard kill.
 */
private void interruptGracefully() throws IOException {
  LOG.info("Attempting graceful interruption of job " + this.jobContext.getJobId());
  this.fs.createNewFile(this.interruptPath);
  final long gracePeriodMs = 30 * 1000L;
  long waitTimeStart = System.currentTimeMillis();
  while (!this.job.isComplete() && System.currentTimeMillis() < waitTimeStart + gracePeriodMs) {
    try {
      Thread.sleep(1000);
    } catch (InterruptedException ie) {
      // Fixed: restore the interrupt status so callers up the stack can observe it
      // (previously the interrupt was silently swallowed), then stop waiting.
      Thread.currentThread().interrupt();
      break;
    }
  }
  if (!this.job.isComplete()) {
    LOG.info("Interrupted job did not shut itself down after timeout. Killing job.");
    this.job.killJob();
  }
}
/**
 * Hard-kills the underlying Hadoop MR job, then stops the task-state collector so the
 * final batch of task states written by the mappers is still picked up.
 */
private void killJob() throws IOException {
  LOG.info("Killing the Hadoop MR job for job " + this.jobContext.getJobId());
  this.job.killJob();
  // Collect final task states.
  this.taskStateCollectorService.stopAsync().awaitTerminated();
}
/**
 * Stages the job's dependent jars and files and registers them with the DistributedCache,
 * timing the whole setup as a {@code MR_DISTRIBUTED_CACHE_SETUP} event. Each dependency
 * group is staged only when its corresponding job property is present.
 */
private void addDependencies(Configuration conf) throws IOException {
  TimingEvent distributedCacheSetupTimer =
      this.eventSubmitter.getTimingEvent(TimingEvent.RunJobTimings.MR_DISTRIBUTED_CACHE_SETUP);
  Properties props = this.jobProps;
  // Framework-level jars go on the mapper/reducer classpath.
  if (props.containsKey(ConfigurationKeys.FRAMEWORK_JAR_FILES_KEY)) {
    addJars(this.jarsDir, props.getProperty(ConfigurationKeys.FRAMEWORK_JAR_FILES_KEY), conf);
  }
  // Job-specific jars go on the mapper classpath.
  if (props.containsKey(ConfigurationKeys.JOB_JAR_FILES_KEY)) {
    addJars(this.jarsDir, props.getProperty(ConfigurationKeys.JOB_JAR_FILES_KEY), conf);
  }
  // Local (non-jar) files the job depends on are uploaded under the _files dir.
  if (props.containsKey(ConfigurationKeys.JOB_LOCAL_FILES_KEY)) {
    addLocalFiles(new Path(this.mrJobDir, FILES_DIR_NAME),
        props.getProperty(ConfigurationKeys.JOB_LOCAL_FILES_KEY), conf);
  }
  // Files already on HDFS are registered directly with the DistributedCache.
  if (props.containsKey(ConfigurationKeys.JOB_HDFS_FILES_KEY)) {
    addHDFSFiles(props.getProperty(ConfigurationKeys.JOB_HDFS_FILES_KEY), conf);
  }
  // Jars already on HDFS are added to the mapper classpath without re-uploading.
  if (props.containsKey(ConfigurationKeys.JOB_JAR_HDFS_FILES_KEY)) {
    addHdfsJars(props.getProperty(ConfigurationKeys.JOB_JAR_HDFS_FILES_KEY), conf);
  }
  distributedCacheSetupTimer.stop();
}
  /**
   * Prepare the Hadoop MR job, including configuring the job and setting up the input/output paths.
   *
   * <p>The job is mapper-only: each mapper ({@code TaskRunner}) reads work unit files from the
   * job input path, runs them, and writes serialized task states to the job output path.
   * The whole step is timed with the MR_JOB_SETUP timing event.</p>
   *
   * @param workUnits the work units to serialize into the job input directory
   * @throws IOException if writing work units or the job state to HDFS fails
   */
  private void prepareHadoopJob(List<WorkUnit> workUnits) throws IOException {
    TimingEvent mrJobSetupTimer = this.eventSubmitter.getTimingEvent(TimingEvent.RunJobTimings.MR_JOB_SETUP);
    // Add dependent jars/files
    addDependencies(this.job.getConfiguration());
    this.job.setJarByClass(MRJobLauncher.class);
    this.job.setMapperClass(TaskRunner.class);
    // The job is mapper-only
    this.job.setNumReduceTasks(0);
    this.job.setInputFormatClass(GobblinWorkUnitsInputFormat.class);
    this.job.setOutputFormatClass(GobblinOutputFormat.class);
    this.job.setMapOutputKeyClass(NullWritable.class);
    this.job.setMapOutputValueClass(NullWritable.class);
    // Set speculative execution
    this.job.setSpeculativeExecution(isSpeculativeExecutionEnabled(this.jobProps));
    // Make user-supplied jars take precedence over the cluster's on the task classpath.
    this.job.getConfiguration().set("mapreduce.job.user.classpath.first", "true");
    // Job input path is where input work unit files are stored
    // Prepare job input
    prepareJobInput(workUnits);
    FileInputFormat.addInputPath(this.job, this.jobInputPath);
    // Job output path is where serialized task states are stored
    FileOutputFormat.setOutputPath(this.job, this.jobOutputPath);
    // Serialize source state to a file which will be picked up by the mappers
    serializeJobState(this.fs, this.mrJobDir, this.conf, this.jobContext.getJobState(), this.job);
    // Optionally cap the number of mappers.
    if (this.jobProps.containsKey(ConfigurationKeys.MR_JOB_MAX_MAPPERS_KEY)) {
      GobblinWorkUnitsInputFormat.setMaxMappers(this.job,
          Integer.parseInt(this.jobProps.getProperty(ConfigurationKeys.MR_JOB_MAX_MAPPERS_KEY)));
    }
    // Tell the mappers where to poll for the driver's interrupt marker file.
    this.job.getConfiguration().set(GOBBLIN_JOB_INTERRUPT_PATH_KEY, this.interruptPath.toString());
    mrJobSetupTimer.stop();
  }
static boolean isBooleanPropEnabled(Properties props, String propKey, Optional<Boolean> optDefault) {
return (props.containsKey(propKey) && Boolean.parseBoolean(props.getProperty(propKey)))
|| (optDefault.isPresent() && optDefault.get());
}
  /** Whether MR speculative execution is enabled for this job (falls back to Gobblin's default). */
  static boolean isSpeculativeExecutionEnabled(Properties props) {
    return isBooleanPropEnabled(props, JobContext.MAP_SPECULATIVE,
        Optional.of(ConfigurationKeys.DEFAULT_ENABLE_MR_SPECULATIVE_EXECUTION));
  }

  /** Whether customized mapper progress reporting is enabled (defaults to false when unset). */
  static boolean isCustomizedProgressReportEnabled(Properties properties) {
    return isBooleanPropEnabled(properties, ENABLED_CUSTOMIZED_PROGRESS, Optional.empty());
  }

  /** Whether a mapper failure should be treated as fatal for the whole MR job. */
  static boolean isMapperFailureFatalEnabled(Properties props) {
    return isBooleanPropEnabled(props, ConfigurationKeys.MR_JOB_MAPPER_FAILURE_IS_FATAL_KEY,
        Optional.of(ConfigurationKeys.DEFAULT_MR_JOB_MAPPER_FAILURE_IS_FATAL));
  }

  /** Whether persisted work units should be preserved (rather than deleted) on cancellation. */
  static boolean isPersistWorkUnitsThenCancelEnabled(Properties props) {
    return isBooleanPropEnabled(props, ConfigurationKeys.MR_PERSIST_WORK_UNITS_THEN_CANCEL_KEY,
        Optional.of(ConfigurationKeys.DEFAULT_MR_PERSIST_WORK_UNITS_THEN_CANCEL));
  }
@VisibleForTesting
static void serializeJobState(FileSystem fs, Path mrJobDir, Configuration conf, JobState jobState, Job job)
throws IOException {
Path jobStateFilePath = new Path(mrJobDir, JOB_STATE_FILE_NAME);
// Write the job state with an empty task set (work units are read by the mapper from a different file)
try (DataOutputStream dataOutputStream = new DataOutputStream(fs.create(jobStateFilePath))) {
jobState.write(dataOutputStream, false,
conf.getBoolean(SERIALIZE_PREVIOUS_WORKUNIT_STATES_KEY, DEFAULT_SERIALIZE_PREVIOUS_WORKUNIT_STATES));
}
job.getConfiguration().set(ConfigurationKeys.JOB_STATE_FILE_PATH_KEY, jobStateFilePath.toString());
DistributedCache.addCacheFile(jobStateFilePath.toUri(), job.getConfiguration());
job.getConfiguration().set(ConfigurationKeys.JOB_STATE_DISTRIBUTED_CACHE_NAME, jobStateFilePath.getName());
}
/**
* Add framework or job-specific jars to the classpath through DistributedCache
* so the mappers can use them.
*/
@SuppressWarnings("deprecation")
private void addJars(Path jarFileDir, String jarFileList, Configuration conf) throws IOException {
LocalFileSystem lfs = FileSystem.getLocal(conf);
for (String jarFile : SPLITTER.split(jarFileList)) {
Path srcJarFile = new Path(jarFile);
FileStatus[] fileStatusList = lfs.globStatus(srcJarFile);
for (FileStatus status : fileStatusList) {
// For each FileStatus there are chances it could fail in copying at the first attempt, due to file-existence
// or file-copy is ongoing by other job instance since all Gobblin jobs share the same jar file directory.
// the retryCount is to avoid cases (if any) where retry is going too far and causes job hanging.
int retryCount = 0;
boolean shouldFileBeAddedIntoDC = true;
Path destJarFile = calculateDestJarFile(status, jarFileDir);
// Adding destJarFile into HDFS until it exists and the size of file on targetPath matches the one on local path.
while (!this.fs.exists(destJarFile) || fs.getFileStatus(destJarFile).getLen() != status.getLen()) {
try {
if (this.fs.exists(destJarFile) && fs.getFileStatus(destJarFile).getLen() != status.getLen()) {
Thread.sleep(WAITING_TIME_ON_IMCOMPLETE_UPLOAD);
throw new IOException("Waiting for file to complete on uploading ... ");
}
// Set the first parameter as false for not deleting sourceFile
// Set the second parameter as false for not overwriting existing file on the target, by default it is true.
// If the file is preExisted but overwrite flag set to false, then an IOException if thrown.
this.fs.copyFromLocalFile(false, false, status.getPath(), destJarFile);
} catch (IOException | InterruptedException e) {
LOG.warn("Path:" + destJarFile + " is not copied successfully. Will require retry.");
retryCount += 1;
if (retryCount >= this.jarFileMaximumRetry) {
LOG.error("The jar file:" + destJarFile + "failed in being copied into hdfs", e);
// If retry reaches upper limit, skip copying this file.
shouldFileBeAddedIntoDC = false;
break;
}
}
}
if (shouldFileBeAddedIntoDC) {
// Then add the jar file on HDFS to the classpath
LOG.info(String.format("Adding %s to classpath", destJarFile));
DistributedCache.addFileToClassPath(destJarFile, conf, this.fs);
}
}
}
}
/**
* Calculate the target filePath of the jar file to be copied on HDFS,
* given the {@link FileStatus} of a jarFile and the path of directory that contains jar.
*/
private Path calculateDestJarFile(FileStatus status, Path jarFileDir) {
// SNAPSHOT jars should not be shared, as different jobs may be using different versions of it
Path baseDir = status.getPath().getName().contains("SNAPSHOT") ? this.unsharedJarsDir : jarFileDir;
// DistributedCache requires absolute path, so we need to use makeQualified.
return new Path(this.fs.makeQualified(baseDir), status.getPath().getName());
}
/**
* Add local non-jar files the job depends on to DistributedCache.
*/
@SuppressWarnings("deprecation")
private void addLocalFiles(Path jobFileDir, String jobFileList, Configuration conf) throws IOException {
DistributedCache.createSymlink(conf);
for (String jobFile : SPLITTER.split(jobFileList)) {
Path srcJobFile = new Path(jobFile);
// DistributedCache requires absolute path, so we need to use makeQualified.
Path destJobFile = new Path(this.fs.makeQualified(jobFileDir), srcJobFile.getName());
// Copy the file from local file system to HDFS
this.fs.copyFromLocalFile(srcJobFile, destJobFile);
// Create a URI that is in the form path#symlink
URI destFileUri = URI.create(destJobFile.toUri().getPath() + "#" + destJobFile.getName());
LOG.info(String.format("Adding %s to DistributedCache", destFileUri));
// Finally add the file to DistributedCache with a symlink named after the file name
DistributedCache.addCacheFile(destFileUri, conf);
}
}
/**
* Add non-jar files already on HDFS that the job depends on to DistributedCache.
*/
@SuppressWarnings("deprecation")
private void addHDFSFiles(String jobFileList, Configuration conf) {
DistributedCache.createSymlink(conf);
jobFileList = PasswordManager.getInstance(this.jobProps).readPassword(jobFileList);
for (String jobFile : SPLITTER.split(jobFileList)) {
Path srcJobFile = new Path(jobFile);
// Create a URI that is in the form path#symlink
URI srcFileUri = URI.create(srcJobFile.toUri().getPath() + "#" + srcJobFile.getName());
LOG.info(String.format("Adding %s to DistributedCache", srcFileUri));
// Finally add the file to DistributedCache with a symlink named after the file name
DistributedCache.addCacheFile(srcFileUri, conf);
}
}
private void addHdfsJars(String hdfsJarFileList, Configuration conf) throws IOException {
for (String jarFile : SPLITTER.split(hdfsJarFileList)) {
FileStatus[] status = this.fs.listStatus(new Path(jarFile));
for (FileStatus fileStatus : status) {
if (!fileStatus.isDirectory()) {
Path path = new Path(jarFile, fileStatus.getPath().getName());
LOG.info(String.format("Adding %s to classpath", path));
DistributedCache.addFileToClassPath(path, conf, this.fs);
}
}
}
}
/**
* Prepare the job input.
* @throws IOException
*/
private void prepareJobInput(List<WorkUnit> workUnits) throws IOException {
Closer closer = Closer.create();
try {
ParallelRunner parallelRunner = closer.register(new ParallelRunner(this.parallelRunnerThreads, this.fs));
int multiTaskIdSequence = 0;
// Serialize each work unit into a file named after the task ID
for (WorkUnit workUnit : workUnits) {
String workUnitFileName;
if (workUnit.isMultiWorkUnit()) {
workUnitFileName = JobLauncherUtils.newMultiTaskId(this.jobContext.getJobId(), multiTaskIdSequence++)
+ JobLauncherUtils.MULTI_WORK_UNIT_FILE_EXTENSION;
} else {
workUnitFileName = workUnit.getProp(ConfigurationKeys.TASK_ID_KEY) + JobLauncherUtils.WORK_UNIT_FILE_EXTENSION;
}
Path workUnitFile = new Path(this.jobInputPath, workUnitFileName);
LOG.debug("Writing work unit file " + workUnitFileName);
parallelRunner.serializeToFile(workUnit, workUnitFile);
// Append the work unit file path to the job input file
}
} catch (Throwable t) {
throw closer.rethrow(t);
} finally {
closer.close();
}
}
/**
* Cleanup the Hadoop MR working directory.
*/
private void cleanUpWorkingDirectory() {
try {
if (this.fs.exists(this.mrJobDir)) {
if (this.shouldPersistWorkUnitsThenCancel) {
LOG.info("Preserving persisted workunits beneath: " + this.jobInputPath);
} else {
this.fs.delete(this.mrJobDir, true);
LOG.info("Deleted working directory " + this.mrJobDir);
}
}
} catch (IOException ioe) {
LOG.error("Failed to delete working directory " + this.mrJobDir);
}
}
/**
* Create a {@link org.apache.gobblin.metrics.GobblinMetrics} instance for this job run from the Hadoop counters.
*/
@VisibleForTesting
void countersToMetrics(GobblinMetrics metrics) throws IOException {
Optional<Counters> counters = Optional.ofNullable(this.job.getCounters());
if (counters.isPresent()) {
// Write job-level counters
CounterGroup jobCounterGroup = counters.get().getGroup(MetricGroup.JOB.name());
for (Counter jobCounter : jobCounterGroup) {
metrics.getCounter(jobCounter.getName()).inc(jobCounter.getValue());
}
// Write task-level counters
CounterGroup taskCounterGroup = counters.get().getGroup(MetricGroup.TASK.name());
for (Counter taskCounter : taskCounterGroup) {
metrics.getCounter(taskCounter.getName()).inc(taskCounter.getValue());
}
}
}
private static FileSystem buildFileSystem(Properties jobProps, Configuration configuration) throws IOException {
URI fsUri = URI.create(jobProps.getProperty(ConfigurationKeys.FS_URI_KEY, ConfigurationKeys.LOCAL_FS_URI));
return FileSystem.newInstance(fsUri, configuration);
}
  /**
   * The mapper class that runs assigned {@link WorkUnit}s.
   *
   * <p>
   * The {@link #map} method de-serializes a {@link WorkUnit} (maybe a {@link MultiWorkUnit})
   * from each input file and add the {@link WorkUnit} (or a list of {@link WorkUnit}s if it
   * is a {@link MultiWorkUnit} to the list of {@link WorkUnit}s to run. The {@link #run} method
   * actually runs the list of {@link WorkUnit}s in the {@link TaskExecutor}. This allows the
   * {@link WorkUnit}s to be run in parallel if the {@link TaskExecutor} is configured to have
   * more than one thread in its thread pool.
   * </p>
   */
  public static class TaskRunner extends Mapper<LongWritable, Text, NullWritable, NullWritable> {
    // Filesystem used to read work unit files and to poll the driver's interrupt marker.
    private FileSystem fs;
    // Store where this mapper persists TaskStates (backed by the MR job output directory).
    private StateStore<TaskState> taskStateStore;
    private TaskExecutor taskExecutor;
    private TaskStateTracker taskStateTracker;
    // Manages the lifecycle of taskExecutor and taskStateTracker together.
    private ServiceManager serviceManager;
    private Optional<JobMetrics> jobMetrics = Optional.empty();
    private boolean isSpeculativeEnabled;
    private boolean customizedProgressEnabled;
    // Deserialized from the job state file shipped via DistributedCache in setup().
    private final JobState jobState = new JobState();
    private CustomizedProgresser customizedProgresser;
    private static final String CUSTOMIZED_PROGRESSER_FACTORY_CLASS = "customizedProgresser.factoryClass";
    private static final String DEFAULT_CUSTOMIZED_PROGRESSER_FACTORY_CLASS =
        "org.apache.gobblin.runtime.mapreduce.CustomizedProgresserBase$BaseFactory";
    // A list of WorkUnits (flattened for MultiWorkUnits) to be run by this mapper
    private final List<WorkUnit> workUnits = Lists.newArrayList();
    private AutomaticTroubleshooter troubleshooter;

    /**
     * One-time mapper initialization: starts the troubleshooter, instantiates the customized
     * progresser, deserializes the job state from DistributedCache, applies dynamic config,
     * records MR task/attempt identifiers on the job state, starts the task executor and
     * state tracker services, and (optionally) starts metrics reporting.
     * Wraps checked failures in {@link RuntimeException} since the Mapper API disallows them here.
     */
    @Override
    protected void setup(Context context) {
      final State gobblinJobState = HadoopUtils.getStateFromConf(context.getConfiguration());
      TaskAttemptID taskAttemptID = context.getTaskAttemptID();
      troubleshooter =
          AutomaticTroubleshooterFactory.createForJob(ConfigUtils.propertiesToConfig(gobblinJobState.getProperties()));
      troubleshooter.start();
      try (Closer closer = Closer.create()) {
        // Default for customizedProgressEnabled is false.
        this.customizedProgressEnabled = isCustomizedProgressReportEnabled(gobblinJobState.getProperties());
        this.isSpeculativeEnabled = isSpeculativeExecutionEnabled(gobblinJobState.getProperties());
        String factoryClassName = gobblinJobState.getProperties().getProperty(
            CUSTOMIZED_PROGRESSER_FACTORY_CLASS, DEFAULT_CUSTOMIZED_PROGRESSER_FACTORY_CLASS);
        this.customizedProgresser = Class.forName(factoryClassName).asSubclass(CustomizedProgresser.Factory.class)
            .newInstance().createCustomizedProgresser(context);
        this.fs = FileSystem.get(context.getConfiguration());
        this.taskStateStore =
            new FsStateStore<>(this.fs, FileOutputFormat.getOutputPath(context).toUri().getPath(), TaskState.class);
        // Locate and deserialize the job state file that the driver placed in DistributedCache.
        String jobStateFileName = context.getConfiguration().get(ConfigurationKeys.JOB_STATE_DISTRIBUTED_CACHE_NAME);
        Optional<URI> jobStateFileUri = getStateFileUriForJob(context.getConfiguration(), jobStateFileName);
        if (jobStateFileUri.isPresent()) {
          SerializationUtils.deserializeStateFromInputStream(
              closer.register(new FileInputStream(jobStateFileUri.get().getPath())), this.jobState);
        } else {
          throw new IOException("Job state file not found: '" + jobStateFileName + "'.");
        }
      } catch (IOException | ReflectiveOperationException e) {
        throw new RuntimeException("Failed to setup the mapper task", e);
      }
      // load dynamic configuration to add to the job configuration
      Configuration configuration = context.getConfiguration();
      Config jobStateAsConfig = ConfigUtils.propertiesToConfig(this.jobState.getProperties());
      DynamicConfigGenerator dynamicConfigGenerator = DynamicConfigGeneratorFactory.createDynamicConfigGenerator(
          jobStateAsConfig);
      Config dynamicConfig = dynamicConfigGenerator.generateDynamicConfig(jobStateAsConfig);
      // add the dynamic config to the job config; mirrored into all three state holders so
      // the job state, the Hadoop configuration, and the conf-derived state stay consistent.
      for (Map.Entry<String, ConfigValue> entry : dynamicConfig.entrySet()) {
        this.jobState.setProp(entry.getKey(), entry.getValue().unwrapped().toString());
        configuration.set(entry.getKey(), entry.getValue().unwrapped().toString());
        gobblinJobState.setProp(entry.getKey(), entry.getValue().unwrapped().toString());
      }
      // add some more MR task related configs
      String[] tokens = taskAttemptID.toString().split("_");
      TaskType taskType = taskAttemptID.getTaskType();
      gobblinJobState.setProp(MR_TYPE_KEY, taskType.name());
      // a task attempt id should be like 'attempt_1592863931636_2371636_m_000003_4'
      if (tokens.length == 6) {
        if (taskType.equals(TaskType.MAP)) {
          gobblinJobState.setProp(MAPPER_TASK_NUM_KEY, tokens[tokens.length - 2]);
          gobblinJobState.setProp(MAPPER_TASK_ATTEMPT_NUM_KEY, tokens[tokens.length - 1]);
        } else if (taskType.equals(TaskType.REDUCE)) {
          gobblinJobState.setProp(REDUCER_TASK_NUM_KEY, tokens[tokens.length - 2]);
          gobblinJobState.setProp(REDUCER_TASK_ATTEMPT_NUM_KEY, tokens[tokens.length - 1]);
        }
      }
      this.taskExecutor = new TaskExecutor(configuration);
      this.taskStateTracker = new MRTaskStateTracker(context);
      this.serviceManager = new ServiceManager(Lists.newArrayList(this.taskExecutor, this.taskStateTracker));
      try {
        this.serviceManager.startAsync().awaitHealthy(5, TimeUnit.SECONDS);
      } catch (TimeoutException te) {
        LOG.error("Timed out while waiting for the service manager to start up", te);
        throw new RuntimeException(te);
      }
      // Setup and start metrics reporting if metric reporting is enabled
      if (Boolean.parseBoolean(configuration.get(ConfigurationKeys.METRICS_ENABLED_KEY, ConfigurationKeys.DEFAULT_METRICS_ENABLED))) {
        this.jobMetrics = Optional.of(JobMetrics.get(this.jobState));
        try {
          this.jobMetrics.get().startMetricReportingWithFileSuffix(gobblinJobState, taskAttemptID.toString());
        } catch (MultiReporterException ex) {
          //Fail the task if metric/event reporting failure is configured to be fatal.
          boolean isMetricReportingFailureFatal = configuration.getBoolean(ConfigurationKeys.GOBBLIN_TASK_METRIC_REPORTING_FAILURE_FATAL,
              ConfigurationKeys.DEFAULT_GOBBLIN_TASK_METRIC_REPORTING_FAILURE_FATAL);
          boolean isEventReportingFailureFatal = configuration.getBoolean(ConfigurationKeys.GOBBLIN_TASK_EVENT_REPORTING_FAILURE_FATAL,
              ConfigurationKeys.DEFAULT_GOBBLIN_TASK_EVENT_REPORTING_FAILURE_FATAL);
          if (MetricReportUtils.shouldThrowException(LOG, ex, isMetricReportingFailureFatal, isEventReportingFailureFatal)) {
            throw new RuntimeException(ex);
          }
        }
      }
      AbstractJobLauncher.setDefaultAuthenticator(this.jobState.getProperties());
    }

    /**
     * Main mapper loop: aborts early if the driver's interrupt marker exists, otherwise
     * collects all work units from the input records, runs them through
     * {@link GobblinMultiTaskAttempt}, and performs (or defers, under speculative execution)
     * the service shutdown and metrics cleanup.
     */
    @Override
    public void run(Context context) throws IOException, InterruptedException {
      this.setup(context);
      Path interruptPath = new Path(context.getConfiguration().get(GOBBLIN_JOB_INTERRUPT_PATH_KEY));
      if (this.fs.exists(interruptPath)) {
        LOG.info(String.format("Found interrupt path %s indicating the driver has interrupted the job, aborting mapper.", interruptPath));
        return;
      }
      GobblinMultiTaskAttempt gobblinMultiTaskAttempt = null;
      try {
        // De-serialize and collect the list of WorkUnits to run
        while (context.nextKeyValue()) {
          this.map(context.getCurrentKey(), context.getCurrentValue(), context);
        }
        // org.apache.hadoop.util.Progress.complete will set the progress to 1.0f eventually so we don't have to
        // set it in finally block.
        if (customizedProgressEnabled) {
          setProgressInMapper(customizedProgresser.getCustomizedProgress(), context);
        }
        // Under speculative execution commits are deferred to the output committer (CUSTOMIZED);
        // otherwise each attempt commits immediately.
        GobblinMultiTaskAttempt.CommitPolicy multiTaskAttemptCommitPolicy =
            isSpeculativeEnabled ? GobblinMultiTaskAttempt.CommitPolicy.CUSTOMIZED
                : GobblinMultiTaskAttempt.CommitPolicy.IMMEDIATE;
        SharedResourcesBroker<GobblinScopeTypes> globalBroker =
            SharedResourcesBrokerFactory.createDefaultTopLevelBroker(
                ConfigFactory.parseProperties(this.jobState.getProperties()),
                GobblinScopeTypes.GLOBAL.defaultScopeInstance());
        SharedResourcesBroker<GobblinScopeTypes> jobBroker =
            globalBroker.newSubscopedBuilder(new JobScopeInstance(this.jobState.getJobName(), this.jobState.getJobId()))
                .build();
        // Actually run the list of WorkUnits; the lambda lets the attempt poll the interrupt
        // marker so it can stop early when the driver interrupts the job.
        gobblinMultiTaskAttempt =
            GobblinMultiTaskAttempt.runWorkUnits(this.jobState.getJobId(), context.getTaskAttemptID().toString(),
                this.jobState, this.workUnits, this.taskStateTracker, this.taskExecutor, this.taskStateStore,
                multiTaskAttemptCommitPolicy, jobBroker, troubleshooter.getIssueRepository(), (gmta) -> {
                  try {
                    return this.fs.exists(interruptPath);
                  } catch (IOException ioe) {
                    return false;
                  }
                });
        if (this.isSpeculativeEnabled) {
          LOG.info("will not commit in task attempt");
          GobblinOutputCommitter gobblinOutputCommitter = (GobblinOutputCommitter) context.getOutputCommitter();
          gobblinOutputCommitter.getAttemptIdToMultiTaskAttempt()
              .put(context.getTaskAttemptID().toString(), gobblinMultiTaskAttempt);
        }
      } finally {
        try {
          troubleshooter.refineIssues();
          troubleshooter.logIssueSummary();
          troubleshooter.stop();
        } catch (Exception e) {
          LOG.error("Failed to report issues from automatic troubleshooter", e);
        }
        // Shuts down the executor/tracker services and stops metrics reporting.
        CommitStep cleanUpCommitStep = new CommitStep() {
          @Override
          public boolean isCompleted() throws IOException {
            return !serviceManager.isHealthy();
          }

          @Override
          public void execute() throws IOException {
            LOG.info("Starting the clean-up steps.");
            try {
              serviceManager.stopAsync().awaitStopped(5, TimeUnit.SECONDS);
            } catch (TimeoutException te) {
              // Ignored
            } finally {
              if (jobMetrics.isPresent()) {
                try {
                  jobMetrics.get().stopMetricsReporting();
                } catch (Throwable throwable) {
                  LOG.error("Failed to stop job metrics reporting.", throwable);
                } finally {
                  GobblinMetrics.remove(jobMetrics.get().getName());
                }
              }
            }
          }
        };
        // Under speculative execution the cleanup is deferred to the surviving attempt;
        // otherwise (or if no attempt was created) clean up right away.
        if (!this.isSpeculativeEnabled || gobblinMultiTaskAttempt == null) {
          cleanUpCommitStep.execute();
        } else {
          LOG.info("Adding additional commit step");
          gobblinMultiTaskAttempt.addCleanupCommitStep(cleanUpCommitStep);
        }
      }
    }

    /**
     * Collects (rather than runs) work units: each input record's value is the path of a
     * work unit / multi work unit file, which is loaded and flattened into {@link #workUnits}.
     */
    @Override
    public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
      this.workUnits.addAll(JobLauncherUtils.loadFlattenedWorkUnits(this.fs, new Path(value.toString())));
    }

    /** @return {@link URI} if a distributed cache file matches `jobStateFileName` */
    protected Optional<URI> getStateFileUriForJob(Configuration conf, String jobStateFileName) throws IOException {
      for (Path dcPath : DistributedCache.getLocalCacheFiles(conf)) {
        if (dcPath.getName().equals(jobStateFileName)) {
          return Optional.of(dcPath.toUri());
        }
      }
      return Optional.empty();
    }

    /**
     * Setting progress within implementation of {@link Mapper} for reporting progress.
     * Gobblin (when running in MR mode) used to report progress only in {@link GobblinWorkUnitsInputFormat} while
     * deserializing {@link WorkUnit} in MapReduce job. In that scenario, whenever workunit is deserialized (but not yet
     * executed) the progress will be reported as 1.0f. This could implicitly disable the feature of speculative-execution
     * provided by MR-framework as the latter is looking at the progress to determine if speculative-execution is necessary
     * to trigger or not.
     *
     * Different application of Gobblin should have customized logic on calculating progress.
     */
    void setProgressInMapper(float progress, Context context) {
      try {
        // The task reporter is not exposed by the public Mapper API, so it is reached via
        // reflection through the wrapped context's internal "mapContext"/"reporter" fields.
        WrappedMapper.Context wrappedContext = ((WrappedMapper.Context) context);
        Object contextImpl = RestrictedFieldAccessingUtils.getRestrictedFieldByReflection(wrappedContext, "mapContext", wrappedContext.getClass());
        ((org.apache.hadoop.mapred.Task.TaskReporter)RestrictedFieldAccessingUtils
            .getRestrictedFieldByReflectionRecursively(contextImpl, "reporter", MapContextImpl.class)).setProgress(progress);
      } catch (NoSuchFieldException | IllegalAccessException e) {
        throw new RuntimeException(e);
      }
    }
  }
}
| 1,603 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/mapreduce/MRTask.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.mapreduce;
import com.google.common.collect.Maps;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.metrics.event.EventSubmitter;
import org.apache.gobblin.runtime.TaskContext;
import org.apache.gobblin.runtime.task.BaseAbstractTask;
import java.io.IOException;
import java.util.Map;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang.exception.ExceptionUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobStatus;
/**
* A task that runs an MR job.
*
* Usage:
* TaskUtils.setTaskFactoryClass(workUnit, MRTaskFactory.class);
* MRTask.serializeJobToState(workUnit, myJob);
*
* Subclasses can override {@link #createJob()} to customize the way the MR job is prepared.
*/
@Slf4j
public class MRTask extends BaseAbstractTask {

  private static final String JOB_CONFIGURATION_PREFIX = "MRTask.jobConfiguration.";

  /** Event names and metadata keys emitted for MR job lifecycle tracking. */
  public static class Events {
    public static final String MR_JOB_STARTED_EVENT = "MRJobStarted";
    public static final String MR_JOB_SUCCESSFUL = "MRJobSuccessful";
    public static final String MR_JOB_FAILED = "MRJobFailed";
    public static final String MR_JOB_SKIPPED = "MRJobSkipped";

    public static final String JOB_URL = "jobTrackingUrl";
    public static final String FAILURE_CONTEXT = "failureContext";
  }

  /**
   * Copy every entry of {@code job}'s Hadoop configuration into {@code state} under the
   * {@link #JOB_CONFIGURATION_PREFIX}, so {@link #createJob()} can reconstruct it later.
   */
  public static void serializeJobToState(State state, Job job) {
    for (Map.Entry<String, String> entry : job.getConfiguration()) {
      state.setProp(JOB_CONFIGURATION_PREFIX + entry.getKey(), entry.getValue());
    }
  }

  protected final TaskContext taskContext;
  private final EventSubmitter eventSubmitter;
  // The submitted Hadoop job; set once the job has run (for inspection by subclasses/tests).
  protected Job mrJob;

  public MRTask(TaskContext taskContext) {
    super(taskContext);
    this.taskContext = taskContext;
    this.eventSubmitter = new EventSubmitter.Builder(this.metricContext, "gobblin.MRTask")
        .addMetadata(additionalEventMetadata()).build();
  }

  /**
   * Record the outcome of the MR job on this task's working state.
   *
   * @param isSuccess whether the MR job completed successfully
   * @param t the failure cause; may be null even on failure
   */
  public void onMRTaskComplete (boolean isSuccess, Throwable t) throws RuntimeException {
    if (isSuccess) {
      this.workingState = WorkUnitState.WorkingState.SUCCESSFUL;
    } else if (t == null) {
      this.workingState = WorkUnitState.WorkingState.FAILED;
    } else {
      log.error ("Failed to run MR job with exception {}", ExceptionUtils.getStackTrace(t));
      this.workingState = WorkUnitState.WorkingState.FAILED;
    }
  }

  @Override
  public void commit() {
    // The working state was already decided in onMRTaskComplete(); nothing left to do here.
    log.debug ("State is set to {} inside onMRTaskComplete.", this.workingState);
  }

  /**
   * Create, submit, and wait for the MR job, emitting lifecycle events and recording the
   * outcome via {@link #onMRTaskComplete(boolean, Throwable)}. A null job from
   * {@link #createJob()} marks the task successful and skips execution.
   */
  @Override
  public void run() {
    try {
      Job job = createJob();

      if (job == null) {
        log.info("No MR job created. Skipping.");
        this.workingState = WorkUnitState.WorkingState.SUCCESSFUL;
        this.eventSubmitter.submit(Events.MR_JOB_SKIPPED);
        onSkippedMRJob();
        return;
      }
      job.submit();

      log.info("MR tracking URL {} for job {}", job.getTrackingURL(), job.getJobName());
      this.eventSubmitter.submit(Events.MR_JOB_STARTED_EVENT, Events.JOB_URL, job.getTrackingURL());
      job.waitForCompletion(true);
      this.mrJob = job;

      if (job.isSuccessful()) {
        this.eventSubmitter.submit(Events.MR_JOB_SUCCESSFUL, Events.JOB_URL, job.getTrackingURL());
        this.onMRTaskComplete(true, null);
      } else {
        this.eventSubmitter.submit(Events.MR_JOB_FAILED, Events.JOB_URL, job.getTrackingURL());
        JobStatus jobStatus = job.getStatus();
        // Reuse the status fetched above: the original code called job.getStatus() a second
        // time in the non-null branch, defeating the null guard and issuing a redundant call.
        this.onMRTaskComplete (false,
            new IOException(String.format("MR Job:%s is not successful, failure info: %s",
                job.getTrackingURL(), jobStatus == null ? "Job status not available to inspect for this failing instance."
                : jobStatus.getFailureInfo())));
      }
    } catch (Throwable t) {
      log.error("Failed to run MR job.", t);
      this.eventSubmitter.submit(Events.MR_JOB_FAILED, Events.FAILURE_CONTEXT, t.getMessage());
      this.onMRTaskComplete (false, t);
    }
  }

  /** Metadata attached to every event submitted by this task; subclasses may override. */
  protected Map<String, String> additionalEventMetadata() {
    return Maps.newHashMap();
  }

  /**
   * Create the {@link Job} to run in this task, restoring the configuration entries previously
   * stored by {@link #serializeJobToState(State, Job)} from the task state.
   * @return the {@link Job} to run. If this method returns null, no job will be run and the task will be marked as successful.
   */
  protected Job createJob() throws IOException {
    Job job = Job.getInstance(new Configuration());
    for (Map.Entry<Object, Object> entry : this.taskContext.getTaskState().getProperties().entrySet()) {
      if (entry.getKey() instanceof String && ((String) entry.getKey()).startsWith(JOB_CONFIGURATION_PREFIX)) {
        String actualKey = ((String) entry.getKey()).substring(JOB_CONFIGURATION_PREFIX.length());
        job.getConfiguration().set(actualKey, (String) entry.getValue());
      }
    }
    return job;
  }

  /**
   * Called when a job is skipped (because {@link #createJob()} returned null).
   */
  protected void onSkippedMRJob() {
    // do nothing
  }
}
| 1,604 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/mapreduce/GobblinWorkUnitsInputFormat.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.mapreduce;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.Properties;
import org.apache.gobblin.util.HadoopUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import com.google.common.collect.Iterators;
import com.google.common.collect.Lists;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.NoArgsConstructor;
import lombok.Singular;
import lombok.extern.slf4j.Slf4j;
/**
* An input format for reading Gobblin inputs (work unit and multi work unit files).
*/
@Slf4j
public class GobblinWorkUnitsInputFormat extends InputFormat<LongWritable, Text> {
  // Configuration key under which the mapper cap is stored on the job configuration.
  private static final String MAX_MAPPERS = GobblinWorkUnitsInputFormat.class.getName() + ".maxMappers";

  /**
   * Set max mappers used in MR job.
   */
  public static void setMaxMappers(Job job, int maxMappers) {
    job.getConfiguration().setInt(MAX_MAPPERS, maxMappers);
  }

  /** Read the mapper cap from the configuration; defaults to Integer.MAX_VALUE (no cap). */
  public static int getMaxMapper(Configuration conf) {
    return conf.getInt(MAX_MAPPERS, Integer.MAX_VALUE);
  }
@Override
public List<InputSplit> getSplits(JobContext context)
throws IOException, InterruptedException {
Path[] inputPaths = FileInputFormat.getInputPaths(context);
if (inputPaths == null || inputPaths.length == 0) {
throw new IOException("No input found!");
}
List<String> allPaths = Lists.newArrayList();
for (Path path : inputPaths) {
// path is a single work unit / multi work unit
FileSystem fs = path.getFileSystem(context.getConfiguration());
FileStatus[] inputs = fs.listStatus(path);
if (inputs == null) {
throw new IOException(String.format("Path %s does not exist.", path));
}
log.info(String.format("Found %d input files at %s: %s", inputs.length, path, Arrays.toString(inputs)));
for (FileStatus input : inputs) {
allPaths.add(input.getPath().toString());
}
}
int maxMappers = getMaxMapper(context.getConfiguration());
int numTasksPerMapper =
allPaths.size() % maxMappers == 0 ? allPaths.size() / maxMappers : allPaths.size() / maxMappers + 1;
List<InputSplit> splits = Lists.newArrayList();
Iterator<String> pathsIt = allPaths.iterator();
while (pathsIt.hasNext()) {
Iterator<String> limitedIterator = Iterators.limit(pathsIt, numTasksPerMapper);
splits.add(new GobblinSplit(Lists.newArrayList(limitedIterator)));
}
return splits;
}
@Override
public RecordReader<LongWritable, Text> createRecordReader(InputSplit split, TaskAttemptContext context)
throws IOException, InterruptedException {
return new GobblinRecordReader((GobblinSplit) split);
}
/**
* {@link InputSplit} that just contain the work unit / multi work unit files that each mapper should process.
*/
@AllArgsConstructor
@NoArgsConstructor
@Builder
@EqualsAndHashCode
public static class GobblinSplit extends InputSplit implements Writable {
/**
* A list of {@link Path}s containing work unit / multi work unit.
*/
@Getter
@Singular
private List<String> paths;
@Override
public void write(DataOutput out)
throws IOException {
out.writeInt(this.paths.size());
for (String path : this.paths) {
out.writeUTF(path);
}
}
@Override
public void readFields(DataInput in)
throws IOException {
int numPaths = in.readInt();
this.paths = Lists.newArrayList();
for (int i = 0; i < numPaths; i++) {
this.paths.add(in.readUTF());
}
}
@Override
public long getLength()
throws IOException, InterruptedException {
return 0;
}
@Override
public String[] getLocations()
throws IOException, InterruptedException {
return new String[0];
}
}
/**
* Returns records containing the name of the work unit / multi work unit files to process.
*/
public static class GobblinRecordReader extends RecordReader<LongWritable, Text> {
/**
* A factor value that would be used to multiply with "(float) this.currentIdx / (float) this.totalPaths"
* to reflect progress of the whole job.
* We used to use bare "(float) this.currentIdx / (float) this.totalPaths" value for progress, the problem of that is
* whenever deserialization of Gobblin-Workunit finished, the progress is reported as 1.
* We could customize the progress in mapper, but we still want to measure the progress of deserialization.
* The real progress multiplied with certain factor in (0,1) range could hopefully better represent the progress.
*/
private static final String READER_PROGRESS_FACTOR = "mapper.readerProgressFactor" ;
private static final float DEFAULT_READER_PROGRESS_FACTOR = 0.1f;
private int currentIdx = -1;
private final List<String> paths;
private final int totalPaths;
private Properties properties;
public GobblinRecordReader(GobblinSplit split) {
this.paths = split.getPaths();
this.totalPaths = this.paths.size();
}
@Override
public void initialize(InputSplit split, TaskAttemptContext context)
throws IOException, InterruptedException {
this.properties = HadoopUtils.getStateFromConf(context.getConfiguration()).getProperties();
}
@Override
public boolean nextKeyValue()
throws IOException, InterruptedException {
this.currentIdx++;
return this.currentIdx < this.totalPaths;
}
@Override
public LongWritable getCurrentKey()
throws IOException, InterruptedException {
return new LongWritable(this.currentIdx);
}
@Override
public Text getCurrentValue()
throws IOException, InterruptedException {
return new Text(this.paths.get(this.currentIdx));
}
@Override
public float getProgress()
throws IOException, InterruptedException {
if (MRJobLauncher.isCustomizedProgressReportEnabled(properties)) {
return 0.0f;
} else {
float factor = properties.containsKey(READER_PROGRESS_FACTOR) ?
Float.parseFloat(properties.getProperty(READER_PROGRESS_FACTOR)) : DEFAULT_READER_PROGRESS_FACTOR;
return factor * ((float) this.currentIdx / (float) this.totalPaths);
}
}
@Override
public void close()
throws IOException {
}
}
}
| 1,605 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/spec_catalog/SpecCatalogListenersList.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.spec_catalog;
import java.io.Closeable;
import java.io.IOException;
import java.net.URI;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.Properties;
import org.slf4j.Logger;
import com.google.common.base.Function;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import org.apache.gobblin.runtime.api.SpecCatalogListener;
import org.apache.gobblin.runtime.api.SpecCatalogListenersContainer;
import org.apache.gobblin.runtime.api.Spec;
import org.apache.gobblin.util.callbacks.CallbacksDispatcher;
/**
 * A {@link SpecCatalogListener} that fans spec-catalog callbacks out to a list of registered
 * listeners through a {@link CallbacksDispatcher}.
 *
 * <p>All listener-mutating and dispatching operations are synchronized on this instance.
 */
public class SpecCatalogListenersList implements SpecCatalogListener, SpecCatalogListenersContainer, Closeable {
  private final CallbacksDispatcher<SpecCatalogListener> _disp;

  public SpecCatalogListenersList() {
    this(Optional.<Logger>absent());
  }

  public SpecCatalogListenersList(Optional<Logger> log) {
    _disp = new CallbacksDispatcher<>(Optional.<ExecutorService>absent(), log);
  }

  public Logger getLog() {
    return _disp.getLog();
  }

  public synchronized List<SpecCatalogListener> getListeners() {
    return _disp.getListeners();
  }

  @Override
  public synchronized void addListener(SpecCatalogListener newListener) {
    _disp.addListener(newListener);
  }

  @Override
  public synchronized void removeListener(SpecCatalogListener oldListener) {
    _disp.removeListener(oldListener);
  }

  /**
   * Notifies all listeners of an added spec.
   *
   * @return the aggregated callback results, or {@code null} if the dispatch was interrupted
   */
  @Override
  public synchronized AddSpecResponse onAddSpec(Spec addedSpec) {
    Preconditions.checkNotNull(addedSpec);
    try {
      return new AddSpecResponse<>(_disp.execCallbacks(new AddSpecCallback(addedSpec)));
    } catch (InterruptedException e) {
      // Restore the interrupt status so callers up the stack can still observe the interruption.
      Thread.currentThread().interrupt();
      getLog().warn("onAddSpec interrupted.");
    }
    return null;
  }

  /** Notifies all listeners that the spec at {@code deletedSpecURI} was removed. */
  @Override
  public synchronized void onDeleteSpec(URI deletedSpecURI, String deletedSpecVersion, Properties headers) {
    Preconditions.checkNotNull(deletedSpecURI);
    try {
      _disp.execCallbacks(new SpecCatalogListener.DeleteSpecCallback(deletedSpecURI, deletedSpecVersion, headers));
    } catch (InterruptedException e) {
      // Restore the interrupt status so callers up the stack can still observe the interruption.
      Thread.currentThread().interrupt();
      getLog().warn("onDeleteSpec interrupted.");
    }
  }

  /** Notifies all listeners that {@code updatedSpec} was updated in place. */
  @Override
  public synchronized void onUpdateSpec(Spec updatedSpec) {
    Preconditions.checkNotNull(updatedSpec);
    try {
      _disp.execCallbacks(new UpdateSpecCallback(updatedSpec));
    } catch (InterruptedException e) {
      // Restore the interrupt status so callers up the stack can still observe the interruption.
      Thread.currentThread().interrupt();
      getLog().warn("onUpdateSpec interrupted.");
    }
  }

  @Override
  public void close()
      throws IOException {
    _disp.close();
  }

  /** Runs {@code callback} against a single listener only (used when a listener is added late). */
  public void callbackOneListener(Function<SpecCatalogListener, AddSpecResponse> callback,
      SpecCatalogListener listener) {
    try {
      _disp.execCallbacks(callback, listener);
    } catch (InterruptedException e) {
      // Restore the interrupt status so callers up the stack can still observe the interruption.
      Thread.currentThread().interrupt();
      getLog().warn("callback interrupted: "+ callback);
    }
  }

  @Override
  public void registerWeakSpecCatalogListener(SpecCatalogListener specListener) {
    _disp.addWeakListener(specListener);
  }
}
| 1,606 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/spec_catalog/TopologyCatalog.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.spec_catalog;
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.net.URI;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;
import org.apache.commons.lang3.reflect.ConstructorUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.AbstractIdleService;
import com.google.common.util.concurrent.Service;
import com.typesafe.config.Config;
import javax.annotation.Nonnull;
import javax.inject.Inject;
import javax.inject.Singleton;
import lombok.Getter;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.Tag;
import org.apache.gobblin.runtime.api.FlowSpec;
import org.apache.gobblin.runtime.api.GobblinInstanceEnvironment;
import org.apache.gobblin.runtime.api.MutableSpecCatalog;
import org.apache.gobblin.runtime.api.Spec;
import org.apache.gobblin.runtime.api.SpecCatalog;
import org.apache.gobblin.runtime.api.SpecCatalogListener;
import org.apache.gobblin.runtime.api.SpecNotFoundException;
import org.apache.gobblin.runtime.api.SpecSerDe;
import org.apache.gobblin.runtime.api.SpecStore;
import org.apache.gobblin.runtime.api.TopologySpec;
import org.apache.gobblin.runtime.spec_serde.JavaSpecSerDe;
import org.apache.gobblin.runtime.spec_store.FSSpecStore;
import org.apache.gobblin.util.ClassAliasResolver;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.callbacks.CallbackResult;
import org.apache.gobblin.util.callbacks.CallbacksDispatcher;
/**
 * A {@link SpecCatalog} for {@link TopologySpec}s, backed by a pluggable {@link SpecStore}
 * ({@link FSSpecStore} by default). On startup every spec already in the store is replayed
 * to the registered listeners.
 */
@Alpha
@Singleton
public class TopologyCatalog extends AbstractIdleService implements SpecCatalog, MutableSpecCatalog {

  public static final String DEFAULT_TOPOLOGYSPEC_STORE_CLASS = FSSpecStore.class.getCanonicalName();
  public static final String DEFAULT_TOPOLOGYSPEC_SERDE_CLASS = JavaSpecSerDe.class.getCanonicalName();

  protected final SpecCatalogListenersList listeners;
  protected final Logger log;
  // Null when instrumentation is disabled (see the 4-arg constructor).
  protected final MetricContext metricContext;
  protected final TopologyCatalog.StandardMetrics metrics;
  protected final SpecStore specStore;

  // Latch external components can await; counted down by the owner once init is complete.
  @Getter
  protected CountDownLatch initComplete = new CountDownLatch(1);

  private final ClassAliasResolver<SpecStore> aliasResolver;

  public TopologyCatalog(Config config) {
    this(config, Optional.<Logger>absent());
  }

  public TopologyCatalog(Config config, Optional<Logger> log) {
    this(config, log, Optional.<MetricContext>absent(), true);
  }

  @Inject
  public TopologyCatalog(Config config, GobblinInstanceEnvironment env) {
    this(config, Optional.of(env.getLog()), Optional.of(env.getMetricContext()),
        env.isInstrumentationEnabled());
  }

  /**
   * Main constructor: wires up metrics (when enabled) and reflectively instantiates the
   * {@link SpecStore} and {@link SpecSerDe} implementations named in {@code config}.
   *
   * @throws RuntimeException if the configured store/serde classes cannot be instantiated
   */
  public TopologyCatalog(Config config, Optional<Logger> log, Optional<MetricContext> parentMetricContext,
      boolean instrumentationEnabled) {
    this.log = log.isPresent() ? log.get() : LoggerFactory.getLogger(getClass());
    this.listeners = new SpecCatalogListenersList(log);
    if (instrumentationEnabled) {
      MetricContext realParentCtx =
          parentMetricContext.or(Instrumented.getMetricContext(new org.apache.gobblin.configuration.State(), getClass()));
      this.metricContext = realParentCtx.childBuilder(TopologyCatalog.class.getSimpleName()).build();
      this.metrics = new SpecCatalog.StandardMetrics(this, Optional.of(config));
      this.addListener(this.metrics);
    }
    else {
      this.metricContext = null;
      this.metrics = null;
    }

    this.aliasResolver = new ClassAliasResolver<>(SpecStore.class);
    try {
      Config newConfig = config;
      // Map the topology-specific store dir key onto the generic FSSpecStore key.
      if (config.hasPath(ConfigurationKeys.TOPOLOGYSPEC_STORE_DIR_KEY)) {
        newConfig = config.withValue(FSSpecStore.SPECSTORE_FS_DIR_KEY,
            config.getValue(ConfigurationKeys.TOPOLOGYSPEC_STORE_DIR_KEY));
      }
      String specStoreClassName = ConfigUtils.getString(config, ConfigurationKeys.TOPOLOGYSPEC_STORE_CLASS_KEY,
          DEFAULT_TOPOLOGYSPEC_STORE_CLASS);
      this.log.info("Using SpecStore class name/alias " + specStoreClassName);
      String specSerDeClassName = ConfigUtils.getString(config, ConfigurationKeys.TOPOLOGYSPEC_SERDE_CLASS_KEY,
          DEFAULT_TOPOLOGYSPEC_SERDE_CLASS);
      this.log.info("Using SpecSerDe class name/alias " + specSerDeClassName);

      SpecSerDe specSerDe = (SpecSerDe) ConstructorUtils.invokeConstructor(Class.forName(
          new ClassAliasResolver<>(SpecSerDe.class).resolve(specSerDeClassName)));
      this.specStore = (SpecStore) ConstructorUtils.invokeConstructor(Class.forName(this.aliasResolver.resolve(
          specStoreClassName)), newConfig, specSerDe);
    } catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException | InstantiationException
        | ClassNotFoundException e) {
      throw new RuntimeException(e);
    }
  }

  /***************************************************
  /* Catalog init and shutdown handlers              *
  /**************************************************/

  @Override
  protected void startUp() throws Exception {
    // Replay all persisted specs to listeners registered before the service started.
    notifyAllListeners();
  }

  @Override
  protected void shutDown() throws Exception {
    this.listeners.close();
  }

  /***************************************************
  /* Catalog listeners                               *
  /**************************************************/

  protected void notifyAllListeners() {
    for (Spec spec : getSpecs()) {
      this.listeners.onAddSpec(spec);
    }
  }

  /**
   * Registers a listener. If the catalog is already running, the listener is immediately
   * caught up by replaying every persisted spec to it (and only to it).
   */
  @Override
  public void addListener(SpecCatalogListener specListener) {
    Preconditions.checkNotNull(specListener);
    this.listeners.addListener(specListener);

    if (state() == Service.State.RUNNING) {
      for (Spec spec : getSpecs()) {
        SpecCatalogListener.AddSpecCallback addJobCallback = new SpecCatalogListener.AddSpecCallback(spec);
        this.listeners.callbackOneListener(addJobCallback, specListener);
      }
    }
  }

  @Override
  public void removeListener(SpecCatalogListener specCatalogListener) {
    this.listeners.removeListener(specCatalogListener);
  }

  @Override
  public void registerWeakSpecCatalogListener(SpecCatalogListener specCatalogListener) {
    this.listeners.registerWeakSpecCatalogListener(specCatalogListener);
  }

  /***************************************************
  /* Catalog metrics                                 *
  /**************************************************/

  // NOTE(review): annotated @Nonnull but returns null when instrumentation is disabled — confirm callers.
  @Nonnull
  @Override
  public MetricContext getMetricContext() {
    return this.metricContext;
  }

  @Override
  public boolean isInstrumentationEnabled() {
    return null != this.metricContext;
  }

  @Override
  public List<Tag<?>> generateTags(org.apache.gobblin.configuration.State state) {
    return Collections.emptyList();
  }

  @Override
  public void switchMetricContext(List<Tag<?>> tags) {
    throw new UnsupportedOperationException();
  }

  @Override
  public void switchMetricContext(MetricContext context) {
    throw new UnsupportedOperationException();
  }

  @Override
  public SpecCatalog.StandardMetrics getMetrics() {
    return this.metrics;
  }

  /**************************************************
  /* Catalog core functionality                     *
  /**************************************************/

  /** Loads every spec from the backing store; wraps {@link IOException} in {@link RuntimeException}. */
  @Override
  public Collection<Spec> getSpecs() {
    try {
      return specStore.getSpecs();
    } catch (IOException e) {
      throw new RuntimeException("Cannot retrieve Specs from Spec store", e);
    }
  }

  @Override
  public int getSize() {
    try {
      return specStore.getSize();
    } catch (IOException e) {
      throw new RuntimeException("Cannot retrieve number of specs from Spec store", e);
    }
  }

  @Override
  public Spec getSpecs(URI uri) throws SpecNotFoundException {
    try {
      return specStore.getSpec(uri);
    } catch (IOException e) {
      throw new RuntimeException("Cannot retrieve Spec from Spec store for URI: " + uri, e);
    }
  }

  /**
   * Persists {@code spec} to the store, then notifies all listeners.
   *
   * @return a map of listener name to the listener's {@link AddSpecResponse}
   * @throws RuntimeException if persisting to the store fails
   */
  @Override
  public Map<String, AddSpecResponse> put(Spec spec) {
    Map<String, AddSpecResponse> responseMap = new HashMap<>();
    try {
      Preconditions.checkState(state() == Service.State.RUNNING, String.format("%s is not running.", this.getClass().getName()));
      Preconditions.checkNotNull(spec);

      log.info(String.format("Adding TopologySpec with URI: %s and Config: %s", spec.getUri(),
          ((TopologySpec) spec).getConfigAsProperties()));
      specStore.addSpec(spec);
      // NOTE(review): onAddSpec returns null if the dispatch was interrupted; response.getValue()
      // below would then NPE — confirm whether interruption is possible on this path.
      AddSpecResponse<CallbacksDispatcher.CallbackResults<SpecCatalogListener, AddSpecResponse>> response = this.listeners.onAddSpec(spec);
      for (Map.Entry<SpecCatalogListener, CallbackResult<AddSpecResponse>> entry: response.getValue().getSuccesses().entrySet()) {
        responseMap.put(entry.getKey().getName(), entry.getValue().getResult());
      }
    } catch (IOException e) {
      throw new RuntimeException("Cannot add Spec to Spec store: " + spec, e);
    }
    return responseMap;
  }

  /** Convenience overload of {@link #remove(URI, Properties)} with empty headers. */
  public void remove(URI uri) {
    remove(uri, new Properties());
  }

  /**
   * Notifies listeners of the deletion, then removes the spec from the store.
   */
  @Override
  public void remove(URI uri, Properties headers) {
    try {
      Preconditions.checkState(state() == Service.State.RUNNING, String.format("%s is not running.", this.getClass().getName()));
      Preconditions.checkNotNull(uri);

      log.info(String.format("Removing TopologySpec with URI: %s", uri));
      this.listeners.onDeleteSpec(uri, FlowSpec.Builder.DEFAULT_VERSION, headers);
      specStore.deleteSpec(uri);
    } catch (IOException e) {
      throw new RuntimeException("Cannot delete Spec from Spec store for URI: " + uri, e);
    }
  }
}
| 1,607 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/spec_catalog/FlowCatalog.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.spec_catalog;
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.net.URI;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import javax.inject.Named;
import org.apache.commons.lang3.reflect.ConstructorUtils;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.runtime.util.InjectionNames;
import org.apache.gobblin.util.ExponentialBackoff;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.AbstractIdleService;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigException;
import javax.annotation.Nonnull;
import javax.inject.Inject;
import javax.inject.Singleton;
import lombok.Getter;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.Tag;
import org.apache.gobblin.runtime.api.FlowSpec;
import org.apache.gobblin.runtime.api.GobblinInstanceEnvironment;
import org.apache.gobblin.runtime.api.MutableSpecCatalog;
import org.apache.gobblin.runtime.api.Spec;
import org.apache.gobblin.runtime.api.SpecCatalog;
import org.apache.gobblin.runtime.api.SpecCatalogListener;
import org.apache.gobblin.runtime.api.SpecNotFoundException;
import org.apache.gobblin.runtime.api.SpecSearchObject;
import org.apache.gobblin.runtime.api.SpecSerDe;
import org.apache.gobblin.runtime.api.SpecStore;
import org.apache.gobblin.runtime.spec_serde.JavaSpecSerDe;
import org.apache.gobblin.runtime.spec_store.FSSpecStore;
import org.apache.gobblin.service.ServiceConfigKeys;
import org.apache.gobblin.util.ClassAliasResolver;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.callbacks.CallbackResult;
import org.apache.gobblin.util.callbacks.CallbacksDispatcher;
/**
 * A service that interacts with FlowSpec storage.
 * The FlowSpec storage, a.k.a. the {@link SpecStore}, is pluggable with different implementations.
 */
@Singleton
public class FlowCatalog extends AbstractIdleService implements SpecCatalog, MutableSpecCatalog {

  /***
   * Configuration properties related to FlowSpec Store
   */
  public static final String FLOWSPEC_STORE_CLASS_KEY = "flowSpec.store.class";
  public static final String FLOWSPEC_STORE_DIR_KEY = "flowSpec.store.dir";
  public static final String DEFAULT_FLOWSPEC_STORE_CLASS = FSSpecStore.class.getCanonicalName();
  public static final String FLOWSPEC_SERDE_CLASS_KEY = "flowSpec.serde.class";
  public static final String DEFAULT_FLOWSPEC_SERDE_CLASS = JavaSpecSerDe.class.getCanonicalName();

  // Initial backoff delay (ms) before retrying a failed spec lookup; see getSpecWrapper().
  private static final long FLOWCATALOG_GET_SPEC_INITIAL_WAIT_AFTER_FAILURE = 1000L;

  protected final SpecCatalogListenersList listeners;
  protected final Logger log;
  // Null when instrumentation is disabled (see the 5-arg constructor).
  protected final MetricContext metricContext;
  protected final MutableStandardMetrics metrics;
  protected final boolean isWarmStandbyEnabled;
  // Max retries used by getSpecWrapper()'s exponential-backoff lookup.
  protected final int maxRetriesWhenGetSpec;
  @Getter
  protected final SpecStore specStore;
  // a map which keeps a handle of condition variables for each spec being added to the flow catalog
  // to provide synchronization needed for flow specs
  private final Map<String, Object> specSyncObjects = new HashMap<>();

  private final ClassAliasResolver<SpecStore> aliasResolver;
  public FlowCatalog(Config config) {
    this(config, Optional.<Logger>absent());
  }

  public FlowCatalog(Config config, Optional<Logger> log) {
    this(config, log, Optional.<MetricContext>absent(), true, false);
  }

  @Inject
  public FlowCatalog(Config config, GobblinInstanceEnvironment env, @Named(InjectionNames.WARM_STANDBY_ENABLED) boolean isWarmStandbyEnabled) {
    this(config, Optional.of(env.getLog()), Optional.of(env.getMetricContext()),
        env.isInstrumentationEnabled(), isWarmStandbyEnabled);
  }

  /**
   * Main constructor: wires up metrics (when enabled) and reflectively instantiates the
   * {@link SpecStore} and {@link SpecSerDe} implementations named in {@code config}.
   *
   * @throws RuntimeException if the configured store/serde classes cannot be instantiated
   */
  public FlowCatalog(Config config, Optional<Logger> log, Optional<MetricContext> parentMetricContext,
      boolean instrumentationEnabled, boolean isWarmStandbyEnabled) {
    this.log = log.isPresent() ? log.get() : LoggerFactory.getLogger(getClass());
    this.listeners = new SpecCatalogListenersList(log);
    if (instrumentationEnabled) {
      MetricContext realParentCtx =
          parentMetricContext.or(Instrumented.getMetricContext(new org.apache.gobblin.configuration.State(), getClass()));
      this.metricContext = realParentCtx.childBuilder(FlowCatalog.class.getSimpleName()).build();
      this.metrics = new MutableStandardMetrics(this, Optional.of(config));
      this.addListener(this.metrics);
    } else {
      this.metricContext = null;
      this.metrics = null;
    }
    this.isWarmStandbyEnabled = isWarmStandbyEnabled;
    this.aliasResolver = new ClassAliasResolver<>(SpecStore.class);
    this.maxRetriesWhenGetSpec = ConfigUtils.getInt(config, ConfigurationKeys.MYSQL_GET_MAX_RETRIES, ConfigurationKeys.DEFAULT_MYSQL_GET_MAX_RETRIES);
    try {
      Config newConfig = config;
      // Map the flow-specific store dir key onto the generic FSSpecStore key.
      if (config.hasPath(FLOWSPEC_STORE_DIR_KEY)) {
        newConfig = config.withValue(FSSpecStore.SPECSTORE_FS_DIR_KEY,
            config.getValue(FLOWSPEC_STORE_DIR_KEY));
      }
      String specStoreClassName = ConfigUtils.getString(config, FLOWSPEC_STORE_CLASS_KEY, DEFAULT_FLOWSPEC_STORE_CLASS);
      this.log.info(String.format("Using class name/alias [%s] for specstore", specStoreClassName));
      String specSerDeClassName = ConfigUtils.getString(config, FLOWSPEC_SERDE_CLASS_KEY, DEFAULT_FLOWSPEC_SERDE_CLASS);
      this.log.info(String.format("Using class name/alias [%s] for spec serde", specSerDeClassName));

      SpecSerDe specSerDe = (SpecSerDe) ConstructorUtils.invokeConstructor(Class.forName(
          new ClassAliasResolver<>(SpecSerDe.class).resolve(specSerDeClassName)));
      this.specStore = (SpecStore) ConstructorUtils.invokeConstructor(Class.forName(this.aliasResolver.resolve(
          specStoreClassName)), newConfig, specSerDe);
    } catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException | InstantiationException
        | ClassNotFoundException e) {
      throw new RuntimeException(e);
    }
  }
  /***************************************************
  /* Catalog init and shutdown handlers              *
  /**************************************************/

  @Override
  protected void startUp() throws Exception {
    //Do nothing
  }

  @Override
  protected void shutDown() throws Exception {
    this.listeners.close();
  }

  /***************************************************
  /* Catalog listeners                               *
  /**************************************************/
protected void notifyAllListeners() {
try {
Iterator<URI> uriIterator = getSpecURIs();
while (uriIterator.hasNext()) {
this.listeners.onAddSpec(getSpecWrapper(uriIterator.next()));
}
} catch (IOException e) {
log.error("Cannot retrieve specs from catalog:", e);
}
}
  /**
   * Registers a listener. If the catalog is already running, the listener is immediately
   * caught up by replaying every persisted spec to it (and only to it).
   */
  @Override
  public void addListener(SpecCatalogListener specListener) {
    Preconditions.checkNotNull(specListener);
    this.listeners.addListener(specListener);

    if (state() == State.RUNNING) {
      try {
        Iterator<URI> uriIterator = getSpecURIs();
        while (uriIterator.hasNext()) {
          SpecCatalogListener.AddSpecCallback addJobCallback =
              new SpecCatalogListener.AddSpecCallback(getSpecWrapper(uriIterator.next()));
          // Only the newly added listener gets the replay; existing listeners already saw these specs.
          this.listeners.callbackOneListener(addJobCallback, specListener);
        }
      } catch (IOException e) {
        log.error("Cannot retrieve specs from catalog:", e);
      }
    }
  }
  @Override
  public void removeListener(SpecCatalogListener specCatalogListener) {
    this.listeners.removeListener(specCatalogListener);
  }

  // Weakly referenced listeners may be garbage-collected without explicit removal.
  @Override
  public void registerWeakSpecCatalogListener(SpecCatalogListener specCatalogListener) {
    this.listeners.registerWeakSpecCatalogListener(specCatalogListener);
  }
  /***************************************************
  /* Catalog metrics                                 *
  /**************************************************/

  // NOTE(review): annotated @Nonnull but returns null when instrumentation is disabled — confirm callers.
  @Nonnull
  @Override
  public MetricContext getMetricContext() {
    return this.metricContext;
  }

  @Override
  public boolean isInstrumentationEnabled() {
    return null != this.metricContext;
  }

  @Override
  public List<Tag<?>> generateTags(org.apache.gobblin.configuration.State state) {
    return Collections.emptyList();
  }

  // Metric-context switching is intentionally unsupported for this catalog.
  @Override
  public void switchMetricContext(List<Tag<?>> tags) {
    throw new UnsupportedOperationException();
  }

  @Override
  public void switchMetricContext(MetricContext context) {
    throw new UnsupportedOperationException();
  }

  @Override
  public SpecCatalog.StandardMetrics getMetrics() {
    return this.metrics;
  }
  /**************************************************
  /* Catalog core functionality                     *
  /**************************************************/

  /** Returns an iterator over the URIs of all persisted specs. */
  public Iterator<URI> getSpecURIs() throws IOException {
    return specStore.getSpecURIs();
  }

  /** Returns an iterator over the URIs of persisted specs carrying the given tag. */
  public Iterator<URI> getSpecURISWithTag(String tag) throws IOException {
    return specStore.getSpecURIsWithTag(tag);
  }
  /**
   * Get all specs from {@link SpecStore}
   * Not suggested for {@link FlowCatalog} where the total amount of space that all {@link FlowSpec}s occupied
   * would be large and loading process is slow.
   */
  @Deprecated
  @Override
  public Collection<Spec> getSpecs() {
    try {
      return specStore.getSpecs();
      // TODO: Have kind of metrics keeping track of specs that failed to be deserialized.
    } catch (IOException e) {
      // Wrap the checked IOException: SpecCatalog's interface does not declare it.
      throw new RuntimeException("Cannot retrieve Specs from Spec store", e);
    }
  }
/**
* Get number of specs from {@link SpecStore}
*/
@Override
public int getSize() {
try {
return specStore.getSize();
} catch (IOException e) {
throw new RuntimeException("Cannot retrieve number of specs from Spec store", e);
}
}
public boolean exists(URI uri) {
try {
return specStore.exists(uri);
} catch (IOException e) {
throw new RuntimeException("Cannot retrieve Spec from Spec store for URI: " + uri, e);
}
}
  /**
   * Looks up a single spec by URI.
   *
   * @throws SpecNotFoundException if no spec with the given URI exists
   * @throws RuntimeException wrapping any {@link IOException} from the store
   */
  @Override
  public Spec getSpecs(URI uri) throws SpecNotFoundException {
    try {
      return specStore.getSpec(uri);
    } catch (IOException e) {
      throw new RuntimeException("Cannot retrieve Spec from Spec store for URI: " + uri, e);
    }
  }
  /** Looks up specs matching the given search object; store I/O errors are wrapped unchecked. */
  @Override
  public Collection<Spec> getSpecs(SpecSearchObject specSearchObject) {
    try {
      return specStore.getSpecs(specSearchObject);
    } catch (IOException e) {
      throw new RuntimeException("Cannot retrieve Spec from Spec store for URI: " + specSearchObject, e);
    }
  }
  /** Non-deprecated variant of {@link #getSpecs()}; loads every spec from the store. */
  public Collection<Spec> getAllSpecs() {
    try {
      return specStore.getSpecs();
    } catch (IOException e) {
      throw new RuntimeException("Cannot retrieve all specs from Spec stores", e);
    }
  }
  /**
   * A function to get a batch of specs in the {@link SpecStore} between the provided start index and (start + count - 1) index, inclusive.
   * This enables pagination so getting SpecStore object will not timeout, and can be tuned to how many results is desired at any one time.
   * The {@link Spec} in {@link SpecStore} are sorted in ascending order of the spec_uri while paginating.
   *
   * @param start The start index.
   * @param count The total number of records to get.
   * @return A collection of specs between start and start + count - 1, inclusive.
   * @throws RuntimeException wrapping {@link IOException} or {@link IllegalArgumentException} from the store
   */
  public Collection<Spec> getSpecsPaginated(int start, int count) {
    try {
      return specStore.getSpecsPaginated(start, count);
    } catch (IOException | IllegalArgumentException e) {
      throw new RuntimeException("Cannot retrieve specs from Spec stores between " + start + " and " + (start + count - 1), e);
    }
  }
/**
* A wrapper of getSpecs that handles {@link SpecNotFoundException} properly.
* This is the most common way to fetch {@link Spec}. For customized way to deal with exception, one will
* need to implement specific catch-block logic.
*/
public Spec getSpecWrapper(URI uri) {
Spec spec = null;
ExponentialBackoff exponentialBackoff = ExponentialBackoff.builder().maxRetries(this.maxRetriesWhenGetSpec).initialDelay(
FLOWCATALOG_GET_SPEC_INITIAL_WAIT_AFTER_FAILURE).build();
try {
spec = getSpecHelper(uri, exponentialBackoff);
} catch (InterruptedException e) {
log.error(String.format("Failed to get %s in SpecStore", uri), e);
}
return spec;
}
private Spec getSpecHelper(URI uri, ExponentialBackoff exponentialBackoff)
throws InterruptedException {
Spec spec= null;
try {
spec = getSpecs(uri);
} catch (SpecNotFoundException snfe) {
if (exponentialBackoff.awaitNextRetryIfAvailable()) {
return getSpecHelper(uri, exponentialBackoff);
} else {
log.error(String.format("The URI %s discovered in SpecStore is missing in FlowCatalog" + ", suspecting current modification on SpecStore", uri), snfe);
}
}
return spec;
}
  /** Adds {@code spec}; delegates to the shared add/update helper with no modification watermark. */
  public Map<String, AddSpecResponse> put(Spec spec, boolean triggerListener) throws Throwable {
    return updateOrAddSpecHelper(spec, triggerListener, false, Long.MAX_VALUE);
  }

  /** Updates {@code spec}, only touching specs last modified before {@code modifiedWatermark}. */
  public Map<String, AddSpecResponse> update(Spec spec, boolean triggerListener, long modifiedWatermark) throws Throwable {
    return updateOrAddSpecHelper(spec, triggerListener, true, modifiedWatermark);
  }
/**
* Persist {@link Spec} into {@link SpecStore} and notify {@link SpecCatalogListener} if triggerListener
* is set to true.
* If the {@link Spec} is a {@link FlowSpec} it is persisted if it can be compiled at the time this method received
* the spec. `explain` specs are not persisted. The logic of this method is tightly coupled with the logic of
* {@link GobblinServiceJobScheduler#onAddSpec()} or {@link Orchestrator#onAddSpec()} in warm standby mode,
* which is one of the listener of {@link FlowCatalog}.
* We use condition variables {@link #specSyncObjects} to achieve synchronization between
* {@link GobblinServiceJobScheduler#NonScheduledJobRunner} thread and this thread to ensure deletion of
* {@link FlowSpec} happens after the corresponding run once flow is submitted to the orchestrator.
*
* @param spec The Spec to be added
* @param triggerListener True if listeners should be notified.
* @param isUpdate Whether this is update or add operation, it will call different method in spec store to persist the spec
* @param modifiedWatermark If it's update operation, the largest modifiedWatermark that it can modify, or in other word, the timestamp which old spec should be modified before
* @return a map of listeners and their {@link AddSpecResponse}s
*/
private Map<String, AddSpecResponse> updateOrAddSpecHelper(Spec spec, boolean triggerListener, boolean isUpdate, long modifiedWatermark) throws Throwable {
  Map<String, AddSpecResponse> responseMap = new HashMap<>();
  FlowSpec flowSpec = (FlowSpec) spec;
  Preconditions.checkState(state() == State.RUNNING, String.format("%s is not running.", this.getClass().getName()));
  Preconditions.checkNotNull(flowSpec);
  log.info(String.format("Adding FlowSpec with URI: %s and Config: %s", flowSpec.getUri(), flowSpec.getConfigAsProperties()));
  // Condition object used to coordinate with GobblinServiceJobScheduler.NonScheduledJobRunner:
  // deletion of a run-once FlowSpec must wait until the spec has been persisted below.
  Object syncObject = new Object();
  specSyncObjects.put(flowSpec.getUri().toString(), syncObject);
  if (triggerListener) {
    AddSpecResponse<CallbacksDispatcher.CallbackResults<SpecCatalogListener, AddSpecResponse>> response = this.listeners.onAddSpec(flowSpec);
    for (Map.Entry<SpecCatalogListener, CallbackResult<AddSpecResponse>> entry : response.getValue().getSuccesses().entrySet()) {
      responseMap.put(entry.getKey().getName(), entry.getValue().getResult());
    }
    // If flow fails compilation, the result will have a non-empty string with the error
    if (response.getValue().getFailures().size() > 0) {
      // Fail fast: rethrow the cause of the first failed listener callback; any further
      // failures in the map are never reached.
      for (Map.Entry<SpecCatalogListener, CallbackResult<AddSpecResponse>> entry : response.getValue().getFailures().entrySet()) {
        throw entry.getValue().getError().getCause();
      }
    }
  }
  // The compile result is produced by a different listener depending on deployment mode.
  AddSpecResponse<String> compileResponse;
  if (isWarmStandbyEnabled) {
    compileResponse = responseMap.getOrDefault(ServiceConfigKeys.GOBBLIN_ORCHESTRATOR_LISTENER_CLASS, new AddSpecResponse<>(null));
    //todo: do we check quota here? or in compiler? Quota manager need dag to check quota which is not accessable from this class
  } else {
    compileResponse = responseMap.getOrDefault(ServiceConfigKeys.GOBBLIN_SERVICE_JOB_SCHEDULER_LISTENER_CLASS, new AddSpecResponse<>(null));
  }
  responseMap.put(ServiceConfigKeys.COMPILATION_RESPONSE, compileResponse);
  // Check that the flow configuration is valid and matches to a corresponding edge
  if (isCompileSuccessful(compileResponse.getValue())) {
    synchronized (syncObject) {
      try {
        // `explain` specs are compiled for feedback but intentionally never persisted.
        if (!flowSpec.isExplain()) {
          long startTime = System.currentTimeMillis();
          if (isUpdate) {
            specStore.updateSpec(spec, modifiedWatermark);
          } else {
            specStore.addSpec(spec);
          }
          metrics.updatePutSpecTime(startTime);
        }
        responseMap.put(ServiceConfigKeys.COMPILATION_SUCCESSFUL, new AddSpecResponse<>("true"));
      } catch (IOException e) {
        String operation = isUpdate ? "update" : "add";
        throw new RuntimeException("Cannot " + operation + " Spec to Spec store: " + flowSpec, e);
      } finally {
        // Wake any waiter even on persistence failure so the run-once runner never blocks forever.
        syncObject.notifyAll();
        this.specSyncObjects.remove(flowSpec.getUri().toString());
      }
    }
  } else {
    // NOTE(review): on compilation failure the syncObject registered above is left in
    // specSyncObjects — confirm it is cleaned up elsewhere (potential unbounded map growth).
    responseMap.put(ServiceConfigKeys.COMPILATION_SUCCESSFUL, new AddSpecResponse<>("false"));
  }
  return responseMap;
}
/**
 * Returns whether a compiled dag response string denotes a successful compilation.
 * A failed compilation embeds the simple name of {@link ConfigException} in the
 * response string; a null response means no compiler produced a result at all.
 */
public static boolean isCompileSuccessful(String dag) {
  if (dag == null) {
    return false;
  }
  return !dag.contains(ConfigException.class.getSimpleName());
}
/**
 * {@inheritDoc}
 *
 * Equivalent to {@code put(spec, true)}: listeners are always notified on this path.
 */
@Override
public Map<String, AddSpecResponse> put(Spec spec) throws Throwable {
  return put(spec, true);
}
/**
 * Removes the spec identified by {@code uri} with empty headers; listeners are notified.
 */
public void remove(URI uri) {
  remove(uri, new Properties());
}
/**
 * {@inheritDoc}
 *
 * Delegates with {@code triggerListener=true}, so deletion listeners always fire here.
 */
@Override
public void remove(URI uri, Properties headers) {
  this.remove(uri, headers, true);
}
/**
 * Deletes the spec identified by {@code uri} from the spec store and, when requested,
 * notifies registered listeners of the deletion.
 *
 * @param uri URI of the spec to delete; must be non-null
 * @param headers headers forwarded to deletion listeners
 * @param triggerListener true if {@code onDeleteSpec} callbacks should fire
 * @throws RuntimeException wrapping any {@link IOException} from the spec store
 */
public void remove(URI uri, Properties headers, boolean triggerListener) {
  // Precondition failures throw unchecked exceptions, which the IOException handler
  // below never caught anyway — validating up front is behavior-identical.
  Preconditions.checkState(state() == State.RUNNING, String.format("%s is not running.", this.getClass().getName()));
  Preconditions.checkNotNull(uri);
  long removalStartMillis = System.currentTimeMillis();
  log.info(String.format("Removing FlowSpec with URI: %s", uri));
  try {
    specStore.deleteSpec(uri);
    this.metrics.updateRemoveSpecTime(removalStartMillis);
    if (triggerListener) {
      this.listeners.onDeleteSpec(uri, FlowSpec.Builder.DEFAULT_VERSION, headers);
    }
  } catch (IOException e) {
    throw new RuntimeException("Cannot delete Spec from Spec store for URI: " + uri, e);
  }
}
/**
 * Looks up the synchronization object registered for the given spec URI, or
 * returns null when no put/update for that spec is currently in flight.
 * (Map#get already yields null for an absent key, matching getOrDefault(key, null).)
 */
public Object getSyncObject(String specUri) {
  return this.specSyncObjects.get(specUri);
}
}
| 1,608 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/spec_catalog/AddSpecResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.spec_catalog;
/**
 * A generic, immutable response that listeners of a {@link org.apache.gobblin.runtime.api.SpecCatalog}
 * return when a {@link org.apache.gobblin.runtime.api.Spec} is added to the catalog.
 *
 * @param <T> type of the payload carried by this response; may be null when a listener
 *            has no result to report
 */
public class AddSpecResponse<T> {

  // Made final: the value is set once at construction and only ever read, so the
  // holder is immutable and safe to share across listener threads.
  private final T value;

  public AddSpecResponse(T value) {
    this.value = value;
  }

  /** Returns the payload supplied at construction (possibly null). */
  public T getValue() {
    return this.value;
  }
}
| 1,609 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/listeners/RunOnceJobListener.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.listeners;
import java.io.File;
import java.io.IOException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.io.Files;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.runtime.JobContext;
import org.apache.gobblin.runtime.JobState;
/**
 * A {@link JobListener} for run-once jobs: after such a job completes, its configuration
 * file is renamed (suffix {@code .done}) so a restarted worker does not launch it again.
 *
 * @author Yinan Li
 */
public class RunOnceJobListener extends AbstractJobListener {

  private static final Logger LOG = LoggerFactory.getLogger(RunOnceJobListener.class);

  @Override
  public void onJobCompletion(JobContext jobContext) {
    JobState jobState = jobContext.getJobState();
    if (!jobState.contains(ConfigurationKeys.JOB_CONFIG_FILE_PATH_KEY)) {
      LOG.error("Job configuration file path not found in job state of job " + jobState.getJobId());
      return;
    }
    String configFilePath = jobState.getProp(ConfigurationKeys.JOB_CONFIG_FILE_PATH_KEY);
    File original = new File(configFilePath);
    File retired = new File(configFilePath + ".done");
    // Rename the config file so we won't run this job when the worker is bounced
    try {
      Files.move(original, retired);
    } catch (IOException ioe) {
      LOG.error("Failed to rename job configuration file for job " + jobState.getJobName(), ioe);
    }
  }
}
| 1,610 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/listeners/AbstractCloseableJobListener.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.listeners;
/**
 * Extension of {@link AbstractJobListener} that also extends {@link CloseableJobListener},
 * combining the no-op lifecycle defaults with a {@code close()} contract for subclasses
 * that hold resources.
 *
 * @see AbstractJobListener
 * @author Joel Baranick
 */
public abstract class AbstractCloseableJobListener extends AbstractJobListener implements CloseableJobListener {
  // Intentionally empty: inherits no-op callbacks and only adds the Closeable contract.
}
| 1,611 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/listeners/JobExecutionEventSubmitterListener.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.listeners;
import lombok.AllArgsConstructor;
import org.apache.gobblin.runtime.JobContext;
import org.apache.gobblin.runtime.JobExecutionEventSubmitter;
/**
 * Implementation of {@link JobListener} that submits metadata events via {@link JobExecutionEventSubmitter} when a job
 * is completed or is cancelled.
 */
@AllArgsConstructor
public class JobExecutionEventSubmitterListener extends AbstractJobListener {
  // Fix: the Javadoc comment previously sat between @AllArgsConstructor and the class
  // declaration, where the javadoc tool does not associate it with the class. A doc
  // comment must immediately precede the declaration (annotations included).

  // Submitter used to emit job-execution metadata events; injected via the Lombok all-args constructor.
  private final JobExecutionEventSubmitter jobExecutionEventSubmitter;

  /** Submits job-execution events for the final job state on normal completion. */
  @Override
  public void onJobCompletion(JobContext jobContext) {
    this.jobExecutionEventSubmitter.submitJobExecutionEvents(jobContext.getJobState());
  }

  /** Submits job-execution events for the final job state on cancellation. */
  @Override
  public void onJobCancellation(JobContext jobContext) {
    this.jobExecutionEventSubmitter.submitJobExecutionEvents(jobContext.getJobState());
  }
}
| 1,612 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/listeners/CloseableJobListener.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.listeners;
import java.io.Closeable;
/**
 * Extension of {@link JobListener} that also extends {@link Closeable}, for listeners
 * holding resources that must be released once notifications are finished (see
 * {@code JobListeners.parallelJobListener}, which closes its delegates on close()).
 *
 * @see JobListener
 */
public interface CloseableJobListener extends JobListener, Closeable {
  // Marker interface: adds no methods beyond JobListener and Closeable.
}
| 1,613 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/listeners/AbstractJobListener.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.listeners;
import org.slf4j.Logger;
import com.google.common.base.Optional;
import org.apache.gobblin.runtime.JobContext;
/**
 * Skeletal {@link JobListener} whose lifecycle callbacks all default to optionally-logged
 * no-ops, so subclasses only need to override the events they care about.
 *
 * @author Joel Baranick
 */
public abstract class AbstractJobListener implements JobListener {

  // Logger supplied by the subclass; when absent, the default callbacks are silent.
  private final Optional<Logger> _log;

  public AbstractJobListener(Optional<Logger> log) {
    _log = log;
  }

  public AbstractJobListener() {
    this(Optional.<Logger>absent());
  }

  /** Logs {@code prefix + jobContext} at INFO when a logger is present; otherwise a no-op. */
  private void logEvent(String prefix, JobContext jobContext) {
    if (_log.isPresent()) {
      _log.get().info(prefix + jobContext);
    }
  }

  @Override
  public void onJobPrepare(JobContext jobContext) throws Exception {
    logEvent("jobPrepare: ", jobContext);
  }

  @Override
  public void onJobStart(JobContext jobContext) throws Exception {
    logEvent("jobStart: ", jobContext);
  }

  @Override
  public void onJobCompletion(JobContext jobContext) throws Exception {
    logEvent("jobCompletion: ", jobContext);
  }

  @Override
  public void onJobCancellation(JobContext jobContext) throws Exception {
    logEvent("jobCancellation: ", jobContext);
  }

  @Override
  public void onJobFailure(JobContext jobContext) throws Exception {
    logEvent("jobFailure: ", jobContext);
  }

  protected Optional<Logger> getLog() {
    return _log;
  }
}
| 1,614 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/listeners/JobListeners.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.listeners;
import java.io.Closeable;
import java.io.IOException;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import com.google.common.base.Optional;
import com.google.common.base.Predicates;
import com.google.common.collect.Iterables;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.gobblin.runtime.JobContext;
import org.apache.gobblin.util.ExecutorsUtils;
/**
 * Static utility methods pertaining to {@link JobListener}s.
 *
 * @see JobListener
 */
public class JobListeners {

  /**
   * Chains a given {@link List} of {@link JobListener}s into a single {@link JobListener}. The specified {@link JobListener}s
   * will all be executed in parallel.
   *
   * @param jobListeners is a {@link List} of {@link JobListener}s that need to be executed
   *
   * @return a {@link CloseableJobListener}, which is similar to {@link JobListener}, except
   * {@link CloseableJobListener#close()} will block until all {@link JobListener}s have finished their executions.
   */
  public static CloseableJobListener parallelJobListener(List<JobListener> jobListeners) {
    // Strip null entries (in place) so the dispatcher never submits a task for a null listener.
    Iterables.removeIf(jobListeners, Predicates.isNull());
    return new ParallelJobListener(jobListeners);
  }

  /**
   * Implementation of {@link CloseableJobListener} that executes a given {@link List} of {@link JobListener}s in parallel.
   */
  private static final class ParallelJobListener implements CloseableJobListener {

    private static final Logger LOGGER = LoggerFactory.getLogger(ParallelJobListener.class);

    // Delegate listeners; each lifecycle event submits one Callable per delegate.
    private final List<JobListener> jobListeners;
    private final ExecutorService executor;
    // Lets close() block until the submitted listener callbacks have finished.
    private final CompletionService<Void> completionService;

    public ParallelJobListener(List<JobListener> jobListeners) {
      this.jobListeners = jobListeners;
      this.executor = Executors.newCachedThreadPool(
          ExecutorsUtils.newThreadFactory(Optional.of(LOGGER), Optional.of("ParallelJobListener")));
      this.completionService = new ExecutorCompletionService<>(this.executor);
    }

    // The callbacks below fan each event out asynchronously. Exceptions thrown by
    // delegates are not raised here; they surface in close() via Future#get().

    @Override
    public void onJobPrepare(final JobContext jobContext) {
      for (final JobListener jobListener : this.jobListeners) {
        this.completionService.submit(new Callable<Void>() {
          @Override
          public Void call() throws Exception {
            jobListener.onJobPrepare(jobContext);
            return null;
          }
        });
      }
    }

    @Override
    public void onJobStart(final JobContext jobContext) {
      for (final JobListener jobListener : this.jobListeners) {
        this.completionService.submit(new Callable<Void>() {
          @Override
          public Void call() throws Exception {
            jobListener.onJobStart(jobContext);
            return null;
          }
        });
      }
    }

    @Override
    public void onJobCompletion(final JobContext jobContext) {
      for (final JobListener jobListener : this.jobListeners) {
        this.completionService.submit(new Callable<Void>() {
          @Override
          public Void call() throws Exception {
            jobListener.onJobCompletion(jobContext);
            return null;
          }
        });
      }
    }

    @Override
    public void onJobCancellation(final JobContext jobContext) {
      for (final JobListener jobListener : this.jobListeners) {
        this.completionService.submit(new Callable<Void>() {
          @Override
          public Void call() throws Exception {
            jobListener.onJobCancellation(jobContext);
            return null;
          }
        });
      }
    }

    @Override
    public void onJobFailure(final JobContext jobContext) {
      for (final JobListener jobListener : this.jobListeners) {
        this.completionService.submit(new Callable<Void>() {
          @Override
          public Void call() throws Exception {
            jobListener.onJobFailure(jobContext);
            return null;
          }
        });
      }
    }

    /**
     * Blocks until the listener callbacks complete, then shuts down the executor and closes
     * any {@link java.io.Closeable} delegates. The first failure is rethrown wrapped in an
     * {@link IOException}; an interruption cancels remaining callbacks and re-interrupts.
     *
     * NOTE(review): exactly {@code jobListeners.size()} completions are drained, i.e. one
     * dispatched lifecycle event's worth of tasks — confirm callers fire a single event per
     * instance before closing, otherwise leftover tasks are abandoned to executor shutdown.
     */
    @Override
    public void close() throws IOException {
      try {
        boolean wasInterrupted = false;
        IOException exception = null;
        for (int i = 0; i < this.jobListeners.size(); i++) {
          try {
            if (wasInterrupted) {
              // Once interrupted, stop waiting on results and cancel the remaining callbacks.
              this.completionService.take().cancel(true);
            } else {
              this.completionService.take().get();
            }
          } catch (InterruptedException ie) {
            wasInterrupted = true;
            if (exception == null) {
              exception = new IOException(ie);
            }
          } catch (ExecutionException ee) {
            // Only the first listener failure is surfaced; later ones are dropped here.
            if (exception == null) {
              exception = new IOException(ee.getCause());
            }
          }
        }
        if (wasInterrupted) {
          // Restore the interrupt status swallowed by take().
          Thread.currentThread().interrupt();
        }
        if (exception != null) {
          throw exception;
        }
      } finally {
        ExecutorsUtils.shutdownExecutorService(this.executor, Optional.of(LOGGER));
        closeJobListeners();
      }
    }

    /**
     * Closes every delegate that implements {@link Closeable}, rethrowing the first failure
     * and attaching subsequent failures as suppressed exceptions.
     */
    private void closeJobListeners() throws IOException {
      IOException exception = null;
      for (JobListener jobListener : this.jobListeners) {
        if (jobListener instanceof Closeable) {
          try {
            ((Closeable) jobListener).close();
          } catch (IOException e) {
            if (exception == null) {
              exception = e;
            } else {
              LOGGER.warn("JobListener failed while calling close. Suppressed exception beyond first.", e);
              exception.addSuppressed(e);
            }
          }
        }
      }
      if (exception != null) {
        throw exception;
      }
    }
  }
}
| 1,615 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/listeners/CompositeJobListener.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.listeners;
import java.util.List;
import org.apache.commons.lang.exception.ExceptionUtils;
import org.apache.gobblin.runtime.JobContext;
import com.google.common.collect.Lists;
import lombok.AllArgsConstructor;
import lombok.extern.slf4j.Slf4j;
/**
 * A {@link JobListener} that fans each lifecycle callback out to a list of delegate listeners.
 * Every delegate is invoked even when earlier ones fail; after the fan-out, a single
 * {@link RuntimeException} aggregating all failure messages is thrown if any delegate failed.
 */
@Slf4j
@AllArgsConstructor
public class CompositeJobListener extends AbstractJobListener {
  private List<JobListener> listeners = Lists.newArrayList();

  public CompositeJobListener() {
  }

  public void addJobListener(JobListener listener) {
    this.listeners.add(listener);
  }

  /** A single lifecycle callback invocation on one delegate listener. */
  @FunctionalInterface
  private interface ListenerInvocation {
    void invoke(JobListener listener) throws Exception;
  }

  /**
   * Invokes the given callback on every delegate, collecting failures instead of
   * short-circuiting (the five previous per-event bodies were byte-for-byte duplicates
   * of this loop).
   *
   * @throws RuntimeException concatenating {@code <listenerClassName>:<exception>} entries
   *         when at least one delegate threw
   */
  private void notifyAllListeners(ListenerInvocation invocation) {
    // StringBuilder suffices: this buffer is method-local, so no synchronization is needed.
    StringBuilder buf = new StringBuilder();
    for (JobListener listener : listeners) {
      try {
        invocation.invoke(listener);
      } catch (Exception e) {
        buf.append(listener.getClass().getName() + ":" + e.toString());
        log.error(ExceptionUtils.getFullStackTrace(e));
      }
    }
    String exceptions = buf.toString();
    if (!exceptions.isEmpty()) {
      throw new RuntimeException(exceptions);
    }
  }

  @Override
  public void onJobPrepare(JobContext jobContext) throws Exception {
    notifyAllListeners(listener -> listener.onJobPrepare(jobContext));
  }

  @Override
  public void onJobStart(JobContext jobContext) throws Exception {
    notifyAllListeners(listener -> listener.onJobStart(jobContext));
  }

  @Override
  public void onJobCompletion(JobContext jobContext) throws Exception {
    notifyAllListeners(listener -> listener.onJobCompletion(jobContext));
  }

  @Override
  public void onJobCancellation(JobContext jobContext) throws Exception {
    notifyAllListeners(listener -> listener.onJobCancellation(jobContext));
  }

  @Override
  public void onJobFailure(JobContext jobContext) throws Exception {
    notifyAllListeners(listener -> listener.onJobFailure(jobContext));
  }
}
| 1,616 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/listeners/JobListener.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.listeners;
import org.apache.gobblin.runtime.JobContext;
import org.apache.gobblin.source.workunit.WorkUnit;
/**
 * An interface for classes used for callback on job state changes.
 */
public interface JobListener {
  /**
   * Called when a job is to be prepared, i.e. before determining the {@link WorkUnit}s.
   *
   * @param jobContext a {@link JobContext} object
   * @throws Exception if the listener fails to handle the event
   */
  void onJobPrepare(JobContext jobContext) throws Exception;

  /**
   * Called when a job is started, i.e. before the {@link WorkUnit}s are executed.
   *
   * @param jobContext a {@link JobContext} object
   * @throws Exception if the listener fails to handle the event
   */
  void onJobStart(JobContext jobContext) throws Exception;

  /**
   * Called when a job is completed.
   *
   * @param jobContext a {@link JobContext} object
   * @throws Exception if the listener fails to handle the event
   */
  void onJobCompletion(JobContext jobContext) throws Exception;

  /**
   * Called when a job is cancelled.
   *
   * @param jobContext a {@link JobContext} object
   * @throws Exception if the listener fails to handle the event
   */
  void onJobCancellation(JobContext jobContext) throws Exception;

  /**
   * Called when a job has failed.
   *
   * @param jobContext a {@link JobContext} object
   * @throws Exception if the listener fails to handle the event
   */
  void onJobFailure(JobContext jobContext) throws Exception;
}
| 1,617 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/listeners/EmailNotificationJobListener.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.listeners;
import org.apache.commons.mail.EmailException;
import org.apache.gobblin.annotation.Alias;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.runtime.JobContext;
import org.apache.gobblin.runtime.JobState;
import org.apache.gobblin.util.EmailUtils;
/**
 * An implementation of {@link JobListener} that sends email notifications on job completion
 * and cancellation, plus an alert email when a job reaches its maximum consecutive failures.
 *
 * @author Yinan Li
 */
@Alias("EmailNotificationJobListener")
public class EmailNotificationJobListener extends AbstractJobListener {

  private static final Logger LOGGER = LoggerFactory.getLogger(EmailNotificationJobListener.class);

  @Override
  public void onJobCompletion(JobContext jobContext) {
    JobState jobState = jobContext.getJobState();
    // parseBoolean avoids the needless Boolean boxing/unboxing of Boolean.valueOf(...)
    boolean alertEmailEnabled =
        Boolean.parseBoolean(jobState.getProp(ConfigurationKeys.ALERT_EMAIL_ENABLED_KEY, Boolean.toString(false)));
    boolean notificationEmailEnabled =
        Boolean.parseBoolean(jobState.getProp(ConfigurationKeys.NOTIFICATION_EMAIL_ENABLED_KEY, Boolean.toString(false)));
    // Send out alert email if the maximum number of consecutive failures is reached
    if (jobState.getState() == JobState.RunningState.FAILED) {
      int failures = jobState.getPropAsInt(ConfigurationKeys.JOB_FAILURES_KEY, 0) + jobContext.getDatasetStateFailures();
      int maxFailures =
          jobState.getPropAsInt(ConfigurationKeys.JOB_MAX_FAILURES_KEY, ConfigurationKeys.DEFAULT_JOB_MAX_FAILURES);
      if (alertEmailEnabled && failures >= maxFailures) {
        try {
          EmailUtils.sendJobFailureAlertEmail(jobState.getJobName(), jobState.toString(), failures, jobState);
        } catch (EmailException ee) {
          LOGGER.error("Failed to send job failure alert email for job " + jobState.getJobId(), ee);
        }
        // The alert replaces the regular completion notification for this run.
        return;
      }
    }
    if (notificationEmailEnabled) {
      try {
        EmailUtils.sendJobCompletionEmail(
            jobState.getJobId(), jobState.toString(), jobState.getState().toString(), jobState);
      } catch (EmailException ee) {
        LOGGER.error("Failed to send job completion notification email for job " + jobState.getJobId(), ee);
      }
    }
  }

  @Override
  public void onJobCancellation(JobContext jobContext) {
    JobState jobState = jobContext.getJobState();
    boolean notificationEmailEnabled =
        Boolean.parseBoolean(jobState.getProp(ConfigurationKeys.NOTIFICATION_EMAIL_ENABLED_KEY, Boolean.toString(false)));
    if (notificationEmailEnabled) {
      try {
        EmailUtils.sendJobCancellationEmail(jobState.getJobId(), jobState.toString(), jobState);
      } catch (EmailException ee) {
        LOGGER.error("Failed to send job cancellation notification email for job " + jobState.getJobId(), ee);
      }
    }
  }
}
| 1,618 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/embedded/EmbeddedGobblin.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.embedded;
import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.lang.management.ThreadInfo;
import java.lang.management.ThreadMXBean;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.avro.SchemaBuilder;
import org.apache.commons.lang3.ClassUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.ClassUtil;
import org.joda.time.Period;
import org.joda.time.ReadableInstant;
import org.reflections.Reflections;
import org.slf4j.Logger;
import com.codahale.metrics.MetricFilter;
import com.github.rholder.retry.RetryListener;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Function;
import com.google.common.base.Joiner;
import com.google.common.base.Optional;
import com.google.common.base.Splitter;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.escape.Escaper;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.linkedin.data.template.DataTemplate;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import javassist.bytecode.ClassFile;
import javax.annotation.Nullable;
import lombok.AccessLevel;
import lombok.Data;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.instrumented.extractor.InstrumentedExtractorBase;
import org.apache.gobblin.metastore.FsStateStore;
import org.apache.gobblin.metrics.GobblinMetrics;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.runtime.JobLauncherFactory;
import org.apache.gobblin.runtime.Task;
import org.apache.gobblin.runtime.api.Configurable;
import org.apache.gobblin.runtime.api.GobblinInstanceDriver;
import org.apache.gobblin.runtime.api.GobblinInstanceEnvironment;
import org.apache.gobblin.runtime.api.GobblinInstancePluginFactory;
import org.apache.gobblin.runtime.api.JobCatalog;
import org.apache.gobblin.runtime.api.JobExecutionDriver;
import org.apache.gobblin.runtime.api.JobExecutionResult;
import org.apache.gobblin.runtime.api.JobSpec;
import org.apache.gobblin.runtime.api.JobTemplate;
import org.apache.gobblin.runtime.api.SpecNotFoundException;
import org.apache.gobblin.runtime.cli.CliObjectOption;
import org.apache.gobblin.runtime.cli.CliObjectSupport;
import org.apache.gobblin.runtime.cli.ConstructorAndPublicMethodsGobblinCliFactory;
import org.apache.gobblin.runtime.cli.NotOnCli;
import org.apache.gobblin.runtime.instance.SimpleGobblinInstanceEnvironment;
import org.apache.gobblin.runtime.instance.StandardGobblinInstanceDriver;
import org.apache.gobblin.runtime.job_catalog.ImmutableFSJobCatalog;
import org.apache.gobblin.runtime.job_catalog.PackagedTemplatesJobCatalogDecorator;
import org.apache.gobblin.runtime.job_catalog.StaticJobCatalog;
import org.apache.gobblin.runtime.job_spec.JobSpecResolver;
import org.apache.gobblin.runtime.job_spec.ResolvedJobSpec;
import org.apache.gobblin.runtime.plugins.GobblinInstancePluginUtils;
import org.apache.gobblin.runtime.plugins.PluginStaticKeys;
import org.apache.gobblin.runtime.plugins.metrics.GobblinMetricsPlugin;
import org.apache.gobblin.runtime.std.DefaultConfigurableImpl;
import org.apache.gobblin.runtime.std.DefaultJobLifecycleListenerImpl;
import org.apache.gobblin.state.ConstructState;
import org.apache.gobblin.util.HadoopUtils;
import org.apache.gobblin.util.PathUtils;
import org.apache.gobblin.util.PullFileLoader;
/**
* A class used to run an embedded version of Gobblin. This class is only intended for running a single Gobblin job.
* If a large number of Gobblin jobs will be launched, use a {@link GobblinInstanceDriver} instead.
*
* Usage:
* new EmbeddedGobblin("jobName").setTemplate(myTemplate).setConfiguration("key","value").run();
*/
@Slf4j
public class EmbeddedGobblin {

  /**
   * CLI factory exposing the public builder-style methods of {@link EmbeddedGobblin} as
   * command line options (via {@link ConstructorAndPublicMethodsGobblinCliFactory}).
   */
  public static class CliFactory extends ConstructorAndPublicMethodsGobblinCliFactory {
    public CliFactory() {
      super(EmbeddedGobblin.class);
    }

    @Override
    public String getUsageString() {
      return "-jobName <jobName> [OPTIONS]";
    }
  }

  /** Splits "<key>:<value>" strings; limit(2) preserves any ':' characters inside the value. */
  private static final Splitter KEY_VALUE_SPLITTER = Splitter.on(":").limit(2);

  // Builder for the JobSpec that will eventually be submitted to the instance driver.
  private final JobSpec.Builder specBuilder;
  // Job configuration explicitly set by the user through setConfiguration().
  private final Map<String, String> userConfigMap;
  // Job configuration derived internally (e.g. by mrMode()); sits below userConfigMap in precedence.
  private final Map<String, String> builtConfigMap;
  // Default system configuration loaded from embedded/embedded.conf (see getDefaultSysConfig()).
  private final Config defaultSysConfig;
  // System configuration overrides set through sysConfig(); take precedence over defaultSysConfig.
  private final Map<String, String> sysConfigOverrides;
  // Jar path -> classpath priority. Lower values appear earlier in the workers' classpath.
  private final Map<String, Integer> distributedJars;
  // Hook executed at launch; in MR mode it publishes distributedJars into the sys config.
  private Runnable distributeJarsFunction;
  private JobTemplate template;
  private Logger useLog = log;
  // Generous defaults so that only a genuinely stuck launch / job / shutdown trips a timeout.
  private FullTimeout launchTimeout = new FullTimeout(100, TimeUnit.SECONDS);
  private FullTimeout jobTimeout = new FullTimeout(100, TimeUnit.DAYS);
  private FullTimeout shutdownTimeout = new FullTimeout(100, TimeUnit.SECONDS);
  private boolean dumpJStackOnTimeout = false;
  private List<GobblinInstancePluginFactory> plugins = Lists.newArrayList();
  // Optional pull/job file; when present, runAsync() loads it instead of building a spec in memory.
  @Getter
  private Optional<Path> jobFile = Optional.absent();

  public EmbeddedGobblin() {
    this("EmbeddedGobblin");
  }

  /**
   * Creates an embedded Gobblin launcher for a single job with the given job name.
   */
  @CliObjectSupport(argumentNames = {"jobName"})
  public EmbeddedGobblin(String name) {
    HadoopUtils.addGobblinSite();
    this.specBuilder = new JobSpec.Builder(name);
    this.userConfigMap = Maps.newHashMap();
    this.builtConfigMap = Maps.newHashMap();
    this.sysConfigOverrides = Maps.newHashMap();
    this.defaultSysConfig = getDefaultSysConfig();
    this.distributedJars = Maps.newHashMap();
    loadCoreGobblinJarsToDistributedJars();
    this.distributeJarsFunction = new Runnable() {
      @Override
      public void run() {
        // NOOP: in local (non-distributed) mode there is no need to ship jars anywhere.
      }
    };
  }

  /**
   * Specify job should run in MR mode.
   */
  public EmbeddedGobblin mrMode() throws IOException {
    this.sysConfigOverrides.put(ConfigurationKeys.JOB_LAUNCHER_TYPE_KEY, JobLauncherFactory.JobLauncherType.MAPREDUCE.name());
    this.builtConfigMap.put(ConfigurationKeys.FS_URI_KEY, FileSystem.get(new Configuration()).getUri().toString());
    this.builtConfigMap.put(ConfigurationKeys.MR_JOB_ROOT_DIR_KEY, "/tmp/EmbeddedGobblin_" + System.currentTimeMillis());
    this.distributeJarsFunction = new Runnable() {
      @Override
      public void run() {
        // Add jars needed at runtime to the sys config so MR job launcher will add them to distributed cache.
        EmbeddedGobblin.this.sysConfigOverrides.put(ConfigurationKeys.JOB_JAR_FILES_KEY,
            Joiner.on(",").join(getPrioritizedDistributedJars()));
      }
    };
    return this;
  }

  /**
   * Specify that the input jar should be added to workers' classpath on distributed mode.
   */
  public EmbeddedGobblin distributeJar(String jarPath) {
    return distributeJarWithPriority(jarPath, 0);
  }

  /**
   * Specify that the input jar should be added to workers' classpath on distributed mode. Jars with lower priority value
   * will appear first in the classpath. Default priority is 0.
   */
  public EmbeddedGobblin distributeJarByClassWithPriority(Class<?> klazz, int priority) {
    String jar = ClassUtil.findContainingJar(klazz);
    if (jar == null) {
      // Classes loaded from a directory (e.g. IDE / unit-test classpath) have no containing jar.
      log.warn(String.format("Could not find jar for class %s. This is normal in test runs.", klazz));
      return this;
    }
    return distributeJarWithPriority(jar, priority);
  }

  /**
   * Specify that the input jar should be added to workers' classpath on distributed mode. Jars with lower priority value
   * will appear first in the classpath. Default priority is 0.
   */
  public synchronized EmbeddedGobblin distributeJarWithPriority(String jarPath, int priority) {
    if (this.distributedJars.containsKey(jarPath)) {
      // Already registered: keep the lowest (earliest-in-classpath) priority requested so far.
      this.distributedJars.put(jarPath, Math.min(priority, this.distributedJars.get(jarPath)));
    } else {
      this.distributedJars.put(jarPath, priority);
    }
    return this;
  }

  /**
   * Set a {@link JobTemplate} to use.
   */
  public EmbeddedGobblin setTemplate(JobTemplate template) {
    this.template = template;
    return this;
  }

  /**
   * Set a {@link JobTemplate} to use, resolved from a template URI (e.g. "resource:///...").
   */
  public EmbeddedGobblin setTemplate(String templateURI) throws URISyntaxException, SpecNotFoundException,
      JobTemplate.TemplateException {
    return setTemplate(new PackagedTemplatesJobCatalogDecorator().getTemplate(new URI(templateURI)));
  }

  /**
   * Use a {@link org.apache.gobblin.runtime.api.GobblinInstancePlugin}.
   */
  public EmbeddedGobblin usePlugin(GobblinInstancePluginFactory pluginFactory) {
    this.plugins.add(pluginFactory);
    return this;
  }

  /**
   * Use a {@link org.apache.gobblin.runtime.api.GobblinInstancePlugin} identified by name.
   */
  public EmbeddedGobblin usePlugin(String pluginAlias) throws ClassNotFoundException, IllegalAccessException, InstantiationException {
    return usePlugin(GobblinInstancePluginUtils.instantiatePluginByAlias(pluginAlias));
  }

  /**
   * Override a Gobblin system configuration.
   */
  public EmbeddedGobblin sysConfig(String key, String value) {
    this.sysConfigOverrides.put(key, value);
    return this;
  }

  /**
   * Override a Gobblin system configuration. Format "<key>:<value>"
   */
  public EmbeddedGobblin sysConfig(String keyValue) {
    List<String> split = KEY_VALUE_SPLITTER.splitToList(keyValue);
    if (split.size() != 2) {
      throw new RuntimeException("Cannot parse " + keyValue + ". Expected <key>:<value>.");
    }
    return sysConfig(split.get(0), split.get(1));
  }

  /**
   * Provide a job file. When set, runAsync() builds the job spec from this file instead of the
   * in-memory template/configuration.
   */
  public EmbeddedGobblin jobFile(String pathStr) {
    this.jobFile = Optional.of(new Path(pathStr));
    return this;
  }

  /**
   * Load Kerberos keytab for authentication. Crendetials format "<login-user>:<keytab-file>".
   */
  @CliObjectOption(description = "Authenticate using kerberos. Format: \"<login-user>:<keytab-file>\".")
  public EmbeddedGobblin kerberosAuthentication(String credentials) {
    List<String> split = Splitter.on(":").splitToList(credentials);
    if (split.size() != 2) {
      throw new RuntimeException("Cannot parse " + credentials + ". Expected <login-user>:<keytab-file>");
    }
    try {
      usePlugin(PluginStaticKeys.HADOOP_LOGIN_FROM_KEYTAB_ALIAS);
    } catch (ReflectiveOperationException roe) {
      throw new RuntimeException(String.format("Could not instantiate %s. Make sure gobblin-runtime-hadoop is in your classpath.",
          PluginStaticKeys.HADOOP_LOGIN_FROM_KEYTAB_ALIAS), roe);
    }
    sysConfig(PluginStaticKeys.LOGIN_USER_FULL_KEY, split.get(0));
    sysConfig(PluginStaticKeys.LOGIN_USER_KEYTAB_FILE_FULL_KEY, split.get(1));
    return this;
  }

  /**
   * Manually set a key-value pair in the job configuration.
   */
  public EmbeddedGobblin setConfiguration(String key, String value) {
    this.userConfigMap.put(key, value);
    return this;
  }

  /**
   * Manually set a key-value pair in the job configuration. Input is of the form <key>:<value>
   */
  public EmbeddedGobblin setConfiguration(String keyValue) {
    List<String> split = KEY_VALUE_SPLITTER.splitToList(keyValue);
    if (split.size() != 2) {
      throw new RuntimeException("Cannot parse " + keyValue + ". Expected <key>:<value>.");
    }
    return setConfiguration(split.get(0), split.get(1));
  }

  /**
   * Set the timeout for the Gobblin job execution.
   */
  public EmbeddedGobblin setJobTimeout(long timeout, TimeUnit timeUnit) {
    this.jobTimeout = new FullTimeout(timeout, timeUnit);
    return this;
  }

  /**
   * Set the timeout for the Gobblin job execution from ISO-style period.
   */
  public EmbeddedGobblin setJobTimeout(String timeout) {
    return setJobTimeout(Period.parse(timeout).getSeconds(), TimeUnit.SECONDS);
  }

  /**
   * Set the timeout for launching the Gobblin job.
   */
  public EmbeddedGobblin setLaunchTimeout(long timeout, TimeUnit timeUnit) {
    this.launchTimeout = new FullTimeout(timeout, timeUnit);
    return this;
  }

  /**
   * Set the timeout for launching the Gobblin job from ISO-style period.
   */
  public EmbeddedGobblin setLaunchTimeout(String timeout) {
    return setLaunchTimeout(Period.parse(timeout).getSeconds(), TimeUnit.SECONDS);
  }

  /**
   * Set the timeout for shutting down the Gobblin instance driver after the job is done.
   */
  public EmbeddedGobblin setShutdownTimeout(long timeout, TimeUnit timeUnit) {
    this.shutdownTimeout = new FullTimeout(timeout, timeUnit);
    return this;
  }

  /**
   * Set the timeout for shutting down the Gobblin instance driver after the job is done from ISO-style period.
   */
  public EmbeddedGobblin setShutdownTimeout(String timeout) {
    return setShutdownTimeout(Period.parse(timeout).getSeconds(), TimeUnit.SECONDS);
  }

  /**
   * Enable dumping jstack when error happens.
   */
  public EmbeddedGobblin setDumpJStackOnTimeout(boolean dumpJStackOnTimeout) {
    this.dumpJStackOnTimeout = dumpJStackOnTimeout;
    return this;
  }

  /**
   * Enable state store.
   */
  public EmbeddedGobblin useStateStore(String rootDir) {
    this.setConfiguration(ConfigurationKeys.STATE_STORE_ENABLED, "true");
    this.setConfiguration(ConfigurationKeys.STATE_STORE_ROOT_DIR_KEY, rootDir);
    return this;
  }

  /**
   * Enable metrics. Does not start any reporters.
   */
  public EmbeddedGobblin enableMetrics() {
    this.usePlugin(new GobblinMetricsPlugin.Factory());
    this.sysConfig(ConfigurationKeys.METRICS_ENABLED_KEY, Boolean.toString(true));
    return this;
  }

  /**
   * This is the base {@link Config} used for the job, containing all default configurations. Subclasses can override
   * default configurations (for example setting a particular {@link org.apache.gobblin.runtime.JobLauncherFactory.JobLauncherType}.
   */
  protected Config getDefaultSysConfig() {
    return ConfigFactory.parseResources("embedded/embedded.conf");
  }

  /**
   * Run the Gobblin job. This call will block until the job is done.
   * @return a {@link JobExecutionResult} containing the result of the execution.
   */
  @NotOnCli
  public JobExecutionResult run() throws InterruptedException, TimeoutException, ExecutionException {
    JobExecutionDriver jobDriver = runAsync();
    // Block until the job future resolves or jobTimeout elapses.
    return jobDriver.get(this.jobTimeout.getTimeout(), this.jobTimeout.getTimeUnit());
  }

  /**
   * Launch the Gobblin job asynchronously. This method will return when the Gobblin job has started.
   * @return a {@link JobExecutionDriver}. This object is a future that will resolve when the Gobblin job finishes.
   * @throws TimeoutException if the Gobblin job does not start within the launch timeout.
   */
  @NotOnCli
  public JobExecutionDriver runAsync() throws TimeoutException, InterruptedException {
    // Run function to distribute jars to workers in distributed mode
    this.distributeJarsFunction.run();
    log.debug("BuiltConfigMap: {}", this.builtConfigMap);
    log.debug("DefaultSysConfig: {}", this.defaultSysConfig);
    // Precedence (highest first): builtConfigMap overrides, then defaults from embedded.conf.
    Config sysProps = ConfigFactory.parseMap(this.builtConfigMap)
        .withFallback(this.defaultSysConfig);
    log.debug("Merged SysProps:{}", sysProps);
    Config userConfig = ConfigFactory.parseMap(this.userConfigMap);
    log.debug("UserConfig: {}", userConfig);
    JobSpec jobSpec;
    if (this.jobFile.isPresent()) {
      // Job file mode: load the pull file and convert it into a JobSpec; user config still wins.
      try {
        Path jobFilePath = this.jobFile.get();
        PullFileLoader loader =
            new PullFileLoader(jobFilePath.getParent(), jobFilePath.getFileSystem(new Configuration()),
                PullFileLoader.DEFAULT_JAVA_PROPS_PULL_FILE_EXTENSIONS,
                PullFileLoader.DEFAULT_HOCON_PULL_FILE_EXTENSIONS);
        Config jobConfig = userConfig.withFallback(loader.loadPullFile(jobFilePath, sysProps, false));
        log.debug("JobConfig: {}", jobConfig);
        ImmutableFSJobCatalog.JobSpecConverter converter =
            new ImmutableFSJobCatalog.JobSpecConverter(jobFilePath.getParent(), Optional.<String>absent());
        jobSpec = converter.apply(jobConfig);
      } catch (IOException ioe) {
        throw new RuntimeException("Failed to run embedded Gobblin.", ioe);
      }
    } else {
      // In-memory mode: build the spec from user config, sys props, and the optional template.
      Config finalConfig = userConfig.withFallback(sysProps);
      if (this.template != null) {
        this.specBuilder.withTemplate(this.template);
      }
      jobSpec = this.specBuilder.withConfig(finalConfig).build();
    }
    ResolvedJobSpec resolvedJobSpec;
    try {
      JobSpecResolver resolver = JobSpecResolver.builder(sysProps).build();
      resolvedJobSpec = resolver.resolveJobSpec(jobSpec);
    } catch (SpecNotFoundException | JobTemplate.TemplateException | IOException exc) {
      throw new RuntimeException("Failed to resolved template.", exc);
    }
    // A one-shot catalog holding exactly this job; the immediate scheduler launches it right away.
    final JobCatalog jobCatalog = new StaticJobCatalog(Optional.of(this.useLog), Lists.<JobSpec>newArrayList(resolvedJobSpec));
    SimpleGobblinInstanceEnvironment instanceEnvironment =
        new SimpleGobblinInstanceEnvironment("EmbeddedGobblinInstance", this.useLog, getSysConfig());
    StandardGobblinInstanceDriver.Builder builder =
        new StandardGobblinInstanceDriver.Builder(Optional.<GobblinInstanceEnvironment>of(instanceEnvironment)).withLog(this.useLog)
            .withJobCatalog(jobCatalog)
            .withImmediateJobScheduler();
    for (GobblinInstancePluginFactory plugin : this.plugins) {
      builder.addPlugin(plugin);
    }
    final GobblinInstanceDriver driver = builder.build();
    // Listener observes the job launch so we can hand back the per-job execution driver.
    EmbeddedJobLifecycleListener listener = new EmbeddedJobLifecycleListener(this.useLog);
    driver.registerJobLifecycleListener(listener);
    driver.startAsync();
    boolean started = listener.awaitStarted(this.launchTimeout.getTimeout(), this.launchTimeout.getTimeUnit());
    if (!started) {
      // Launch timed out: best-effort shutdown of the instance driver before failing.
      dumpJStackOnTimeout("Launch");
      log.warn("Timeout waiting for job to start. Aborting.");
      driver.stopAsync();
      driver.awaitTerminated(this.shutdownTimeout.getTimeout(), this.shutdownTimeout.getTimeUnit());
      throw new TimeoutException("Timeout waiting for job to start.");
    }
    final JobExecutionDriver jobDriver = listener.getJobDriver();
    // Stop the Gobblin instance driver when the job finishes.
    Futures.addCallback(jobDriver, new FutureCallback<JobExecutionResult>() {
      @Override
      public void onSuccess(@Nullable JobExecutionResult result) {
        stopGobblinInstanceDriver();
      }

      @Override
      public void onFailure(Throwable t) {
        stopGobblinInstanceDriver();
      }

      private void stopGobblinInstanceDriver() {
        try {
          driver.stopAsync();
          driver.awaitTerminated(EmbeddedGobblin.this.shutdownTimeout.getTimeout(), EmbeddedGobblin.this.shutdownTimeout
              .getTimeUnit());
        } catch (TimeoutException te) {
          dumpJStackOnTimeout("stop gobblin instance driver");
          log.error("Failed to shutdown Gobblin instance driver.");
        }
      }
    });
    return listener.getJobDriver();
  }

  /**
   * Logs a jstack-style dump of all live threads (daemon status included) when
   * {@link #setDumpJStackOnTimeout(boolean)} is enabled; otherwise logs that dumping is disabled.
   */
  private void dumpJStackOnTimeout(String loc) {
    if (this.dumpJStackOnTimeout) {
      log.info("=== Dump jstack ({}) ===", loc);
      ThreadMXBean bean = ManagementFactory.getThreadMXBean();
      ThreadInfo[] infos = bean.dumpAllThreads(true, true);
      // ThreadInfo does not expose daemon status, so cross-reference against the live Thread objects.
      Set<Thread> threadSet = Thread.getAllStackTraces().keySet();
      Map<Long, Thread> threadMap = new HashMap<>();
      for (Thread t : threadSet) {
        threadMap.put(t.getId(), t);
      }
      for (ThreadInfo info : infos) {
        Thread thread = threadMap.get(info.getThreadId());
        log.info("({}) {}",
            thread == null ? "Unknown" : thread.isDaemon() ? "Daemon" : "Non-Daemon", info.toString());
      }
    } else {
      log.info("Dump jstack ({}) is disabled.", loc);
    }
  }

  /** Returns the effective system configuration: sysConfig() overrides on top of the defaults. */
  @VisibleForTesting
  public Configurable getSysConfig() {
    return DefaultConfigurableImpl.createFromConfig(ConfigFactory.parseMap(this.sysConfigOverrides).withFallback(this.defaultSysConfig));
  }

  /**
   * This returns the set of jars required by a basic Gobblin ingestion job. In general, these need to be distributed
   * to workers in a distributed environment.
   */
  private void loadCoreGobblinJarsToDistributedJars() {
    // Gobblin-api
    distributeJarByClassWithPriority(State.class, 0);
    // Gobblin-core
    distributeJarByClassWithPriority(ConstructState.class, 0);
    // Gobblin-core-base
    distributeJarByClassWithPriority(InstrumentedExtractorBase.class, 0);
    // Gobblin-metrics-base
    distributeJarByClassWithPriority(MetricContext.class, 0);
    // Gobblin-metrics
    distributeJarByClassWithPriority(GobblinMetrics.class, 0);
    // Gobblin-metastore
    distributeJarByClassWithPriority(FsStateStore.class, 0);
    // Gobblin-runtime
    distributeJarByClassWithPriority(Task.class, 0);
    // Gobblin-utility
    distributeJarByClassWithPriority(PathUtils.class, 0);
    // joda-time
    distributeJarByClassWithPriority(ReadableInstant.class, 0);
    // guava
    distributeJarByClassWithPriority(Escaper.class, -10); // Escaper was added in guava 15, so we use it to identify correct jar
    // dropwizard.metrics-core
    distributeJarByClassWithPriority(MetricFilter.class, 0);
    // pegasus
    distributeJarByClassWithPriority(DataTemplate.class, 0);
    // commons-lang3
    distributeJarByClassWithPriority(ClassUtils.class, 0);
    // avro
    distributeJarByClassWithPriority(SchemaBuilder.class, 0);
    // guava-retry
    distributeJarByClassWithPriority(RetryListener.class, 0);
    // config
    distributeJarByClassWithPriority(ConfigFactory.class, 0);
    // reflections
    distributeJarByClassWithPriority(Reflections.class, 0);
    // javassist
    distributeJarByClassWithPriority(ClassFile.class, 0);
  }

  /**
   * Encapsulates a timeout with corresponding {@link TimeUnit}.
   */
  @Data
  private static class FullTimeout {
    private final long timeout;
    private final TimeUnit timeUnit;
  }

  /** Returns the distributed jars sorted by ascending priority (lower priority first in classpath). */
  @VisibleForTesting
  protected List<String> getPrioritizedDistributedJars() {
    List<Map.Entry<String, Integer>> jarsWithPriority = Lists.newArrayList(this.distributedJars.entrySet());
    Collections.sort(jarsWithPriority, new Comparator<Map.Entry<String, Integer>>() {
      @Override
      public int compare(Map.Entry<String, Integer> o1, Map.Entry<String, Integer> o2) {
        return Integer.compare(o1.getValue(), o2.getValue());
      }
    });
    return Lists.transform(jarsWithPriority, new Function<Map.Entry<String, Integer>, String>() {
      @Override
      public String apply(Map.Entry<String, Integer> input) {
        return input.getKey();
      }
    });
  }

  /**
   * A {@link org.apache.gobblin.runtime.api.JobLifecycleListener} that listens for a particular job and detects the start of the job.
   */
  private static class EmbeddedJobLifecycleListener extends DefaultJobLifecycleListenerImpl {
    // Lock/condition pair guarding the 'running' flag and the jobDriver handoff.
    private final Lock lock = new ReentrantLock();
    private final Condition runningStateCondition = this.lock.newCondition();
    private volatile boolean running = false;
    @Getter(value = AccessLevel.PRIVATE)
    private JobExecutionDriver jobDriver;

    public EmbeddedJobLifecycleListener(Logger log) {
      super(log);
    }

    /**
     * Block until the job has started.
     * @return true if the job started, false on timeout.
     */
    public boolean awaitStarted(long timeout, TimeUnit timeUnit) throws InterruptedException {
      this.lock.lock();
      try {
        long startTime = System.currentTimeMillis();
        long totalTimeMillis = timeUnit.toMillis(timeout);
        // Loop re-checks 'running' to guard against spurious wakeups.
        while (!running) {
          long millisLeft = totalTimeMillis - (System.currentTimeMillis() - startTime);
          if (millisLeft < 0) {
            return false;
          }
          // NOTE(review): await()'s return value is ignored — the timeout is enforced by the
          // millisLeft guard above, so the 'outoftime' local is effectively unused; consider removing it.
          boolean outoftime = this.runningStateCondition.await(millisLeft, TimeUnit.MILLISECONDS);
        }
      } finally {
        this.lock.unlock();
      }
      return true;
    }

    @Override
    public void onJobLaunch(JobExecutionDriver jobDriver) {
      // This listener supports exactly one job; a second launch indicates misuse of EmbeddedGobblin.
      if (this.jobDriver != null) {
        throw new IllegalStateException("OnJobLaunch called when a job was already running.");
      }
      super.onJobLaunch(jobDriver);
      this.lock.lock();
      try {
        this.running = true;
        this.jobDriver = jobDriver;
        // Wake the single thread blocked in awaitStarted().
        this.runningStateCondition.signal();
      } finally {
        this.lock.unlock();
      }
    }
  }
}
| 1,619 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/services/MetricsReportingService.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.services;
import java.util.Properties;
import com.google.common.util.concurrent.AbstractIdleService;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.metrics.GobblinMetrics;
import org.apache.gobblin.metrics.MultiReporterException;
import org.apache.gobblin.metrics.reporter.util.MetricReportUtils;
import org.apache.gobblin.util.PropertiesUtils;
/**
* A {@link com.google.common.util.concurrent.Service} for handling life cycle events around {@link GobblinMetrics}.
*/
@Slf4j
public class MetricsReportingService extends AbstractIdleService {

  public static final String METRICS_REPORTING_FAILURE_FATAL_KEY = "metrics.reporting.failure.fatal";
  public static final String EVENT_REPORTING_FAILURE_FATAL_KEY = "event.reporting.failure.fatal";
  public static final String DEFAULT_METRICS_REPORTING_FAILURE_FATAL = "false";
  public static final String DEFAULT_EVENT_REPORTING_FAILURE_FATAL = "false";

  // Configuration the reporters are started with.
  private final Properties properties;
  // Application id used to look up the shared GobblinMetrics instance.
  private final String appId;
  // Whether a metric / event reporter start-up failure should abort service start-up.
  private final boolean metricReportingFailureFatal;
  private final boolean eventReportingFailureFatal;

  /**
   * Builds the service; the fatality flags are read once from {@code properties} at construction
   * (defaults: non-fatal for both metrics and events).
   */
  public MetricsReportingService(Properties properties, String appId) {
    this.properties = properties;
    this.appId = appId;
    this.metricReportingFailureFatal = PropertiesUtils.getPropAsBoolean(
        properties, METRICS_REPORTING_FAILURE_FATAL_KEY, DEFAULT_METRICS_REPORTING_FAILURE_FATAL);
    this.eventReportingFailureFatal = PropertiesUtils.getPropAsBoolean(
        properties, EVENT_REPORTING_FAILURE_FATAL_KEY, DEFAULT_EVENT_REPORTING_FAILURE_FATAL);
  }

  /**
   * Starts metric reporting for this application. A {@link MultiReporterException} is swallowed
   * unless {@link MetricReportUtils#shouldThrowException} deems it fatal under the configured flags.
   */
  @Override
  protected void startUp() throws Exception {
    try {
      GobblinMetrics.get(this.appId).startMetricReporting(this.properties);
    } catch (MultiReporterException exc) {
      if (!MetricReportUtils.shouldThrowException(
          log, exc, this.metricReportingFailureFatal, this.eventReportingFailureFatal)) {
        // Non-fatal per configuration: proceed without the failed reporter(s).
        return;
      }
      throw exc;
    }
  }

  /** Stops metric reporting for this application. */
  @Override
  protected void shutDown() throws Exception {
    GobblinMetrics.get(this.appId).stopMetricsReporting();
  }
}
| 1,620 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/services/JMXReportingService.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.services;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import com.codahale.metrics.jmx.JmxReporter;
import com.codahale.metrics.Metric;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.MetricSet;
import com.codahale.metrics.jvm.FileDescriptorRatioGauge;
import com.codahale.metrics.jvm.GarbageCollectorMetricSet;
import com.codahale.metrics.jvm.MemoryUsageGaugeSet;
import com.codahale.metrics.jvm.ThreadStatesGaugeSet;
import com.google.common.collect.ImmutableMap;
import com.google.common.util.concurrent.AbstractIdleService;
/**
* A {@link com.google.common.util.concurrent.Service} for collecting various JVM metrics and reporting them via JMX.
*
* <p>
* The class uses Codahale to collect the various JVM metrics which includes:
*
* <ul>
* <li>GC activity using a {@link GarbageCollectorMetricSet}</li>
* <li>Memory usage using a {@link MemoryUsageGaugeSet}</li>
* <li>Thread usage and state using a {@link ThreadStatesGaugeSet}</li>
* <li>Used file descriptors using a {@link FileDescriptorRatioGauge}</li>
* </ul>
*
* All metrics are collected via a {@link JmxReporter}.
* </p>
*/
public class JMXReportingService extends AbstractIdleService {

  // Registry holding every gauge/metric this service publishes.
  private final MetricRegistry registry = new MetricRegistry();
  // Caller-supplied metric sets registered alongside the built-in JVM ones, keyed by name prefix.
  private Map<String, MetricSet> additionalMetricSets;
  // JMX reporter bound to the registry; rates in events/sec, durations in milliseconds.
  private final JmxReporter reporter = JmxReporter.forRegistry(this.registry)
      .convertRatesTo(TimeUnit.SECONDS)
      .convertDurationsTo(TimeUnit.MILLISECONDS)
      .build();

  public JMXReportingService() {
    this(ImmutableMap.of());
  }

  /**
   * @param additionalMetricSets extra metric sets to expose over JMX, keyed by the name prefix
   *                             under which each set's metrics are registered.
   */
  public JMXReportingService(Map<String, MetricSet> additionalMetricSets) {
    this.additionalMetricSets = additionalMetricSets;
  }

  /** Registers all JVM metrics and starts publishing them over JMX. */
  @Override
  protected void startUp() throws Exception {
    registerJvmMetrics();
    this.reporter.start();
  }

  /** Stops the JMX reporter. */
  @Override
  protected void shutDown() throws Exception {
    this.reporter.stop();
  }

  /**
   * Registers the standard JVM metric sets (GC, memory, threads, file-descriptor ratio)
   * followed by any caller-supplied metric sets.
   */
  private void registerJvmMetrics() {
    registerMetricSetWithPrefix("jvm.gc", new GarbageCollectorMetricSet());
    registerMetricSetWithPrefix("jvm.memory", new MemoryUsageGaugeSet());
    registerMetricSetWithPrefix("jvm.threads", new ThreadStatesGaugeSet());
    this.registry.register("jvm.fileDescriptorRatio", new FileDescriptorRatioGauge());
    this.additionalMetricSets.forEach(this::registerMetricSetWithPrefix);
  }

  /** Registers each metric of {@code metricSet} under "{prefix}.{metricName}". */
  private void registerMetricSetWithPrefix(String prefix, MetricSet metricSet) {
    metricSet.getMetrics()
        .forEach((name, metric) -> this.registry.register(MetricRegistry.name(prefix, name), metric));
  }
}
| 1,621 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/metastore | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/metastore/filesystem/FsDatasetStateStoreEntryManager.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.metastore.filesystem;
import java.io.IOException;
import org.apache.gobblin.metastore.DatasetStateStore;
import org.apache.gobblin.metastore.metadata.DatasetStateStoreEntryManager;
import org.apache.gobblin.runtime.FsDatasetStateStore;
import org.apache.gobblin.runtime.JobState;
import org.apache.hadoop.fs.FileStatus;
/**
* A {@link DatasetStateStoreEntryManager} generated by {@link FsDatasetStateStore}.
*/
public class FsDatasetStateStoreEntryManager extends DatasetStateStoreEntryManager<JobState.DatasetState> {

  // Backing store used to lazily read or delete the entry this manager points at.
  private final FsDatasetStateStore stateStore;

  /**
   * Builds an entry manager from the {@link FileStatus} of a dataset state file: the store name is
   * the file's parent directory name, the table name is the file name, and the dataset urn is
   * extracted from the table name by {@link DatasetStateStore.TableNameParser}.
   */
  public FsDatasetStateStoreEntryManager(FileStatus fileStatus, FsDatasetStateStore stateStore) {
    super(fileStatus.getPath().getParent().getName(), fileStatus.getPath().getName(), fileStatus.getModificationTime(),
        new DatasetStateStore.TableNameParser(fileStatus.getPath().getName()), stateStore);
    this.stateStore = stateStore;
  }

  /** Reads the referenced {@link JobState.DatasetState} from the backing store. */
  @Override
  public JobState.DatasetState readState() throws IOException {
    return this.stateStore.getInternal(getStoreName(), getTableName(), getSanitizedDatasetUrn(), true);
  }

  /** Deletes the referenced table from the backing store. */
  @Override
  public void delete() throws IOException {
    this.stateStore.delete(getStoreName(), getTableName());
  }
}
| 1,622 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/metastore | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/metastore/mysql/MysqlDatasetStateStoreEntryManager.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.metastore.mysql;
import java.io.IOException;
import org.apache.gobblin.metastore.DatasetStateStore;
import org.apache.gobblin.metastore.metadata.DatasetStateStoreEntryManager;
import org.apache.gobblin.metastore.metadata.StateStoreEntryManager;
import org.apache.gobblin.runtime.JobState;
import org.apache.gobblin.runtime.MysqlDatasetStateStore;
/**
* A {@link StateStoreEntryManager} generated by {@link MysqlDatasetStateStore}.
*/
public class MysqlDatasetStateStoreEntryManager extends DatasetStateStoreEntryManager<JobState.DatasetState> {

  // Backing store used to lazily read or delete the entry this manager points at.
  private final MysqlDatasetStateStore stateStore;

  /**
   * Builds an entry manager for a MySQL-backed dataset state entry; the dataset urn is extracted
   * from {@code tableName} by {@link DatasetStateStore.TableNameParser}.
   */
  public MysqlDatasetStateStoreEntryManager(String storeName, String tableName, long modificationTime,
      MysqlDatasetStateStore stateStore) {
    super(storeName, tableName, modificationTime, new DatasetStateStore.TableNameParser(tableName), stateStore);
    this.stateStore = stateStore;
  }

  /** Reads the referenced {@link JobState.DatasetState} from the backing store. */
  @Override
  public JobState.DatasetState readState() throws IOException {
    return this.stateStore.get(getStoreName(), getTableName(), this.getStateId());
  }

  /** Deletes the referenced table from the backing store. */
  @Override
  public void delete() throws IOException {
    this.stateStore.delete(getStoreName(), getTableName());
  }
}
| 1,623 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/dag_action_store/MysqlDagActionStore.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.dag_action_store;
import java.io.IOException;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.Collection;
import java.util.HashSet;
import com.google.inject.Inject;
import com.typesafe.config.Config;
import java.util.concurrent.TimeUnit;
import javax.sql.DataSource;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.broker.SharedResourcesBrokerFactory;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.metastore.MysqlDataSourceFactory;
import org.apache.gobblin.runtime.api.DagActionStore;
import org.apache.gobblin.service.ServiceConfigKeys;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.ExponentialBackoff;
import org.apache.gobblin.util.DBStatementExecutor;
/**
 * A MySQL-backed {@link DagActionStore}. Each row records one pending dag action for a flow
 * execution, keyed by (flow_group, flow_name, flow_execution_id, dag_action).
 *
 * <p>The backing table is created on construction if absent. A periodic retention job
 * (scheduled via {@link DBStatementExecutor}) deletes rows whose {@code modified_time} is older
 * than the configured retention period so the table does not grow unbounded.</p>
 */
@Slf4j
public class MysqlDagActionStore implements DagActionStore {

  public static final String CONFIG_PREFIX = "MysqlDagActionStore";

  protected final DataSource dataSource;
  private final DBStatementExecutor dbStatementExecutor;
  // Name of the MySQL table holding dag actions.
  private final String tableName;
  // Rows older than this many seconds are purged by the periodic retention statement.
  private final long retentionPeriodSeconds;
  // Concrete retention DELETE statement for this table; built once in the constructor.
  private final String thisTableRetentionStatement;

  private static final String EXISTS_STATEMENT = "SELECT EXISTS(SELECT * FROM %s WHERE flow_group = ? AND flow_name =? AND flow_execution_id = ? AND dag_action = ?)";

  protected static final String INSERT_STATEMENT = "INSERT INTO %s (flow_group, flow_name, flow_execution_id, dag_action) "
      + "VALUES (?, ?, ?, ?)";
  private static final String DELETE_STATEMENT = "DELETE FROM %s WHERE flow_group = ? AND flow_name =? AND flow_execution_id = ? AND dag_action = ?";
  private static final String GET_STATEMENT = "SELECT flow_group, flow_name, flow_execution_id, dag_action FROM %s WHERE flow_group = ? AND flow_name =? AND flow_execution_id = ? AND dag_action = ?";
  private static final String GET_ALL_STATEMENT = "SELECT flow_group, flow_name, flow_execution_id, dag_action FROM %s";
  private static final String CREATE_TABLE_STATEMENT = "CREATE TABLE IF NOT EXISTS %s (" +
      "flow_group varchar(" + ServiceConfigKeys.MAX_FLOW_GROUP_LENGTH + ") NOT NULL, flow_name varchar(" + ServiceConfigKeys.MAX_FLOW_GROUP_LENGTH + ") NOT NULL, "
      + "flow_execution_id varchar(" + ServiceConfigKeys.MAX_FLOW_EXECUTION_ID_LENGTH + ") NOT NULL, "
      + "dag_action varchar(100) NOT NULL, modified_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP on update CURRENT_TIMESTAMP NOT NULL, "
      + "PRIMARY KEY (flow_group,flow_name,flow_execution_id, dag_action))";
  // Deletes rows older than retention time period (in seconds) to prevent this table from growing unbounded.
  private static final String RETENTION_STATEMENT = "DELETE FROM %s WHERE modified_time < DATE_SUB(CURRENT_TIMESTAMP, INTERVAL %s SECOND)";

  // NOTE(review): currently only referenced indirectly; retry behavior is driven by the
  // ExponentialBackoff passed to getDagActionWithRetry.
  private final int getDagActionMaxRetries;

  /**
   * @param config must contain a {@value #CONFIG_PREFIX} section; scoped keys under the prefix
   *               take precedence and fall back to the global config for the rest.
   * @throws IOException if the config section is missing or the backing table cannot be created
   */
  @Inject
  public MysqlDagActionStore(Config config) throws IOException {
    if (config.hasPath(CONFIG_PREFIX)) {
      config = config.getConfig(CONFIG_PREFIX).withFallback(config);
    } else {
      throw new IOException("Please specify the config for MysqlDagActionStore");
    }

    this.tableName = ConfigUtils.getString(config, ConfigurationKeys.STATE_STORE_DB_TABLE_KEY,
        ConfigurationKeys.DEFAULT_STATE_STORE_DB_TABLE);
    this.getDagActionMaxRetries = ConfigUtils.getInt(config, ConfigurationKeys.MYSQL_GET_MAX_RETRIES, ConfigurationKeys.DEFAULT_MYSQL_GET_MAX_RETRIES);
    this.retentionPeriodSeconds = ConfigUtils.getLong(config, ConfigurationKeys.MYSQL_DAG_ACTION_STORE_TABLE_RETENTION_PERIOD_SECONDS_KEY,
        ConfigurationKeys.DEFAULT_MYSQL_DAG_ACTION_STORE_TABLE_RETENTION_PERIOD_SEC_KEY);

    this.dataSource = MysqlDataSourceFactory.get(config,
        SharedResourcesBrokerFactory.getImplicitBroker());
    // Create the backing table up front; fail construction if that is impossible.
    try (Connection connection = dataSource.getConnection();
        PreparedStatement createStatement = connection.prepareStatement(String.format(CREATE_TABLE_STATEMENT, tableName))) {
      createStatement.executeUpdate();
      connection.commit();
    } catch (SQLException e) {
      throw new IOException("Failure creation table " + tableName, e);
    }

    this.dbStatementExecutor = new DBStatementExecutor(this.dataSource, log);
    this.thisTableRetentionStatement = String.format(RETENTION_STATEMENT, this.tableName, retentionPeriodSeconds);
    // Periodically deletes all rows in the table last_modified before the retention period defined by config.
    dbStatementExecutor.repeatSqlCommandExecutionAtInterval(thisTableRetentionStatement, 6, TimeUnit.HOURS);
  }

  /**
   * Checks whether the given dag action already exists in the store.
   * @throws IOException wrapping any SQL failure during the query
   */
  @Override
  public boolean exists(String flowGroup, String flowName, String flowExecutionId, FlowActionType flowActionType) throws IOException, SQLException {
    return dbStatementExecutor.withPreparedStatement(String.format(EXISTS_STATEMENT, tableName), existStatement -> {
      int i = 0;
      existStatement.setString(++i, flowGroup);
      existStatement.setString(++i, flowName);
      existStatement.setString(++i, flowExecutionId);
      existStatement.setString(++i, flowActionType.toString());
      // try-with-resources replaces the previous manual try/finally close of the ResultSet.
      try (ResultSet rs = existStatement.executeQuery()) {
        // SELECT EXISTS(...) always returns exactly one row with a single boolean column.
        rs.next();
        return rs.getBoolean(1);
      } catch (SQLException e) {
        throw new IOException(String.format("Failure checking existence of DagAction: %s in table %s",
            new DagAction(flowGroup, flowName, flowExecutionId, flowActionType), tableName), e);
      }
    }, true);
  }

  /**
   * Inserts a new dag action row. Fails (via the primary-key constraint) if an identical
   * action already exists.
   * @throws IOException wrapping any SQL failure during the insert
   */
  @Override
  public void addDagAction(String flowGroup, String flowName, String flowExecutionId, FlowActionType flowActionType)
      throws IOException {
    dbStatementExecutor.withPreparedStatement(String.format(INSERT_STATEMENT, tableName), insertStatement -> {
      try {
        int i = 0;
        insertStatement.setString(++i, flowGroup);
        insertStatement.setString(++i, flowName);
        insertStatement.setString(++i, flowExecutionId);
        insertStatement.setString(++i, flowActionType.toString());
        return insertStatement.executeUpdate();
      } catch (SQLException e) {
        throw new IOException(String.format("Failure adding action for DagAction: %s in table %s",
            new DagAction(flowGroup, flowName, flowExecutionId, flowActionType), tableName), e);
      }}, true);
  }

  /**
   * Deletes the row for the given dag action.
   * @return true if a row was deleted, false if no matching row existed
   * @throws IOException wrapping any SQL failure during the delete
   */
  @Override
  public boolean deleteDagAction(DagAction dagAction) throws IOException {
    return dbStatementExecutor.withPreparedStatement(String.format(DELETE_STATEMENT, tableName), deleteStatement -> {
      try {
        int i = 0;
        deleteStatement.setString(++i, dagAction.getFlowGroup());
        deleteStatement.setString(++i, dagAction.getFlowName());
        deleteStatement.setString(++i, dagAction.getFlowExecutionId());
        deleteStatement.setString(++i, dagAction.getFlowActionType().toString());
        int result = deleteStatement.executeUpdate();
        return result != 0;
      } catch (SQLException e) {
        throw new IOException(String.format("Failure deleting action for DagAction: %s in table %s", dagAction,
            tableName), e);
      }}, true);
  }

  // TODO: later change this to getDagActions relating to a particular flow execution if it makes sense
  /**
   * Looks up a single dag action, retrying (recursively) per the supplied backoff policy while
   * the row is absent. Returns {@code null} once retries are exhausted.
   */
  private DagAction getDagActionWithRetry(String flowGroup, String flowName, String flowExecutionId, FlowActionType flowActionType, ExponentialBackoff exponentialBackoff)
      throws IOException, SQLException {
    return dbStatementExecutor.withPreparedStatement(String.format(GET_STATEMENT, tableName), getStatement -> {
      int i = 0;
      getStatement.setString(++i, flowGroup);
      getStatement.setString(++i, flowName);
      getStatement.setString(++i, flowExecutionId);
      getStatement.setString(++i, flowActionType.toString());
      try (ResultSet rs = getStatement.executeQuery()) {
        if (rs.next()) {
          return new DagAction(rs.getString(1), rs.getString(2), rs.getString(3), FlowActionType.valueOf(rs.getString(4)));
        } else if (exponentialBackoff.awaitNextRetryIfAvailable()) {
          // Row not there yet; wait out the backoff and try again.
          return getDagActionWithRetry(flowGroup, flowName, flowExecutionId, flowActionType, exponentialBackoff);
        } else {
          log.warn(String.format("Can not find dag action: %s with flowGroup: %s, flowName: %s, flowExecutionId: %s",
              flowActionType, flowGroup, flowName, flowExecutionId));
          return null;
        }
      } catch (SQLException | InterruptedException e) {
        throw new IOException(String.format("Failure get %s from table %s",
            new DagAction(flowGroup, flowName, flowExecutionId, flowActionType), tableName), e);
      }
    }, true);
  }

  /**
   * Returns every dag action currently stored.
   * @throws IOException wrapping any SQL failure during the scan
   */
  @Override
  public Collection<DagAction> getDagActions() throws IOException {
    return dbStatementExecutor.withPreparedStatement(String.format(GET_ALL_STATEMENT, tableName), getAllStatement -> {
      HashSet<DagAction> result = new HashSet<>();
      try (ResultSet rs = getAllStatement.executeQuery()) {
        while (rs.next()) {
          result.add(new DagAction(rs.getString(1), rs.getString(2), rs.getString(3), FlowActionType.valueOf(rs.getString(4))));
        }
        return result;
      } catch (SQLException e) {
        throw new IOException(String.format("Failure get dag actions from table %s ", tableName), e);
      }
    }, true);
  }
}
| 1,624 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/job_monitor/SLAEventKafkaJobMonitor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.job_monitor;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Collection;
import java.util.Map;
import java.util.regex.Pattern;
import org.apache.hadoop.fs.Path;
import com.codahale.metrics.Counter;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValue;
import lombok.Getter;
import org.apache.gobblin.metrics.GobblinTrackingEvent;
import org.apache.gobblin.metrics.event.sla.SlaEventKeys;
import org.apache.gobblin.metrics.reporter.util.NoopSchemaVersionWriter;
import org.apache.gobblin.metrics.reporter.util.SchemaVersionWriter;
import org.apache.gobblin.runtime.api.GobblinInstanceDriver;
import org.apache.gobblin.runtime.api.JobSpec;
import org.apache.gobblin.runtime.api.JobSpecMonitor;
import org.apache.gobblin.runtime.api.JobSpecMonitorFactory;
import org.apache.gobblin.runtime.api.MutableJobCatalog;
import org.apache.gobblin.runtime.metrics.RuntimeMetrics;
import org.apache.gobblin.util.PathUtils;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
/**
 * A {@link KafkaJobMonitor} that parses SLA {@link GobblinTrackingEvent}s and generates {@link JobSpec}s. Used
 * to trigger jobs on data availability.
 *
 * <p>Events lacking a dataset URN, or failing the optional URN/name regex filters, are rejected
 * (counted by {@code rejectedEvents}). Accepted events produce one JobSpec whose URI is the
 * configured base URI merged with the dataset URN, configured from selected event metadata.</p>
 */
@Getter
public class SLAEventKafkaJobMonitor extends KafkaAvroJobMonitor<GobblinTrackingEvent> {

  public static final String CONFIG_PREFIX = "gobblin.jobMonitor.slaEvent";
  public static final String DATASET_URN_FILTER_KEY = "filter.urn";
  public static final String EVENT_NAME_FILTER_KEY = "filter.name";
  public static final String TEMPLATE_KEY = "job_template";
  public static final String EXTRACT_KEYS = "extract_keys";
  public static final String BASE_URI_KEY = "baseUri";
  public static final String TOPIC_KEY = "topic";
  public static final String SCHEMA_VERSION_READER_CLASS = "versionReaderClass";

  private static final Config DEFAULTS = ConfigFactory.parseMap(ImmutableMap.of(
      BASE_URI_KEY, SLAEventKafkaJobMonitor.class.getSimpleName(),
      SCHEMA_VERSION_READER_CLASS, NoopSchemaVersionWriter.class.getName()));

  // Optional regex filters: when present, an event must match to be accepted.
  private final Optional<Pattern> urnFilter;
  private final Optional<Pattern> nameFilter;
  // Base URI under which per-dataset JobSpec URIs are generated.
  private final URI baseURI;
  // URI of the job template applied to every generated JobSpec.
  private final URI template;
  // Maps event-metadata key -> job-config key for values copied into the generated JobSpec.
  private final Map<String, String> extractKeys;
  private Counter rejectedEvents;

  public static class Factory implements JobSpecMonitorFactory {

    @Override
    public JobSpecMonitor forJobCatalog(GobblinInstanceDriver instanceDriver, MutableJobCatalog jobCatalog)
        throws IOException {
      Config config = instanceDriver.getSysConfig().getConfig().getConfig(CONFIG_PREFIX).withFallback(DEFAULTS);
      return forConfig(config, jobCatalog);
    }

    /**
     * Create a {@link SLAEventKafkaJobMonitor} from an input {@link Config}. Useful for multiple monitors, where
     * the configuration of each monitor is scoped.
     * @param localScopeConfig The sub-{@link Config} for this monitor without any namespacing (e.g. the key for
     *                         topic should simply be "topic").
     * @throws IOException if a required key is missing or a configured URI is malformed
     */
    public JobSpecMonitor forConfig(Config localScopeConfig, MutableJobCatalog jobCatalog) throws IOException {
      Preconditions.checkArgument(localScopeConfig.hasPath(TEMPLATE_KEY));
      Preconditions.checkArgument(localScopeConfig.hasPath(TOPIC_KEY));

      String topic = localScopeConfig.getString(TOPIC_KEY);

      URI baseUri;
      try {
        baseUri = new URI(localScopeConfig.getString(BASE_URI_KEY));
      } catch (URISyntaxException use) {
        throw new IOException("Invalid base URI " + localScopeConfig.getString(BASE_URI_KEY), use);
      }

      String templateURIString = localScopeConfig.getString(TEMPLATE_KEY);
      URI template;
      try {
        template = new URI(templateURIString);
      } catch (URISyntaxException use) {
        // Fix: preserve the cause (it was previously dropped, hiding the parse failure).
        throw new IOException("Invalid template URI " + templateURIString, use);
      }

      // Collect only string-valued entries of the extract_keys section.
      ImmutableMap.Builder<String, String> mapBuilder = ImmutableMap.builder();
      if (localScopeConfig.hasPath(EXTRACT_KEYS)) {
        Config extractKeysConfig = localScopeConfig.getConfig(EXTRACT_KEYS);
        for (Map.Entry<String, ConfigValue> entry : extractKeysConfig.entrySet()) {
          Object unwrappedValue = entry.getValue().unwrapped();
          if (unwrappedValue instanceof String) {
            mapBuilder.put(entry.getKey(), (String) unwrappedValue);
          }
        }
      }
      Map<String, String> extractKeys = mapBuilder.build();

      Optional<Pattern> urnFilter = localScopeConfig.hasPath(DATASET_URN_FILTER_KEY)
          ? Optional.of(Pattern.compile(localScopeConfig.getString(DATASET_URN_FILTER_KEY)))
          : Optional.<Pattern>absent();

      Optional<Pattern> nameFilter = localScopeConfig.hasPath(EVENT_NAME_FILTER_KEY)
          ? Optional.of(Pattern.compile(localScopeConfig.getString(EVENT_NAME_FILTER_KEY)))
          : Optional.<Pattern>absent();

      SchemaVersionWriter versionWriter;
      try {
        versionWriter = (SchemaVersionWriter) GobblinConstructorUtils.
            invokeLongestConstructor(Class.forName(localScopeConfig.getString(SCHEMA_VERSION_READER_CLASS)), localScopeConfig);
      } catch (ReflectiveOperationException roe) {
        throw new IllegalArgumentException(roe);
      }

      return new SLAEventKafkaJobMonitor(topic, jobCatalog, baseUri, localScopeConfig, versionWriter,
          urnFilter, nameFilter, template, extractKeys);
    }
  }

  protected SLAEventKafkaJobMonitor(String topic, MutableJobCatalog catalog, URI baseURI, Config limitedScopeConfig,
      SchemaVersionWriter<?> versionWriter, Optional<Pattern> urnFilter, Optional<Pattern> nameFilter, URI template,
      Map<String, String> extractKeys) throws IOException {
    super(topic, catalog, limitedScopeConfig, GobblinTrackingEvent.SCHEMA$, versionWriter);
    this.baseURI = baseURI;
    this.urnFilter = urnFilter;
    this.nameFilter = nameFilter;
    this.template = template;
    this.extractKeys = extractKeys;
  }

  @Override
  protected void createMetrics() {
    super.createMetrics();
    this.rejectedEvents = getMetricContext().counter(RuntimeMetrics.GOBBLIN_JOB_MONITOR_SLAEVENT_REJECTEDEVENTS);
  }

  /**
   * Converts an accepted SLA event into a single {@link JobSpec}; rejected events yield an
   * empty collection and increment the rejected-events counter.
   */
  @Override
  public Collection<JobSpec> parseJobSpec(GobblinTrackingEvent event) {
    if (!acceptEvent(event)) {
      this.rejectedEvents.inc();
      return Lists.newArrayList();
    }

    String datasetURN = event.getMetadata().get(SlaEventKeys.DATASET_URN_KEY);
    URI jobSpecURI = PathUtils.mergePaths(new Path(this.baseURI), new Path(datasetURN)).toUri();

    // Copy configured event-metadata values into the job config under their mapped keys.
    Map<String, String> jobConfigMap = Maps.newHashMap();
    for (Map.Entry<String, String> entry : this.extractKeys.entrySet()) {
      if (event.getMetadata().containsKey(entry.getKey())) {
        jobConfigMap.put(entry.getValue(), event.getMetadata().get(entry.getKey()));
      }
    }
    Config jobConfig = ConfigFactory.parseMap(jobConfigMap);

    JobSpec jobSpec = JobSpec.builder(jobSpecURI).withTemplate(this.template).withConfig(jobConfig).build();
    return Lists.newArrayList(jobSpec);
  }

  /**
   * Filter for {@link GobblinTrackingEvent}. Used to quickly determine whether an event should be used to produce
   * a {@link JobSpec}.
   */
  protected boolean acceptEvent(GobblinTrackingEvent event) {
    // A dataset URN is mandatory; it forms the generated JobSpec URI.
    if (!event.getMetadata().containsKey(SlaEventKeys.DATASET_URN_KEY)) {
      return false;
    }
    String datasetURN = event.getMetadata().get(SlaEventKeys.DATASET_URN_KEY);
    if (this.urnFilter.isPresent() && !this.urnFilter.get().matcher(datasetURN).find()) {
      return false;
    }
    if (this.nameFilter.isPresent() && !this.nameFilter.get().matcher(event.getName()).find()) {
      return false;
    }
    return true;
  }
}
| 1,625 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/job_monitor/KafkaJobMonitor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.job_monitor;
import java.io.IOException;
import java.net.URI;
import java.util.Collection;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Charsets;
import com.typesafe.config.Config;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.kafka.client.DecodeableKafkaRecord;
import org.apache.gobblin.metastore.DatasetStateStore;
import org.apache.gobblin.metrics.ContextAwareMeter;
import org.apache.gobblin.runtime.api.JobSpec;
import org.apache.gobblin.runtime.api.JobSpecMonitor;
import org.apache.gobblin.runtime.api.JobSpecNotFoundException;
import org.apache.gobblin.runtime.api.MutableJobCatalog;
import org.apache.gobblin.runtime.api.SpecExecutor;
import org.apache.gobblin.runtime.kafka.HighLevelConsumer;
import org.apache.gobblin.runtime.metrics.RuntimeMetrics;
import org.apache.gobblin.util.ConfigUtils;
/**
 * Abstract {@link JobSpecMonitor} that reads {@link JobSpec}s from a Kafka stream. Subclasses should implement
 * {@link KafkaJobMonitor#parseJobSpec(byte[])} to transform the message into one or multiple {@link JobSpec}s.
 *
 * <p>Each parsed {@link JobSpec} is expected to carry a {@link SpecExecutor.Verb} in its metadata under
 * {@link SpecExecutor#VERB_KEY}; the verb decides whether the spec is added to, updated in, removed from,
 * or cancelled in the {@link MutableJobCatalog}.</p>
 */
@Slf4j
public abstract class KafkaJobMonitor extends HighLevelConsumer<byte[], byte[]> implements JobSpecMonitor {

  public static final String KAFKA_JOB_MONITOR_PREFIX = "jobSpecMonitor.kafka";
  public static final String KAFKA_AUTO_OFFSET_RESET_KEY = KAFKA_JOB_MONITOR_PREFIX + ".auto.offset.reset";
  public static final String KAFKA_AUTO_OFFSET_RESET_SMALLEST = "smallest";
  public static final String KAFKA_AUTO_OFFSET_RESET_LARGEST = "largest";

  // Used to delete a job's stored state when a DELETE spec arrives; may remain null if
  // construction fails (deletion is then skipped with a warning).
  protected DatasetStateStore datasetStateStore;
  protected final MutableJobCatalog jobCatalog;

  // Meters counting processed specs by verb (plus a total across all verbs).
  @Getter
  protected ContextAwareMeter newSpecs;
  @Getter
  protected ContextAwareMeter updatedSpecs;
  @Getter
  protected ContextAwareMeter cancelledSpecs;
  @Getter
  protected ContextAwareMeter removedSpecs;
  @Getter
  protected ContextAwareMeter totalSpecs;

  /**
   * @return A collection of {@link JobSpec}s to add/update/remove from the catalog,
   * parsed from the Kafka message.
   * @throws IOException
   */
  public abstract Collection<JobSpec> parseJobSpec(byte[] message) throws IOException;

  public KafkaJobMonitor(String topic, MutableJobCatalog catalog, Config config) {
    super(topic, ConfigUtils.getConfigOrEmpty(config, KAFKA_JOB_MONITOR_PREFIX), 1);
    this.jobCatalog = catalog;
    try {
      this.datasetStateStore = DatasetStateStore.buildDatasetStateStore(config);
    } catch (Exception e) {
      // Best-effort: monitoring continues without state-store deletion support.
      log.warn("DatasetStateStore could not be created.", e);
    }
  }

  @Override
  protected void createMetrics() {
    super.createMetrics();
    this.newSpecs = this.getMetricContext().contextAwareMeter(RuntimeMetrics.GOBBLIN_JOB_MONITOR_KAFKA_NEW_SPECS);
    this.updatedSpecs = this.getMetricContext().contextAwareMeter(RuntimeMetrics.GOBBLIN_JOB_MONITOR_KAFKA_UPDATED_SPECS);
    this.removedSpecs = this.getMetricContext().contextAwareMeter(RuntimeMetrics.GOBBLIN_JOB_MONITOR_KAFKA_REMOVED_SPECS);
    this.cancelledSpecs = this.getMetricContext().contextAwareMeter(RuntimeMetrics.GOBBLIN_JOB_MONITOR_KAFKA_CANCELLED_SPECS);
    this.totalSpecs = this.getMetricContext().contextAwareMeter(RuntimeMetrics.GOBBLIN_JOB_MONITOR_KAFKA_TOTAL_SPECS);
  }

  // Widened to public visibility for tests via @VisibleForTesting.
  @VisibleForTesting
  @Override
  protected void buildMetricsContextAndMetrics() {
    super.buildMetricsContextAndMetrics();
  }

  @VisibleForTesting
  @Override
  protected void shutdownMetrics()
      throws IOException {
    super.shutdownMetrics();
  }

  /**
   * Parses each Kafka message into zero or more {@link JobSpec}s and applies each spec to the
   * catalog according to its verb. Messages that fail to parse are logged and skipped (the
   * consumer keeps running).
   */
  @Override
  protected void processMessage(DecodeableKafkaRecord<byte[],byte[]> message) {
    try {
      Collection<JobSpec> parsedCollection = parseJobSpec(message.getValue());
      for (JobSpec parsedMessage : parsedCollection) {
        SpecExecutor.Verb verb;
        try {
          verb = SpecExecutor.Verb.valueOf(parsedMessage.getMetadata().get(SpecExecutor.VERB_KEY));
        } catch (IllegalArgumentException | NullPointerException e) {
          // Missing or unrecognized verb: skip this spec but continue with the rest.
          log.error("Unknown verb {} for spec {}", parsedMessage.getMetadata().get(SpecExecutor.VERB_KEY), parsedMessage.getUri());
          continue;
        }
        this.totalSpecs.mark();
        switch (verb) {
          case ADD:
            this.newSpecs.mark();
            this.jobCatalog.put(parsedMessage);
            break;
          case UPDATE:
            this.updatedSpecs.mark();
            this.jobCatalog.put(parsedMessage);
            break;
          case UNKNOWN: // unknown are considered as add request to maintain backward compatibility
            log.warn("Job Spec Verb is 'UNKNOWN', putting this spec in job catalog anyway.");
            this.jobCatalog.put(parsedMessage);
            break;
          case DELETE:
            this.removedSpecs.mark();
            this.jobCatalog.remove(parsedMessage.getUri());
            // Delete the job state if it is a delete spec request
            deleteStateStore(parsedMessage.getUri());
            break;
          case CANCEL:
            URI specUri = parsedMessage.getUri();
            try {
              JobSpec spec = this.jobCatalog.getJobSpec(specUri);
              // If incoming job or existing job does not have an associated flow execution ID, default to cancelling the job
              if (!spec.getConfig().hasPath(ConfigurationKeys.FLOW_EXECUTION_ID_KEY) || !parsedMessage.getConfig().hasPath(ConfigurationKeys.FLOW_EXECUTION_ID_KEY)) {
                this.cancelledSpecs.mark();
                this.jobCatalog.remove(specUri, true);
              } else {
                // Validate that the flow execution ID of the running flow matches the one in the incoming job spec
                String flowIdToCancel = parsedMessage.getConfig().getString(ConfigurationKeys.FLOW_EXECUTION_ID_KEY);
                if (spec.getConfig().getString(ConfigurationKeys.FLOW_EXECUTION_ID_KEY).equals(flowIdToCancel)) {
                  this.cancelledSpecs.mark();
                  this.jobCatalog.remove(specUri, true);
                } else {
                  // Execution-ID mismatch: the cancel request targets a different (likely stale) execution.
                  log.warn("Job spec {} that has flow execution ID {} could not be cancelled, incoming request expects to cancel flow execution ID {}", specUri,
                      spec.getConfig().getString(ConfigurationKeys.FLOW_EXECUTION_ID_KEY), flowIdToCancel);
                }
              }
            } catch (JobSpecNotFoundException e) {
              log.warn("Could not find job spec {} to cancel in job catalog", specUri);
            }
            break;
          default:
            log.error("Cannot process spec {} with verb {}", parsedMessage.getUri(), verb);
        }
      }
    } catch (IOException ioe) {
      String messageStr = new String(message.getValue(), Charsets.UTF_8);
      log.error(String.format("Failed to parse kafka message with offset %d: %s.", message.getOffset(), messageStr), ioe);
    }
  }

  /**
   * It fetches the job name from the given jobSpecUri
   * and deletes its corresponding state store
   * @param jobSpecUri jobSpecUri as created by
   *                   {@link org.apache.gobblin.runtime.api.FlowSpec.Utils#createFlowSpecUri}
   * @throws IOException
   */
  private void deleteStateStore(URI jobSpecUri) throws IOException {
    int EXPECTED_NUM_URI_TOKENS = 3;
    String[] uriTokens = jobSpecUri.getPath().split("/");
    if (null == this.datasetStateStore) {
      // State store construction failed earlier; nothing to delete against.
      log.warn("Job state store deletion failed as datasetstore is not initialized.");
      return;
    }
    if (uriTokens.length != EXPECTED_NUM_URI_TOKENS) {
      log.error("Invalid URI {}.", jobSpecUri);
      return;
    }
    // The job name is the last path segment of the spec URI.
    String jobName = uriTokens[EXPECTED_NUM_URI_TOKENS - 1];
    this.datasetStateStore.delete(jobName);
    log.info("JobSpec {} deleted with statestore.", jobSpecUri);
  }
}
| 1,626 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/job_monitor/KafkaAvroJobMonitor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.job_monitor;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Collection;
import java.util.List;
import org.apache.avro.AvroRuntimeException;
import org.apache.avro.Schema;
import org.apache.avro.io.BinaryDecoder;
import org.apache.avro.io.Decoder;
import org.apache.avro.io.DecoderFactory;
import org.apache.avro.specific.SpecificDatumReader;
import com.google.common.collect.Lists;
import com.typesafe.config.Config;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.metrics.ContextAwareMeter;
import org.apache.gobblin.metrics.Tag;
import org.apache.gobblin.metrics.reporter.util.SchemaVersionWriter;
import org.apache.gobblin.runtime.api.JobSpec;
import org.apache.gobblin.runtime.api.MutableJobCatalog;
import org.apache.gobblin.runtime.metrics.RuntimeMetrics;
/**
 * A job monitor for Avro messages. Uses a fixed input schema to parse the messages, then calls
 * {@link #parseJobSpec(T)} for each one.
 *
 * <p>Decoders and datum readers are stateful, so each consumer thread gets its own reusable
 * instance via {@link ThreadLocal}.</p>
 *
 * @param <T> the subclass of {@link org.apache.avro.specific.SpecificRecord} that messages implement.
 */
@Slf4j
public abstract class KafkaAvroJobMonitor<T> extends KafkaJobMonitor {

  private final Schema schema;
  // Reusable per-thread Avro decoder/reader (avoids re-allocating them per message).
  private final ThreadLocal<BinaryDecoder> decoder;
  private final ThreadLocal<SpecificDatumReader<T>> reader;
  @Getter
  private final SchemaVersionWriter<?> versionWriter;

  @Getter
  private ContextAwareMeter messageParseFailures;

  public KafkaAvroJobMonitor(String topic, MutableJobCatalog catalog, Config config, Schema schema,
      SchemaVersionWriter<?> versionWriter) {
    super(topic, catalog, config);

    this.schema = schema;
    // ThreadLocal.withInitial replaces the previous anonymous ThreadLocal subclasses;
    // the file already uses lambdas elsewhere, so this stays within the codebase's Java level.
    this.decoder = ThreadLocal.withInitial(() -> {
      InputStream dummyInputStream = new ByteArrayInputStream(new byte[0]);
      return DecoderFactory.get().binaryDecoder(dummyInputStream, null);
    });
    this.reader = ThreadLocal.withInitial(() -> new SpecificDatumReader<T>(schema));
    this.versionWriter = versionWriter;
  }

  @Override
  protected List<Tag<?>> getTagsForMetrics() {
    List<Tag<?>> tags = super.getTagsForMetrics();
    tags.add(new Tag<>(RuntimeMetrics.SCHEMA, this.schema.getName()));
    return tags;
  }

  @Override
  protected void createMetrics() {
    super.createMetrics();
    this.messageParseFailures = this.getMetricContext().contextAwareMeter(
        RuntimeMetrics.GOBBLIN_JOB_MONITOR_KAFKA_MESSAGE_PARSE_FAILURES);
  }

  /**
   * Decodes the raw Kafka message (version header first, then the Avro record) and delegates
   * to {@link #parseJobSpec(Object)}. Undecodable messages are counted and skipped, returning
   * an empty collection.
   */
  @Override
  public Collection<JobSpec> parseJobSpec(byte[] message)
      throws IOException {
    InputStream is = new ByteArrayInputStream(message);
    // Consume the schema-version prefix before handing the stream to the Avro decoder.
    this.versionWriter.readSchemaVersioningInformation(new DataInputStream(is));

    Decoder decoder = DecoderFactory.get().binaryDecoder(is, this.decoder.get());
    try {
      T decodedMessage = this.reader.get().read(null, decoder);
      return parseJobSpec(decodedMessage);
    } catch (AvroRuntimeException | IOException exc) {
      this.messageParseFailures.mark();
      // Rate-limit full stack traces: only include the exception while failures are rare.
      if (this.messageParseFailures.getFiveMinuteRate() < 1) {
        log.warn("Unable to decode input message.", exc);
      } else {
        log.warn("Unable to decode input message.");
      }
      return Lists.newArrayList();
    }
  }

  /**
   * Extract {@link JobSpec}s from the Kafka message.
   */
  public abstract Collection<JobSpec> parseJobSpec(T message);
}
| 1,627 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/job_monitor/AvroJobSpecKafkaJobMonitor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.job_monitor;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Collection;
import java.util.Properties;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.metrics.reporter.util.FixedSchemaVersionWriter;
import org.apache.gobblin.metrics.reporter.util.SchemaVersionWriter;
import org.apache.gobblin.runtime.api.GobblinInstanceDriver;
import org.apache.gobblin.runtime.api.JobSpec;
import org.apache.gobblin.runtime.api.JobSpecMonitor;
import org.apache.gobblin.runtime.api.JobSpecMonitorFactory;
import org.apache.gobblin.runtime.api.MutableJobCatalog;
import org.apache.gobblin.runtime.job_spec.AvroJobSpec;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
/**
* A {@link KafkaJobMonitor} that parses {@link AvroJobSpec}s and generates {@link JobSpec}s.
*/
@Getter
@Slf4j
public class AvroJobSpecKafkaJobMonitor extends KafkaAvroJobMonitor<AvroJobSpec> {
public static final String CONFIG_PREFIX = "gobblin.jobMonitor.avroJobSpec";
public static final String TOPIC_KEY = "topic";
public static final String SCHEMA_VERSION_READER_CLASS = "versionReaderClass";
private static final Config DEFAULTS = ConfigFactory.parseMap(ImmutableMap.of(
SCHEMA_VERSION_READER_CLASS, FixedSchemaVersionWriter.class.getName()));
public static class Factory implements JobSpecMonitorFactory {
@Override
public JobSpecMonitor forJobCatalog(GobblinInstanceDriver instanceDriver, MutableJobCatalog jobCatalog)
throws IOException {
Config config = instanceDriver.getSysConfig().getConfig().getConfig(CONFIG_PREFIX).withFallback(DEFAULTS);
return forConfig(config, jobCatalog);
}
/**
* Create a {@link AvroJobSpecKafkaJobMonitor} from an input {@link Config}. Useful for multiple monitors, where
* the configuration of each monitor is scoped.
* @param localScopeConfig The sub-{@link Config} for this monitor without any namespacing (e.g. the key for
* topic should simply be "topic").
* @throws IOException
*/
public JobSpecMonitor forConfig(Config localScopeConfig, MutableJobCatalog jobCatalog) throws IOException {
Preconditions.checkArgument(localScopeConfig.hasPath(TOPIC_KEY));
Config config = localScopeConfig.withFallback(DEFAULTS);
String topic = config.getString(TOPIC_KEY);
SchemaVersionWriter versionWriter;
try {
versionWriter = (SchemaVersionWriter) GobblinConstructorUtils.
invokeLongestConstructor(Class.forName(config.getString(SCHEMA_VERSION_READER_CLASS)), config);
} catch (ReflectiveOperationException roe) {
throw new IllegalArgumentException(roe);
}
return new AvroJobSpecKafkaJobMonitor(topic, jobCatalog, config, versionWriter);
}
}
protected AvroJobSpecKafkaJobMonitor(String topic, MutableJobCatalog catalog, Config limitedScopeConfig,
SchemaVersionWriter<?> versionWriter) throws IOException {
super(topic, catalog, limitedScopeConfig, AvroJobSpec.SCHEMA$, versionWriter);
}
@Override
protected void createMetrics() {
super.createMetrics();
}
/**
* Creates a {@link JobSpec} or {@link URI} from the {@link AvroJobSpec} record.
* @param record the record as an {@link AvroJobSpec}
* @return a {@link JobSpec}
*/
@Override
public Collection<JobSpec> parseJobSpec(AvroJobSpec record) {
JobSpec.Builder jobSpecBuilder = JobSpec.builder(record.getUri());
Properties props = new Properties();
props.putAll(record.getProperties());
jobSpecBuilder.withJobCatalogURI(record.getUri()).withVersion(record.getVersion())
.withDescription(record.getDescription()).withConfigAsProperties(props).withMetadata(record.getMetadata());
if (!record.getTemplateUri().isEmpty()) {
try {
jobSpecBuilder.withTemplate(new URI(record.getTemplateUri()));
} catch (URISyntaxException e) {
log.error("could not parse template URI " + record.getTemplateUri());
}
}
JobSpec jobSpec = jobSpecBuilder.build();
log.info("Parsed job spec " + jobSpec.toString());
return Lists.newArrayList(jobSpec);
}
} | 1,628 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/job_exec/JobLauncherExecutionDriver.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.job_exec;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executor;
import java.util.concurrent.FutureTask;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.io.Closer;
import com.google.common.util.concurrent.ExecutionList;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.broker.gobblin_scopes.GobblinScopeTypes;
import org.apache.gobblin.broker.SimpleScope;
import org.apache.gobblin.broker.SharedResourcesBrokerFactory;
import org.apache.gobblin.broker.SharedResourcesBrokerImpl;
import org.apache.gobblin.broker.iface.SharedResourcesBroker;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.metrics.GobblinMetrics;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.Tag;
import org.apache.gobblin.runtime.JobContext;
import org.apache.gobblin.runtime.JobException;
import org.apache.gobblin.runtime.JobLauncher;
import org.apache.gobblin.runtime.JobLauncherFactory;
import org.apache.gobblin.runtime.JobLauncherFactory.JobLauncherType;
import org.apache.gobblin.runtime.JobState;
import org.apache.gobblin.runtime.JobState.RunningState;
import org.apache.gobblin.runtime.api.Configurable;
import org.apache.gobblin.runtime.api.GobblinInstanceEnvironment;
import org.apache.gobblin.runtime.api.JobExecution;
import org.apache.gobblin.runtime.api.JobExecutionDriver;
import org.apache.gobblin.runtime.api.JobExecutionLauncher;
import org.apache.gobblin.runtime.api.JobExecutionMonitor;
import org.apache.gobblin.runtime.api.JobExecutionResult;
import org.apache.gobblin.runtime.api.JobExecutionState;
import org.apache.gobblin.runtime.api.JobExecutionStateListener;
import org.apache.gobblin.runtime.api.JobExecutionStatus;
import org.apache.gobblin.runtime.api.JobSpec;
import org.apache.gobblin.runtime.api.JobTemplate;
import org.apache.gobblin.runtime.api.MonitoredObject;
import org.apache.gobblin.runtime.api.SpecNotFoundException;
import org.apache.gobblin.runtime.job_spec.ResolvedJobSpec;
import org.apache.gobblin.runtime.listeners.AbstractJobListener;
import org.apache.gobblin.runtime.std.DefaultConfigurableImpl;
import org.apache.gobblin.runtime.std.JobExecutionStateListeners;
import org.apache.gobblin.runtime.std.JobExecutionUpdatable;
import org.apache.gobblin.util.ExecutorsUtils;
import lombok.AllArgsConstructor;
import lombok.Getter;
/**
* An implementation of JobExecutionDriver which acts as an adapter to the legacy
* {@link JobLauncher} API.
*/
public class JobLauncherExecutionDriver extends FutureTask<JobExecutionResult> implements JobExecutionDriver {
private final Logger _log;
private final JobSpec _jobSpec;
private final JobExecutionUpdatable _jobExec;
private final JobExecutionState _jobState;
private final JobExecutionStateListeners _callbackDispatcher;
private final ExecutionList _executionList;
private final DriverRunnable _runnable;
private final Closer _closer;
private JobContext _jobContext;
/**
 * Creates a new JobExecutionDriver which acts as an adapter to the legacy {@link JobLauncher} API.
 * @param sysConfig the system/environment config
 * @param jobSpec the JobSpec to be executed
 * @param jobLauncherType an optional jobLauncher type; the value follows the convention of
 *        {@link JobLauncherFactory#newJobLauncher(Properties, Properties)}.
 *        If absent, {@link JobLauncherFactory#newJobLauncher(java.util.Properties, java.util.Properties)}
 *        will be used, which looks for {@link ConfigurationKeys#JOB_LAUNCHER_TYPE_KEY}
 *        in the system configuration.
 * @param log an optional logger to be used; if none is specified, a default one
 *        will be instantiated.
 * @param instrumentationEnabled a flag to control if metrics should be enabled.
 * @param launcherMetrics an object to contain metrics related to jobLauncher.
 * @param instanceBroker a broker to create shared resources within the instance scope.
 */
public static JobLauncherExecutionDriver create(Configurable sysConfig, JobSpec jobSpec,
    Optional<JobLauncherFactory.JobLauncherType> jobLauncherType,
    Optional<Logger> log, boolean instrumentationEnabled,
    JobExecutionLauncher.StandardMetrics launcherMetrics, SharedResourcesBroker<GobblinScopeTypes> instanceBroker) {
  // Fall back to a class-scoped logger when the caller did not provide one.
  Logger driverLog = log.or(LoggerFactory.getLogger(JobLauncherExecutionDriver.class));

  JobExecutionStateListeners listeners = new JobExecutionStateListeners(driverLog);
  JobExecutionUpdatable jobExec = JobExecutionUpdatable.createFromJobSpec(jobSpec);
  JobExecutionState jobState =
      new JobExecutionState(jobSpec, jobExec, Optional.<JobExecutionStateListener>of(listeners));

  // Translate the typed launcher enum (if any) into the string form the factory expects.
  Optional<String> launcherTypeName = jobLauncherType.isPresent()
      ? Optional.of(jobLauncherType.get().toString())
      : Optional.<String>absent();
  JobLauncher launcher = createLauncher(sysConfig, jobSpec, driverLog, launcherTypeName, instanceBroker);

  JobListenerToJobStateBridge bridge =
      new JobListenerToJobStateBridge(driverLog, jobState, instrumentationEnabled, launcherMetrics);
  DriverRunnable runnable = new DriverRunnable(launcher, bridge, jobState, listeners, jobExec);
  return new JobLauncherExecutionDriver(jobSpec, driverLog, runnable);
}
/**
 * Wires the driver around an already-constructed {@link DriverRunnable}.
 * Resources owned by the runnable (the launcher and the listener dispatcher) are
 * registered with a {@link Closer} so they are released in {@code shutDown()}.
 */
protected JobLauncherExecutionDriver(JobSpec jobSpec, Logger log, DriverRunnable runnable) {
  super(runnable);
  _closer = Closer.create();
  _closer.register(runnable.getJobLauncher());
  _log = log;
  _jobSpec = jobSpec;
  _jobExec = runnable.getJobExec();
  _callbackDispatcher = _closer.register(runnable.getCallbackDispatcher());
  _jobState = runnable.getJobState();
  _executionList = new ExecutionList();
  _runnable = runnable;
}
/**
 * A runnable that actually executes the job.
 */
@AllArgsConstructor
@Getter
private static class DriverRunnable implements Callable<JobExecutionResult> {
  private final JobLauncher jobLauncher;
  private final JobListenerToJobStateBridge bridge;
  private final JobExecutionState jobState;
  private final JobExecutionStateListeners callbackDispatcher;
  private final JobExecutionUpdatable jobExec;

  /**
   * Launches the job and blocks until the bridged {@link JobExecutionState} reaches a done state.
   * @return a result derived from the final job state
   */
  @Override
  public JobExecutionResult call() throws JobException, InterruptedException, TimeoutException {
    jobLauncher.launchJob(bridge);
    // Effectively wait forever; the bridge moves the state to done when the launcher finishes.
    jobState.awaitForDone(Long.MAX_VALUE);
    return JobExecutionResult.createFromState(jobState);
  }
}
/**
 * Instantiates the legacy {@link JobLauncher} that will run the job. An explicitly supplied
 * launcher type wins; otherwise the factory auto-detects one from the configuration.
 */
private static JobLauncher createLauncher(Configurable sysConfig, JobSpec jobSpec, Logger log,
    Optional<String> jobLauncherType, SharedResourcesBroker<GobblinScopeTypes> instanceBroker) {
  Properties sysProps = sysConfig.getConfigAsProperties();
  Properties jobProps = jobSpec.getConfigAsProperties();

  if (jobLauncherType.isPresent()) {
    return JobLauncherFactory.newJobLauncher(sysProps, jobProps, jobLauncherType.get(), instanceBroker);
  }

  log.info("Creating auto jobLauncher for " + jobSpec);
  try {
    return JobLauncherFactory.newJobLauncher(sysProps, jobProps, instanceBroker);
  } catch (Exception e) {
    throw new RuntimeException("JobLauncher creation failed: " + e, e);
  }
}
/** Returns the immutable description of this job execution. */
@Override
public JobExecution getJobExecution() {
  return _jobExec;
}

/** Returns the live, mutable status of this job execution. */
@Override
public JobExecutionStatus getJobExecutionStatus() {
  return _jobState;
}

/** Starts the driver on a new, dedicated thread and returns immediately. */
protected void startAsync() throws JobException {
  _log.info("Starting " + getClass().getSimpleName());
  ExecutorsUtils.newThreadFactory(Optional.of(_log), Optional.of("job-launcher-execution-driver")).newThread(this).start();
}
/**
 * Invoked by {@link FutureTask} when the computation completes (normally, exceptionally,
 * or via cancellation). Fires registered completion listeners, then releases resources.
 */
@Override
protected void done() {
  _executionList.execute();
  try {
    shutDown();
  } catch (IOException ioe) {
    // Fix: log the exception itself; the previous code dropped it, hiding the close failure.
    _log.error("Failed to close job launcher.", ioe);
  }
}
/**
 * Cancels the job if it is still in a non-terminal state, then closes all registered resources.
 * NOTE(review): the outer-class {@code _jobContext} field does not appear to be assigned anywhere
 * in this class, so the cancel branch below may never trigger — confirm against the full file.
 */
private void shutDown() throws IOException {
  _log.info("Shutting down " + getClass().getSimpleName());
  if (null != _jobContext) {
    switch (_jobContext.getJobState().getState()) {
      case PENDING:
      case SUCCESSFUL:
      case RUNNING: {
        // We have to pass another listener instance as launcher does not store the listener used
        // in launchJob()
        cancel(false);
        break;
      }
      case FAILED:
      case COMMITTED:
      case CANCELLED: {
        // Nothing to do
        break;
      }
    }
  }
  _closer.close();
}
/**
 * Registers a completion listener; delegated to Guava's {@link ExecutionList},
 * which runs listeners immediately if execution has already completed.
 */
@Override
public void addListener(Runnable listener, Executor executor) {
  _executionList.add(listener, executor);
}
/**
 * Adapts legacy job-listener callbacks fired by the {@link JobLauncher} into
 * {@link JobExecutionState} transitions and (optionally) launcher metric updates.
 */
static class JobListenerToJobStateBridge extends AbstractJobListener {

  private final JobExecutionState _jobState;
  private final boolean _instrumentationEnabled;
  private final JobExecutionLauncher.StandardMetrics _launcherMetrics;

  // The JobContext captured in onJobPrepare(); retained for later inspection.
  private JobContext _jobContext;

  public JobListenerToJobStateBridge(Logger log, JobExecutionState jobState,
      boolean instrumentationEnabled, JobExecutionLauncher.StandardMetrics launcherMetrics) {
    super(Optional.of(log));
    _jobState = jobState;
    _instrumentationEnabled = instrumentationEnabled;
    _launcherMetrics = launcherMetrics;
  }

  /** Moves the state machine PENDING -> RUNNING and marks the launch metric. */
  @Override
  public void onJobPrepare(JobContext jobContext) throws Exception {
    super.onJobPrepare(jobContext);
    _jobContext = jobContext;
    // Drive through PENDING first if the state machine has not been initialized yet.
    if (_jobState.getRunningState() == null) {
      _jobState.switchToPending();
    }
    _jobState.switchToRunning();
    if (_instrumentationEnabled && null != _launcherMetrics) {
      _launcherMetrics.getNumJobsLaunched().mark();
    }
  }

  @Override
  public void onJobStart(JobContext jobContext) throws Exception {
    super.onJobStart(jobContext);
  }

  /**
   * Translates the terminal launcher state into the corresponding JobExecutionState
   * transition and metric; anything other than SUCCESSFUL/COMMITTED/FAILED is a bug.
   */
  @Override
  public void onJobCompletion(JobContext jobContext) throws Exception {
    Preconditions.checkArgument(jobContext.getJobState().getState() == RunningState.SUCCESSFUL
        || jobContext.getJobState().getState() == RunningState.COMMITTED
        || jobContext.getJobState().getState() == RunningState.FAILED,
        "Unexpected state: " + jobContext.getJobState().getState() + " in " + jobContext);
    super.onJobCompletion(jobContext);
    if (_instrumentationEnabled && null != _launcherMetrics) {
      _launcherMetrics.getNumJobsCompleted().mark();
    }
    if (jobContext.getJobState().getState() == RunningState.FAILED) {
      if (_instrumentationEnabled && null != _launcherMetrics) {
        _launcherMetrics.getNumJobsFailed().mark();
      }
      _jobState.switchToFailed();
    }
    else {
      // TODO Remove next line once the JobLauncher starts sending notifications for success
      _jobState.switchToSuccessful();
      _jobState.switchToCommitted();
      if (_instrumentationEnabled && null != _launcherMetrics) {
        _launcherMetrics.getNumJobsCommitted().mark();
      }
    }
  }

  /** Marks the state machine as cancelled and updates the cancellation metric. */
  @Override
  public void onJobCancellation(JobContext jobContext) throws Exception {
    super.onJobCancellation(jobContext);
    _jobState.switchToCancelled();
    if (_instrumentationEnabled && null != _launcherMetrics) {
      _launcherMetrics.getNumJobsCancelled().mark();
    }
  }
}
/** Exposes the wrapped legacy {@link JobLauncher}; used by tests and by {@code cancel}. */
@VisibleForTesting JobLauncher getLegacyLauncher() {
  return _runnable.getJobLauncher();
}

/** {@inheritDoc} */
@Override public void registerStateListener(JobExecutionStateListener listener) {
  _callbackDispatcher.registerStateListener(listener);
}

/** {@inheritDoc} */
@Override public void unregisterStateListener(JobExecutionStateListener listener) {
  _callbackDispatcher.unregisterStateListener(listener);
}

/** {@inheritDoc} */
@Override public JobExecutionState getJobExecutionState() {
  return _jobState;
}
/**
 * Creates a new instance of {@link JobLauncherExecutionDriver}.
 *
 * <p>Conventions
 * <ul>
 * <li>If no jobLauncherType is specified, one will be determined by the JobSpec
 * (see {@link JobLauncherFactory}).
 * <li> Convention for sysConfig: use the sysConfig of the gobblinInstance if specified,
 * otherwise use empty config.
 * <li> Convention for log: use gobblinInstance logger plus "." + jobSpec if specified, otherwise
 * use JobExecutionDriver class name plus "." + jobSpec
 * </ul>
 */
public static class Launcher implements JobExecutionLauncher, GobblinInstanceEnvironment {
  // All settings are optional; getters lazily fall back to the parent Gobblin instance
  // environment (when set) or to documented defaults.
  private Optional<JobLauncherType> _jobLauncherType = Optional.absent();
  private Optional<Configurable> _sysConfig = Optional.absent();
  private Optional<GobblinInstanceEnvironment> _gobblinEnv = Optional.absent();
  private Optional<Logger> _log = Optional.absent();
  private Optional<MetricContext> _metricContext = Optional.absent();
  private Optional<Boolean> _instrumentationEnabled = Optional.absent();
  private JobExecutionLauncher.StandardMetrics _metrics;
  private Optional<SharedResourcesBroker<GobblinScopeTypes>> _instanceBroker = Optional.absent();

  public Launcher() {
  }

  /** Forces a specific {@link JobLauncherType} instead of auto-detecting it from the JobSpec. */
  public Launcher withJobLauncherType(JobLauncherType jobLauncherType) {
    Preconditions.checkNotNull(jobLauncherType);
    _jobLauncherType = Optional.of(jobLauncherType);
    return this;
  }

  public Optional<JobLauncherType> getJobLauncherType() {
    return _jobLauncherType;
  }

  /** System-wide settings: the parent instance's sysConfig when available, else an empty config. */
  public Configurable getDefaultSysConfig() {
    return _gobblinEnv.isPresent() ?
        _gobblinEnv.get().getSysConfig() :
        DefaultConfigurableImpl.createFromConfig(ConfigFactory.empty());
  }

  @Override
  public Configurable getSysConfig() {
    // Lazily resolved and cached so subsequent calls return the same instance.
    if (!_sysConfig.isPresent()) {
      _sysConfig = Optional.of(getDefaultSysConfig());
    }
    return _sysConfig.get();
  }

  public Launcher withSysConfig(Configurable sysConfig) {
    _sysConfig = Optional.of(sysConfig);
    return this;
  }

  /** Parent Gobblin instance */
  public Launcher withGobblinInstanceEnvironment(GobblinInstanceEnvironment gobblinInstance) {
    _gobblinEnv = Optional.of(gobblinInstance);
    return this;
  }

  public Optional<GobblinInstanceEnvironment> getGobblinInstanceEnvironment() {
    return _gobblinEnv;
  }

  /** Returns a per-job logger namespaced under this launcher's logger. */
  public Logger getLog(JobSpec jobSpec) {
    return getJobLogger(getLog(), jobSpec);
  }

  public Launcher withInstrumentationEnabled(boolean enabled) {
    _instrumentationEnabled = Optional.of(enabled);
    return this;
  }

  public boolean getDefaultInstrumentationEnabled() {
    return _gobblinEnv.isPresent() ? _gobblinEnv.get().isInstrumentationEnabled() :
        GobblinMetrics.isEnabled(getSysConfig().getConfig());
  }

  @Override
  public boolean isInstrumentationEnabled() {
    if (!_instrumentationEnabled.isPresent()) {
      _instrumentationEnabled = Optional.of(getDefaultInstrumentationEnabled());
    }
    return _instrumentationEnabled.get();
  }

  private static Logger getJobLogger(Logger parentLog, JobSpec jobSpec) {
    return LoggerFactory.getLogger(parentLog.getName() + "." + jobSpec.toShortString());
  }

  public Launcher withMetricContext(MetricContext instanceMetricContext) {
    _metricContext = Optional.of(instanceMetricContext);
    return this;
  }

  @Override
  public MetricContext getMetricContext() {
    if (!_metricContext.isPresent()) {
      _metricContext = Optional.of(getDefaultMetricContext());
    }
    return _metricContext.get();
  }

  /** Child context of the instance's metric context when available, else a fresh one from sysConfig. */
  public MetricContext getDefaultMetricContext() {
    if (_gobblinEnv.isPresent()) {
      return _gobblinEnv.get().getMetricContext()
          .childBuilder(JobExecutionLauncher.class.getSimpleName()).build();
    }
    // Wrap sysConfig in a throwaway State solely to satisfy the Instrumented API.
    org.apache.gobblin.configuration.State fakeState =
        new org.apache.gobblin.configuration.State(getSysConfig().getConfigAsProperties());
    List<Tag<?>> tags = new ArrayList<>();
    MetricContext res = Instrumented.getMetricContext(fakeState, Launcher.class, tags);
    return res;
  }

  /**
   * Resolves the spec (if not already resolved), builds a driver for it, and wraps the
   * driver in a {@link JobExecutionMonitorAndDriver}. Does NOT start the driver.
   */
  @Override
  public JobExecutionMonitor launchJob(JobSpec jobSpec) {
    Preconditions.checkNotNull(jobSpec);
    if (!(jobSpec instanceof ResolvedJobSpec)) {
      try {
        jobSpec = new ResolvedJobSpec(jobSpec);
      } catch (JobTemplate.TemplateException | SpecNotFoundException exc) {
        throw new RuntimeException("Can't launch job " + jobSpec.getUri(), exc);
      }
    }
    JobLauncherExecutionDriver driver = JobLauncherExecutionDriver.create(getSysConfig(), jobSpec, _jobLauncherType,
        Optional.of(getLog(jobSpec)), isInstrumentationEnabled(), getMetrics(), getInstanceBroker());
    return new JobExecutionMonitorAndDriver(driver);
  }

  @Override public List<Tag<?>> generateTags(org.apache.gobblin.configuration.State state) {
    return Collections.emptyList();
  }

  @Override public void switchMetricContext(List<Tag<?>> tags) {
    throw new UnsupportedOperationException();
  }

  @Override public void switchMetricContext(MetricContext context) {
    throw new UnsupportedOperationException();
  }

  @Override public String getInstanceName() {
    return _gobblinEnv.isPresent() ? _gobblinEnv.get().getInstanceName() : getClass().getName();
  }

  public Logger getDefaultLog() {
    return _gobblinEnv.isPresent() ? _gobblinEnv.get().getLog() : LoggerFactory.getLogger(getClass());
  }

  @Override public Logger getLog() {
    if (! _log.isPresent()) {
      _log = Optional.of(getDefaultLog());
    }
    return _log.get();
  }

  public Launcher withLog(Logger log) {
    _log = Optional.of(log);
    return this;
  }

  @Override public StandardMetrics getMetrics() {
    // Lazily created; not thread-safe — assumes single-threaded configuration (TODO confirm).
    if (_metrics == null) {
      _metrics = new JobExecutionLauncher.StandardMetrics(this);
    }
    return _metrics;
  }

  public Launcher withInstanceBroker(SharedResourcesBroker<GobblinScopeTypes> broker) {
    _instanceBroker = Optional.of(broker);
    return this;
  }

  public SharedResourcesBroker<GobblinScopeTypes> getInstanceBroker() {
    if (!_instanceBroker.isPresent()) {
      if (_gobblinEnv.isPresent()) {
        _instanceBroker = Optional.of(_gobblinEnv.get().getInstanceBroker());
      } else {
        _instanceBroker = Optional.of(getDefaultInstanceBroker());
      }
    }
    return _instanceBroker.get();
  }

  /** Builds a standalone INSTANCE-scoped broker; objects will not be shared with other launchers. */
  public SharedResourcesBroker<GobblinScopeTypes> getDefaultInstanceBroker() {
    getLog().warn("Creating a default instance broker for job launcher. Objects may not be shared across all jobs in this instance.");
    SharedResourcesBrokerImpl<GobblinScopeTypes> globalBroker =
        SharedResourcesBrokerFactory.createDefaultTopLevelBroker(getSysConfig().getConfig(),
            GobblinScopeTypes.GLOBAL.defaultScopeInstance());
    return globalBroker.newSubscopedBuilder(new SimpleScope<>(GobblinScopeTypes.INSTANCE, getInstanceName())).build();
  }
}
/**
 * Old {@link JobExecutionLauncher#launchJob(JobSpec)} returned a {@link JobExecutionDriver} but the new
 * API returns a {@link JobExecutionMonitor}. For backward compatibility we wrap the
 * {@link JobExecutionDriver} inside a {@link JobExecutionMonitorAndDriver}; all methods delegate.
 */
@AllArgsConstructor
public static class JobExecutionMonitorAndDriver implements JobExecutionMonitor {
  @Getter
  JobLauncherExecutionDriver driver;

  @Override
  public boolean cancel(boolean mayInterruptIfRunning) {
    return this.driver.cancel(mayInterruptIfRunning);
  }

  @Override
  public boolean isCancelled() {
    return this.driver.isCancelled();
  }

  @Override
  public boolean isDone() {
    return this.driver.isDone();
  }

  @Override
  public JobExecutionResult get()
      throws InterruptedException, ExecutionException {
    return this.driver.get();
  }

  @Override
  public JobExecutionResult get(long timeout, TimeUnit unit)
      throws InterruptedException, ExecutionException, TimeoutException {
    return this.driver.get(timeout, unit);
  }

  @Override
  public MonitoredObject getRunningState() {
    return this.driver._jobState.getRunningState();
  }
}
/** {@inheritDoc} */
@Override public void registerWeakStateListener(JobExecutionStateListener listener) {
  _callbackDispatcher.registerWeakStateListener(listener);
}
/**
 * {@inheritDoc}
 * A driver whose job has not yet produced a running state is considered not done.
 */
@Override public boolean isDone() {
  RunningState runState = fetchRunningState();
  // Simplified from the redundant "x == null ? false : y" ternary.
  return runState != null && runState.isDone();
}
/**
 * Returns the current {@link RunningState}, or {@code null} if the job has not started yet.
 * @throws UnsupportedOperationException if the monitored object is not a {@link RunningState}
 */
private RunningState fetchRunningState() {
  MonitoredObject monitoredObject = getJobExecutionStatus().getRunningState();
  if (monitoredObject == null) {
    return null;
  }
  if (!(monitoredObject instanceof RunningState)) {
    throw new UnsupportedOperationException("Cannot process monitored object other than " + JobState.RunningState.class.getName());
  }
  return (RunningState) monitoredObject;
}
/**
 * {@inheritDoc}
 * Cancels through the legacy launcher first, then cancels the FutureTask itself.
 * Fix: {@code fetchRunningState()} returns null before the job starts; the previous
 * code dereferenced it unconditionally and would NPE on an early cancel.
 */
@Override public boolean cancel(boolean mayInterruptIfRunning) {
  // FIXME there is a race condition here as the job may complete successfully before we
  // call cancelJob() below. There isn't an easy way to fix that right now.
  RunningState runState = fetchRunningState();
  if (runState != null) {
    if (runState.isCancelled()) {
      return true;
    } else if (runState.isDone()) {
      return false;
    }
  }
  try {
    // No special processing of callbacks necessary
    getLegacyLauncher().cancelJob(new AbstractJobListener(){});
  } catch (JobException e) {
    throw new RuntimeException("Unable to cancel job " + _jobSpec + ": " + e, e);
  }
  return super.cancel(mayInterruptIfRunning);
}
/**
 * {@inheritDoc}
 * Fix: returns {@code false} when the job has not yet produced a running state
 * (fetchRunningState() returns null then), instead of throwing NPE.
 */
@Override public boolean isCancelled() {
  RunningState runState = fetchRunningState();
  return runState != null && runState.isCancelled();
}
/**
 * {@inheritDoc}
 * Unlike {@link FutureTask#get()}, execution failures are returned as a failure
 * {@link JobExecutionResult} rather than thrown as {@link ExecutionException}.
 */
@Override
public JobExecutionResult get()
    throws InterruptedException {
  try {
    return super.get();
  } catch (ExecutionException ee) {
    return JobExecutionResult.createFailureResult(ee.getCause());
  }
}

/** Same contract as {@link #get()}, bounded by a timeout. */
@Override
public JobExecutionResult get(long timeout, TimeUnit unit)
    throws InterruptedException, TimeoutException {
  try {
    return super.get(timeout, unit);
  } catch (ExecutionException ee) {
    return JobExecutionResult.createFailureResult(ee.getCause());
  }
}
}
| 1,629 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/job_exec/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Standard {@link org.apache.gobblin.runtime.api.JobExecutionLauncher} implementations
*
*/
package org.apache.gobblin.runtime.job_exec;
| 1,630 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/scheduler/PathAlterationListenerAdaptorForMonitor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.scheduler;
import java.io.IOException;
import java.util.Map;
import java.util.Properties;
import org.apache.commons.configuration.ConfigurationException;
import org.apache.gobblin.runtime.job_spec.JobSpecResolver;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Maps;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.runtime.JobException;
import org.apache.gobblin.runtime.listeners.EmailNotificationJobListener;
import org.apache.gobblin.runtime.listeners.RunOnceJobListener;
import org.apache.gobblin.util.PathUtils;
import org.apache.gobblin.util.SchedulerUtils;
import org.apache.gobblin.util.filesystem.PathAlterationListenerAdaptor;
/**
* Inner subclass of PathAlterationListenerAdaptor for implementation of Listen's methods,
* avoiding anonymous class
*/
public class PathAlterationListenerAdaptorForMonitor extends PathAlterationListenerAdaptor {
private static final Logger LOG = LoggerFactory.getLogger(JobScheduler.class);
Path jobConfigFileDirPath;
JobScheduler jobScheduler;
/** Store path to job mappings. Required for correctly unscheduling. */
private final Map<Path, String> jobNameMap;
private final JobSpecResolver jobSpecResolver;
/**
 * @param jobConfigFileDirPath root directory being monitored for job config files
 * @param jobScheduler scheduler that jobs are (re/un)scheduled against
 */
PathAlterationListenerAdaptorForMonitor(Path jobConfigFileDirPath, JobScheduler jobScheduler) {
  this.jobConfigFileDirPath = jobConfigFileDirPath;
  this.jobScheduler = jobScheduler;
  // Concurrent map: file-system events may arrive on a monitor thread.
  this.jobNameMap = Maps.newConcurrentMap();
  this.jobSpecResolver = jobScheduler.getJobSpecResolver();
}
/** Normalizes the job's config-file path (scheme/authority stripped) for use as the map key. */
private Path getJobPath(Properties jobProps) {
  return PathUtils.getPathWithoutSchemeAndAuthority(new Path(jobProps.getProperty(ConfigurationKeys.JOB_CONFIG_FILE_PATH_KEY)));
}

/** Remembers the job name for this config file so the job can be unscheduled if the file is deleted. */
public void addToJobNameMap(Properties jobProps) {
  this.jobNameMap.put(getJobPath(jobProps),
      jobProps.getProperty(ConfigurationKeys.JOB_NAME_KEY));
}
/**
 * Loads the single job configuration at {@code path} and schedules or reschedules it.
 * Failures are logged rather than propagated so one bad file does not stop the monitor.
 * @param path   the job configuration file that triggered the event
 * @param action SCHEDULE for a new file, RESCHEDULE for a changed one; UNSCHEDULE is rejected
 */
public void loadNewJobConfigAndHandleNewJob(Path path, JobScheduler.Action action) {
  // Load the new job configuration and schedule the new job
  String customizedInfo = "";  // verb used only to customize the JobException error message
  try {
    Properties jobProps =
        SchedulerUtils.loadGenericJobConfig(this.jobScheduler.properties, path, jobConfigFileDirPath, this.jobSpecResolver);
    LOG.debug("Loaded job properties: {}", jobProps);
    switch (action) {
      case SCHEDULE:
        boolean runOnce = Boolean.valueOf(jobProps.getProperty(ConfigurationKeys.JOB_RUN_ONCE_KEY, "false"));
        customizedInfo = "schedule";
        addToJobNameMap(jobProps);
        // Run-once jobs get a listener that removes them after their first run.
        jobScheduler.scheduleJob(jobProps, runOnce ? new RunOnceJobListener() : new EmailNotificationJobListener());
        break;
      case RESCHEDULE:
        customizedInfo = "reschedule";
        rescheduleJob(jobProps);
        break;
      case UNSCHEDULE:
        throw new RuntimeException("Should not call loadNewJobConfigAndHandleNewJob for unscheduling jobs.");
      default:
        break;
    }
  } catch (ConfigurationException | IOException e) {
    LOG.error("Failed to load from job configuration file " + path.toString(), e);
  } catch (JobException je) {
    LOG.error("Failed to " + customizedInfo + " new job loaded from job configuration file " + path.toString(), je);
  }
}
/**
 * Reloads every job configuration affected by a change to the common .properties file at
 * {@code path} and schedules or reschedules each one. Per-job failures are logged and do not
 * stop processing of the remaining jobs.
 * @param path   the common properties file that triggered the event
 * @param action SCHEDULE or RESCHEDULE; UNSCHEDULE is rejected
 */
public void loadNewCommonConfigAndHandleNewJob(Path path, JobScheduler.Action action) {
  // Verb/noun fragments used only to customize the error messages below.
  String customizedInfoAction = "";
  String customizedInfoResult = "";
  try {
    for (Properties jobProps : SchedulerUtils.loadGenericJobConfigs(jobScheduler.properties, path,
        jobConfigFileDirPath, this.jobSpecResolver)) {
      try {
        switch (action) {
          case SCHEDULE:
            boolean runOnce = Boolean.valueOf(jobProps.getProperty(ConfigurationKeys.JOB_RUN_ONCE_KEY, "false"));
            customizedInfoAction = "schedule";
            customizedInfoResult = "creation or equivalent action";
            addToJobNameMap(jobProps);
            jobScheduler.scheduleJob(jobProps,
                runOnce ? new RunOnceJobListener() : new EmailNotificationJobListener());
            break;
          case RESCHEDULE:
            customizedInfoAction = "reschedule";
            customizedInfoResult = "change";
            rescheduleJob(jobProps);
            break;
          case UNSCHEDULE:
            throw new RuntimeException("Should not call loadNewCommonConfigAndHandleNewJob for unscheduling jobs.");
          default:
            break;
        }
      } catch (JobException je) {
        // One bad job must not prevent the rest of the batch from being processed.
        LOG.error(
            "Failed to " + customizedInfoAction + " job reloaded from job configuration file " + jobProps.getProperty(
                ConfigurationKeys.JOB_CONFIG_FILE_PATH_KEY), je);
      }
    }
  } catch (ConfigurationException | IOException e) {
    LOG.error(
        "Failed to reload job configuration files affected by " + customizedInfoResult + " to " + path.toString(), e);
  }
}
/**
 * Called when a new file appears under the monitored directory.
 * A new common .properties file triggers a reschedule of affected jobs (unless a sibling
 * .properties file already existed); a new job configuration file is scheduled.
 */
@Override
public void onFileCreate(Path path) {
  String fileExtension = path.getName().substring(path.getName().lastIndexOf('.') + 1);
  String noExtFileName = path.getName().substring(0, path.getName().lastIndexOf('.'));
  if (fileExtension.equalsIgnoreCase(SchedulerUtils.JOB_PROPS_FILE_EXTENSION)) {
    // check no other properties pre-existed
    try {
      if (checkCommonPropExistance(path.getParent(), noExtFileName)) {
        return;
      }
    } catch (IOException e) {
      // Fix: use the logger instead of e.printStackTrace(), and include context.
      LOG.error("Failed to check for pre-existing common properties files under " + path.getParent(), e);
    }
    // Fix: add the missing space between "file" and the path in the log message.
    LOG.info("Detected creation of common properties file " + path.toString());
    // New .properties file found with some new attributes; reschedule affected jobs.
    loadNewCommonConfigAndHandleNewJob(path, JobScheduler.Action.RESCHEDULE);
    return;
  }
  if (!jobScheduler.jobConfigFileExtensions.contains(fileExtension)) {
    // Not a job configuration file, ignore.
    return;
  }
  LOG.info("Detected new job configuration file " + path.toString());
  loadNewJobConfigAndHandleNewJob(path, JobScheduler.Action.SCHEDULE);
}
/**
* Called when a job configuration file is changed.
*/
@Override
public void onFileChange(Path path) {
String fileExtension = path.getName().substring(path.getName().lastIndexOf('.') + 1);
if (fileExtension.equalsIgnoreCase(SchedulerUtils.JOB_PROPS_FILE_EXTENSION)) {
LOG.info("Detected change to common properties file " + path.toString());
loadNewCommonConfigAndHandleNewJob(path, JobScheduler.Action.RESCHEDULE);
return;
}
if (!jobScheduler.jobConfigFileExtensions.contains(fileExtension)) {
// Not a job configuration file, ignore.
return;
}
LOG.info("Detected change to job configuration file " + path.toString());
loadNewJobConfigAndHandleNewJob(path, JobScheduler.Action.RESCHEDULE);
}
/**
* Called when a job configuration file is deleted.
*/
@Override
public void onFileDelete(Path path) {
String fileExtension = path.getName().substring(path.getName().lastIndexOf('.') + 1);
if (fileExtension.equalsIgnoreCase(SchedulerUtils.JOB_PROPS_FILE_EXTENSION)) {
LOG.info("Detected deletion of common properties file " + path.toString());
// For JobProps, deletion in local folder means inheritance from ancestor folder and reschedule.
loadNewCommonConfigAndHandleNewJob(path, JobScheduler.Action.RESCHEDULE);
return;
}
if (!jobScheduler.jobConfigFileExtensions.contains(fileExtension)) {
// Not a job configuration file, ignore.
return;
}
LOG.info("Detected deletion of job configuration file " + path.toString());
// As for normal job file, deletion means unschedule
unscheduleJobAtPath(path);
}
private void unscheduleJobAtPath(Path path) {
try {
Path pathWithoutSchemeOrAuthority = PathUtils.getPathWithoutSchemeAndAuthority(path);
String jobName = this.jobNameMap.get(pathWithoutSchemeOrAuthority);
if (jobName == null) {
LOG.info("Could not find a scheduled job to unschedule with path " + pathWithoutSchemeOrAuthority);
return;
}
LOG.info("Unscheduling job " + jobName);
this.jobScheduler.unscheduleJob(jobName);
this.jobNameMap.remove(pathWithoutSchemeOrAuthority);
} catch (JobException je) {
LOG.error("Could not unschedule job " + this.jobNameMap.get(path));
}
}
private void rescheduleJob(Properties jobProps)
throws JobException {
String jobName = jobProps.getProperty(ConfigurationKeys.JOB_NAME_KEY);
Path jobPath = getJobPath(jobProps);
// First unschedule and delete the old job
if (this.jobNameMap.containsKey(jobPath)) {
jobScheduler.unscheduleJob(this.jobNameMap.get(jobPath));
this.jobNameMap.remove(jobPath);
}
boolean runOnce = Boolean.valueOf(jobProps.getProperty(ConfigurationKeys.JOB_RUN_ONCE_KEY, "false"));
// Reschedule the job with the new job configuration
jobScheduler.scheduleJob(jobProps, runOnce ? new RunOnceJobListener() : new EmailNotificationJobListener());
addToJobNameMap(jobProps);
LOG.debug("[JobScheduler] The new job " + jobName + " is rescheduled.");
}
  /**
   * Checks whether any file directly under {@code rootPath} has a name containing
   * {@code noExtFileName}. Note this is a substring match, not an exact base-name match,
   * so e.g. "common" also matches "common2.properties".
   *
   * @param rootPath directory whose children are listed
   * @param noExtFileName candidate properties file name without its extension
   * @return {@code false} if a matching file already exists under {@code rootPath},
   *         {@code true} otherwise
   * @throws IOException if listing the directory fails
   */
  private boolean checkCommonPropExistance(Path rootPath, String noExtFileName)
      throws IOException {
    // NOTE(review): a fresh default Configuration is built on every call — presumably the
    // default FileSystem settings are sufficient here; confirm if a custom FS config is needed.
    Configuration conf = new Configuration();
    FileStatus[] children = rootPath.getFileSystem(conf).listStatus(rootPath);
    for (FileStatus aChild : children) {
      // Substring match: any sibling whose name contains the base name counts as pre-existing.
      if (aChild.getPath().getName().contains(noExtFileName)) {
        return false;
      }
    }
    return true;
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.scheduler;
import java.util.Properties;
import org.quartz.Scheduler;
import org.quartz.SchedulerException;
import org.quartz.impl.StdSchedulerFactory;
import com.google.common.base.Optional;
import com.google.common.util.concurrent.AbstractIdleService;
import com.typesafe.config.Config;
import javax.inject.Inject;
import javax.inject.Singleton;
import lombok.Getter;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.PropertiesUtils;
/**
 * A {@link com.google.common.util.concurrent.Service} wrapping a Quartz {@link Scheduler} allowing correct shutdown
 * of the scheduler when {@link JobScheduler} fails to initialize.
 */
@Singleton
public class SchedulerService extends AbstractIdleService {

  /** The wrapped Quartz scheduler; non-null once {@link #startUp()} has completed. */
  @Getter
  private Scheduler scheduler;

  /** Whether {@link #shutDown()} blocks until currently executing jobs finish. */
  private final boolean waitForJobCompletion;

  /** Optional "org.quartz.*" properties used to initialize the scheduler factory. */
  private final Optional<Properties> quartzProps;

  public SchedulerService(boolean waitForJobCompletion, Optional<Properties> quartzConfig) {
    this.waitForJobCompletion = waitForJobCompletion;
    this.quartzProps = quartzConfig;
  }

  public SchedulerService(Properties props) {
    this(Boolean.parseBoolean(
            props.getProperty(ConfigurationKeys.SCHEDULER_WAIT_FOR_JOB_COMPLETION_KEY,
                ConfigurationKeys.DEFAULT_SCHEDULER_WAIT_FOR_JOB_COMPLETION)),
        Optional.of(PropertiesUtils.extractPropertiesWithPrefix(props, Optional.of("org.quartz."))));
  }

  @Inject
  public SchedulerService(Config cfg) {
    this(cfg.hasPath(ConfigurationKeys.SCHEDULER_WAIT_FOR_JOB_COMPLETION_KEY)
            ? cfg.getBoolean(ConfigurationKeys.SCHEDULER_WAIT_FOR_JOB_COMPLETION_KEY)
            : Boolean.parseBoolean(ConfigurationKeys.DEFAULT_SCHEDULER_WAIT_FOR_JOB_COMPLETION),
        Optional.of(ConfigUtils.configToProperties(cfg, "org.quartz.")));
  }

  @Override
  protected void startUp() throws SchedulerException {
    StdSchedulerFactory schedulerFactory = new StdSchedulerFactory();
    // Only override the factory defaults when explicit quartz properties were supplied.
    if (this.quartzProps.isPresent() && !this.quartzProps.get().isEmpty()) {
      schedulerFactory.initialize(this.quartzProps.get());
    }
    this.scheduler = schedulerFactory.getScheduler();
    this.scheduler.start();
  }

  @Override
  protected void shutDown() throws SchedulerException {
    this.scheduler.shutdown(this.waitForJobCompletion);
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.scheduler;
import java.util.Properties;
import java.util.UUID;
import org.apache.commons.configuration.ConfigurationConverter;
import org.apache.commons.configuration.PropertiesConfiguration;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.runtime.app.ServiceBasedAppLauncher;
import org.apache.gobblin.util.PropertiesUtils;
/**
 * A class that runs the {@link JobScheduler} in a daemon process for standalone deployment.
 *
 * @author Yinan Li
 */
@Slf4j
public class SchedulerDaemon extends ServiceBasedAppLauncher {

  private SchedulerDaemon(Properties defaultProperties, Properties customProperties) throws Exception {
    this(PropertiesUtils.combineProperties(defaultProperties, customProperties));
  }

  public SchedulerDaemon(Properties properties) throws Exception {
    super(properties, getAppName(properties));
    // The SchedulerService is registered first; the JobScheduler depends on it.
    SchedulerService schedulerService = new SchedulerService(properties);
    addService(schedulerService);
    addService(new JobScheduler(properties, schedulerService));
  }

  /** Returns the configured app name, or a random one if none was configured. */
  private static String getAppName(Properties properties) {
    return properties.getProperty(ServiceBasedAppLauncher.APP_NAME, "SchedulerDaemon-" + UUID.randomUUID());
  }

  public static void main(String[] args) throws Exception {
    if (args.length < 1 || args.length > 2) {
      System.err.println(
          "Usage: SchedulerDaemon <default configuration properties file> [custom configuration properties file]");
      System.exit(1);
    }

    // Load default framework configuration properties.
    Properties defaultProperties = ConfigurationConverter.getProperties(new PropertiesConfiguration(args[0]));

    // Load custom framework configuration properties, if a second file was given.
    Properties customProperties = new Properties();
    if (args.length == 2) {
      customProperties.putAll(ConfigurationConverter.getProperties(new PropertiesConfiguration(args[1])));
    }

    log.debug("Scheduler Daemon::main starting with defaultProperties: {}, customProperties: {}", defaultProperties,
        customProperties);

    // Start the scheduler daemon; custom properties override defaults.
    new SchedulerDaemon(defaultProperties, customProperties).start();
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.scheduler;
import org.quartz.*;
import org.slf4j.MDC;
import java.util.Map;
public abstract class BaseGobblinJob implements Job {
private final Map<String, String> mdcContext;
public BaseGobblinJob() {
this.mdcContext = MDC.getCopyOfContextMap();
}
/**
* <p>
* Called by the <code>{@link Scheduler}</code> when a <code>{@link Trigger}</code>
* fires that is associated with the <code>Job</code>.
* </p>
*
* <p>
* The implementation may wish to set a
* {@link JobExecutionContext#setResult(Object) result} object on the
* {@link JobExecutionContext} before this method exits. The result itself
* is meaningless to Quartz, but may be informative to
* <code>{@link JobListener}s</code> or
* <code>{@link TriggerListener}s</code> that are watching the job's
* execution.
* </p>
*
* @throws JobExecutionException if there is an exception while executing the job.
*/
@Override
public void execute(JobExecutionContext context) throws JobExecutionException {
Map<String, String> originalContext = MDC.getCopyOfContextMap();
if (this.mdcContext != null) {
MDC.setContextMap(this.mdcContext);
}
try {
executeImpl(context);
} finally {
if (originalContext != null) {
MDC.setContextMap(originalContext);
} else {
MDC.clear();
}
}
}
/**
* <p>
* Called by the <code>{@link Scheduler}</code> when a <code>{@link Trigger}</code>
* fires that is associated with the <code>Job</code>.
* </p>
*
* <p>
* The implementation may wish to set a
* {@link JobExecutionContext#setResult(Object) result} object on the
* {@link JobExecutionContext} before this method exits. The result itself
* is meaningless to Quartz, but may be informative to
* <code>{@link JobListener}s</code> or
* <code>{@link TriggerListener}s</code> that are watching the job's
* execution.
* </p>
*
* @throws JobExecutionException if there is an exception while executing the job.
*/
protected abstract void executeImpl(JobExecutionContext context) throws JobExecutionException;
} | 1,634 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.scheduler;
import java.io.Closeable;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.apache.commons.configuration.ConfigurationException;
import org.apache.gobblin.runtime.job_spec.JobSpecResolver;
import org.apache.gobblin.source.Source;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.hadoop.fs.Path;
import org.quartz.CronScheduleBuilder;
import org.quartz.DisallowConcurrentExecution;
import org.quartz.InterruptableJob;
import org.quartz.Job;
import org.quartz.JobBuilder;
import org.quartz.JobDataMap;
import org.quartz.JobDetail;
import org.quartz.JobExecutionContext;
import org.quartz.JobExecutionException;
import org.quartz.JobKey;
import org.quartz.SchedulerException;
import org.quartz.Trigger;
import org.quartz.TriggerBuilder;
import org.quartz.UnableToInterruptJobException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Splitter;
import com.google.common.base.Strings;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.google.common.io.Closer;
import com.google.common.util.concurrent.AbstractIdleService;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.runtime.JobException;
import org.apache.gobblin.runtime.JobLauncher;
import org.apache.gobblin.runtime.JobLauncherFactory;
import org.apache.gobblin.runtime.listeners.EmailNotificationJobListener;
import org.apache.gobblin.runtime.listeners.JobListener;
import org.apache.gobblin.runtime.listeners.RunOnceJobListener;
import org.apache.gobblin.util.ExecutorsUtils;
import org.apache.gobblin.util.SchedulerUtils;
import org.apache.gobblin.util.filesystem.PathAlterationObserverScheduler;
/**
* Gobblin job scheduler.
*
* <p>
* The scheduler is a pure scheduler in the sense that it is only responsible
* for scheduling Gobblin jobs. Job state tracking and monitoring are handled
* by the {@link JobLauncher}.
* </p>
*
* <p>
* For job scheduling, This class uses a Quartz {@link org.quartz.Scheduler}.
* Each job is associated with a cron schedule that is used to create a
* {@link org.quartz.Trigger} for the job.
* </p>
*
* @author Yinan Li
*/
public class JobScheduler extends AbstractIdleService {
private static final Logger LOG = LoggerFactory.getLogger(JobScheduler.class);
public enum Action {
SCHEDULE, RESCHEDULE, UNSCHEDULE
}
  // Keys under which collaborators are stashed in the Quartz JobDataMap for GobblinJob
  public static final String JOB_SCHEDULER_KEY = "jobScheduler";
  public static final String PROPERTIES_KEY = "jobProps";
  public static final String JOB_LISTENER_KEY = "jobListener";

  // System configuration properties
  public final Properties properties;

  // A Quartz scheduler
  private final SchedulerService scheduler;

  // A thread pool executor for running jobs without schedules
  protected final ExecutorService jobExecutor;

  // Mapping between jobs to job listeners associated with them
  private final Map<String, JobListener> jobListenerMap = Maps.newHashMap();

  // A map for all scheduled jobs, keyed by job name
  private final Map<String, JobKey> scheduledJobs = Maps.newHashMap();

  // Set of supported job configuration file extensions
  public final Set<String> jobConfigFileExtensions;

  // Root of the monitored job config directory; null when no general path is configured
  public final Path jobConfigFileDirPath;

  // A monitor for changes to job conf files for general FS
  public final PathAlterationObserverScheduler pathAlterationDetector;
  // Listener reacting to config file create/change/delete events; null when no general path is configured
  public final PathAlterationListenerAdaptorForMonitor listener;

  // A period of time for scheduler to wait until jobs are finished
  private final boolean waitForJobCompletion;

  // Registered shutdown hooks (e.g. the path alteration detector), closed in shutDown()
  private final Closer closer = Closer.create();

  @Getter
  private final JobSpecResolver jobSpecResolver;

  // Set when shutDown() begins; gates cancellation of immediately-scheduled jobs
  @Getter
  private volatile boolean cancelRequested = false;
  /**
   * Creates a job scheduler backed by the given {@link SchedulerService} (a Quartz wrapper).
   *
   * @param properties system-level configuration properties
   * @param scheduler the Quartz-wrapping service used to schedule cron jobs
   * @throws Exception if the {@link JobSpecResolver} cannot be built
   */
  public JobScheduler(Properties properties, SchedulerService scheduler)
      throws Exception {
    this.properties = properties;
    this.scheduler = scheduler;
    // Thread pool used for jobs that have no cron schedule (run-once jobs)
    this.jobExecutor = Executors.newFixedThreadPool(Integer.parseInt(
        properties.getProperty(ConfigurationKeys.JOB_EXECUTOR_THREAD_POOL_SIZE_KEY,
            Integer.toString(ConfigurationKeys.DEFAULT_JOB_EXECUTOR_THREAD_POOL_SIZE))),
        ExecutorsUtils.newThreadFactory(Optional.of(LOG), Optional.of("JobScheduler-%d")));
    // Comma-separated list of recognized job config file extensions
    this.jobConfigFileExtensions = Sets.newHashSet(Splitter.on(",")
        .omitEmptyStrings()
        .split(this.properties.getProperty(ConfigurationKeys.JOB_CONFIG_FILE_EXTENSIONS_KEY,
            ConfigurationKeys.DEFAULT_JOB_CONFIG_FILE_EXTENSIONS)));
    long pollingInterval = Long.parseLong(
        this.properties.getProperty(ConfigurationKeys.JOB_CONFIG_FILE_MONITOR_POLLING_INTERVAL_KEY,
            Long.toString(ConfigurationKeys.DEFAULT_JOB_CONFIG_FILE_MONITOR_POLLING_INTERVAL)));
    this.pathAlterationDetector = new PathAlterationObserverScheduler(pollingInterval);
    this.waitForJobCompletion = Boolean.parseBoolean(
        this.properties.getProperty(ConfigurationKeys.SCHEDULER_WAIT_FOR_JOB_COMPLETION_KEY,
            ConfigurationKeys.DEFAULT_SCHEDULER_WAIT_FOR_JOB_COMPLETION));
    this.jobSpecResolver = JobSpecResolver.builder(ConfigUtils.propertiesToConfig(properties)).build();
    if (this.properties.containsKey(ConfigurationKeys.JOB_CONFIG_FILE_GENERAL_PATH_KEY)) {
      this.jobConfigFileDirPath = new Path(this.properties.getProperty(ConfigurationKeys.JOB_CONFIG_FILE_GENERAL_PATH_KEY));
      this.listener = new PathAlterationListenerAdaptorForMonitor(jobConfigFileDirPath, this);
    } else {
      // This is needed because HelixJobScheduler does not use the same way of finding changed paths
      this.jobConfigFileDirPath = null;
      this.listener = null;
    }
  }
@Override
protected void startUp()
throws Exception {
LOG.info("Starting the job scheduler");
try {
this.scheduler.awaitRunning(30, TimeUnit.SECONDS);
} catch (TimeoutException | IllegalStateException exc) {
throw new IllegalStateException("Scheduler service is not running.");
}
// Note: This should not be mandatory, gobblin-cluster modes have their own job configuration managers
if (this.properties.containsKey(ConfigurationKeys.JOB_CONFIG_FILE_DIR_KEY)
|| this.properties.containsKey(ConfigurationKeys.JOB_CONFIG_FILE_GENERAL_PATH_KEY)) {
if (this.properties.containsKey(ConfigurationKeys.JOB_CONFIG_FILE_DIR_KEY) && !this.properties.containsKey(
ConfigurationKeys.JOB_CONFIG_FILE_GENERAL_PATH_KEY)) {
this.properties.setProperty(ConfigurationKeys.JOB_CONFIG_FILE_GENERAL_PATH_KEY,
"file://" + this.properties.getProperty(ConfigurationKeys.JOB_CONFIG_FILE_DIR_KEY));
}
startServices();
}
}
  /** Starts the job config file monitor and schedules all jobs found at startup. */
  protected void startServices() throws Exception {
    startGeneralJobConfigFileMonitor();
    scheduleGeneralConfiguredJobs();
  }
@Override
protected void shutDown()
throws Exception {
LOG.info("Stopping the job scheduler");
closer.close();
cancelRequested = true;
List<JobExecutionContext> currentExecutions = this.scheduler.getScheduler().getCurrentlyExecutingJobs();
for (JobExecutionContext jobExecutionContext : currentExecutions) {
try {
this.scheduler.getScheduler().interrupt(jobExecutionContext.getFireInstanceId());
} catch (UnableToInterruptJobException e) {
LOG.error("Failed to cancel job " + jobExecutionContext.getJobDetail().getKey(), e);
}
}
ExecutorsUtils.shutdownExecutorService(this.jobExecutor, Optional.of(LOG));
}
  /**
   * Schedule a job.
   *
   * <p>
   * This method calls the Quartz scheduler to scheduler the job.
   * </p>
   *
   * <p>
   * NOTE: despite the declared {@code throws} clause, scheduling failures
   * ({@link JobException} or runtime exceptions) are logged and swallowed here
   * rather than propagated to the caller.
   * </p>
   *
   * @param jobProps Job configuration properties
   * @param jobListener {@link JobListener} used for callback,
   * can be <em>null</em> if no callback is needed.
   */
  public void scheduleJob(Properties jobProps, JobListener jobListener)
      throws JobException {
    try {
      scheduleJob(jobProps, jobListener, Maps.<String, Object>newHashMap(), GobblinJob.class);
    } catch (JobException | RuntimeException exc) {
      // Deliberate best-effort: log and continue so one bad job config does not abort
      // callers that schedule many jobs in a loop.
      LOG.error("Could not schedule job " + jobProps.getProperty(ConfigurationKeys.JOB_NAME_KEY, "Unknown job"), exc);
    }
  }
  /**
   * Schedule a job immediately.
   *
   * <p>
   * Submits the job to the internal executor right away (bypassing Quartz) and returns a
   * {@link Future} wrapper whose {@code cancel} also cancels the underlying
   * {@link JobLauncher}. Cancellation is a no-op until this scheduler itself is shutting
   * down ({@code cancelRequested} is set).
   * </p>
   *
   * @param jobProps Job configuration properties
   * @param jobListener {@link JobListener} used for callback,
   * can be <em>null</em> if no callback is needed.
   * @param jobLauncher the launcher used to run the job
   * @return a {@link Future} tracking the submitted job
   */
  public Future<?> scheduleJobImmediately(Properties jobProps, JobListener jobListener, JobLauncher jobLauncher) {
    Callable<Void> callable = new Callable<Void>() {
      @Override
      public Void call() throws JobException {
        try {
          runJob(jobProps, jobListener, jobLauncher);
        } catch (JobException je) {
          LOG.error("Failed to run job " + jobProps.getProperty(ConfigurationKeys.JOB_NAME_KEY), je);
          throw je;
        }
        return null;
      }
    };

    final Future<?> future = this.jobExecutor.submit(callable);
    // Wrap the executor future so cancellation also reaches the JobLauncher.
    return new Future() {
      @Override
      public boolean cancel(boolean mayInterruptIfRunning) {
        // Only honor cancellation while the scheduler is shutting down.
        if (!cancelRequested) {
          return false;
        }
        boolean result = true;
        try {
          jobLauncher.cancelJob(jobListener);
        } catch (JobException e) {
          LOG.error("Failed to cancel job " + jobProps.getProperty(ConfigurationKeys.JOB_NAME_KEY), e);
          result = false;
        }
        if (mayInterruptIfRunning) {
          result &= future.cancel(true);
        }
        return result;
      }

      @Override
      public boolean isCancelled() {
        return future.isCancelled();
      }

      @Override
      public boolean isDone() {
        return future.isDone();
      }

      @Override
      public Object get() throws InterruptedException, ExecutionException {
        return future.get();
      }

      @Override
      public Object get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException {
        return future.get(timeout, unit);
      }
    };
  }
public Future<?> scheduleJobImmediately(Properties jobProps, JobListener jobListener) throws JobException {
try {
return scheduleJobImmediately(jobProps, jobListener, buildJobLauncher(jobProps));
} catch (Exception e) {
throw new JobException("Job " + jobProps.getProperty(ConfigurationKeys.JOB_NAME_KEY) + " cannot be immediately scheduled.", e);
}
}
  /**
   * Submit a runnable to the {@link ExecutorService} of this {@link JobScheduler}.
   * @param runnable the runnable to submit to the job executor
   */
  public void submitRunnableToExecutor(Runnable runnable) {
    this.jobExecutor.execute(runnable);
  }
  /**
   * Schedule a job.
   *
   * <p>
   * This method does what {@link #scheduleJob(Properties, JobListener)} does, and additionally it
   * allows the caller to pass in additional job data and the {@link Job} implementation class.
   * </p>
   *
   * <p>
   * A job without a {@code job.schedule} cron entry is submitted to the executor once and
   * not registered with Quartz; a previously scheduled job with the same name is
   * unscheduled first.
   * </p>
   *
   * @param jobProps Job configuration properties; must contain {@code job.name}
   * @param jobListener {@link JobListener} used for callback,
   * can be <em>null</em> if no callback is needed.
   * @param additionalJobData additional job data in a {@link Map}
   * @param jobClass Quartz job class
   * @throws JobException when there is anything wrong
   * with scheduling the job
   */
  public void scheduleJob(Properties jobProps, JobListener jobListener, Map<String, Object> additionalJobData,
      Class<? extends Job> jobClass)
      throws JobException {
    Preconditions.checkArgument(jobProps.containsKey(ConfigurationKeys.JOB_NAME_KEY),
        "A job must have a job name specified by job.name");
    String jobName = jobProps.getProperty(ConfigurationKeys.JOB_NAME_KEY);
    if (this.scheduledJobs.containsKey(jobName)) {
      LOG.info("Job " + jobName + " was already scheduled, un-scheduling it now.");
      unscheduleJob(jobName);
    }

    // Check if the job has been disabled
    boolean disabled = Boolean.valueOf(jobProps.getProperty(ConfigurationKeys.JOB_DISABLED_KEY, "false"));
    if (disabled) {
      LOG.info("Skipping disabled job " + jobName);
      return;
    }

    if (!jobProps.containsKey(ConfigurationKeys.JOB_SCHEDULE_KEY)) {
      // No cron schedule: submit the job to run once on the executor instead of via Quartz
      this.jobExecutor.execute(new NonScheduledJobRunner(jobProps, jobListener));
      return;
    }

    if (jobListener != null) {
      this.jobListenerMap.put(jobName, jobListener);
    }

    // Build a data map that gets passed to the job when Quartz fires it
    JobDataMap jobDataMap = new JobDataMap();
    jobDataMap.put(JOB_SCHEDULER_KEY, this);
    jobDataMap.put(PROPERTIES_KEY, jobProps);
    jobDataMap.put(JOB_LISTENER_KEY, jobListener);
    jobDataMap.putAll(additionalJobData);

    // Build a Quartz job
    JobDetail job = JobBuilder.newJob(jobClass)
        .withIdentity(jobName, Strings.nullToEmpty(jobProps.getProperty(ConfigurationKeys.JOB_GROUP_KEY)))
        .withDescription(Strings.nullToEmpty(jobProps.getProperty(ConfigurationKeys.JOB_DESCRIPTION_KEY)))
        .usingJobData(jobDataMap)
        .build();

    try {
      // Schedule the Quartz job with a trigger built from the job configuration
      Trigger trigger = createTriggerForJob(job.getKey(), jobProps, Optional.absent());
      this.scheduler.getScheduler().scheduleJob(job, trigger);
      logNewlyScheduledJob(job, trigger);
    } catch (SchedulerException se) {
      LOG.error("Failed to schedule job " + jobName, se);
      throw new JobException("Failed to schedule job " + jobName, se);
    }

    this.scheduledJobs.put(jobName, job.getKey());
  }
protected void logNewlyScheduledJob(JobDetail job, Trigger trigger) {
LOG.info(String.format("Scheduled job %s. Next run: %s.", job.getKey(), trigger.getNextFireTime()));
}
/**
* Unschedule and delete a job.
*
* @param jobName Job name
* @throws JobException when there is anything wrong unschedule the job
*/
public void unscheduleJob(String jobName)
throws JobException {
if (this.scheduledJobs.containsKey(jobName)) {
try {
this.scheduler.getScheduler().deleteJob(this.scheduledJobs.remove(jobName));
} catch (SchedulerException se) {
LOG.error("Failed to unschedule and delete job " + jobName, se);
throw new JobException("Failed to unschedule and delete job " + jobName, se);
}
}
}
  /** Removes all jobs, triggers and calendars from the underlying Quartz scheduler. */
  public void unscheduleAllJobs() throws SchedulerException {
    this.scheduler.getScheduler().clear();
  }
/**
* Run a job.
*
* <p>
* This method runs the job immediately without going through the Quartz scheduler.
* This is particularly useful for testing.
* </p>
*
* @param jobProps Job configuration properties
* @param jobListener {@link JobListener} used for callback, can be <em>null</em> if no callback is needed.
* @throws JobException when there is anything wrong with running the job
*/
public void runJob(Properties jobProps, JobListener jobListener)
throws JobException {
try {
runJob(jobProps, jobListener, buildJobLauncher(jobProps));
} catch (Exception e) {
throw new JobException("Failed to run job " + jobProps.getProperty(ConfigurationKeys.JOB_NAME_KEY), e);
}
}
  /** Creates a {@link JobLauncher} for the given job using this scheduler's system-level properties. */
  public JobLauncher buildJobLauncher(Properties jobProps) throws Exception {
    return JobLauncherFactory.newJobLauncher(this.properties, jobProps);
  }
  /**
   * Run a job.
   *
   * <p>
   * This method runs the job immediately without going through the Quartz scheduler.
   * This is particularly useful for testing.
   * </p>
   *
   * <p>
   * This method does what {@link #runJob(Properties, JobListener)} does, and additionally it allows
   * the caller to pass in a {@link JobLauncher} instance used to launch the job to run.
   * </p>
   *
   * @param jobProps Job configuration properties
   * @param jobListener {@link JobListener} used for callback, can be <em>null</em> if no callback is needed.
   * @param jobLauncher a {@link JobLauncher} object used to launch the job to run
   * @return If current job is a stop-early job based on {@link Source#isEarlyStopped()}
   * @throws JobException when there is anything wrong with running the job
   */
  public boolean runJob(Properties jobProps, JobListener jobListener, JobLauncher jobLauncher)
      throws JobException {
    Preconditions.checkArgument(jobProps.containsKey(ConfigurationKeys.JOB_NAME_KEY),
        "A job must have a job name specified by job.name");
    String jobName = jobProps.getProperty(ConfigurationKeys.JOB_NAME_KEY);

    // Check if the job has been disabled
    boolean disabled = Boolean.valueOf(jobProps.getProperty(ConfigurationKeys.JOB_DISABLED_KEY, "false"));
    if (disabled) {
      LOG.info("Skipping disabled job " + jobName);
      return false;
    }

    // Launch the job; the closer guarantees the launcher is closed even on failure
    try (Closer closer = Closer.create()) {
      closer.register(jobLauncher).launchJob(jobListener);
      boolean runOnce = Boolean.valueOf(jobProps.getProperty(ConfigurationKeys.JOB_RUN_ONCE_KEY, "false"));
      boolean isEarlyStopped = jobLauncher.isEarlyStopped();
      // A run-once job that completed normally is deregistered from Quartz; an
      // early-stopped job stays scheduled so it can run again.
      if (!isEarlyStopped && runOnce && this.scheduledJobs.containsKey(jobName)) {
        this.scheduler.getScheduler().deleteJob(this.scheduledJobs.remove(jobName));
      }

      return isEarlyStopped;
    } catch (Throwable t) {
      throw new JobException("Failed to launch and run job " + jobName, t);
    }
  }
/**
* Get the names of the scheduled jobs.
*
* @return names of the scheduled jobs
*/
public Collection<String> getScheduledJobs() {
return this.scheduledJobs.keySet();
}
/**
* Schedule Gobblin jobs in general position
*/
private void scheduleGeneralConfiguredJobs()
throws ConfigurationException, JobException, IOException {
LOG.info("Scheduling configured jobs");
for (Properties jobProps : loadGeneralJobConfigs()) {
if (!jobProps.containsKey(ConfigurationKeys.JOB_SCHEDULE_KEY)) {
// A job without a cron schedule is considered a one-time job
jobProps.setProperty(ConfigurationKeys.JOB_RUN_ONCE_KEY, "true");
}
boolean runOnce = Boolean.valueOf(jobProps.getProperty(ConfigurationKeys.JOB_RUN_ONCE_KEY, "false"));
scheduleJob(jobProps, runOnce ? new RunOnceJobListener() : new EmailNotificationJobListener());
this.listener.addToJobNameMap(jobProps);
}
}
/**
* Load job configuration file(s) from general source
*/
private List<Properties> loadGeneralJobConfigs()
throws ConfigurationException, IOException {
List<Properties> jobConfigs = SchedulerUtils.loadGenericJobConfigs(this.properties, this.jobSpecResolver);
LOG.info(String.format("Loaded %d job configurations", jobConfigs.size()));
return jobConfigs;
}
  /**
   * Start the job configuration file monitor using generic file system API.
   *
   * <p>
   * The job configuration file monitor currently only supports monitoring the following types of changes:
   *
   * <ul>
   * <li>New job configuration files.</li>
   * <li>Changes to existing job configuration files.</li>
   * <li>Changes to existing common properties file with a .properties extension.</li>
   * <li>Deletion to existing job configuration files.</li>
   * <li>Deletion to existing common properties file with a .properties extension.</li>
   * </ul>
   * </p>
   *
   * <p>
   * This monitor has one limitation: in case more than one file including at least one common properties
   * file are changed between two adjacent checks, the reloading of affected job configuration files may
   * be intermixed and applied in an order that is not desirable. This is because the order the listener
   * is called on the changes is not controlled by Gobblin, but instead by the monitor itself.
   * </p>
   */
  private void startGeneralJobConfigFileMonitor()
      throws Exception {
    SchedulerUtils.addPathAlterationObserver(this.pathAlterationDetector, this.listener, jobConfigFileDirPath);
    this.pathAlterationDetector.start();
    // Register a hook so the detector is stopped when this scheduler's closer is closed
    // during shutDown(); stop() is given up to 1000ms to wind down.
    this.closer.register(new Closeable() {
      @Override
      public void close() throws IOException {
        try {
          pathAlterationDetector.stop(1000);
        } catch (InterruptedException e) {
          throw new IOException(e);
        }
      }
    });
  }
/**
 * Builds a Quartz {@link org.quartz.Trigger} from the given job configuration properties. If
 * {@code triggerSuffix} is present, it is appended (preceded by an underscore) to the job name,
 * allowing multiple unique triggers to be associated with the same job.
 *
 * @param jobKey the Quartz key of the job the trigger fires
 * @param jobProps job configuration properties supplying name, group, and cron schedule
 * @param triggerSuffix optional suffix making the trigger identity unique per job
 * @return a cron-scheduled trigger bound to {@code jobKey}
 */
public static Trigger createTriggerForJob(JobKey jobKey, Properties jobProps, Optional<String> triggerSuffix) {
  // Trigger identity is "<jobName>[_<suffix>]" within the job's (possibly empty) group
  String suffix = triggerSuffix.transform(s -> "_" + s).or("");
  String triggerName = jobProps.getProperty(ConfigurationKeys.JOB_NAME_KEY) + suffix;
  String triggerGroup = Strings.nullToEmpty(jobProps.getProperty(ConfigurationKeys.JOB_GROUP_KEY));
  String cronExpression = jobProps.getProperty(ConfigurationKeys.JOB_SCHEDULE_KEY);
  return TriggerBuilder.newTrigger()
      .withIdentity(triggerName, triggerGroup)
      .forJob(jobKey)
      .withSchedule(CronScheduleBuilder.cronSchedule(cronExpression))
      .build();
}
/**
 * A Gobblin job to be scheduled, executed by Quartz via the data stashed in its {@link JobDataMap}.
 */
@DisallowConcurrentExecution
@Slf4j
public static class GobblinJob extends BaseGobblinJob implements InterruptableJob {

  @Override
  public void executeImpl(JobExecutionContext context)
      throws JobExecutionException {
    JobDetail jobDetail = context.getJobDetail();
    LOG.info("Starting job " + jobDetail.getKey());
    // The scheduler, job properties, and listener were stashed in the data map at scheduling time
    JobDataMap jobDataMap = jobDetail.getJobDataMap();
    JobScheduler scheduler = (JobScheduler) jobDataMap.get(JOB_SCHEDULER_KEY);
    Properties props = (Properties) jobDataMap.get(PROPERTIES_KEY);
    JobListener listener = (JobListener) jobDataMap.get(JOB_LISTENER_KEY);
    try {
      scheduler.runJob(props, listener);
    } catch (Throwable t) {
      // Surface any failure to Quartz as a JobExecutionException, preserving the cause
      throw new JobExecutionException(t);
    }
  }

  @Override
  public void interrupt()
      throws UnableToInterruptJobException {
    log.info("Job was interrupted");
  }
}
/**
* A class for running non-scheduled Gobblin jobs.
*/
class NonScheduledJobRunner implements Runnable {
private final Properties jobProps;
private final JobListener jobListener;
public NonScheduledJobRunner(Properties jobProps, JobListener jobListener) {
this.jobProps = jobProps;
this.jobListener = jobListener;
}
@Override
public void run() {
try {
JobScheduler.this.runJob(this.jobProps, this.jobListener);
} catch (JobException je) {
LOG.error("Failed to run job " + this.jobProps.getProperty(ConfigurationKeys.JOB_NAME_KEY), je);
}
}
}
}
| 1,635 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/service | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/service/monitoring/FlowStatus.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.monitoring;
import java.util.Iterator;
import lombok.AllArgsConstructor;
import lombok.Getter;
import lombok.ToString;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.service.ExecutionStatus;
/**
 * Represents status of a flow: its identity (name, group, execution id), the statuses of its
 * jobs, and the overall execution status.
 *
 * NOTE: field order defines the lombok-generated all-args constructor signature; do not reorder.
 */
@Alpha
@AllArgsConstructor
@Getter
@ToString
public class FlowStatus {
  private final String flowName;
  private final String flowGroup;
  private final long flowExecutionId;
  // Per-job statuses of this execution; a one-shot Iterator, so consume at most once
  @ToString.Exclude // (to avoid side-effecting exhaustion of `Iterator`)
  private final Iterator<JobStatus> jobStatusIterator;
  private final ExecutionStatus flowExecutionStatus;
}
| 1,636 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/service | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/service/monitoring/LatestFlowExecutionIdTracker.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.monitoring;
import java.util.List;
/**
 * Tracks the latest flow execution Id.
 */
public interface LatestFlowExecutionIdTracker {
  /**
   * @param flowName name of the flow
   * @param flowGroup group of the flow
   * @return the latest flow execution id with the given flowName and flowGroup. -1 will be returned if no such execution found.
   */
  long getLatestExecutionIdForFlow(String flowName, String flowGroup);

  /**
   * @param flowName name of the flow
   * @param flowGroup group of the flow
   * @param count number of execution ids to return
   * @return the latest <code>count</code> execution ids with the given flowName and flowGroup. null will be returned if no such execution found.
   */
  List<Long> getLatestExecutionIdsForFlow(String flowName, String flowGroup, int count);
}
| 1,637 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/service | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/service/monitoring/ResumeFlowEvent.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.monitoring;
import lombok.AllArgsConstructor;
import lombok.Data;
/**
 * Value object identifying a flow execution (group, name, execution id) for a resume request.
 */
@AllArgsConstructor
@Data
public class ResumeFlowEvent {
  private String flowGroup;
  private String flowName;
  private Long flowExecutionId;
}
| 1,638 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/service | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/service/monitoring/JobStatus.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.monitoring;
import java.util.List;
import com.google.common.base.Supplier;
import lombok.Builder;
import lombok.Getter;
import lombok.Setter;
import lombok.ToString;
import org.apache.gobblin.runtime.troubleshooter.Issue;
/**
 * Contains attributes that describe job status: identity of the job and its owning flow,
 * timing, watermarks, retry bookkeeping, progress, and lazily-supplied issues.
 *
 * Instances are created via the lombok-generated builder; all fields except {@code metrics}
 * are immutable after construction.
 */
@Builder
@Getter
@ToString
public class JobStatus {
  private final String jobName;
  private final String jobGroup;
  private final String jobTag;
  private final long jobExecutionId;
  private final long flowExecutionId;
  private final String flowName;
  private final String flowGroup;
  private final String eventName;
  // Timing fields (epoch millis by convention elsewhere in this package — TODO confirm)
  private final long orchestratedTime;
  private final long startTime;
  private final long endTime;
  // Mutable: metrics may be attached after the status object is built
  @Setter
  private String metrics;
  private final String message;
  private final long processedCount;
  private final String lowWatermark;
  private final String highWatermark;
  private final int maxAttempts;
  private final int currentAttempts;
  private final int currentGeneration;
  private final boolean shouldRetry;
  // Supplier so issue retrieval can be deferred (and cached) until actually requested
  private final Supplier<List<Issue>> issues;
  private final int progressPercentage;
  private final long lastProgressEventTime;
}
| 1,639 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/service | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/service/monitoring/FlowStatusGenerator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.monitoring;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import com.google.common.base.Predicate;
import com.google.common.base.Predicates;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterators;
import com.google.common.collect.Lists;
import javax.inject.Inject;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.service.ExecutionStatus;
/**
 * Generator for {@link FlowStatus}, which relies on a {@link JobStatusRetriever}.
 */
@Alpha
public class FlowStatusGenerator {
  // Flow-level statuses considered terminal; anything else counts as still running
  public static final List<String> FINISHED_STATUSES = Lists.newArrayList("FAILED", "COMPLETE", "CANCELLED");
  public static final int MAX_LOOKBACK = 100;

  private final JobStatusRetriever jobStatusRetriever;

  @Inject
  public FlowStatusGenerator(JobStatusRetriever jobStatusRetriever) {
    this.jobStatusRetriever = jobStatusRetriever;
  }

  /**
   * Get the flow statuses of the last <code>count</code> (or fewer) executions.
   *
   * @param flowName name of the flow
   * @param flowGroup group of the flow
   * @param count maximum number of executions to return
   * @param tag if non-null, each returned status only contains job statuses matching the tag
   * @return the latest <code>count</code> {@link FlowStatus}es; null if no flow execution is found
   */
  public List<FlowStatus> getLatestFlowStatus(String flowName, String flowGroup, int count, String tag) {
    List<Long> executionIds = jobStatusRetriever.getLatestExecutionIdsForFlow(flowName, flowGroup, count);
    if (executionIds == null || executionIds.isEmpty()) {
      return null;
    }
    List<FlowStatus> statuses = new ArrayList<>(executionIds.size());
    for (long executionId : executionIds) {
      statuses.add(getFlowStatus(flowName, flowGroup, executionId, tag));
    }
    return statuses;
  }

  /**
   * Get the flow statuses of the last <code>count</code> (or fewer) executions, optionally
   * filtered by flow execution status.
   *
   * @param flowName name of the flow
   * @param flowGroup group of the flow
   * @param count maximum number of executions to return
   * @param tag if non-null, each returned status only contains job statuses matching the tag
   * @param executionStatus if non-null, only executions with this status are returned (searching
   *        at most the last {@link #MAX_LOOKBACK} executions of the flow)
   * @return the latest matching {@link FlowStatus}es; null if no flow execution is found
   */
  public List<FlowStatus> getLatestFlowStatus(String flowName, String flowGroup, int count, String tag, String executionStatus) {
    if (executionStatus == null) {
      return getLatestFlowStatus(flowName, flowGroup, count, tag);
    }
    List<FlowStatus> candidates = getLatestFlowStatus(flowName, flowGroup, MAX_LOOKBACK, tag);
    if (candidates == null) {
      return null;
    }
    // Collect matches in order until `count` have been found (or candidates are exhausted)
    List<FlowStatus> matches = new ArrayList<>();
    for (Iterator<FlowStatus> it = candidates.iterator(); it.hasNext() && matches.size() != count; ) {
      FlowStatus candidate = it.next();
      if (candidate.getFlowExecutionStatus().name().equals(executionStatus)) {
        matches.add(candidate);
      }
    }
    return matches;
  }

  /**
   * Get the flow status for a specific execution.
   *
   * @param flowName name of the flow
   * @param flowGroup group of the flow
   * @param flowExecutionId id of the execution
   * @param tag if non-null, the job status list only contains jobs matching the tag
   * @return the flow status, or null if no (matching) job statuses exist for the execution
   */
  public FlowStatus getFlowStatus(String flowName, String flowGroup, long flowExecutionId, String tag) {
    Iterator<JobStatus> rawStatuses =
        jobStatusRetriever.getJobStatusesForFlowExecution(flowName, flowGroup, flowExecutionId);
    // Materialize so the statuses can be iterated more than once below
    List<JobStatus> jobStatuses = ImmutableList.copyOf(retainStatusOfAnyFlowOrJobMatchingTag(rawStatuses, tag));
    if (jobStatuses.isEmpty()) {
      return null;
    }
    ExecutionStatus flowExecutionStatus =
        JobStatusRetriever.getFlowStatusFromJobStatuses(jobStatusRetriever.getDagManagerEnabled(), jobStatuses.iterator());
    return new FlowStatus(flowName, flowGroup, flowExecutionId, jobStatuses.iterator(), flowExecutionStatus);
  }

  /**
   * Get the flow status for executions of every flow within the flow group.
   *
   * @param flowGroup group whose flows to report on
   * @param countPerFlowName (maximum) number of flow statuses per named flow in group
   * @param tag if non-null, each returned status only contains job statuses matching the tag
   * @return the latest (up to <code>countPerFlowName</code>, per flow) {@link FlowStatus}es,
   *         omitting executions with no (matching) job statuses
   *
   * NOTE: filtering by flow `executionStatus` not presently offered, until use case justified.
   */
  public List<FlowStatus> getFlowStatusesAcrossGroup(String flowGroup, int countPerFlowName, String tag) {
    List<FlowStatus> groupStatuses = jobStatusRetriever.getFlowStatusesForFlowGroupExecutions(flowGroup, countPerFlowName);
    List<FlowStatus> result = new ArrayList<>();
    for (FlowStatus fs : groupStatuses) {
      Iterator<JobStatus> filtered = retainStatusOfAnyFlowOrJobMatchingTag(fs.getJobStatusIterator(), tag);
      if (filtered.hasNext()) {
        result.add(new FlowStatus(fs.getFlowName(), fs.getFlowGroup(), fs.getFlowExecutionId(), filtered,
            fs.getFlowExecutionStatus()));
      }
    }
    return result;
  }

  /**
   * Return true if another instance of the flow is running, i.e. the latest execution's
   * status is not one of {@link #FINISHED_STATUSES}.
   *
   * @param flowName name of the flow
   * @param flowGroup group of the flow
   * @return true if the latest execution of the flow has not finished
   */
  public boolean isFlowRunning(String flowName, String flowGroup) {
    List<FlowStatus> latest = getLatestFlowStatus(flowName, flowGroup, 1, null);
    if (latest == null || latest.isEmpty()) {
      return false;
    }
    ExecutionStatus latestStatus = latest.get(0).getFlowExecutionStatus();
    return !FINISHED_STATUSES.contains(latestStatus.name());
  }

  /** @return only `jobStatuses` that represent a flow or, when `tag != null`, represent a job tagged as `tag` */
  private Iterator<JobStatus> retainStatusOfAnyFlowOrJobMatchingTag(Iterator<JobStatus> jobStatuses, String tag) {
    if (tag == null) {
      return Iterators.filter(jobStatuses, Predicates.alwaysTrue());
    }
    Predicate<JobStatus> matchesTag = js ->
        JobStatusRetriever.isFlowStatus(js) || (js.getJobTag() != null && js.getJobTag().equals(tag));
    return Iterators.filter(jobStatuses, matchesTag);
  }
}
| 1,640 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/service | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/service/monitoring/JobStatusRetriever.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.monitoring;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.stream.Collectors;
import com.google.common.base.Supplier;
import com.google.common.base.Suppliers;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Ordering;
import com.typesafe.config.ConfigFactory;
import lombok.AllArgsConstructor;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.metastore.StateStore;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.event.TimingEvent;
import org.apache.gobblin.runtime.troubleshooter.Issue;
import org.apache.gobblin.runtime.troubleshooter.MultiContextIssueRepository;
import org.apache.gobblin.runtime.troubleshooter.TroubleshooterException;
import org.apache.gobblin.runtime.troubleshooter.TroubleshooterUtils;
import org.apache.gobblin.service.ExecutionStatus;
import org.apache.gobblin.util.ConfigUtils;
/**
 * Retriever for {@link JobStatus}: deserializes job-status {@link State}s from a state store
 * and aggregates them into per-flow {@link FlowStatus}es.
 */
@Slf4j
public abstract class JobStatusRetriever implements LatestFlowExecutionIdTracker {
  public static final String EVENT_NAME_FIELD = "eventName";
  // Placeholder job name/group used by the special status record that represents the whole flow
  public static final String NA_KEY = "NA";

  @Getter
  protected final MetricContext metricContext;
  @Getter
  protected final Boolean dagManagerEnabled;
  private final MultiContextIssueRepository issueRepository;

  protected JobStatusRetriever(boolean dagManagerEnabled, MultiContextIssueRepository issueRepository) {
    this.metricContext = Instrumented.getMetricContext(ConfigUtils.configToState(ConfigFactory.empty()), getClass());
    this.issueRepository = Objects.requireNonNull(issueRepository);
    this.dagManagerEnabled = dagManagerEnabled;
  }

  /** @return statuses of all jobs of the given flow execution */
  public abstract Iterator<JobStatus> getJobStatusesForFlowExecution(String flowName, String flowGroup,
      long flowExecutionId);

  /** @return statuses of the given job within the given flow execution */
  public abstract Iterator<JobStatus> getJobStatusesForFlowExecution(String flowName, String flowGroup,
      long flowExecutionId, String jobName, String jobGroup);

  /**
   * Get the latest {@link FlowStatus}es of executions of flows belonging to this flow group. Currently, latest flow execution
   * is decided by comparing {@link JobStatus#getFlowExecutionId()}.
   * @return `FlowStatus`es of `flowGroup`, ordered by ascending flowName, with all of each name adjacent and by descending flowExecutionId.
   *
   * NOTE: return `List`, not `Iterator` for non-side-effecting access.
   */
  public abstract List<FlowStatus> getFlowStatusesForFlowGroupExecutions(String flowGroup, int countJobStatusesPerFlowName);

  // Convenience over the count-based variant; -1 signals "no execution found"
  public long getLatestExecutionIdForFlow(String flowName, String flowGroup) {
    List<Long> lastKExecutionIds = getLatestExecutionIdsForFlow(flowName, flowGroup, 1);
    return lastKExecutionIds != null && !lastKExecutionIds.isEmpty() ? lastKExecutionIds.get(0) : -1L;
  }

  /**
   * Get the latest {@link JobStatus}es that belongs to the same latest flow execution. Currently, latest flow execution
   * is decided by comparing {@link JobStatus#getFlowExecutionId()}.
   */
  public Iterator<JobStatus> getLatestJobStatusByFlowNameAndGroup(String flowName, String flowGroup) {
    long latestExecutionId = getLatestExecutionIdForFlow(flowName, flowGroup);
    return latestExecutionId == -1L ? Collections.emptyIterator()
        : getJobStatusesForFlowExecution(flowName, flowGroup, latestExecutionId);
  }

  /**
   *
   * @param jobState instance of {@link State}
   * @return deserialize {@link State} into a {@link JobStatus}.
   */
  protected JobStatus getJobStatus(State jobState) {
    String flowGroup = getFlowGroup(jobState);
    String flowName = getFlowName(jobState);
    long flowExecutionId = getFlowExecutionId(jobState);
    String jobName = getJobName(jobState);
    String jobGroup = getJobGroup(jobState);
    String jobTag = jobState.getProp(TimingEvent.FlowEventConstants.JOB_TAG_FIELD);
    long jobExecutionId = getJobExecutionId(jobState);
    String eventName = jobState.getProp(JobStatusRetriever.EVENT_NAME_FIELD);
    // Numeric fields fall back to benign defaults when absent from the serialized state
    long orchestratedTime = Long.parseLong(jobState.getProp(TimingEvent.JOB_ORCHESTRATED_TIME, "0"));
    long startTime = Long.parseLong(jobState.getProp(TimingEvent.JOB_START_TIME, "0"));
    long endTime = Long.parseLong(jobState.getProp(TimingEvent.JOB_END_TIME, "0"));
    String message = jobState.getProp(TimingEvent.METADATA_MESSAGE, "");
    String lowWatermark = jobState.getProp(TimingEvent.FlowEventConstants.LOW_WATERMARK_FIELD, "");
    String highWatermark = jobState.getProp(TimingEvent.FlowEventConstants.HIGH_WATERMARK_FIELD, "");
    long processedCount = Long.parseLong(jobState.getProp(TimingEvent.FlowEventConstants.PROCESSED_COUNT_FIELD, "0"));
    int maxAttempts = Integer.parseInt(jobState.getProp(TimingEvent.FlowEventConstants.MAX_ATTEMPTS_FIELD, "1"));
    int currentAttempts = Integer.parseInt(jobState.getProp(TimingEvent.FlowEventConstants.CURRENT_ATTEMPTS_FIELD, "1"));
    int currentGeneration = Integer.parseInt(jobState.getProp(TimingEvent.FlowEventConstants.CURRENT_GENERATION_FIELD, "1"));
    boolean shouldRetry = Boolean.parseBoolean(jobState.getProp(TimingEvent.FlowEventConstants.SHOULD_RETRY_FIELD, "false"));
    int progressPercentage = jobState.getPropAsInt(TimingEvent.JOB_COMPLETION_PERCENTAGE, 0);
    long lastProgressEventTime = jobState.getPropAsLong(TimingEvent.JOB_LAST_PROGRESS_EVENT_TIME, 0);
    String contextId = TroubleshooterUtils.getContextIdForJob(jobState.getProperties());

    // Lazily (and at most once, via memoize) fetch issues; failures degrade to an empty list
    Supplier<List<Issue>> jobIssues = Suppliers.memoize(() -> {
      List<Issue> issues;
      try {
        issues = issueRepository.getAll(contextId);
      } catch (TroubleshooterException e) {
        log.warn("Cannot retrieve job issues", e);
        issues = Collections.emptyList();
      }
      return issues;
    });

    return JobStatus.builder().flowName(flowName).flowGroup(flowGroup).flowExecutionId(flowExecutionId).
        jobName(jobName).jobGroup(jobGroup).jobTag(jobTag).jobExecutionId(jobExecutionId).eventName(eventName).
        lowWatermark(lowWatermark).highWatermark(highWatermark).orchestratedTime(orchestratedTime).startTime(startTime).endTime(endTime).
        message(message).processedCount(processedCount).maxAttempts(maxAttempts).currentAttempts(currentAttempts).currentGeneration(currentGeneration).
        shouldRetry(shouldRetry).progressPercentage(progressPercentage).lastProgressEventTime(lastProgressEventTime).
        issues(jobIssues).build();
  }

  protected final String getFlowGroup(State jobState) {
    return jobState.getProp(TimingEvent.FlowEventConstants.FLOW_GROUP_FIELD);
  }

  protected final String getFlowName(State jobState) {
    return jobState.getProp(TimingEvent.FlowEventConstants.FLOW_NAME_FIELD);
  }

  protected final long getFlowExecutionId(State jobState) {
    return Long.parseLong(jobState.getProp(TimingEvent.FlowEventConstants.FLOW_EXECUTION_ID_FIELD));
  }

  protected final String getJobGroup(State jobState) {
    return jobState.getProp(TimingEvent.FlowEventConstants.JOB_GROUP_FIELD);
  }

  protected final String getJobName(State jobState) {
    return jobState.getProp(TimingEvent.FlowEventConstants.JOB_NAME_FIELD);
  }

  protected final long getJobExecutionId(State jobState) {
    return Long.parseLong(jobState.getProp(TimingEvent.FlowEventConstants.JOB_EXECUTION_ID_FIELD, "0"));
  }

  /** Deserializes each {@link State} into a {@link JobStatus}, lazily. */
  protected Iterator<JobStatus> asJobStatuses(List<State> jobStatusStates) {
    return jobStatusStates.stream().map(this::getJobStatus).iterator();
  }

  /** Converts each flow-execution grouping into a {@link FlowStatus} with sorted job statuses. */
  protected List<FlowStatus> asFlowStatuses(List<FlowExecutionJobStateGrouping> flowExecutionGroupings) {
    return flowExecutionGroupings.stream().map(exec -> {
      List<JobStatus> jobStatuses = ImmutableList.copyOf(asJobStatuses(exec.getJobStates().stream().sorted(
          // rationalized order, to facilitate test assertions
          Comparator.comparing(this::getJobGroup).thenComparing(this::getJobName).thenComparing(this::getJobExecutionId)
      ).collect(Collectors.toList())));
      return new FlowStatus(exec.getFlowName(), exec.getFlowGroup(), exec.getFlowExecutionId(), jobStatuses.iterator(),
          getFlowStatusFromJobStatuses(dagManagerEnabled, jobStatuses.iterator()));
    }).collect(Collectors.toList());
  }

  /** Job-status {@link State}s of a single flow execution, keyed by (flowGroup, flowName, flowExecutionId). */
  @AllArgsConstructor
  @Getter
  protected static class FlowExecutionJobStateGrouping {
    private final String flowGroup;
    private final String flowName;
    private final long flowExecutionId;
    private final List<State> jobStates;
  }

  /**
   * Groups job-status states by flow name and execution id, keeping only the
   * {@code maxCountPerFlowName} greatest (latest) execution ids per flow name.
   * Results are ordered by ascending flow name, then descending execution id.
   */
  protected List<FlowExecutionJobStateGrouping> groupByFlowExecutionAndRetainLatest(
      String flowGroup, List<State> jobStatusStates, int maxCountPerFlowName) {
    Map<String, Map<Long, List<State>>> statesByFlowExecutionIdByName =
        jobStatusStates.stream().collect(Collectors.groupingBy(
            this::getFlowName,
            Collectors.groupingBy(this::getFlowExecutionId)));

    return statesByFlowExecutionIdByName.entrySet().stream().sorted(Map.Entry.comparingByKey()).flatMap(flowNameEntry -> {
      String flowName = flowNameEntry.getKey();
      Map<Long, List<State>> statesByFlowExecutionIdForName = flowNameEntry.getValue();
      // greatestOf returns the ids in descending order, already capped at maxCountPerFlowName
      List<Long> executionIds = Ordering.<Long>natural().greatestOf(statesByFlowExecutionIdForName.keySet(), maxCountPerFlowName);
      return executionIds.stream().map(executionId ->
          new FlowExecutionJobStateGrouping(flowGroup, flowName, executionId, statesByFlowExecutionIdForName.get(executionId)));
    }).collect(Collectors.toList());
  }

  public abstract StateStore<State> getStateStore();

  /**
   * Check if a {@link org.apache.gobblin.service.monitoring.JobStatus} is the special job status that represents the
   * entire flow's status
   */
  public static boolean isFlowStatus(org.apache.gobblin.service.monitoring.JobStatus jobStatus) {
    return jobStatus.getJobName() != null && jobStatus.getJobGroup() != null
        && jobStatus.getJobName().equals(JobStatusRetriever.NA_KEY) && jobStatus.getJobGroup().equals(JobStatusRetriever.NA_KEY);
  }

  /**
   * Derives the overall flow {@link ExecutionStatus} from the job statuses of one execution.
   * With the DagManager enabled, the dedicated flow-level status record is authoritative;
   * otherwise the flow status is inferred from the individual job statuses.
   */
  public static ExecutionStatus getFlowStatusFromJobStatuses(boolean dagManagerEnabled, Iterator<JobStatus> jobStatusIterator) {
    ExecutionStatus flowExecutionStatus = ExecutionStatus.$UNKNOWN;

    if (dagManagerEnabled) {
      while (jobStatusIterator.hasNext()) {
        JobStatus jobStatus = jobStatusIterator.next();
        // Check if this is the flow status instead of a single job status
        if (JobStatusRetriever.isFlowStatus(jobStatus)) {
          flowExecutionStatus = ExecutionStatus.valueOf(jobStatus.getEventName());
        }
      }
    } else {
      Set<ExecutionStatus> jobStatuses = new HashSet<>();
      while (jobStatusIterator.hasNext()) {
        JobStatus jobStatus = jobStatusIterator.next();
        // because in absence of DagManager we do not get all flow level events, we will ignore the flow level events
        // we actually get and purely calculate flow status based on job statuses.
        if (!JobStatusRetriever.isFlowStatus(jobStatus)) {
          jobStatuses.add(ExecutionStatus.valueOf(jobStatus.getEventName()));
        }
      }
      // The most salient status present among the jobs wins (e.g. any FAILED job fails the flow)
      List<ExecutionStatus> statusesInDescendingSalience = ImmutableList.of(ExecutionStatus.FAILED, ExecutionStatus.CANCELLED,
          ExecutionStatus.RUNNING, ExecutionStatus.ORCHESTRATED, ExecutionStatus.COMPLETE);
      flowExecutionStatus = statusesInDescendingSalience.stream().filter(jobStatuses::contains).findFirst().orElse(ExecutionStatus.$UNKNOWN);
    }

    return flowExecutionStatus;
  }
}
| 1,641 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/service | Create_ds/gobblin/gobblin-runtime/src/main/java/org/apache/gobblin/service/monitoring/KillFlowEvent.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.monitoring;
import lombok.AllArgsConstructor;
import lombok.Data;
/**
 * Value object identifying a flow execution (group, name, execution id) for a kill request.
 */
@AllArgsConstructor
@Data
public class KillFlowEvent {
  private String flowGroup;
  private String flowName;
  private Long flowExecutionId;
}
| 1,642 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/gobblin | Create_ds/gobblin/gobblin-runtime/src/main/java/gobblin/runtime/CheckpointableWatermarkState.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package gobblin.runtime;
import com.google.gson.Gson;
import gobblin.source.extractor.CheckpointableWatermark;
/***
 * Shim layer for org.apache.gobblin.runtime.CheckpointableWatermarkState
 */
public class CheckpointableWatermarkState extends org.apache.gobblin.runtime.CheckpointableWatermarkState {

  /** Delegates to the org.apache.gobblin parent constructor. */
  public CheckpointableWatermarkState(CheckpointableWatermark watermark, Gson gson) {
    super(watermark, gson);
  }

  // No-arg constructor, delegating to the parent's no-arg constructor
  public CheckpointableWatermarkState() {
    super();
  }
}
| 1,643 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/gobblin | Create_ds/gobblin/gobblin-runtime/src/main/java/gobblin/runtime/TaskState.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package gobblin.runtime;
/**
 * Deprecated-package shim for {@link org.apache.gobblin.runtime.TaskState}: kept so code that
 * still references the pre-Apache {@code gobblin.runtime} package continues to compile and link.
 */
public class TaskState extends org.apache.gobblin.runtime.TaskState {
}
| 1,644 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/gobblin | Create_ds/gobblin/gobblin-runtime/src/main/java/gobblin/runtime/JobState.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package gobblin.runtime;
import java.util.Map;
import java.util.stream.Collectors;
import org.apache.gobblin.configuration.State;
/***
* Shim layer for org.apache.gobblin.runtime.JobState
*/
/***
 * Shim layer for org.apache.gobblin.runtime.JobState, preserving the pre-Apache package name.
 */
public class JobState extends org.apache.gobblin.runtime.JobState {

  // Necessary for serialization/deserialization
  public JobState() {
  }

  public JobState(String jobName, String jobId) {
    super(jobName, jobId);
  }

  public JobState(State properties, Map<String, DatasetState> previousDatasetStates, String jobName,
      String jobId) {
    super(properties, adaptDatasetStateMap(previousDatasetStates), jobName, jobId);
  }

  /**
   * Re-wraps each shim {@link DatasetState} as its relocated parent type, keyed identically.
   * Only the job name and id are carried over, matching the parent constructor's contract.
   */
  private static Map<String, org.apache.gobblin.runtime.JobState.DatasetState> adaptDatasetStateMap(
      Map<String, DatasetState> previousDatasetStates) {
    Map<String, org.apache.gobblin.runtime.JobState.DatasetState> adapted = new java.util.HashMap<>();
    for (Map.Entry<String, DatasetState> entry : previousDatasetStates.entrySet()) {
      DatasetState source = entry.getValue();
      adapted.put(entry.getKey(),
          new org.apache.gobblin.runtime.JobState.DatasetState(source.getJobName(), source.getId()));
    }
    return adapted;
  }

  /***
   * Shim layer for org.apache.gobblin.runtime.JobState.DatasetState
   */
  public static class DatasetState extends org.apache.gobblin.runtime.JobState.DatasetState {
    // For serialization/deserialization
    public DatasetState() {
      super();
    }

    public DatasetState(String jobName, String jobId) {
      super(jobName, jobId);
    }
  }
}
| 1,645 |
0 | Create_ds/gobblin/gobblin-runtime/src/main/java/gobblin/runtime | Create_ds/gobblin/gobblin-runtime/src/main/java/gobblin/runtime/mapreduce/GobblinWorkUnitsInputFormat.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package gobblin.runtime.mapreduce;
/***
* Shim layer for org.apache.gobblin.runtime.mapreduce.GobblinWorkUnitsInputFormat
*/
/***
 * Shim layer for org.apache.gobblin.runtime.mapreduce.GobblinWorkUnitsInputFormat
 */
public class GobblinWorkUnitsInputFormat extends org.apache.gobblin.runtime.mapreduce.GobblinWorkUnitsInputFormat {
  // Shim for the relocated GobblinSplit nested type.
  public static class GobblinSplit extends org.apache.gobblin.runtime.mapreduce.GobblinWorkUnitsInputFormat.GobblinSplit {
  }
  // Shim for the relocated GobblinRecordReader nested type; delegates construction to the parent.
  public static class GobblinRecordReader extends org.apache.gobblin.runtime.mapreduce.GobblinWorkUnitsInputFormat.GobblinRecordReader {
    public GobblinRecordReader(GobblinSplit split) {
      super(split);
    }
  }
}
| 1,646 |
0 | Create_ds/gobblin/gobblin-runtime/src/jmh/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-runtime/src/jmh/java/org/apache/gobblin/runtime/fork/MockTaskContext.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.fork;
import com.google.common.collect.Lists;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.qualitychecker.row.RowLevelPolicy;
import org.apache.gobblin.qualitychecker.row.RowLevelPolicyCheckResults;
import org.apache.gobblin.qualitychecker.row.RowLevelPolicyChecker;
import org.apache.gobblin.runtime.TaskContext;
import org.apache.gobblin.writer.DataWriter;
import org.apache.gobblin.writer.DataWriterBuilder;
import java.io.IOException;
/**
 * A {@link TaskContext} for benchmarks that stubs out I/O: row-level policy checks always pass
 * and the data writer only counts records instead of persisting them.
 */
class MockTaskContext extends TaskContext {

  public MockTaskContext(WorkUnitState workUnitState) {
    super(workUnitState);
  }

  @Override
  public DataWriterBuilder getDataWriterBuilder(int branches, int index) {
    return new MockDataWriterBuilder();
  }

  @Override
  public RowLevelPolicyChecker getRowLevelPolicyChecker() throws Exception {
    return new MockRowLevelPolicyChecker();
  }

  @Override
  public RowLevelPolicyChecker getRowLevelPolicyChecker(int index) throws Exception {
    return new MockRowLevelPolicyChecker();
  }

  /**
   * Policy checker that approves every record.
   * Declared {@code static}: it uses no enclosing-instance state, so keeping it non-static would
   * only retain a hidden reference to the MockTaskContext.
   */
  private static class MockRowLevelPolicyChecker extends RowLevelPolicyChecker {

    MockRowLevelPolicyChecker() throws IOException {
      super(Lists.<RowLevelPolicy>newArrayList(), "0", null);
    }

    @Override
    public boolean executePolicies(Object record, RowLevelPolicyCheckResults results) throws IOException {
      return true;
    }
  }

  /** Builds {@link MockDataWriter}s that discard records while counting them. */
  private static class MockDataWriterBuilder extends DataWriterBuilder {

    @Override
    public DataWriter build() throws IOException {
      return new MockDataWriter();
    }

    /** No-op writer: write() only increments a counter; commit/cleanup/close do nothing. */
    private static class MockDataWriter implements DataWriter {
      private int count = 0;

      @Override
      public void write(Object record) throws IOException {
        count++;
      }

      @Override
      public void commit() throws IOException {
      }

      @Override
      public void cleanup() throws IOException {
      }

      @Override
      public long recordsWritten() {
        return count;
      }

      @Override
      public long bytesWritten() throws IOException {
        // Nothing is persisted, so no bytes are ever written.
        return 0;
      }

      @Override
      public void close() throws IOException {
      }
    }
  }
}
| 1,647 |
0 | Create_ds/gobblin/gobblin-example/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-example/src/test/java/org/apache/gobblin/example/TestOneShotRunner.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.example;
import java.net.URL;
import org.apache.hadoop.fs.Path;
import org.testng.Assert;
import org.testng.annotations.Test;
import org.apache.gobblin.example.generic.OneShotRunner;
import org.apache.gobblin.runtime.api.Configurable;
/** Unit test verifying {@link OneShotRunner} resolves its app and base config files. */
public class TestOneShotRunner {

  @Test
  public void testConfiguration() {
    ClassLoader loader = getClass().getClassLoader();
    URL appConfResource = loader.getResource("appConf.conf");
    URL baseConfResource = loader.getResource("baseConf.conf");

    OneShotRunner runner = new OneShotRunner();
    runner.appConf("file://" + appConfResource.getFile());
    runner.baseConf("file://" + baseConfResource.getFile());

    // The job file should point at the app conf the runner was given.
    Assert.assertEquals(runner.getJobFile().get(), new Path("file://" + appConfResource.getPath()));

    // Keys from both conf files should be visible in the resolved system config.
    Configurable resolvedSysConfig = runner.getSysConfig();
    Assert.assertEquals(resolvedSysConfig.getConfig().getString("test.key1"), "value1");
    Assert.assertEquals(resolvedSysConfig.getConfig().getString("test.key2"), "value2");
  }
}
| 1,648 |
0 | Create_ds/gobblin/gobblin-example/src/main/java/org/apache/gobblin/example | Create_ds/gobblin/gobblin-example/src/main/java/org/apache/gobblin/example/wikipedia/WikipediaSource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.example.wikipedia;
import java.io.IOException;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import com.google.common.base.Function;
import com.google.common.base.Predicates;
import com.google.common.base.Splitter;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.gson.JsonElement;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.source.Source;
import org.apache.gobblin.source.extractor.Extractor;
import org.apache.gobblin.source.extractor.WatermarkInterval;
import org.apache.gobblin.source.extractor.extract.AbstractSource;
import org.apache.gobblin.source.extractor.extract.LongWatermark;
import org.apache.gobblin.source.workunit.Extract;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.source.workunit.Extract.TableType;
/**
* An implementation of {@link Source} for the Wikipedia example.
*
* <p>
* This source creates a {@link org.apache.gobblin.source.workunit.WorkUnit}, and uses
* {@link WikipediaExtractor} to pull the data from Wikipedia.
* </p>
*
* @author Ziyang Liu
*/
/**
 * {@link Source} implementation for the Wikipedia example: creates one {@link WorkUnit} per
 * requested page title, seeding each with the highest watermark (last revision id) observed
 * for that title in the previous run.
 */
public class WikipediaSource extends AbstractSource<String, JsonElement> {

  public static final String ARTICLE_TITLE = "gobblin.wikipediaSource.workUnit.title";

  @Override
  public List<WorkUnit> getWorkunits(SourceState state) {
    Map<String, Iterable<WorkUnitState>> previousWorkUnits = state.getPreviousWorkUnitStatesByDatasetUrns();
    List<String> titles = new LinkedList<>(Splitter.on(",").omitEmptyStrings()
        .splitToList(state.getProp(WikipediaExtractor.SOURCE_PAGE_TITLES)));

    // Highest non-null actual watermark per dataset URN (page title) from the previous run.
    // A single pass replaces the old transform/filter/materialize-then-Collections.max chain.
    Map<String, LongWatermark> prevHighWatermarks = Maps.newHashMap();
    for (Map.Entry<String, Iterable<WorkUnitState>> entry : previousWorkUnits.entrySet()) {
      LongWatermark max = null;
      for (WorkUnitState wus : entry.getValue()) {
        LongWatermark watermark = wus.getActualHighWatermark(LongWatermark.class);
        if (watermark != null && (max == null || watermark.compareTo(max) > 0)) {
          max = watermark;
        }
      }
      if (max != null) {
        prevHighWatermarks.put(entry.getKey(), max);
      }
    }

    Extract extract = createExtract(TableType.SNAPSHOT_ONLY,
        state.getProp(ConfigurationKeys.EXTRACT_NAMESPACE_NAME_KEY), "WikipediaOutput");

    List<WorkUnit> workUnits = Lists.newArrayList();
    for (String title : titles) {
      // remove() doubles as the lookup; whatever remains in the map afterwards belongs to
      // titles not requested this run and is handled below.
      LongWatermark previous = prevHighWatermarks.remove(title);
      LongWatermark prevWatermark = previous != null ? previous : new LongWatermark(-1);
      WorkUnit workUnit = WorkUnit.create(extract, new WatermarkInterval(prevWatermark, new LongWatermark(-1)));
      workUnit.setProp(ConfigurationKeys.DATASET_URN_KEY, title);
      workUnits.add(workUnit);
    }

    // Carry watermarks forward for datasets seen previously but not requested this run
    // (low == high; presumably this makes the extractor pull nothing — verify against extractor).
    for (Map.Entry<String, LongWatermark> nonProcessedDataset : prevHighWatermarks.entrySet()) {
      WorkUnit workUnit = WorkUnit.create(extract,
          new WatermarkInterval(nonProcessedDataset.getValue(), nonProcessedDataset.getValue()));
      workUnit.setProp(ConfigurationKeys.DATASET_URN_KEY, nonProcessedDataset.getKey());
      workUnits.add(workUnit);
    }
    return workUnits;
  }

  /** Returns a {@link WikipediaExtractor} bound to the given work unit. */
  @Override
  public Extractor<String, JsonElement> getExtractor(WorkUnitState state) throws IOException {
    return new WikipediaExtractor(state);
  }

  @Override
  public void shutdown(SourceState state) {
    // nothing to do
  }
}
| 1,649 |
0 | Create_ds/gobblin/gobblin-example/src/main/java/org/apache/gobblin/example | Create_ds/gobblin/gobblin-example/src/main/java/org/apache/gobblin/example/wikipedia/WikipediaConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.example.wikipedia;
import java.lang.reflect.Type;
import java.util.Map;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;
import com.google.gson.Gson;
import com.google.gson.JsonElement;
import com.google.gson.reflect.TypeToken;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.SingleRecordIterable;
import org.apache.gobblin.converter.ToAvroConverterBase;
/**
* An implementation of {@link org.apache.gobblin.converter.Converter} for the Wikipedia example.
*
*<p>
* This converter converts the input string schema into an Avro {@link org.apache.avro.Schema}
* and each input JSON document into an Avro {@link org.apache.avro.generic.GenericRecord}.
* </p>
*
* @author Ziyang Liu
*/
/**
 * {@link org.apache.gobblin.converter.Converter} for the Wikipedia example: parses the input
 * schema string into an Avro {@link Schema} and converts each revision JSON document into an
 * Avro {@link GenericRecord}, mapping the MediaWiki {@code "*"} content field to "content".
 */
public class WikipediaConverter extends ToAvroConverterBase<String, JsonElement> {

  private static final String JSON_CONTENT_MEMBER = "content";

  private static final Gson GSON = new Gson();

  // Expect the input JSON string to be key-value pairs
  private static final Type FIELD_ENTRY_TYPE = new TypeToken<Map<String, Object>>() {}.getType();

  @Override
  public Schema convertSchema(String schema, WorkUnitState workUnit) {
    return new Schema.Parser().parse(schema);
  }

  @Override
  public Iterable<GenericRecord> convertRecord(Schema outputSchema, JsonElement inputRecord, WorkUnitState workUnit) {
    // inputRecord is already a parsed JsonElement, so deserialize it into key/value pairs
    // directly; the previous round-trip through GSON.fromJson(inputRecord, JsonElement.class)
    // only produced an equal copy of the same tree.
    Map<String, Object> fields = GSON.fromJson(inputRecord, FIELD_ENTRY_TYPE);
    GenericRecord record = new GenericData.Record(outputSchema);
    for (Map.Entry<String, Object> entry : fields.entrySet()) {
      if (entry.getKey().equals("*")) {
        //switch '*' to 'content' since '*' is not a valid avro schema field name
        record.put(JSON_CONTENT_MEMBER, entry.getValue());
      } else {
        // Silently drop fields the output schema does not declare.
        if (outputSchema.getField(entry.getKey()) != null) {
          record.put(entry.getKey(), entry.getValue());
        }
      }
    }
    return new SingleRecordIterable<>(record);
  }
}
| 1,650 |
0 | Create_ds/gobblin/gobblin-example/src/main/java/org/apache/gobblin/example | Create_ds/gobblin/gobblin-example/src/main/java/org/apache/gobblin/example/wikipedia/EmbeddedWikipediaExample.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.example.wikipedia;
import java.io.IOException;
import java.net.URISyntaxException;
import org.apache.commons.cli.CommandLine;
import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.publisher.BaseDataPublisher;
import org.apache.gobblin.runtime.api.JobTemplate;
import org.apache.gobblin.runtime.api.SpecNotFoundException;
import org.apache.gobblin.runtime.cli.CliObjectOption;
import org.apache.gobblin.runtime.cli.CliObjectSupport;
import org.apache.gobblin.runtime.cli.PublicMethodsGobblinCliFactory;
import org.apache.gobblin.runtime.embedded.EmbeddedGobblin;
import org.apache.gobblin.runtime.template.ResourceBasedJobTemplate;
import org.apache.gobblin.writer.AvroDataWriterBuilder;
import org.apache.gobblin.writer.Destination;
import org.apache.gobblin.writer.WriterOutputFormat;
/**
* Embedded Gobblin to run Wikipedia example.
*/
/**
 * Embedded Gobblin to run Wikipedia example.
 * Configures an {@link EmbeddedGobblin} from the bundled "wikipedia.template" job template and
 * exposes CLI options (via the {@code wikipedia} alias) for lookback and Avro output.
 */
public class EmbeddedWikipediaExample extends EmbeddedGobblin {

  /**
   * CLI entry point: builds an {@link EmbeddedWikipediaExample} from the positional arguments,
   * each of which is treated as a Wikipedia article title.
   */
  @Alias(value = "wikipedia", description = "Gobblin example that downloads revisions from Wikipedia.")
  public static class CliFactory extends PublicMethodsGobblinCliFactory {
    public CliFactory() {
      super(EmbeddedWikipediaExample.class);
    }

    @Override
    public EmbeddedGobblin constructEmbeddedGobblin(CommandLine cli) throws JobTemplate.TemplateException, IOException {
      // Positional (non-option) arguments are the article titles; at least one is required.
      String[] leftoverArgs = cli.getArgs();
      if (leftoverArgs.length < 1) {
        throw new RuntimeException("Unexpected number of arguments.");
      }
      return new EmbeddedWikipediaExample(leftoverArgs);
    }

    @Override
    public String getUsageString() {
      return "[OPTIONS] <article-title> [<article-title> ...]";
    }
  }

  /**
   * Creates the embedded job from the "wikipedia.template" resource and sets the comma-joined
   * topic list as the "titles" configuration.
   * NOTE: the @CliObjectSupport argumentNames must match this constructor's parameter names,
   * since the CLI factory binds arguments reflectively.
   */
  @CliObjectSupport(argumentNames = {"topics"})
  public EmbeddedWikipediaExample(String... topics) throws JobTemplate.TemplateException, IOException {
    super("Wikipedia");
    try {
      setTemplate(ResourceBasedJobTemplate.forResourcePath("wikipedia.template"));
    } catch (URISyntaxException | SpecNotFoundException exc) {
      throw new RuntimeException("Could not instantiate an " + EmbeddedWikipediaExample.class.getName(), exc);
    }
    this.setConfiguration("titles", String.join(",", topics));
  }

  /**
   * Set bootstrap lookback, i.e. oldest revision to pull.
   */
  @CliObjectOption(description = "Sets the period for which articles should be pulled in ISO time format (e.g. P2D, PT1H)")
  public EmbeddedWikipediaExample lookback(String isoLookback) {
    this.setConfiguration(WikipediaExtractor.BOOTSTRAP_PERIOD, isoLookback);
    return this;
  }

  /**
   * Write output to avro files at the given input location.
   * Configures writer, partitioner, converter and publisher so revisions land as Avro under
   * {@code outputPath}.
   */
  @CliObjectOption(description = "Write output to Avro files. Specify the output directory as argument.")
  public EmbeddedWikipediaExample avroOutput(String outputPath) {
    this.setConfiguration(ConfigurationKeys.WRITER_BUILDER_CLASS, AvroDataWriterBuilder.class.getName());
    this.setConfiguration(ConfigurationKeys.WRITER_DESTINATION_TYPE_KEY, Destination.DestinationType.HDFS.name());
    this.setConfiguration(ConfigurationKeys.WRITER_OUTPUT_FORMAT_KEY, WriterOutputFormat.AVRO.name());
    this.setConfiguration(ConfigurationKeys.WRITER_PARTITIONER_CLASS, WikipediaPartitioner.class.getName());
    this.setConfiguration(ConfigurationKeys.JOB_DATA_PUBLISHER_TYPE, BaseDataPublisher.class.getName());
    this.setConfiguration(ConfigurationKeys.CONVERTER_CLASSES_KEY, WikipediaConverter.class.getName());
    this.setConfiguration(ConfigurationKeys.DATA_PUBLISHER_FINAL_DIR, outputPath);
    return this;
  }
}
| 1,651 |
0 | Create_ds/gobblin/gobblin-example/src/main/java/org/apache/gobblin/example | Create_ds/gobblin/gobblin-example/src/main/java/org/apache/gobblin/example/wikipedia/WikipediaExtractor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.example.wikipedia;
import java.io.BufferedReader;
import java.io.Closeable;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.MalformedURLException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Queue;
import org.apache.commons.lang3.StringUtils;
import org.apache.http.HttpResponse;
import org.apache.http.HttpStatus;
import org.apache.http.NameValuePair;
import org.apache.http.client.ClientProtocolException;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpUriRequest;
import org.apache.http.client.utils.URIBuilder;
import org.apache.http.client.utils.URLEncodedUtils;
import org.apache.http.message.BasicNameValuePair;
import org.joda.time.DateTime;
import org.joda.time.Period;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Charsets;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.io.Closer;
import com.google.gson.Gson;
import com.google.gson.JsonArray;
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.http.HttpClientConfigurator;
import org.apache.gobblin.http.HttpClientConfiguratorLoader;
import org.apache.gobblin.source.extractor.DataRecordException;
import org.apache.gobblin.source.extractor.Extractor;
import org.apache.gobblin.source.extractor.extract.LongWatermark;
/**
* An implementation of {@link Extractor} for the Wikipedia example.
*
* <p>
* This extractor uses the MediaWiki web API to retrieve a certain number of latest revisions
* for each specified title from Wikipedia. Each revision is returned as a JSON document.
* </p>
*
* @author Ziyang Liu
*/
public class WikipediaExtractor implements Extractor<String, JsonElement> {
  private static final Logger LOG = LoggerFactory.getLogger(WikipediaExtractor.class);

  // Timestamp format accepted by the MediaWiki API for rvstart (e.g. 20240101000000).
  private static final DateTimeFormatter WIKIPEDIA_TIMESTAMP_FORMAT = DateTimeFormat.forPattern("YYYYMMddHHmmss");

  // Configuration keys controlling the extractor.
  public static final String CONFIG_PREFIX = "gobblin.wikipediaSource.";
  public static final String MAX_REVISION_PER_PAGE = CONFIG_PREFIX+ "maxRevisionsPerPage";
  public static final int DEFAULT_MAX_REVISIONS_PER_PAGE = -1;  // -1 means no cap
  public static final String HTTP_CLIENT_CONFIG_PREFIX = CONFIG_PREFIX + "httpClient.";
  public static final String SOURCE_PAGE_TITLES = "source.page.titles";
  public static final String BOOTSTRAP_PERIOD = "wikipedia.source.bootstrap.lookback";
  public static final String DEFAULT_BOOTSTRAP_PERIOD = "P2D";
  public static final String WIKIPEDIA_API_ROOTURL = "wikipedia.api.rooturl";
  public static final String WIKIPEDIA_AVRO_SCHEMA = "wikipedia.avro.schema";

  // Member names expected in the MediaWiki API JSON responses.
  private static final String JSON_MEMBER_QUERY = "query";
  private static final String JSON_MEMBER_PAGES = "pages";
  private static final String JSON_MEMBER_REVISIONS = "revisions";
  private static final String JSON_MEMBER_PAGEID = "pageid";
  private static final String JSON_MEMBER_TITLE = "title";

  private static final Gson GSON = new Gson();

  private final WikiResponseReader reader;          // iterates revisions for the requested title
  private final String rootUrl;                     // MediaWiki API root URL
  private final String schema;                      // Avro schema string returned by getSchema
  private final String requestedTitle;              // page title (dataset URN) this work unit pulls
  private final int batchSize;                      // revisions fetched per API call
  private final long lastRevisionId;                // newest revision id at extractor construction, or -1
  private Queue<JsonElement> currentBatch;          // revisions fetched but not yet returned
  private final ImmutableMap<String, String> baseQuery;  // query params common to all API calls
  private final WorkUnitState workUnitState;
  private final int maxRevisionsPulled;             // cap on revisions per page; -1 = unlimited
  private final HttpClientConfigurator httpClientConfigurator;
  private HttpClient httpClient;                    // created lazily on first HTTP query
  /**
   * Iterator over revision JSON objects for the requested title, fetching batches lazily from the
   * MediaWiki API. Inner (non-static) by design: it reads and updates the enclosing extractor's
   * {@code currentBatch} and work-unit watermark.
   */
  private class WikiResponseReader implements Iterator<JsonElement> {
    private long lastPulledRevision;   // id of the last revision returned by next()
    private long revisionsPulled = 0;  // count of revisions returned so far

    public WikiResponseReader(long latestPulledRevision) {
      this.lastPulledRevision = latestPulledRevision;
    }

    @Override
    public boolean hasNext() {
      // Stop once the configured per-page cap is reached; record the last revision actually
      // pulled as the actual high watermark so the next run resumes from there.
      if (WikipediaExtractor.this.maxRevisionsPulled > -1
          && this.revisionsPulled >= WikipediaExtractor.this.maxRevisionsPulled) {
        WikipediaExtractor.this.workUnitState.setActualHighWatermark(new LongWatermark(this.lastPulledRevision));
        LOG.info("Pulled max number of records {}, final revision pulled {}.", this.revisionsPulled,
            this.lastPulledRevision);
        return false;
      }
      if (!WikipediaExtractor.this.currentBatch.isEmpty()) {
        return true;
      } else {
        /*
         * Retrieve revisions for the next title. Repeat until we find a title that has at least one revision,
         * otherwise return false
         */
        // Done when we have caught up to the newest revision known at construction time.
        if (this.lastPulledRevision >= WikipediaExtractor.this.lastRevisionId) {
          return false;
        }
        try {
          // Fetch the next batch in ascending order, starting at the last pulled revision.
          // batchSize + 1 is requested because the first result duplicates the revision we
          // already returned; it is discarded via poll() below.
          WikipediaExtractor.this.currentBatch = retrievePageRevisions(ImmutableMap.<String, String>builder()
              .putAll(WikipediaExtractor.this.baseQuery)
              .put("rvprop", "ids|timestamp|user|userid|size")
              .put("titles", WikipediaExtractor.this.requestedTitle)
              .put("rvlimit", Integer.toString(WikipediaExtractor.this.batchSize + 1))
              .put("rvstartid", Long.toString(this.lastPulledRevision))
              .put("rvendid", Long.toString(WikipediaExtractor.this.lastRevisionId))
              .put("rvdir", "newer")
              .build());
          // discard the first one (we've already pulled it)
          WikipediaExtractor.this.currentBatch.poll();
        } catch (URISyntaxException | IOException use) {
          // Treat fetch failures as end-of-data rather than failing the task.
          LOG.error("Could not retrieve more revisions.", use);
          return false;
        }
        return !WikipediaExtractor.this.currentBatch.isEmpty();
      }
    }

    @Override
    public JsonElement next() {
      // NOTE: returns null (rather than throwing NoSuchElementException) when exhausted.
      if (!hasNext()) {
        return null;
      }
      JsonElement element = WikipediaExtractor.this.currentBatch.poll();
      this.lastPulledRevision = parseRevision(element);
      this.revisionsPulled++;
      return element;
    }

    @Override
    public void remove() {
      throw new UnsupportedOperationException();
    }
  }
  /**
   * Creates an extractor for the page title carried in the work unit's dataset URN.
   * Queries the MediaWiki API once up front (rvlimit=1) to learn the newest revision id, then
   * determines the starting revision: the work unit's low watermark if present, otherwise a
   * bootstrap watermark computed from the configured lookback period.
   *
   * @throws IOException if the initial API call to determine the latest revision fails
   */
  public WikipediaExtractor(WorkUnitState workUnitState) throws IOException {
    this.workUnitState = workUnitState;
    this.rootUrl = readProp(WIKIPEDIA_API_ROOTURL, workUnitState);
    this.schema = readProp(WIKIPEDIA_AVRO_SCHEMA, workUnitState);
    this.batchSize = 5;  // fixed batch size per API call
    this.requestedTitle = workUnitState.getProp(ConfigurationKeys.DATASET_URN_KEY);
    this.baseQuery =
        ImmutableMap.<String, String>builder().put("format", "json").put("action","query").put("prop","revisions").build();
    HttpClientConfiguratorLoader httpClientConfiguratorLoader
        = new HttpClientConfiguratorLoader(workUnitState);
    this.httpClientConfigurator = httpClientConfiguratorLoader.getConfigurator();
    this.httpClientConfigurator.setStatePropertiesPrefix(HTTP_CLIENT_CONFIG_PREFIX)
        .configure(workUnitState);
    try {
      // rvlimit=1 with no rvdir presumably yields the newest revision — TODO confirm against
      // the MediaWiki API default ordering.
      Queue<JsonElement> lastRevision = retrievePageRevisions(ImmutableMap.<String, String>builder().putAll(this.baseQuery)
          .put("rvprop","ids").put("titles",this.requestedTitle).put("rvlimit","1").build());
      this.lastRevisionId = lastRevision.isEmpty() ? -1 : parseRevision(lastRevision.poll());
    } catch (URISyntaxException use) {
      throw new IOException(use);
    }
    long baseRevision = workUnitState.getWorkunit().getLowWatermark(LongWatermark.class, new Gson()).getValue();
    if (baseRevision < 0) {
      // No previous watermark: bootstrap from the configured lookback period; if even that
      // fails, start at the latest revision (i.e. pull nothing new).
      try {
        baseRevision = createLowWatermarkForBootstrap(workUnitState);
      } catch (IOException ioe) {
        baseRevision = this.lastRevisionId;
      }
    }
    this.reader = new WikiResponseReader(baseRevision);
    // Set the high watermark eagerly; the reader overwrites it if the per-page cap stops us early.
    workUnitState.setActualHighWatermark(new LongWatermark(this.lastRevisionId));
    this.currentBatch = new LinkedList<>();
    LOG.info(String.format("Will pull revisions %s to %s for page %s.",this.reader.lastPulledRevision,
        this.lastRevisionId, this.requestedTitle));
    this.maxRevisionsPulled = workUnitState.getPropAsInt(MAX_REVISION_PER_PAGE, DEFAULT_MAX_REVISIONS_PER_PAGE);
  }
private long parseRevision(JsonElement element) {
return element.getAsJsonObject().get("revid").getAsLong();
}
private long createLowWatermarkForBootstrap(WorkUnitState state) throws IOException {
String bootstrapPeriodString = state.getProp(BOOTSTRAP_PERIOD, DEFAULT_BOOTSTRAP_PERIOD);
Period period = Period.parse(bootstrapPeriodString);
DateTime startTime = DateTime.now().minus(period);
try {
Queue<JsonElement> firstRevision = retrievePageRevisions(ImmutableMap.<String, String>builder().putAll(this.baseQuery)
.put("rvprop", "ids")
.put("titles", this.requestedTitle)
.put("rvlimit", "1")
.put("rvstart", WIKIPEDIA_TIMESTAMP_FORMAT.print(startTime))
.put("rvdir", "newer")
.build());
if (firstRevision.isEmpty()) {
throw new IOException("Could not retrieve oldest revision, returned empty revisions list.");
}
return parseRevision(firstRevision.poll());
} catch (URISyntaxException use) {
throw new IOException(use);
}
}
private String readProp(String key, WorkUnitState workUnitState) {
String value = workUnitState.getWorkunit().getProp(key);
if (StringUtils.isBlank(value)) {
value = workUnitState.getProp(key);
}
if (StringUtils.isBlank(value)) {
value = workUnitState.getJobState().getProp(key);
}
return value;
}
  /**
   * Executes an HTTP GET against {@code rootUrl} with the given query parameters and parses the
   * response body as JSON. Returns an empty {@link JsonObject} when the body is empty.
   * Lazily creates the shared {@link HttpClient} on first use.
   */
  private JsonElement performHttpQuery(String rootUrl, Map<String, String> query) throws URISyntaxException, IOException {
    if (null == this.httpClient) {
      this.httpClient = createHttpClient();
    }
    HttpUriRequest req = createHttpRequest(rootUrl, query);
    // Guava Closer tracks the response and reader so both are closed even if reading fails.
    Closer closer = Closer.create();
    StringBuilder sb = new StringBuilder();
    try {
      HttpResponse response = sendHttpRequest(req, this.httpClient);
      if (response instanceof CloseableHttpResponse) {
        closer.register((CloseableHttpResponse)response);
      }
      BufferedReader br = closer.register(
          new BufferedReader(new InputStreamReader(response.getEntity().getContent(),
              ConfigurationKeys.DEFAULT_CHARSET_ENCODING)));
      String line;
      while ((line = br.readLine()) != null) {
        sb.append(line + "\n");
      }
    } catch (Throwable t) {
      // Closer.rethrow preserves the original exception while ensuring close() still runs.
      throw closer.rethrow(t);
    } finally {
      try {
        closer.close();
      } catch (IOException e) {
        // Log but do not fail the query on close errors; the body was already read (or rethrown).
        LOG.error("IOException in Closer.close() while performing query " + req + ": " + e, e);
      }
    }
    if (Strings.isNullOrEmpty(sb.toString())) {
      LOG.warn("Received empty response for query: " + req);
      return new JsonObject();
    }
    JsonElement jsonElement = GSON.fromJson(sb.toString(), JsonElement.class);
    return jsonElement;
  }
/**
 * Builds the request URI for {@code rootUrl} with the supplied parameters URL-encoded
 * into the query string.
 */
public static URI createRequestURI(String rootUrl, Map<String, String> query)
    throws MalformedURLException, URISyntaxException {
  List<NameValuePair> params = Lists.newArrayList();
  for (Map.Entry<String, String> param : query.entrySet()) {
    params.add(new BasicNameValuePair(param.getKey(), param.getValue()));
  }
  return new URIBuilder(rootUrl).setQuery(URLEncodedUtils.format(params, Charsets.UTF_8)).build();
}
/** Creates an HTTP GET request for {@code rootUrl} with the given query parameters. */
HttpUriRequest createHttpRequest(String rootUrl, Map<String, String> query)
    throws MalformedURLException, URISyntaxException {
  return new HttpGet(createRequestURI(rootUrl, query));
}
/**
 * Executes {@code req} and returns the response when it is a 200 with a body.
 * Any other outcome closes the response (if closeable) and raises an {@link IOException}.
 */
HttpResponse sendHttpRequest(HttpUriRequest req, HttpClient httpClient)
    throws ClientProtocolException, IOException {
  LOG.debug("Sending request {}", req);
  HttpResponse response = httpClient.execute(req);
  boolean usable = response.getStatusLine().getStatusCode() == HttpStatus.SC_OK
      && null != response.getEntity();
  if (!usable) {
    // Release the connection before failing so the client pool is not exhausted.
    if (response instanceof CloseableHttpResponse) {
      ((CloseableHttpResponse) response).close();
    }
    throw new IOException("HTTP Request " + req + " returned unexpected response " + response);
  }
  return response;
}
/**
 * Queries the Wikipedia API and extracts the revision list for the requested page.
 *
 * <p>Walks the response structure query -> pages -> &lt;pageId&gt; -> revisions, returning an
 * empty queue if any level is missing. The page-level "pageid" and "title" members are copied
 * onto each individual revision object.
 *
 * @param query query parameters for the API call
 * @return the (possibly empty) queue of revision JSON objects
 */
private Queue<JsonElement> retrievePageRevisions(Map<String, String> query)
    throws IOException, URISyntaxException {
  Queue<JsonElement> revisions = new LinkedList<>();
  JsonElement response = performHttpQuery(this.rootUrl, query);
  if (response == null || !response.isJsonObject()) {
    return revisions;
  }
  JsonObject responseObj = response.getAsJsonObject();
  if (responseObj == null || !responseObj.has(JSON_MEMBER_QUERY)) {
    return revisions;
  }
  JsonObject queryObj = responseObj.getAsJsonObject(JSON_MEMBER_QUERY);
  if (!queryObj.has(JSON_MEMBER_PAGES)) {
    return revisions;
  }
  JsonObject pagesObj = queryObj.getAsJsonObject(JSON_MEMBER_PAGES);
  if (pagesObj.entrySet().isEmpty()) {
    return revisions;
  }
  // "pages" is keyed by page id; a single page was requested, so take the first entry.
  JsonObject pageObj = pagesObj.getAsJsonObject(pagesObj.entrySet().iterator().next().getKey());
  if (!pageObj.has(JSON_MEMBER_REVISIONS)) {
    return revisions;
  }
  for (JsonElement revElement : pageObj.getAsJsonArray(JSON_MEMBER_REVISIONS)) {
    JsonObject revObj = revElement.getAsJsonObject();
    // "pageid" and "title" are attached to the parent page object; replicate them on
    // every revision so each record is self-describing.
    if (pageObj.has(JSON_MEMBER_PAGEID)) {
      revObj.add(JSON_MEMBER_PAGEID, pageObj.get(JSON_MEMBER_PAGEID));
    }
    if (pageObj.has(JSON_MEMBER_TITLE)) {
      revObj.add(JSON_MEMBER_TITLE, pageObj.get(JSON_MEMBER_TITLE));
    }
    revisions.add(revObj);
  }
  LOG.info(revisions.size() + " record(s) retrieved for title " + this.requestedTitle);
  return revisions;
}
/** Builds a new {@link HttpClient} from the configured configurator; overridable for testing. */
protected HttpClient createHttpClient() {
  return httpClientConfigurator.createClient();
}
/** Releases the underlying HTTP client if its concrete type supports closing. */
@Override
public void close() throws IOException {
  // instanceof is null-safe, so no explicit null check is needed.
  if (this.httpClient instanceof Closeable) {
    ((Closeable) this.httpClient).close();
  }
}
/** Returns the schema resolved at construction time. */
@Override
public String getSchema() {
  return this.schema;
}
/**
 * Returns the next revision record, or {@code null} when no reader was created or the
 * reader is exhausted (end of stream).
 */
@Override
public JsonElement readRecord(@Deprecated JsonElement reuse) throws DataRecordException, IOException {
  return (this.reader != null && this.reader.hasNext()) ? this.reader.next() : null;
}
/** The record count is unknown before reading; 0 signals "not available". */
@Override
public long getExpectedRecordCount() {
  return 0L;
}
/** Returns the id of the last revision seen, used as the high watermark. */
@Override
public long getHighWatermark() {
  return this.lastRevisionId;
}
}
| 1,652 |
0 | Create_ds/gobblin/gobblin-example/src/main/java/org/apache/gobblin/example | Create_ds/gobblin/gobblin-example/src/main/java/org/apache/gobblin/example/wikipedia/WikipediaPartitioner.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.example.wikipedia;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.writer.partitioner.WriterPartitioner;
import org.apache.avro.Schema;
import org.apache.avro.SchemaBuilder;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;
/**
* Partitioner that splits records by title.
*/
/**
 * A {@link WriterPartitioner} that partitions Wikipedia revision records by article title.
 *
 * <p>The partition value is a single-field Avro record whose {@code title} field is copied
 * from the incoming record.
 */
public class WikipediaPartitioner implements WriterPartitioner<GenericRecord> {

  private static final String TITLE = "title";

  private static final Schema SCHEMA = SchemaBuilder.record("ArticleTitle").namespace("gobblin.example.wikipedia")
      .fields().name(TITLE).type(Schema.create(Schema.Type.STRING)).noDefault().endRecord();

  /** Branch information is unused; the signature is required by the partitioner contract. */
  public WikipediaPartitioner(State state, int numBranches, int branchId) {}

  @Override
  public Schema partitionSchema() {
    return SCHEMA;
  }

  @Override
  public GenericRecord partitionForRecord(GenericRecord record) {
    GenericRecord partition = new GenericData.Record(SCHEMA);
    // Use the shared TITLE constant so the field name cannot drift from SCHEMA.
    partition.put(TITLE, record.get(TITLE));
    return partition;
  }
}
| 1,653 |
0 | Create_ds/gobblin/gobblin-example/src/main/java/org/apache/gobblin/example | Create_ds/gobblin/gobblin-example/src/main/java/org/apache/gobblin/example/hivematerializer/HiveMaterializerSource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.example.hivematerializer;
import java.io.IOException;
import java.util.List;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.data.management.conversion.hive.entities.StageableTableMetadata;
import org.apache.gobblin.data.management.conversion.hive.materializer.HiveMaterializer;
import org.apache.gobblin.data.management.conversion.hive.task.HiveConverterUtils;
import org.apache.gobblin.data.management.conversion.hive.task.HiveTask;
import org.apache.gobblin.data.management.copy.hive.HiveDataset;
import org.apache.gobblin.hive.HiveMetastoreClientPool;
import org.apache.gobblin.source.Source;
import org.apache.gobblin.source.extractor.Extractor;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.util.AutoReturnableObject;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.HadoopUtils;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.thrift.TException;
import com.google.api.client.repackaged.com.google.common.base.Splitter;
import com.google.common.base.Optional;
import com.google.common.collect.Lists;
import com.typesafe.config.Config;
import lombok.extern.slf4j.Slf4j;
import static org.apache.gobblin.data.management.copy.hive.HiveDatasetFinder.*;
@Slf4j
/**
 * A sample source showing how to create work units for Hive materialization. This source allows copying of tables,
 * materialization of views, and materialization of queries.
 */
public class HiveMaterializerSource implements Source<Object, Object> {
  private static final String HIVE_MATERIALIZER_SOURCE_PREFIX = "gobblin.hiveMaterializerSource";
  public static final String COPY_TABLE_KEY = HIVE_MATERIALIZER_SOURCE_PREFIX + ".copyTable";
  public static final String MATERIALIZE_VIEW = HIVE_MATERIALIZER_SOURCE_PREFIX + ".materializeView";
  public static final String MATERIALIZE_QUERY = HIVE_MATERIALIZER_SOURCE_PREFIX + ".materializeQuery";
  public static final String OUTPUT_STORAGE_FORMAT = HIVE_MATERIALIZER_SOURCE_PREFIX + ".outputStorageFormat";

  /**
   * Creates exactly one work unit depending on which of {@link #COPY_TABLE_KEY},
   * {@link #MATERIALIZE_VIEW}, or {@link #MATERIALIZE_QUERY} is present in the job state.
   *
   * @throws RuntimeException if none of the three mode keys is set, or on I/O failure
   */
  @Override
  public List<WorkUnit> getWorkunits(SourceState state) {
    try {
      FileSystem fs = HadoopUtils.getSourceFileSystem(state);
      Config config = ConfigUtils.propertiesToConfig(state.getProperties());
      if (state.contains(COPY_TABLE_KEY)) {
        HiveDataset dataset = getHiveDataset(state.getProp(COPY_TABLE_KEY), fs, state);
        return singleWorkUnit(HiveMaterializer.tableCopyWorkUnit(dataset,
            stageableTableMetadata(config, dataset.getTable()), null));
      } else if (state.contains(MATERIALIZE_VIEW)) {
        HiveDataset dataset = getHiveDataset(state.getProp(MATERIALIZE_VIEW), fs, state);
        return singleWorkUnit(HiveMaterializer.viewMaterializationWorkUnit(dataset, getOutputStorageFormat(state),
            stageableTableMetadata(config, dataset.getTable()), null));
      } else if (state.contains(MATERIALIZE_QUERY)) {
        String query = state.getProp(MATERIALIZE_QUERY);
        return singleWorkUnit(HiveMaterializer.queryResultMaterializationWorkUnit(query, getOutputStorageFormat(state),
            stageableTableMetadata(config, null)));
      }
    } catch (IOException ioe) {
      throw new RuntimeException(ioe);
    }
    throw new RuntimeException(String.format("Must specify either %s, %s, or %s.", COPY_TABLE_KEY, MATERIALIZE_QUERY,
        MATERIALIZE_VIEW));
  }

  /** Disables the Hive watermarker on the work unit and wraps it in a single-element list. */
  private List<WorkUnit> singleWorkUnit(WorkUnit workUnit) {
    HiveTask.disableHiveWatermarker(workUnit);
    return Lists.newArrayList(workUnit);
  }

  /** Builds staging-table metadata from the source-scoped section of the job config. */
  private StageableTableMetadata stageableTableMetadata(Config config, Table table) {
    return new StageableTableMetadata(config.getConfig(HIVE_MATERIALIZER_SOURCE_PREFIX), table);
  }

  /**
   * Resolves a "db.table" string to a {@link HiveDataset} using the shared metastore client pool.
   *
   * @throws RuntimeException wrapping any Thrift failure from the metastore
   */
  private HiveDataset getHiveDataset(String tableString, FileSystem fs, State state) throws IOException {
    try {
      HiveMetastoreClientPool pool = HiveMetastoreClientPool.get(state.getProperties(),
          Optional.fromNullable(state.getProp(HIVE_METASTORE_URI_KEY)));
      List<String> tokens = Splitter.on(".").splitToList(tableString);
      DbAndTable sourceDbAndTable = new DbAndTable(tokens.get(0), tokens.get(1));
      try (AutoReturnableObject<IMetaStoreClient> client = pool.getClient()) {
        Table sourceTable = new Table(client.get().getTable(sourceDbAndTable.getDb(), sourceDbAndTable.getTable()));
        return new HiveDataset(fs, pool, sourceTable, ConfigUtils.propertiesToConfig(state.getProperties()));
      }
    } catch (TException exc) {
      throw new RuntimeException(exc);
    }
  }

  /** Output storage format for the materialized data; defaults to TEXT_FILE. */
  private HiveConverterUtils.StorageFormat getOutputStorageFormat(State state) {
    return HiveConverterUtils.StorageFormat.valueOf(state.getProp(OUTPUT_STORAGE_FORMAT,
        HiveConverterUtils.StorageFormat.TEXT_FILE.name()));
  }

  /** This source produces task-level work units; no extractor is used. */
  @Override
  public Extractor<Object, Object> getExtractor(WorkUnitState state) throws IOException {
    return null;
  }

  @Override
  public void shutdown(SourceState state) {
    // Nothing to clean up.
  }
}
| 1,654 |
0 | Create_ds/gobblin/gobblin-example/src/main/java/org/apache/gobblin/example | Create_ds/gobblin/gobblin-example/src/main/java/org/apache/gobblin/example/simplejson/SimpleJsonExtractor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.example.simplejson;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import org.apache.commons.vfs2.FileObject;
import org.apache.commons.vfs2.FileSystemException;
import org.apache.commons.vfs2.FileSystemOptions;
import org.apache.commons.vfs2.UserAuthenticator;
import org.apache.commons.vfs2.VFS;
import org.apache.commons.vfs2.auth.StaticUserAuthenticator;
import org.apache.commons.vfs2.impl.DefaultFileSystemConfigBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.io.Closer;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.password.PasswordManager;
import org.apache.gobblin.source.extractor.DataRecordException;
import org.apache.gobblin.source.extractor.Extractor;
/**
* An implementation of {@link Extractor} for the simple JSON example.
*
* <p>
* This extractor uses the commons-vfs library to read the assigned input file storing
* json documents conforming to a schema. Each line of the file is a json document.
* </p>
*
* @author Yinan Li
*/
/**
 * {@link Extractor} implementation for the simple JSON example.
 *
 * <p>Reads the assigned input file through commons-vfs; each line of the file is expected
 * to be one JSON document conforming to the configured schema. Authentication credentials
 * are attached when {@code source.conn.use.authentication} is enabled.
 *
 * @author Yinan Li
 */
public class SimpleJsonExtractor implements Extractor<String, String> {

  private static final Logger LOGGER = LoggerFactory.getLogger(SimpleJsonExtractor.class);

  private static final String SOURCE_FILE_KEY = "source.file";

  private final WorkUnitState workUnitState;
  private final FileObject fileObject;
  private final BufferedReader bufferedReader;
  private final Closer closer = Closer.create();

  public SimpleJsonExtractor(WorkUnitState workUnitState) throws FileSystemException {
    this.workUnitState = workUnitState;
    String sourceFile = workUnitState.getProp(SOURCE_FILE_KEY);
    boolean useAuthentication =
        workUnitState.getPropAsBoolean(ConfigurationKeys.SOURCE_CONN_USE_AUTHENTICATION, false);
    if (useAuthentication) {
      // Attach static credentials to the VFS file system options before resolving the file.
      UserAuthenticator authenticator = new StaticUserAuthenticator(
          workUnitState.getProp(ConfigurationKeys.SOURCE_CONN_DOMAIN, ""),
          workUnitState.getProp(ConfigurationKeys.SOURCE_CONN_USERNAME),
          PasswordManager.getInstance(workUnitState)
              .readPassword(workUnitState.getProp(ConfigurationKeys.SOURCE_CONN_PASSWORD)));
      FileSystemOptions options = new FileSystemOptions();
      DefaultFileSystemConfigBuilder.getInstance().setUserAuthenticator(options, authenticator);
      this.fileObject = VFS.getManager().resolveFile(sourceFile, options);
    } else {
      this.fileObject = VFS.getManager().resolveFile(sourceFile);
    }
    LOGGER.info("Opening file " + this.fileObject.getURL().toString());
    this.bufferedReader = this.closer.register(new BufferedReader(
        new InputStreamReader(this.fileObject.getContent().getInputStream(),
            ConfigurationKeys.DEFAULT_CHARSET_ENCODING)));
  }

  /** Returns the schema from the {@code source.schema} configuration key. */
  @Override
  public String getSchema() {
    return this.workUnitState.getProp(ConfigurationKeys.SOURCE_SCHEMA);
  }

  /** Returns the next line of the file, or {@code null} at end of stream. */
  @Override
  public String readRecord(@Deprecated String reuse) throws DataRecordException, IOException {
    return this.bufferedReader.readLine();
  }

  /** Record count is unknown before the file is actually read. */
  @Override
  public long getExpectedRecordCount() {
    return 0;
  }

  /** Watermarks do not apply to this extractor. */
  @Override
  public long getHighWatermark() {
    return 0;
  }

  /** Closes the reader and the VFS file object, logging (not rethrowing) failures. */
  @Override
  public void close() throws IOException {
    try {
      this.closer.close();
    } catch (IOException ioe) {
      LOGGER.error("Failed to close the input stream", ioe);
    }
    try {
      this.fileObject.close();
    } catch (IOException ioe) {
      LOGGER.error("Failed to close the file object", ioe);
    }
  }
}
| 1,655 |
0 | Create_ds/gobblin/gobblin-example/src/main/java/org/apache/gobblin/example | Create_ds/gobblin/gobblin-example/src/main/java/org/apache/gobblin/example/simplejson/SimpleJsonSource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.example.simplejson;
import java.io.IOException;
import java.util.List;
import com.google.common.base.Splitter;
import com.google.common.collect.Lists;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.source.Source;
import org.apache.gobblin.source.extractor.Extractor;
import org.apache.gobblin.source.workunit.Extract;
import org.apache.gobblin.source.workunit.WorkUnit;
/**
* An implementation of {@link Source} for the simple JSON example.
*
* <p>
* This source creates one {@link org.apache.gobblin.source.workunit.WorkUnit}
* for each file to pull and uses the {@link SimpleJsonExtractor} to pull the data.
* </p>
*
* @author Yinan Li
*/
/**
 * {@link Source} implementation for the simple JSON example.
 *
 * <p>Emits one {@link WorkUnit} per input file listed under
 * {@code source.filebased.files.to.pull}, all sharing a single snapshot-type extract,
 * and hands each to a {@link SimpleJsonExtractor}.
 *
 * @author Yinan Li
 */
public class SimpleJsonSource implements Source<String, String> {

  public static final String SOURCE_FILE_KEY = "source.file";

  @Override
  public List<WorkUnit> getWorkunits(SourceState state) {
    if (!state.contains(ConfigurationKeys.SOURCE_FILEBASED_FILES_TO_PULL)) {
      // No input files configured: nothing to do.
      return Lists.newArrayList();
    }
    // All files share one snapshot-type extract.
    Extract extract = new Extract(Extract.TableType.SNAPSHOT_ONLY,
        state.getProp(ConfigurationKeys.EXTRACT_NAMESPACE_NAME_KEY, "ExampleNamespace"), "ExampleTable");
    List<WorkUnit> workUnits = Lists.newArrayList();
    String filesToPull = state.getProp(ConfigurationKeys.SOURCE_FILEBASED_FILES_TO_PULL);
    for (String file : Splitter.on(',').omitEmptyStrings().split(filesToPull)) {
      WorkUnit workUnit = WorkUnit.create(extract);
      workUnit.setProp(SOURCE_FILE_KEY, file);
      workUnits.add(workUnit);
    }
    return workUnits;
  }

  @Override
  public Extractor<String, String> getExtractor(WorkUnitState state) throws IOException {
    return new SimpleJsonExtractor(state);
  }

  @Override
  public void shutdown(SourceState state) {
    // No resources to release.
  }
}
| 1,656 |
0 | Create_ds/gobblin/gobblin-example/src/main/java/org/apache/gobblin/example | Create_ds/gobblin/gobblin-example/src/main/java/org/apache/gobblin/example/simplejson/SimpleJsonConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.example.simplejson;
import java.lang.reflect.Type;
import java.util.Map;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;
import com.google.gson.Gson;
import com.google.gson.JsonElement;
import com.google.gson.reflect.TypeToken;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.Converter;
import org.apache.gobblin.converter.DataConversionException;
import org.apache.gobblin.converter.SchemaConversionException;
import org.apache.gobblin.converter.SingleRecordIterable;
import org.apache.gobblin.converter.ToAvroConverterBase;
/**
* An implementation of {@link Converter} for the simple JSON example.
*
* <p>
* This converter converts the input string schema into an Avro {@link org.apache.avro.Schema}
* and each input json document into an Avro {@link org.apache.avro.generic.GenericRecord}.
* </p>
*
* @author Yinan Li
*/
/**
 * {@link Converter} implementation for the simple JSON example.
 *
 * <p>Parses the input string schema into an Avro {@link org.apache.avro.Schema} and turns each
 * input JSON document (a flat map of field name to value) into an Avro
 * {@link org.apache.avro.generic.GenericRecord}.
 *
 * @author Yinan Li
 */
public class SimpleJsonConverter extends ToAvroConverterBase<String, String> {

  private static final Gson GSON = new Gson();

  // Each input JSON document is expected to be a flat set of key-value pairs.
  private static final Type FIELD_ENTRY_TYPE = new TypeToken<Map<String, Object>>() {}.getType();

  @Override
  public Schema convertSchema(String inputSchema, WorkUnitState workUnit)
      throws SchemaConversionException {
    return new Schema.Parser().parse(inputSchema);
  }

  @Override
  public Iterable<GenericRecord> convertRecord(Schema schema, String inputRecord, WorkUnitState workUnit)
      throws DataConversionException {
    JsonElement element = GSON.fromJson(inputRecord, JsonElement.class);
    Map<String, Object> fields = GSON.fromJson(element, FIELD_ENTRY_TYPE);
    GenericRecord record = new GenericData.Record(schema);
    // Copy every parsed field straight into the Avro record.
    fields.forEach(record::put);
    return new SingleRecordIterable<>(record);
  }
}
| 1,657 |
0 | Create_ds/gobblin/gobblin-example/src/main/java/org/apache/gobblin/example | Create_ds/gobblin/gobblin-example/src/main/java/org/apache/gobblin/example/hadoop/HadoopTextFileInputExtractor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.example.hadoop;
import java.io.IOException;
import org.apache.gobblin.source.extractor.hadoop.HadoopFileInputExtractor;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.RecordReader;
/**
* An implementation of {@link org.apache.gobblin.source.extractor.hadoop.HadoopFileInputExtractor} to be used with
* {@link org.apache.gobblin.example.hadoop.HadoopTextFileSource}.
*
* @author Sudarshan Vasudevan
*/
/**
 * A {@link HadoopFileInputExtractor} for plain text files, used with
 * {@link HadoopTextFileSource}.
 *
 * @author Sudarshan Vasudevan
 */
public class HadoopTextFileInputExtractor extends HadoopFileInputExtractor<String, String, LongWritable, Text> {

  public HadoopTextFileInputExtractor(RecordReader<LongWritable, Text> recordReader, boolean readKeys) {
    super(recordReader, readKeys);
  }

  /** Plain text input carries no schema, so an empty string is returned. */
  @Override
  public String getSchema() throws IOException {
    return "";
  }
}
| 1,658 |
0 | Create_ds/gobblin/gobblin-example/src/main/java/org/apache/gobblin/example | Create_ds/gobblin/gobblin-example/src/main/java/org/apache/gobblin/example/hadoop/HadoopTextFileSource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.example.hadoop;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.source.extractor.hadoop.HadoopFileInputExtractor;
import org.apache.gobblin.source.extractor.hadoop.HadoopFileInputSource;
/**
* An implementation of {@link org.apache.gobblin.source.extractor.hadoop.HadoopFileInputSource} for reading
* text data from Hadoop.
*
* <p>
* This source returns an {@link org.apache.gobblin.example.hadoop.HadoopTextFileInputExtractor} to
* pull the data from Hadoop.
* </p>
*
* @author Sudarshan Vasudevan
*/
/**
 * A {@link HadoopFileInputSource} for plain text files that hands each file split to a
 * {@link HadoopTextFileInputExtractor}.
 *
 * @author Sudarshan Vasudevan
 */
public class HadoopTextFileSource extends HadoopFileInputSource<String, String, LongWritable, Text> {

  @Override
  protected HadoopFileInputExtractor<String, String, LongWritable, Text> getExtractor(WorkUnitState workUnitState,
      RecordReader recordReader, FileSplit fileSplit, boolean readKeys) {
    return new HadoopTextFileInputExtractor(recordReader, readKeys);
  }
}
| 1,659 |
0 | Create_ds/gobblin/gobblin-example/src/main/java/org/apache/gobblin/example | Create_ds/gobblin/gobblin-example/src/main/java/org/apache/gobblin/example/generic/OneShotRunner.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.example.generic;
import java.io.IOException;
import java.util.Properties;
import org.apache.commons.cli.CommandLine;
import lombok.SneakyThrows;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.runtime.api.JobTemplate;
import org.apache.gobblin.runtime.cli.CliObjectOption;
import org.apache.gobblin.runtime.cli.PublicMethodsGobblinCliFactory;
import org.apache.gobblin.runtime.embedded.EmbeddedGobblin;
import org.apache.gobblin.util.JobConfigurationUtils;
/**
* A class that allows Gobblin cli to run a single pull file (in a blocking manner) and exit on completion
* In contrast, a Gobblin standalone service will run forever polling
* for new pull files in the configured directory
*/
@Slf4j
public class OneShotRunner extends EmbeddedGobblin {

  /** CLI factory wiring "gobblin oneShot ..." to this runner. */
  @Alias(value = "oneShot", description = "Gobblin command that runs one pull file in standalone or map-reduce mode")
  public static class CliFactory extends PublicMethodsGobblinCliFactory {
    public CliFactory() {
      super(OneShotRunner.class);
    }

    @Override
    public EmbeddedGobblin constructEmbeddedGobblin(CommandLine cli) throws JobTemplate.TemplateException, IOException {
      String[] leftoverArgs = cli.getArgs();
      // All configuration arrives via options; positional arguments are not accepted.
      if (leftoverArgs.length != 0) {
        throw new RuntimeException("Unexpected number of arguments.");
      }
      return new OneShotRunner();
    }

    @Override
    public String getUsageString() {
      return "[OPTIONS]";
    }
  }

  /** Switches the embedded job to map-reduce execution mode. */
  @SneakyThrows
  @CliObjectOption(description = "Runs the job in MR mode")
  public OneShotRunner mrMode() {
    super.mrMode();
    return this;
  }

  /**
   * Loads the base (system) configuration file and applies every entry as a system config
   * on the embedded job.
   *
   * @throws RuntimeException if the file cannot be loaded
   */
  @CliObjectOption(description = "Sets the base configuration file")
  public OneShotRunner baseConf(String baseConfFile) {
    log.info("Configured with baseConf file = {}", baseConfFile);
    try {
      Properties sysConfig = JobConfigurationUtils.fileToProperties(baseConfFile);
      log.debug("Loaded up base config: {}", sysConfig);
      // Properties.forEach iterates entries directly; no need for entrySet().stream().
      sysConfig.forEach((key, value) -> super.sysConfig(key.toString(), value.toString()));
    } catch (Exception e) {
      throw new RuntimeException("Failed to load configuration from base config file : " + baseConfFile, e);
    }
    return this;
  }

  /** Points the embedded job at the application (pull) configuration file. */
  @CliObjectOption(description = "Sets the application configuration file")
  public OneShotRunner appConf(String appConfFile) {
    super.jobFile(appConfFile);
    return this;
  }

  public OneShotRunner() {
    super("Generic");
  }
}
| 1,660 |
0 | Create_ds/gobblin/gobblin-example/src/main/java/org/apache/gobblin/example | Create_ds/gobblin/gobblin-example/src/main/java/org/apache/gobblin/example/githubjsontoparquet/EmbeddedGithubJsonToParquet.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.example.githubjsontoparquet;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URISyntaxException;
import java.net.URL;
import java.nio.channels.Channels;
import java.nio.channels.ReadableByteChannel;
import java.nio.file.Path;
import java.nio.file.Paths;
import org.apache.commons.cli.CommandLine;
import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.runtime.api.JobTemplate;
import org.apache.gobblin.runtime.api.SpecNotFoundException;
import org.apache.gobblin.runtime.cli.CliObjectSupport;
import org.apache.gobblin.runtime.cli.PublicMethodsGobblinCliFactory;
import org.apache.gobblin.runtime.embedded.EmbeddedGobblin;
import org.apache.gobblin.runtime.template.ResourceBasedJobTemplate;
import org.apache.gobblin.writer.WriterOutputFormat;
import org.codehaus.plexus.util.FileUtils;
import org.mortbay.log.Log;
import groovy.util.logging.Slf4j;
/**
* Creates a CLI application for running app githubjsontoparquet.
* Cli application takes two arguments:
* 1st arg: Date time (yyyy-mm-dd-hh) of archive to pull, ex: 2015-01-01-25
* 2nd arg: Work dir with filesystem URI (file:///home/someuser/somefolder)";
* Run using:
* bin/gobblin run githubjsontoparquet 2017-12-14-15 file:///Users/someuser/somefolder
* @author tilakpatidar
*/
public class EmbeddedGithubJsonToParquet extends EmbeddedGobblin {
private static final String GITHUB_ARCHIVE_URL_TEMPLATE = "http://data.githubarchive.org/%s.json.gz";
private static final String DOWNLOAD_DIR = "archives";
private static final String ARCHIVE_SUFFIX = ".json.gz";
private static final String WORK_DIR_KEY = "work.dir";
@Slf4j
@Alias(value = "githubjsontoparquet", description = "Extract Github data and write to parquet files")
public static class CliFactory extends PublicMethodsGobblinCliFactory {
  public CliFactory() {
    super(EmbeddedGithubJsonToParquet.class);
  }

  /**
   * Builds the embedded Gobblin job from the two required CLI arguments: the archive
   * timestamp (yyyy-mm-dd-hh) and the work directory URI.
   *
   * @throws RuntimeException if the argument count is not exactly 2
   */
  @Override
  public EmbeddedGobblin constructEmbeddedGobblin(CommandLine cli)
      throws JobTemplate.TemplateException, IOException {
    String[] args = cli.getArgs();
    // Require exactly two arguments. Previously args.length > 2 fell through and returned
    // null (NPE downstream), and construction failures were swallowed via printStackTrace();
    // now errors propagate through the declared throws clause.
    if (args.length != 2) {
      throw new RuntimeException("Expected 2 arguments. " + getUsageString());
    }
    return new EmbeddedGithubJsonToParquet(args[0], args[1]);
  }

  @Override
  public String getUsageString() {
    return "<Date time (yyyy-mm-dd-hh) of archive to pull> <Work dir with file system URI>";
  }
}
@CliObjectSupport(argumentNames = {"archiveDateAndHour", "workDir"})
public EmbeddedGithubJsonToParquet(String archiveDateAndHour, String workDir)
throws JobTemplate.TemplateException, IOException {
super("githubjsontoparquet");
URL workDirUrl;
try {
workDirUrl = new URL(workDir);
} catch (MalformedURLException e) {
e.printStackTrace();
throw new RuntimeException("Work directory URI with no protocol or malformed.");
}
// Set configuration
String fsProtocol = workDirUrl.getProtocol() + ":///";
this.setConfiguration(WORK_DIR_KEY, workDir);
this.setConfiguration(ConfigurationKeys.FS_URI_KEY, fsProtocol);
this.setConfiguration(ConfigurationKeys.STATE_STORE_ENABLED, "true");
this.setConfiguration(ConfigurationKeys.STATE_STORE_ROOT_DIR_KEY, workDir + "/store");
this.setConfiguration(ConfigurationKeys.WRITER_FILE_SYSTEM_URI, fsProtocol);
this.setConfiguration(ConfigurationKeys.DATA_PUBLISHER_FILE_SYSTEM_URI, fsProtocol);
this.setConfiguration(ConfigurationKeys.DATA_PUBLISHER_FINAL_DIR, workDir + "/event_data");
this.setConfiguration(ConfigurationKeys.DATA_PUBLISHER_METADATA_OUTPUT_DIR, workDir + "/metadata");
this.setConfiguration(ConfigurationKeys.WRITER_OUTPUT_FORMAT_KEY, WriterOutputFormat.PARQUET.toString());
//Set template
try {
setTemplate(ResourceBasedJobTemplate.forResourcePath("githubjsontoparquet.template"));
} catch (URISyntaxException | SpecNotFoundException e) {
e.printStackTrace();
throw new RuntimeException("Cannot set template");
}
// Download the archive
String fileUrl = String.format(GITHUB_ARCHIVE_URL_TEMPLATE, archiveDateAndHour);
Path downloadDirPath = createDownloadDir(workDirUrl.getPath(), fileUrl);
Path downloadFile = getAbsoluteDownloadFilePath(downloadDirPath, archiveDateAndHour);
downloadFile(fileUrl, downloadFile);
}
private Path getAbsoluteDownloadFilePath(Path downloadDirPath, String archiveDateAndHour) {
String downloadFileName = archiveDateAndHour + ARCHIVE_SUFFIX;
return Paths.get(downloadDirPath.toString(), downloadFileName);
}
private Path createDownloadDir(String workDir, String fileUrl) {
Path downloadDirPath = Paths.get(workDir, DOWNLOAD_DIR);
File downloadDirFile = downloadDirPath.toFile();
try {
Log.info(String.format("Creating download dir %s", downloadDirFile.toPath().toString()));
FileUtils.forceMkdir(downloadDirFile);
} catch (IOException e) {
throw new RuntimeException(String
.format("Unable to create download location for archive: %s at %s", fileUrl, downloadDirPath.toString()));
}
Log.info(String.format("Created download dir %s", downloadDirFile.toPath().toString()));
return downloadDirPath;
}
private void downloadFile(String fileUrl, Path destination) {
if (destination.toFile().exists()) {
Log.info(String.format("Skipping download for %s at %s because destination already exists", fileUrl,
destination.toString()));
return;
}
try {
URL archiveUrl = new URL(fileUrl);
ReadableByteChannel rbc = Channels.newChannel(archiveUrl.openStream());
FileOutputStream fos = new FileOutputStream(String.valueOf(destination));
Log.info(String.format("Downloading %s at %s", fileUrl, destination.toString()));
fos.getChannel().transferFrom(rbc, 0, Long.MAX_VALUE);
Log.info(String.format("Download complete for %s at %s", fileUrl, destination.toString()));
} catch (IOException e) {
e.printStackTrace();
}
}
}
| 1,661 |
0 | Create_ds/gobblin/gobblin-example/src/main/java/org/apache/gobblin/example | Create_ds/gobblin/gobblin-example/src/main/java/org/apache/gobblin/example/githubjsontoparquet/GithubDataEventTypesPartitioner.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.example.githubjsontoparquet;
import org.apache.avro.Schema;
import org.apache.avro.SchemaBuilder;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.converter.parquet.ParquetGroup;
import org.apache.gobblin.writer.partitioner.WriterPartitioner;
/**
 * Partitions github JSON records on their top-level event "type" field.
 *
 * @author tilakpatidar
 */
public class GithubDataEventTypesPartitioner implements WriterPartitioner<ParquetGroup> {
  private static final String PARTITION_KEY = "type";
  private static final Schema SCHEMA =
      SchemaBuilder.record("Schema").namespace("gobblin.writer.partitioner").fields().name(PARTITION_KEY)
          .type(Schema.create(Schema.Type.STRING)).noDefault().endRecord();

  public GithubDataEventTypesPartitioner(State state, int numBranches, int branchId) {
    // Partitioning is stateless; these arguments are required by the
    // WriterPartitioner construction contract but unused here.
  }

  @Override
  public Schema partitionSchema() {
    return SCHEMA;
  }

  @Override
  public GenericRecord partitionForRecord(ParquetGroup record) {
    // Remove double-quote characters from the field value before using it as a
    // partition value (ParquetGroup#getString appears to retain the JSON
    // quoting — confirm against the converter).
    String eventType = record.getString(PARTITION_KEY, 0).replace("\"", "");
    GenericRecord partition = new GenericData.Record(SCHEMA);
    partition.put(PARTITION_KEY, eventType);
    return partition;
  }
}
| 1,662 |
0 | Create_ds/gobblin/gobblin-completeness/src/test/java/org/apache/gobblin/completeness | Create_ds/gobblin/gobblin-completeness/src/test/java/org/apache/gobblin/completeness/audit/TestAuditClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.completeness.audit;
import java.util.HashMap;
import java.util.Map;
import org.apache.gobblin.configuration.State;
/**
 * An {@link AuditCountClient} test double whose per-tier counts are injected
 * directly by the test via {@link #setTierCounts(Map)}.
 */
public class TestAuditClient implements AuditCountClient {
  // Counts returned by every fetch() call; replaced wholesale by tests.
  Map<String, Long> tierCounts = new HashMap<>();

  public TestAuditClient(State state) {
    // State is accepted to mirror the real client factories but is unused.
  }

  public void setTierCounts(Map<String, Long> tierCounts) {
    this.tierCounts = tierCounts;
  }

  /** Ignores topic and time window; always returns the injected counts. */
  @Override
  public Map<String, Long> fetch(String topic, long start, long end) {
    return tierCounts;
  }
}
| 1,663 |
0 | Create_ds/gobblin/gobblin-completeness/src/test/java/org/apache/gobblin/completeness | Create_ds/gobblin/gobblin-completeness/src/test/java/org/apache/gobblin/completeness/audit/TestAuditClientFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.completeness.audit;
import org.apache.gobblin.configuration.State;
/**
 * {@link AuditCountClientFactory} used in tests; produces a {@link TestAuditClient}
 * whose tier counts can be injected by the test.
 */
public class TestAuditClientFactory implements AuditCountClientFactory {
  @Override
  public AuditCountClient createAuditCountClient(State state) {
    return new TestAuditClient(state);
  }
}
| 1,664 |
0 | Create_ds/gobblin/gobblin-completeness/src/test/java/org/apache/gobblin/completeness | Create_ds/gobblin/gobblin-completeness/src/test/java/org/apache/gobblin/completeness/verifier/KafkaAuditCountVerifierTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.completeness.verifier;
import java.io.IOException;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.collect.ImmutableMap;
import org.apache.gobblin.completeness.audit.TestAuditClient;
import org.apache.gobblin.configuration.State;
@Test
public class KafkaAuditCountVerifierTest {
  public static final String SOURCE_TIER = "gobblin";
  public static final String REFERENCE_TIER = "producer";
  public static final String REFERENCE_TIER_1 = "producer_reference";
  public static final String REFERENCE_TIERS = REFERENCE_TIER + "," + REFERENCE_TIER_1;
  public static final String TOTAL_COUNT_REF_TIER_0 = "producer_0";
  public static final String TOTAL_COUNT_REF_TIER_1 = "producer_1";
  public static final String TOTAL_COUNT_REFERENCE_TIERS = TOTAL_COUNT_REF_TIER_0 + "," + TOTAL_COUNT_REF_TIER_1;

  private static final String TOPIC = "testTopic";

  /**
   * Builds the configuration shared by every test: source tier, reference
   * tiers and a .99 completeness threshold. Tests add extra keys as needed.
   */
  private static State baseProps() {
    State props = new State();
    props.setProp(KafkaAuditCountVerifier.SOURCE_TIER, SOURCE_TIER);
    props.setProp(KafkaAuditCountVerifier.REFERENCE_TIERS, REFERENCE_TIERS);
    props.setProp(KafkaAuditCountVerifier.THRESHOLD, ".99");
    return props;
  }

  /** Shorthand for the classic-completeness result of a verification run. */
  private static boolean classicComplete(KafkaAuditCountVerifier verifier) throws IOException {
    return verifier.calculateCompleteness(TOPIC, 0L, 0L)
        .get(KafkaAuditCountVerifier.CompletenessType.ClassicCompleteness);
  }

  /** Shorthand for the total-count-completeness result of a verification run. */
  private static boolean totalCountComplete(KafkaAuditCountVerifier verifier) throws IOException {
    return verifier.calculateCompleteness(TOPIC, 0L, 0L)
        .get(KafkaAuditCountVerifier.CompletenessType.TotalCountCompleteness);
  }

  public void testFetch() throws IOException {
    State props = baseProps();
    TestAuditClient client = new TestAuditClient(props);
    KafkaAuditCountVerifier verifier = new KafkaAuditCountVerifier(props, client);

    // All tiers agree: complete.
    client.setTierCounts(ImmutableMap.of(
        SOURCE_TIER, 1000L,
        REFERENCE_TIER, 1000L,
        REFERENCE_TIER_1, 1000L
    ));
    Assert.assertTrue(classicComplete(verifier));

    // 99.9 % complete, above the .99 threshold.
    client.setTierCounts(ImmutableMap.of(
        SOURCE_TIER, 999L,
        REFERENCE_TIER, 1000L,
        REFERENCE_TIER_1, 1000L
    ));
    Assert.assertTrue(classicComplete(verifier));

    // <= 99 % complete: fails the .99 threshold.
    client.setTierCounts(ImmutableMap.of(
        SOURCE_TIER, 990L,
        REFERENCE_TIER, 1000L,
        REFERENCE_TIER_1, 1000L
    ));
    Assert.assertFalse(classicComplete(verifier));
  }

  public void testTotalCountCompleteness() throws IOException {
    State props = baseProps();
    props.setProp(KafkaAuditCountVerifier.TOTAL_COUNT_REFERENCE_TIERS, TOTAL_COUNT_REFERENCE_TIERS);
    TestAuditClient client = new TestAuditClient(props);
    KafkaAuditCountVerifier verifier = new KafkaAuditCountVerifier(props, client);

    // Total-count reference tiers sum to the source count: complete.
    client.setTierCounts(ImmutableMap.of(
        SOURCE_TIER, 1000L,
        REFERENCE_TIER, 1000L,
        REFERENCE_TIER_1, 1000L,
        TOTAL_COUNT_REF_TIER_0, 600L,
        TOTAL_COUNT_REF_TIER_1, 400L
    ));
    Assert.assertTrue(totalCountComplete(verifier));

    // 99.9 % complete, above the .99 threshold.
    client.setTierCounts(ImmutableMap.of(
        SOURCE_TIER, 999L,
        REFERENCE_TIER, 1000L,
        REFERENCE_TIER_1, 1000L,
        TOTAL_COUNT_REF_TIER_0, 600L,
        TOTAL_COUNT_REF_TIER_1, 400L
    ));
    Assert.assertTrue(totalCountComplete(verifier));

    // <= 99 % complete: fails the .99 threshold.
    client.setTierCounts(ImmutableMap.of(
        SOURCE_TIER, 990L,
        REFERENCE_TIER, 1000L,
        REFERENCE_TIER_1, 1000L,
        TOTAL_COUNT_REF_TIER_0, 600L,
        TOTAL_COUNT_REF_TIER_1, 400L
    ));
    Assert.assertFalse(totalCountComplete(verifier));
  }

  public void testEmptyAuditCount() throws IOException {
    State props = baseProps();
    props.setProp(KafkaAuditCountVerifier.TOTAL_COUNT_REFERENCE_TIERS, TOTAL_COUNT_REFERENCE_TIERS);
    props.setProp(KafkaAuditCountVerifier.COMPLETE_ON_NO_COUNTS, true);
    TestAuditClient client = new TestAuditClient(props);
    KafkaAuditCountVerifier verifier = new KafkaAuditCountVerifier(props, client);

    // Client gets an empty audit count: complete, since COMPLETE_ON_NO_COUNTS=true.
    client.setTierCounts(ImmutableMap.of());
    Assert.assertTrue(classicComplete(verifier));
    Assert.assertTrue(totalCountComplete(verifier));

    // Reference tiers report 0 while the source reports counts (x / 0 case,
    // x != 0): the watermark should still advance when Kafka stops reporting.
    client.setTierCounts(ImmutableMap.of(
        SOURCE_TIER, 990L,
        REFERENCE_TIER, 0L,
        REFERENCE_TIER_1, 0L,
        TOTAL_COUNT_REF_TIER_0, 0L,
        TOTAL_COUNT_REF_TIER_1, 0L
    ));
    Assert.assertTrue(totalCountComplete(verifier));
    Assert.assertTrue(classicComplete(verifier));

    // Both source and reference tiers report 0 (0 / 0 case): treated as
    // complete. Covers one source cluster reporting counts but not another,
    // yielding a non-empty map with a 0 source count.
    client.setTierCounts(ImmutableMap.of(
        SOURCE_TIER, 0L,
        REFERENCE_TIER, 0L,
        REFERENCE_TIER_1, 0L,
        TOTAL_COUNT_REF_TIER_0, 0L,
        TOTAL_COUNT_REF_TIER_1, 0L
    ));
    Assert.assertTrue(totalCountComplete(verifier));
    Assert.assertTrue(classicComplete(verifier));
  }

  public void testOneCountFailed() throws IOException {
    State props = baseProps();
    props.setProp(KafkaAuditCountVerifier.TOTAL_COUNT_REFERENCE_TIERS, TOTAL_COUNT_REFERENCE_TIERS);
    props.setProp(KafkaAuditCountVerifier.COMPLETE_ON_NO_COUNTS, true);
    TestAuditClient client = new TestAuditClient(props);
    KafkaAuditCountVerifier verifier = new KafkaAuditCountVerifier(props, client);

    // Total-count reference tiers are missing from the audit counts entirely.
    client.setTierCounts(ImmutableMap.of(
        SOURCE_TIER, 999L,
        REFERENCE_TIER, 1000L,
        REFERENCE_TIER_1, 1000L
    ));
    // Classic completeness is still returned, but the total-count entry is absent.
    Assert.assertTrue(classicComplete(verifier));
    Assert.assertFalse(verifier.calculateCompleteness(TOPIC, 0L, 0L)
        .containsKey(KafkaAuditCountVerifier.CompletenessType.TotalCountCompleteness));
  }

  public void testDifferentValueInReferenceTier() throws IOException {
    State props = baseProps();
    props.setProp(KafkaAuditCountVerifier.TOTAL_COUNT_REFERENCE_TIERS, TOTAL_COUNT_REFERENCE_TIERS);
    props.setProp(KafkaAuditCountVerifier.COMPLETE_ON_NO_COUNTS, true);
    TestAuditClient client = new TestAuditClient(props);
    KafkaAuditCountVerifier verifier = new KafkaAuditCountVerifier(props, client);

    // Reference tiers disagree: fails, as 999/2000 < 99 %.
    client.setTierCounts(ImmutableMap.of(
        SOURCE_TIER, 999L,
        REFERENCE_TIER, 1000L,
        REFERENCE_TIER_1, 2000L
    ));
    Assert.assertFalse(classicComplete(verifier));

    // Reference tiers disagree and one reports 0: still fails (999/2000 < 99 %).
    client.setTierCounts(ImmutableMap.of(
        SOURCE_TIER, 999L,
        REFERENCE_TIER, 0L,
        REFERENCE_TIER_1, 2000L
    ));
    Assert.assertFalse(classicComplete(verifier));
  }
}
| 1,665 |
0 | Create_ds/gobblin/gobblin-completeness/src/main/java/org/apache/gobblin/completeness | Create_ds/gobblin/gobblin-completeness/src/main/java/org/apache/gobblin/completeness/audit/AuditCountClientFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.completeness.audit;
import org.apache.gobblin.configuration.State;
/**
 * A factory class responsible for creating {@link AuditCountClient}
 */
public interface AuditCountClientFactory {
  // Config key naming the factory implementation (class name or alias) to use.
  String AUDIT_COUNT_CLIENT_FACTORY = "audit.count.client.factory";

  /**
   * Creates an {@link AuditCountClient} configured from the given job state.
   *
   * @param state job configuration used to construct the client
   * @return a new {@link AuditCountClient}
   */
  AuditCountClient createAuditCountClient(State state);
}
| 1,666 |
0 | Create_ds/gobblin/gobblin-completeness/src/main/java/org/apache/gobblin/completeness | Create_ds/gobblin/gobblin-completeness/src/main/java/org/apache/gobblin/completeness/audit/AuditCountClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.completeness.audit;
import java.io.IOException;
import java.util.Map;
/**
 * A type of client used to query audit counts
 */
public interface AuditCountClient {
  /**
   * Fetches per-tier record counts for a dataset within a time window.
   *
   * @param datasetName query dataset
   * @param start start timestamp in millis from epoch
   * @param end end timestamp in millis from epoch
   * @return a map of tier name to record count for the window
   * @throws IOException if the underlying audit store cannot be queried
   */
  Map<String, Long> fetch(String datasetName, long start, long end) throws IOException;
}
| 1,667 |
0 | Create_ds/gobblin/gobblin-completeness/src/main/java/org/apache/gobblin/completeness | Create_ds/gobblin/gobblin-completeness/src/main/java/org/apache/gobblin/completeness/audit/AuditCountHttpClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.completeness.audit;
import java.io.IOException;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpUriRequest;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;
import org.apache.http.util.EntityUtils;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.Maps;
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
import com.google.gson.JsonParser;
import javax.annotation.concurrent.ThreadSafe;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.State;
/**
 * A {@link AuditCountClient} which uses {@link org.apache.http.client.HttpClient}
 * to perform audit count query.
 */
@Slf4j
@ThreadSafe
public class AuditCountHttpClient implements AuditCountClient {
  // Keys
  // NOTE(review): AUDIT_HTTP_PREFIX lacks a trailing dot, so the derived keys
  // read "audit.httpmax.total" / "audit.httpmax.per.route". Likely a typo, but
  // changing the constant would break existing configurations — flagging only.
  public static final String AUDIT_HTTP_PREFIX = "audit.http";
  public static final String CONNECTION_MAX_TOTAL = AUDIT_HTTP_PREFIX + "max.total";
  public static final int DEFAULT_CONNECTION_MAX_TOTAL = 10;
  public static final String MAX_PER_ROUTE = AUDIT_HTTP_PREFIX + "max.per.route";
  public static final int DEFAULT_MAX_PER_ROUTE = 10;
  public static final String AUDIT_REST_BASE_URL = "audit.rest.base.url";
  public static final String AUDIT_REST_MAX_TRIES = "audit.rest.max.tries";
  public static final String AUDIT_REST_START_QUERYSTRING_KEY = "audit.rest.querystring.start";
  public static final String AUDIT_REST_END_QUERYSTRING_KEY = "audit.rest.querystring.end";
  public static final String AUDIT_REST_START_QUERYSTRING_DEFAULT = "start";
  public static final String AUDIT_REST_END_QUERYSTRING_DEFAULT = "end";

  // Http client. Fields are final so instances are safely publishable,
  // matching the @ThreadSafe declaration.
  private final PoolingHttpClientConnectionManager cm;
  private final CloseableHttpClient httpClient;
  private static final JsonParser PARSER = new JsonParser();

  private final String baseUrl;
  private final String startQueryString;
  private final String endQueryString;
  private final String topicQueryString = "topic";
  private final int maxNumTries;

  /**
   * Builds a pooled HTTP client from the audit.* configuration in {@code state}.
   */
  public AuditCountHttpClient(State state) {
    int maxTotal = state.getPropAsInt(CONNECTION_MAX_TOTAL, DEFAULT_CONNECTION_MAX_TOTAL);
    int maxPerRoute = state.getPropAsInt(MAX_PER_ROUTE, DEFAULT_MAX_PER_ROUTE);
    cm = new PoolingHttpClientConnectionManager();
    cm.setMaxTotal(maxTotal);
    cm.setDefaultMaxPerRoute(maxPerRoute);
    httpClient = HttpClients.custom()
        .setConnectionManager(cm)
        .build();
    this.baseUrl = state.getProp(AUDIT_REST_BASE_URL);
    this.maxNumTries = state.getPropAsInt(AUDIT_REST_MAX_TRIES, 5);
    this.startQueryString = state.getProp(AUDIT_REST_START_QUERYSTRING_KEY, AUDIT_REST_START_QUERYSTRING_DEFAULT);
    this.endQueryString = state.getProp(AUDIT_REST_END_QUERYSTRING_KEY, AUDIT_REST_END_QUERYSTRING_DEFAULT);
  }

  /**
   * Queries per-tier audit counts for {@code topic} in the window
   * {@code [start, end]} against the configured REST endpoint.
   */
  public Map<String, Long> fetch(String topic, long start, long end) throws IOException {
    String fullUrl = (this.baseUrl.endsWith("/") ? this.baseUrl.substring(0, this.baseUrl.length() - 1)
        : this.baseUrl) + "?" + this.topicQueryString + "=" + topic
        + "&" + this.startQueryString + "=" + start + "&" + this.endQueryString + "=" + end;
    log.info("Full URL is " + fullUrl);
    String response = getHttpResponse(fullUrl);
    return parseResponse(fullUrl, response, topic);
  }

  /**
   * Expects <code>response</code> being parsed to be as below.
   *
   * <pre>
   * {
   *   "totalsPerTier": {
   *     "tier1": 79341895,
   *     "tier2": 79341892,
   *   }
   * }
   * </pre>
   */
  @VisibleForTesting
  public static Map<String, Long> parseResponse(String fullUrl, String response, String topic) throws IOException {
    Map<String, Long> result = Maps.newHashMap();
    JsonObject countsPerTier = null;
    try {
      JsonObject jsonObj = PARSER.parse(response).getAsJsonObject();
      countsPerTier = jsonObj.getAsJsonObject("totalsPerTier");
    } catch (Exception e) {
      throw new IOException(String.format("Unable to parse JSON response: %s for request url: %s ", response,
          fullUrl), e);
    }
    // getAsJsonObject returns null (it does not throw) when the key is absent;
    // the original code would NPE in the loop below in that case.
    if (countsPerTier == null) {
      throw new IOException(String.format("Missing 'totalsPerTier' in JSON response: %s for request url: %s",
          response, fullUrl));
    }
    for (Map.Entry<String, JsonElement> entry : countsPerTier.entrySet()) {
      String tier = entry.getKey();
      long count = Long.parseLong(entry.getValue().getAsString());
      result.put(tier, count);
    }
    return result;
  }

  /**
   * Issues a GET against {@code fullUrl}, retrying up to {@code maxNumTries}
   * times with a linearly growing back-off before giving up.
   */
  private String getHttpResponse(String fullUrl) throws IOException {
    HttpUriRequest req = new HttpGet(fullUrl);
    for (int numTries = 0;; numTries++) {
      try (CloseableHttpResponse response = this.httpClient.execute(req)) {
        int statusCode = response.getStatusLine().getStatusCode();
        if (statusCode < 200 || statusCode >= 300) {
          throw new IOException(
              String.format("status code: %d, reason: %s", statusCode, response.getStatusLine().getReasonPhrase()));
        }
        return EntityUtils.toString(response.getEntity());
      } catch (IOException e) {
        String errMsg = "Unable to get or parse HTTP response for " + fullUrl;
        if (numTries >= this.maxNumTries) {
          throw new IOException(errMsg, e);
        }
        long backOffSec = (numTries + 1) * 2;
        log.error(errMsg + ", will retry in " + backOffSec + " sec", e);
        try {
          Thread.sleep(TimeUnit.SECONDS.toMillis(backOffSec));
        } catch (InterruptedException e1) {
          // Restore the interrupt flag so callers can observe the interruption.
          Thread.currentThread().interrupt();
        }
      }
    }
  }
}
| 1,668 |
0 | Create_ds/gobblin/gobblin-completeness/src/main/java/org/apache/gobblin/completeness | Create_ds/gobblin/gobblin-completeness/src/main/java/org/apache/gobblin/completeness/audit/AuditCountHttpClientFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.completeness.audit;
import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.configuration.State;
/**
* Factory to create an instance of type {@link AuditCountHttpClient}
*/
@Alias("AuditCountHttpClientFactory")
public class AuditCountHttpClientFactory implements AuditCountClientFactory {
public AuditCountHttpClient createAuditCountClient (State state) {
return new AuditCountHttpClient(state);
}
}
| 1,669 |
0 | Create_ds/gobblin/gobblin-completeness/src/main/java/org/apache/gobblin/completeness | Create_ds/gobblin/gobblin-completeness/src/main/java/org/apache/gobblin/completeness/verifier/KafkaAuditCountVerifier.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.completeness.verifier;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import com.google.common.base.Preconditions;
import com.google.common.base.Splitter;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.completeness.audit.AuditCountClient;
import org.apache.gobblin.completeness.audit.AuditCountClientFactory;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.util.ClassAliasResolver;
/**
* Use {@link AuditCountClient} to retrieve all record count across different tiers
* Compare one source tier against all other reference tiers and determine
* if verification should be passed based on a pre-defined threshold.
* source tier is the tier being compared against single/multiple reference tiers
*/
@Slf4j
public class KafkaAuditCountVerifier {
  // Config keys (all under the "completeness." prefix) controlling the verifier.
  public static final String COMPLETENESS_PREFIX = "completeness.";
  // Tier whose counts are being verified against the reference tiers.
  public static final String SOURCE_TIER = COMPLETENESS_PREFIX + "source.tier";
  // Comma-separated list of tiers the source tier is compared against.
  public static final String REFERENCE_TIERS = COMPLETENESS_PREFIX + "reference.tiers";
  // Comma-separated list of tiers whose counts feed total-count completeness (optional).
  public static final String TOTAL_COUNT_REFERENCE_TIERS = COMPLETENESS_PREFIX + "totalCount.reference.tiers";
  // Completion-ratio threshold used to decide completeness.
  public static final String THRESHOLD = COMPLETENESS_PREFIX + "threshold";
  private static final double DEFAULT_THRESHOLD = 0.999;
  // When true, an empty audit-count result is treated as complete.
  public static final String COMPLETE_ON_NO_COUNTS = COMPLETENESS_PREFIX + "complete.on.no.counts";

  /** The kinds of completeness checks this verifier can report. */
  public enum CompletenessType {
    ClassicCompleteness,
    TotalCountCompleteness
  }

  private final boolean returnCompleteOnNoCounts;
  private final AuditCountClient auditCountClient;
  // Source tier being verified.
  private final String srcTier;
  // Tiers the source tier is compared against.
  private final Collection<String> refTiers;
  // Null when total-count completeness is not configured.
  private final Collection<String> totalCountRefTiers;
  private final double threshold;
  /**
   * Constructor with audit count client from state
   */
  public KafkaAuditCountVerifier(State state) {
    this(state, getAuditClient(state));
  }

  /**
   * Constructor with user specified audit count client
   *
   * @param state job state carrying the completeness.* configuration keys
   * @param client client used to fetch per-tier audit counts
   */
  public KafkaAuditCountVerifier(State state, AuditCountClient client) {
    this.auditCountClient = client;
    this.threshold =
        state.getPropAsDouble(THRESHOLD, DEFAULT_THRESHOLD);
    this.srcTier = state.getProp(SOURCE_TIER);
    // NOTE(review): getProp(REFERENCE_TIERS) returns null when the key is
    // unset, which would NPE inside Splitter — callers are expected to set it.
    this.refTiers = Splitter.on(",").omitEmptyStrings().trimResults().splitToList(state.getProp(REFERENCE_TIERS));
    // Total-count reference tiers are optional; left null when not configured.
    this.totalCountRefTiers = state.contains(TOTAL_COUNT_REFERENCE_TIERS)
        ? Splitter.on(",").omitEmptyStrings().trimResults().splitToList(state.getProp(TOTAL_COUNT_REFERENCE_TIERS))
        : null;
    this.returnCompleteOnNoCounts = state.getPropAsBoolean(COMPLETE_ON_NO_COUNTS, false);
  }
/**
* Obtain an {@link AuditCountClient} using a {@link AuditCountClientFactory}
* @param state job state
* @return {@link AuditCountClient}
*/
private static AuditCountClient getAuditClient(State state) {
Preconditions.checkArgument(state.contains(AuditCountClientFactory.AUDIT_COUNT_CLIENT_FACTORY),
String.format("Audit count factory %s not set ", AuditCountClientFactory.AUDIT_COUNT_CLIENT_FACTORY));
try {
String factoryName = state.getProp(AuditCountClientFactory.AUDIT_COUNT_CLIENT_FACTORY);
ClassAliasResolver<AuditCountClientFactory> conditionClassAliasResolver = new ClassAliasResolver<>(AuditCountClientFactory.class);
AuditCountClientFactory factory = conditionClassAliasResolver.resolveClass(factoryName).newInstance();
return factory.createAuditCountClient(state);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
public Map<CompletenessType, Boolean> calculateCompleteness(String datasetName, long beginInMillis, long endInMillis)
throws IOException {
return calculateCompleteness(datasetName, beginInMillis, endInMillis, this.threshold);
}
/**
* Compare source tier against reference tiers.
* Compute completion percentage which is true iff the calculated percentages is greater than threshold.
*
* @param datasetName A dataset short name like 'PageViewEvent'
* @param beginInMillis Unix timestamp in milliseconds
* @param endInMillis Unix timestamp in milliseconds
* @param threshold User defined threshold
*
* @return a map of completeness result by CompletenessType
*/
public Map<CompletenessType, Boolean> calculateCompleteness(String datasetName, long beginInMillis, long endInMillis,
double threshold) throws IOException {
Map<String, Long> countsByTier = getTierAndCount(datasetName, beginInMillis, endInMillis);
log.info(String.format("checkTierCounts: audit counts map for %s for range [%s,%s]", datasetName, beginInMillis, endInMillis));
countsByTier.forEach((x,y) -> log.info(String.format(" %s : %s ", x, y)));
Map<CompletenessType, Boolean> result = new HashMap<>();
Arrays.stream(CompletenessType.values()).forEach(type -> {
try {
result.put(type, calculateCompleteness(datasetName, beginInMillis, endInMillis, type, countsByTier) > threshold);
} catch (IOException e) {
log.error("Failed to calculate completeness for type " + type, e);
}
});
return result;
}
private double calculateCompleteness(String datasetName, long beginInMillis, long endInMillis, CompletenessType type,
Map<String, Long> countsByTier) throws IOException {
if (countsByTier.isEmpty() && this.returnCompleteOnNoCounts) {
log.info(String.format("Found empty counts map for %s, returning complete", datasetName));
return 1.0;
}
switch (type) {
case ClassicCompleteness:
return calculateClassicCompleteness(datasetName, beginInMillis, endInMillis, countsByTier);
case TotalCountCompleteness:
return calculateTotalCountCompleteness(datasetName, beginInMillis, endInMillis, countsByTier);
default:
log.error("Skip unsupported completeness type {}", type);
return -1;
}
}
/**
* Compare source tier against reference tiers. For each reference tier, calculates percentage by srcCount/refCount.
* We will return the lowest value, which, in other words, we will wait until src tier catches up to all reference
* tiers (upto 99.9%) to mark that hour as completed.
* @param datasetName A dataset short name like 'PageViewEvent'
* @param beginInMillis Unix timestamp in milliseconds
* @param endInMillis Unix timestamp in milliseconds
*
* @return The lowest percentage value
*/
private double calculateClassicCompleteness(String datasetName, long beginInMillis, long endInMillis,
Map<String, Long> countsByTier) throws IOException {
validateTierCounts(datasetName, beginInMillis, endInMillis, countsByTier, this.srcTier, this.refTiers);
double percent = -1;
for (String refTier: this.refTiers) {
long refCount = countsByTier.get(refTier);
long srcCount = countsByTier.get(this.srcTier);
double tmpPercent;
/*
If we have a case where an audit map is returned, however, one of the source tiers on another fabric is 0,
and the reference tiers from Kafka is reported to be 0, we can say that this hour is complete.
This needs to be added as a non-zero double value divided by 0 is infinity, but 0 divided by 0 is NaN.
*/
if (srcCount == 0 && refCount == 0) {
tmpPercent = 1;
} else {
tmpPercent = (double) srcCount / (double) refCount;
}
percent = percent < 0 ? tmpPercent : Double.min(percent, tmpPercent);
}
if (percent < 0) {
throw new IOException("Cannot calculate completion percentage");
}
return percent;
}
/**
* Check total count based completeness by comparing source tier against reference tiers,
* and calculate the completion percentage by srcCount/sum_of(refCount).
*
* @param datasetName A dataset short name like 'PageViewEvent'
* @param beginInMillis Unix timestamp in milliseconds
* @param endInMillis Unix timestamp in milliseconds
*
* @return The percentage value by srcCount/sum_of(refCount)
*/
private double calculateTotalCountCompleteness(String datasetName, long beginInMillis, long endInMillis,
Map<String, Long> countsByTier) throws IOException {
if (this.totalCountRefTiers == null) {
return -1;
}
validateTierCounts(datasetName, beginInMillis, endInMillis, countsByTier, this.srcTier, this.totalCountRefTiers);
long srcCount = countsByTier.get(this.srcTier);
long totalRefCount = this.totalCountRefTiers
.stream()
.mapToLong(countsByTier::get)
.sum();
/*
If we have a case where an audit map is returned, however, one of the source tiers on another fabric is 0,
and the sum of the reference tiers from Kafka is reported to be 0, we can say that this hour is complete.
This needs to be added as a non-zero double value divided by 0 is infinity, but 0 divided by 0 is NaN.
*/
if (srcCount == 0 && totalRefCount == 0) {
return 1.0;
}
double percent = Double.max(-1, (double) srcCount / (double) totalRefCount);
if (percent < 0) {
throw new IOException("Cannot calculate total count completion percentage");
}
return percent;
}
private static void validateTierCounts(String datasetName, long beginInMillis, long endInMillis, Map<String, Long> countsByTier,
String sourceTier, Collection<String> referenceTiers)
throws IOException {
if (!countsByTier.containsKey(sourceTier)) {
throw new IOException(String.format("Source tier %s audit count cannot be retrieved for dataset %s between %s and %s", sourceTier, datasetName, beginInMillis, endInMillis));
}
for (String refTier: referenceTiers) {
if (!countsByTier.containsKey(refTier)) {
throw new IOException(String.format("Reference tier %s audit count cannot be retrieved for dataset %s between %s and %s", refTier, datasetName, beginInMillis, endInMillis));
}
long refCount = countsByTier.get(refTier);
if (refCount == 0) {
// If count in refTier is 0, it will be assumed that the data for that hour is completed and move the watermark forward.
log.warn(String.format("Reference tier %s audit count is reported to be zero", refCount));
} else if (refCount < 0) {
throw new IOException(String.format("Reference tier %s count cannot be less than zero", refTier));
}
}
}
/**
* Fetch all <tier-count> pairs for a given dataset between a time range
*/
private Map<String, Long> getTierAndCount(String datasetName, long beginInMillis, long endInMillis) throws IOException {
return auditCountClient.fetch(datasetName, beginInMillis, endInMillis);
}
} | 1,670 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.limiter;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.io.Closer;
import com.google.inject.Injector;
import com.linkedin.restli.server.resources.BaseResource;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.broker.BrokerConfigurationKeyGenerator;
import org.apache.gobblin.broker.SharedResourcesBrokerFactory;
import org.apache.gobblin.broker.SimpleScopeType;
import org.apache.gobblin.broker.iface.SharedResourcesBroker;
import org.apache.gobblin.restli.EmbeddedRestliServer;
import org.apache.gobblin.restli.throttling.CountBasedPolicy;
import org.apache.gobblin.restli.throttling.LimiterServerResource;
import org.apache.gobblin.restli.throttling.ThrottlingGuiceServletConfig;
import org.apache.gobblin.restli.throttling.ThrottlingPolicyFactory;
import org.apache.gobblin.util.limiter.broker.SharedLimiterKey;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Map;
import org.apache.curator.test.TestingServer;
import org.testng.Assert;
import org.testng.annotations.Test;
import lombok.Data;
/**
 * Tests {@link RestliServiceBasedLimiter} against an {@link EmbeddedRestliServer}, including
 * failover between two HA servers coordinated through ZooKeeper.
 */
public class RestliServiceBasedLimiterTest {

  @Test
  public void test() throws Exception {
    ThrottlingPolicyFactory factory = new ThrottlingPolicyFactory();
    SharedLimiterKey res1key = new SharedLimiterKey("res1");
    // Allow at most 100 permits for resource "res1" via a count-based policy.
    Map<String, String> configMap = ImmutableMap.<String, String>builder()
        .put(BrokerConfigurationKeyGenerator.generateKey(factory, res1key, null, ThrottlingPolicyFactory.POLICY_KEY),
            CountBasedPolicy.FACTORY_ALIAS)
        .put(BrokerConfigurationKeyGenerator.generateKey(factory, res1key, null, CountBasedPolicy.COUNT_KEY), "100")
        .build();
    ThrottlingGuiceServletConfig guiceServletConfig = new ThrottlingGuiceServletConfig();
    guiceServletConfig.initialize(ConfigFactory.parseMap(configMap));
    Injector injector = guiceServletConfig.getInjector();
    EmbeddedRestliServer server = EmbeddedRestliServer.builder().resources(
        Lists.<Class<? extends BaseResource>>newArrayList(LimiterServerResource.class)).injector(injector).build();
    SharedResourcesBroker<SimpleScopeType> broker = SharedResourcesBrokerFactory.createDefaultTopLevelBroker(
        ConfigFactory.empty(), SimpleScopeType.GLOBAL.defaultScopeInstance());
    try {
      server.startAsync();
      server.awaitRunning();
      RestliServiceBasedLimiter limiter = RestliServiceBasedLimiter.builder()
          .requestSender(new RedirectAwareRestClientRequestSender(broker, Lists.newArrayList(server.getURIPrefix())))
          .resourceLimited(res1key.getResourceLimitedPath()).serviceIdentifier("service").build();
      // 20 + 20 permits fit within the 100-permit budget; 1000 exceeds it and must be denied.
      Assert.assertNotNull(limiter.acquirePermits(20));
      Assert.assertNotNull(limiter.acquirePermits(20));
      Assert.assertNull(limiter.acquirePermits(1000));
    } finally {
      if (server.isRunning()) {
        server.stopAsync();
        server.awaitTerminated();
      }
    }
  }

  @Test
  public void testServerFailover() throws Exception {
    try (Closer closer = Closer.create()) {
      SharedLimiterKey res1key = new SharedLimiterKey("res1");
      Map<String, String> configMap = Maps.newHashMap();
      // HA setup: two throttling servers elect a leader through this embedded ZooKeeper.
      TestingServer zkTestingServer = closer.register(new TestingServer(-1));
      configMap.put(ThrottlingGuiceServletConfig.ZK_STRING_KEY, zkTestingServer.getConnectString());
      configMap.put(ThrottlingGuiceServletConfig.HA_CLUSTER_NAME, RestliServiceBasedLimiterTest.class.getSimpleName() + "_cluster");
      Config config = ConfigFactory.parseMap(configMap);
      RestliServer server2500 = createAndStartServer(config, 2500);
      RestliServer server2501 = createAndStartServer(config, 2501);
      SharedResourcesBroker<SimpleScopeType> broker =
          SharedResourcesBrokerFactory.createDefaultTopLevelBroker(ConfigFactory.empty(), SimpleScopeType.GLOBAL.defaultScopeInstance());
      // Client knows both server prefixes and follows redirects to the current leader.
      RedirectAwareRestClientRequestSender requestSender = new RedirectAwareRestClientRequestSender(broker,
          Lists.newArrayList(server2500.getServer().getURIPrefix(), server2501.getServer().getURIPrefix()));
      RestliServiceBasedLimiter limiter = RestliServiceBasedLimiter.builder()
          .requestSender(requestSender)
          .resourceLimited(res1key.getResourceLimitedPath())
          .serviceIdentifier("service")
          .build();
      Assert.assertNotNull(limiter.acquirePermits(20));
      limiter.clearAllStoredPermits();
      // Kill server 2500: the client must fail over to 2501.
      server2500.close();
      Assert.assertNotNull(limiter.acquirePermits(20));
      Assert.assertEquals(parsePortOfCurrentServerPrefix(requestSender), 2501);
      limiter.clearAllStoredPermits();
      server2500 = createAndStartServer(config, 2500);
      Assert.assertNotNull(limiter.acquirePermits(20));
      limiter.clearAllStoredPermits();
      // leader is currently 2501
      Assert.assertEquals(parsePortOfCurrentServerPrefix(requestSender), 2501);
      // set request to 2500 (not leader)
      requestSender.updateRestClient(server2500.getServer().getURIPrefix(), "test", null);
      Assert.assertEquals(parsePortOfCurrentServerPrefix(requestSender), 2500);
      Assert.assertNotNull(limiter.acquirePermits(20));
      // verify request sender switched back to leader
      Assert.assertEquals(parsePortOfCurrentServerPrefix(requestSender), 2501);
      // Kill the leader: the restarted 2500 should take over.
      server2501.close();
      Assert.assertNotNull(limiter.acquirePermits(20));
      limiter.clearAllStoredPermits();
      // With no servers left, permit acquisition must fail.
      server2500.close();
      Assert.assertNull(limiter.acquirePermits(20));
      limiter.clearAllStoredPermits();
    }
  }

  /** Extracts the port of the server the request sender is currently pointed at. */
  private int parsePortOfCurrentServerPrefix(RedirectAwareRestClientRequestSender requestSender) throws
      URISyntaxException{
    return new URI(requestSender.getCurrentServerPrefix()).getPort();
  }

  /** Builds, starts, and returns an embedded throttling server listening on the given port. */
  private RestliServer createAndStartServer(Config baseConfig, int port) {
    ThrottlingGuiceServletConfig guiceServletConfig = new ThrottlingGuiceServletConfig();
    guiceServletConfig.initialize(baseConfig.withFallback(ConfigFactory.parseMap(ImmutableMap.of(
        ThrottlingGuiceServletConfig.LISTENING_PORT, Integer.toString(port)
    ))));
    Injector injector = guiceServletConfig.getInjector();
    EmbeddedRestliServer server = EmbeddedRestliServer.builder()
        .resources(Lists.<Class<? extends BaseResource>>newArrayList(LimiterServerResource.class))
        .injector(injector)
        .port(port)
        .build();
    server.startAsync();
    server.awaitRunning();
    return new RestliServer(server, guiceServletConfig);
  }

  /** Pairs an embedded server with its guice config so both can be shut down together. */
  @Data
  private static class RestliServer {
    private final EmbeddedRestliServer server;
    private final ThrottlingGuiceServletConfig guiceServletConfig;

    public void close() {
      this.server.stopAsync();
      this.server.awaitTerminated();
      this.guiceServletConfig.close();
    }
  }
}
| 1,671 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.limiter;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import org.mockito.Mockito;
import com.google.common.collect.Queues;
import com.linkedin.common.callback.Callback;
import com.linkedin.restli.client.Response;
import com.linkedin.restli.client.RestLiResponseException;
import com.linkedin.restli.common.ComplexResourceKey;
import com.linkedin.restli.common.EmptyRecord;
import com.linkedin.restli.server.RestLiServiceException;
import org.apache.gobblin.restli.throttling.LimiterServerResource;
import org.apache.gobblin.restli.throttling.PermitAllocation;
import org.apache.gobblin.restli.throttling.PermitRequest;
import lombok.Data;
import lombok.extern.slf4j.Slf4j;
/**
 * A mock {@link RequestSender} that satisfies requests using an embedded
 * {@link LimiterServerResource}, simulating a configurable network latency with a pool of
 * handler threads.
 */
@Slf4j
public class MockRequester implements RequestSender {
  // Requests waiting to be answered, each tagged with the earliest nano-time it may be processed.
  private final BlockingQueue<RequestAndCallback> requestAndCallbackQueue;
  private final LimiterServerResource limiterServer;
  private final long latencyMillis;
  private final int requestHandlerThreads;
  private boolean started;
  private ExecutorService handlerExecutorService;

  public MockRequester(LimiterServerResource limiterServer, long latencyMillis, int requestHandlerThreads) {
    this.limiterServer = limiterServer;
    this.latencyMillis = latencyMillis;
    this.requestHandlerThreads = requestHandlerThreads;
    this.requestAndCallbackQueue = Queues.newLinkedBlockingQueue();
  }

  /** Starts the handler threads. Idempotent. */
  public synchronized void start() {
    if (this.started) {
      return;
    }
    this.started = true;
    this.handlerExecutorService = Executors.newFixedThreadPool(this.requestHandlerThreads);
    for (int i = 0; i < this.requestHandlerThreads; i++) {
      this.handlerExecutorService.submit(new RequestHandler());
    }
  }

  /** Stops the handler threads by interrupting them. Idempotent. */
  public synchronized void stop() {
    if (!this.started) {
      return;
    }
    this.handlerExecutorService.shutdownNow();
    this.started = false;
  }

  /**
   * Enqueues the request to be answered after the configured simulated latency.
   *
   * @throws IllegalStateException if {@link #start()} has not been called
   */
  @Override
  public void sendRequest(PermitRequest request, Callback<Response<PermitAllocation>> callback) {
    if (!this.started) {
      throw new IllegalStateException(MockRequester.class.getSimpleName() + " has not been started.");
    }
    long nanoTime = System.nanoTime();
    long satisfyAt = nanoTime + TimeUnit.MILLISECONDS.toNanos(this.latencyMillis);
    this.requestAndCallbackQueue.add(new RequestAndCallback(request, callback, satisfyAt));
  }

  @Data
  public static class RequestAndCallback {
    private final PermitRequest request;
    private final Callback<Response<PermitAllocation>> callback;
    // Earliest System.nanoTime() at which the request may be processed (simulated latency).
    private final long processAfterNanos;
  }

  /**
   * Worker loop: takes queued requests, waits out the simulated latency, then answers them by
   * calling the embedded {@link LimiterServerResource} directly.
   */
  private class RequestHandler implements Runnable {
    @Override
    public void run() {
      try {
        while (true) {
          RequestAndCallback requestAndCallback = MockRequester.this.requestAndCallbackQueue.take();
          long nanoTime = System.nanoTime();
          long delayNanos = requestAndCallback.getProcessAfterNanos() - nanoTime;
          if (delayNanos > 0) {
            Thread.sleep(TimeUnit.NANOSECONDS.toMillis(delayNanos));
          }
          try {
            PermitAllocation allocation =
                MockRequester.this.limiterServer.getSync(new ComplexResourceKey<>(requestAndCallback.getRequest(), new EmptyRecord()));
            Response<PermitAllocation> response = Mockito.mock(Response.class);
            Mockito.when(response.getEntity()).thenReturn(allocation);
            requestAndCallback.getCallback().onSuccess(response);
          } catch (RestLiServiceException rexc) {
            // Translate the server-side exception into the client-visible rest.li response exception.
            RestLiResponseException returnException = Mockito.mock(RestLiResponseException.class);
            Mockito.when(returnException.getStatus()).thenReturn(rexc.getStatus().getCode());
            requestAndCallback.getCallback().onError(returnException);
          }
        }
      } catch (InterruptedException ie) {
        // Expected shutdown path: stop() interrupts handler threads via shutdownNow().
        // Restore the interrupt flag and exit quietly instead of logging a spurious error
        // (previously this fell into the Throwable branch and rethrew).
        Thread.currentThread().interrupt();
      } catch (Throwable t) {
        log.error("Error", t);
        throw new RuntimeException(t);
      }
    }
  }
}
| 1,672 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.limiter;
import java.util.List;
import java.util.Map;
import org.junit.Assert;
import org.mockito.Mockito;
import org.testng.annotations.Test;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.linkedin.common.callback.Callback;
import com.linkedin.restli.client.Response;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.broker.BrokerConfigurationKeyGenerator;
import org.apache.gobblin.broker.SharedResourcesBrokerFactory;
import org.apache.gobblin.broker.SimpleScopeType;
import org.apache.gobblin.broker.iface.SharedResourcesBroker;
import org.apache.gobblin.restli.SharedRestClientKey;
import org.apache.gobblin.restli.throttling.PermitAllocation;
import org.apache.gobblin.restli.throttling.PermitRequest;
import org.apache.gobblin.util.limiter.broker.SharedLimiterFactory;
import org.apache.gobblin.util.limiter.broker.SharedLimiterKey;
/**
 * Tests that {@link RestliLimiterFactory} and {@link SharedLimiterFactory} produce limiters
 * that route permit requests through the broker-bound {@link RequestSender}.
 */
public class RestliLimiterFactoryTest {

  @Test
  public void testFactory() throws Exception {
    SharedResourcesBroker<SimpleScopeType> broker = SharedResourcesBrokerFactory.createDefaultTopLevelBroker(
        ConfigFactory.empty(), SimpleScopeType.GLOBAL.defaultScopeInstance());
    MyRequestSender requestSender = new MyRequestSender();
    // Bind the stub sender so the limiter talks to it instead of a real rest.li client.
    broker.bindSharedResourceAtScope(new RedirectAwareRestClientRequestSender.Factory<>(),
        new SharedRestClientKey(RestliLimiterFactory.RESTLI_SERVICE_NAME), SimpleScopeType.GLOBAL, requestSender);
    RestliServiceBasedLimiter limiter =
        broker.getSharedResource(new RestliLimiterFactory<>(), new SharedLimiterKey("my/resource"));
    Assert.assertNotNull(limiter.acquirePermits(10));
    // Exactly one permit request must have reached the sender.
    Assert.assertEquals(requestSender.requestList.size(), 1);
    broker.close();
  }

  /** Verifies the generic {@link SharedLimiterFactory} delegates to the restli-based limiter. */
  @Test
  public void testRestliLimiterCalledByLimiterFactory() throws Exception {
    SharedResourcesBroker<SimpleScopeType> broker = SharedResourcesBrokerFactory.createDefaultTopLevelBroker(
        ConfigFactory.empty(), SimpleScopeType.GLOBAL.defaultScopeInstance());
    MyRequestSender requestSender = new MyRequestSender();
    broker.bindSharedResourceAtScope(new RedirectAwareRestClientRequestSender.Factory<>(),
        new SharedRestClientKey(RestliLimiterFactory.RESTLI_SERVICE_NAME), SimpleScopeType.GLOBAL, requestSender);
    Limiter limiter =
        broker.getSharedResource(new SharedLimiterFactory<>(), new SharedLimiterKey("my/resource"));
    Assert.assertNotNull(limiter.acquirePermits(10));
    Assert.assertEquals(requestSender.requestList.size(), 1);
    broker.close();
  }

  /** Verifies SKIP_GLOBAL_LIMITER_KEY bypasses the restli limiter: no request reaches the sender. */
  @Test
  public void testSkipGlobalLimiterOnLimiterFactory() throws Exception {
    Map<String, String> configMap = ImmutableMap.of(
        BrokerConfigurationKeyGenerator.generateKey(new SharedLimiterFactory(), null, null, SharedLimiterFactory.SKIP_GLOBAL_LIMITER_KEY), "true"
    );
    SharedResourcesBroker<SimpleScopeType> broker = SharedResourcesBrokerFactory.createDefaultTopLevelBroker(
        ConfigFactory.parseMap(configMap), SimpleScopeType.GLOBAL.defaultScopeInstance());
    MyRequestSender requestSender = new MyRequestSender();
    broker.bindSharedResourceAtScope(new RedirectAwareRestClientRequestSender.Factory<>(),
        new SharedRestClientKey(RestliLimiterFactory.RESTLI_SERVICE_NAME), SimpleScopeType.GLOBAL, requestSender);
    Limiter limiter =
        broker.getSharedResource(new SharedLimiterFactory<>(), new SharedLimiterKey("my/resource"));
    Assert.assertNotNull(limiter.acquirePermits(10));
    Assert.assertEquals(requestSender.requestList.size(), 0);
    broker.close();
  }

  /** Stub {@link RequestSender} that records every request and grants it in full immediately. */
  public static class MyRequestSender implements RequestSender {
    // Every request received, in arrival order.
    List<PermitRequest> requestList = Lists.newArrayList();

    @Override
    public void sendRequest(PermitRequest request, Callback<Response<PermitAllocation>> callback) {
      this.requestList.add(request);
      PermitAllocation permitAllocation = new PermitAllocation();
      permitAllocation.setPermits(request.getPermits());
      permitAllocation.setExpiration(Long.MAX_VALUE);
      Response<PermitAllocation> response = Mockito.mock(Response.class);
      Mockito.when(response.getEntity()).thenReturn(permitAllocation);
      callback.onSuccess(response);
    }
  }
}
| 1,673 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.limiter;
import java.io.Closeable;
import java.io.IOException;
import java.util.Queue;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.apache.gobblin.util.Sleeper;
import org.mockito.Mockito;
import org.slf4j.Logger;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.base.Optional;
import com.google.common.collect.Queues;
import com.linkedin.common.callback.Callback;
import com.linkedin.restli.client.Response;
import com.linkedin.restli.client.RestLiResponseException;
import com.linkedin.restli.common.HttpStatus;
import org.apache.gobblin.restli.throttling.PermitAllocation;
import org.apache.gobblin.restli.throttling.PermitRequest;
import org.apache.gobblin.util.ExecutorsUtils;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
public class BatchedPermitsRequesterTest {
/** Verifies a permit request reaches the sender and the caller unblocks once it is satisfied. */
@Test
public void testForwardingOfRequests() throws Exception {
  Queue<RequestAndCallback> queue = Queues.newArrayDeque();
  BatchedPermitsRequester container = BatchedPermitsRequester.builder().resourceId("resource")
      .requestorIdentifier("requestor").requestSender(new TestRequestSender(queue, false)).build();
  try (ParallelRequester requester = new ParallelRequester(container)) {
    Future<Boolean> future = requester.request(10);
    // Wait until exactly one request reaches the manually-driven sender.
    await(new QueueSize(queue, 1), 1000);
    Assert.assertEquals(queue.size(), 1);
    satisfyRequestBuilder().requestAndCallback(queue.poll()).satisfy();
    future.get(1, TimeUnit.SECONDS);
    Assert.assertTrue(future.isDone());
    Assert.assertTrue(future.get());
  }
}
/**
 * Verifies the requester keeps at most one outstanding server request: additional permit
 * requests queue up locally and are satisfied by the next batched response.
 */
@Test
public void testNoMoreThanOneRequestAtATime() throws Exception {
  Queue<RequestAndCallback> queue = Queues.newArrayDeque();
  BatchedPermitsRequester container = BatchedPermitsRequester.builder().resourceId("resource")
      .requestorIdentifier("requestor").requestSender(new TestRequestSender(queue, false)).build();
  try (ParallelRequester requester = new ParallelRequester(container)) {
    Future<Boolean> future = requester.request(1);
    await(new QueueSize(queue, 1), 1000);
    Assert.assertEquals(queue.size(), 1);
    Future<Boolean> future2 = requester.request(2);
    Future<Boolean> future3 = requester.request(3);
    Future<Boolean> future4 = requester.request(4);
    Future<Boolean> future5 = requester.request(5);
    Thread.sleep(100);
    // Despite four more pending permit requests, no additional server request was issued.
    Assert.assertEquals(queue.size(), 1);
    satisfyRequestBuilder().requestAndCallback(queue.poll()).satisfy();
    future.get(1, TimeUnit.SECONDS);
    Assert.assertTrue(future.isDone());
    Assert.assertTrue(future.get());
    // The queued requests are batched into a single follow-up server request.
    await(new QueueSize(queue, 1), 1000);
    Assert.assertEquals(queue.size(), 1);
    satisfyRequestBuilder().requestAndCallback(queue.poll()).satisfy();
    future2.get(1, TimeUnit.SECONDS);
    future3.get(1, TimeUnit.SECONDS);
    future4.get(1, TimeUnit.SECONDS);
    future5.get(1, TimeUnit.SECONDS);
    Assert.assertTrue(future2.get());
    Assert.assertTrue(future3.get());
    Assert.assertTrue(future4.get());
    Assert.assertTrue(future5.get());
  }
}
/** Verifies retriable failures are retried up to MAX_RETRIES before the request fails for good. */
@Test
public void testRetriableFail() throws Exception {
  Queue<RequestAndCallback> queue = Queues.newArrayDeque();
  BatchedPermitsRequester container = BatchedPermitsRequester.builder().resourceId("resource")
      .requestorIdentifier("requestor").requestSender(new TestRequestSender(queue, false))
      .maxTimeoutMillis(1000).build();
  try (ParallelRequester requester = new ParallelRequester(container)) {
    Future<Boolean> future = requester.request(10);
    for (int i = 0; i < BatchedPermitsRequester.MAX_RETRIES; i++) {
      // container will fail 5 times
      await(new QueueSize(queue, 1), 1000);
      Assert.assertFalse(future.isDone());
      failRequestBuilder().requestAndCallback(queue.poll()).fail();
    }
    // should return a failure
    Assert.assertFalse(future.get());
    // should not make any more request
    Assert.assertEquals(queue.size(), 0);
  }
}
/** Verifies a non-retriable error status (HTTP 422) fails the request immediately, with no retry. */
@Test
public void testNonRetriableFail() throws Exception {
  Queue<RequestAndCallback> queue = Queues.newArrayDeque();
  BatchedPermitsRequester container = BatchedPermitsRequester.builder().resourceId("resource")
      .requestorIdentifier("requestor").requestSender(new TestRequestSender(queue, false))
      .maxTimeoutMillis(1000).build();
  try (ParallelRequester requester = new ParallelRequester(container)) {
    Future<Boolean> future = requester.request(10);
    // container should only try request once
    await(new QueueSize(queue, 1), 1000);
    Assert.assertFalse(future.isDone());
    failRequestBuilder().requestAndCallback(queue.poll()).errorStatus(HttpStatus.S_422_UNPROCESSABLE_ENTITY).fail();
    Assert.assertFalse(future.get());
    Assert.assertEquals(queue.size(), 0);
  }
}
/**
 * Verifies a server-specified wait-for-permit-use delay is honored through the sleeper,
 * and that a zero delay triggers no wait while permits still accumulate.
 */
@Test
public void testWaitToUsePermits() throws Exception {
  Queue<RequestAndCallback> queue = Queues.newArrayDeque();
  BatchedPermitsRequester container = BatchedPermitsRequester.builder().resourceId("resource")
      .requestorIdentifier("requestor").requestSender(new TestRequestSender(queue, false)).build();
  Sleeper.MockSleeper mockWaiter = new Sleeper.MockSleeper();
  BatchedPermitsRequester.AllocationCallback callback = container.createAllocationCallback(mockWaiter);
  // Allocation of 10 permits that asks the client to wait 20 ms before using them.
  PermitAllocation allocation = new PermitAllocation();
  allocation.setPermits(10);
  allocation.setWaitForPermitUseMillis(20);
  allocation.setExpiration(Long.MAX_VALUE);
  Response<PermitAllocation> response = Mockito.mock(Response.class);
  Mockito.when(response.getEntity()).thenReturn(allocation);
  // Normally the semaphore is reserved during a request. Since we're mocking a response without ever starting a request,
  // manually reserve the semaphore
  Assert.assertTrue(container.reserveSemaphore());
  callback.onSuccess(response);
  Assert.assertEquals((long) mockWaiter.getRequestedSleeps().peek(), 20);
  Assert.assertEquals(container.getPermitBatchContainer().getTotalAvailablePermits(), 10);
  // A zero wait will not trigger a wait in the requester
  allocation.setWaitForPermitUseMillis(0);
  mockWaiter.reset();
  callback.onSuccess(response);
  Assert.assertTrue(mockWaiter.getRequestedSleeps().isEmpty());
  Assert.assertEquals(container.getPermitBatchContainer().getTotalAvailablePermits(), 20);
}
/**
 * {@link RequestSender} for tests: either grants every request immediately
 * (autoSatisfyRequests) or parks it on a queue for the test to complete explicitly.
 */
public static class TestRequestSender implements RequestSender {
  private final Queue<RequestAndCallback> requestAndCallbacks;
  private final boolean autoSatisfyRequests;
  public TestRequestSender(Queue<RequestAndCallback> requestAndCallbacks, boolean autoSatisfyRequests) {
    this.requestAndCallbacks = requestAndCallbacks;
    this.autoSatisfyRequests = autoSatisfyRequests;
  }
  @Override
  public void sendRequest(PermitRequest request, Callback<Response<PermitAllocation>> callback) {
    RequestAndCallback pair = new RequestAndCallback(request, callback);
    if (this.autoSatisfyRequests) {
      // Grant inline, mimicking an always-available server.
      satisfyRequestBuilder().requestAndCallback(pair).satisfy();
    } else {
      // Park the request so the test can satisfy or fail it deliberately.
      this.requestAndCallbacks.add(pair);
    }
  }
}
/**
 * Completes a parked request successfully, granting exactly the permits it asked for.
 * Lombok {@code @Builder} exposes this as {@code satisfyRequestBuilder()...satisfy()}.
 * A non-positive {@code expiration} means the allocation never expires.
 */
@Builder(builderMethodName = "satisfyRequestBuilder", buildMethodName = "satisfy")
public static void satisfyRequest(RequestAndCallback requestAndCallback, long expiration) {
  PermitAllocation allocation = new PermitAllocation();
  allocation.setPermits(requestAndCallback.getRequest().getPermits());
  allocation.setExpiration(expiration > 0 ? expiration : Long.MAX_VALUE);
  // Mocked Rest.li response wrapping the allocation entity.
  Response<PermitAllocation> response = Mockito.mock(Response.class);
  Mockito.when(response.getEntity()).thenReturn(allocation);
  requestAndCallback.getCallback().onSuccess(response);
}
/**
 * Fails a parked request. Exactly one error mode is used, in priority order:
 * a mocked {@link RestLiResponseException} with {@code errorStatus} if given,
 * else the supplied {@code exception}, else a plain {@link RuntimeException}.
 * Lombok {@code @Builder} exposes this as {@code failRequestBuilder()...fail()}.
 */
@Builder(builderMethodName = "failRequestBuilder", buildMethodName = "fail")
public static void failRequest(RequestAndCallback requestAndCallback, Throwable exception, HttpStatus errorStatus) {
  Throwable actualException;
  if (errorStatus != null) {
    // Simulate an HTTP-level error with the requested status code.
    RestLiResponseException restException = Mockito.mock(RestLiResponseException.class);
    Mockito.when(restException.getStatus()).thenReturn(errorStatus.getCode());
    actualException = restException;
  } else if (exception != null) {
    actualException = exception;
  } else {
    actualException = new RuntimeException();
  }
  requestAndCallback.callback.onError(actualException);
}
/** Pair of a permit request and the callback through which a test completes or fails it. */
@Data
public static class RequestAndCallback {
  private final PermitRequest request;
  private final Callback<Response<PermitAllocation>> callback;
}
/**
 * Submits permit requests on background threads so the test can observe them while
 * they are still pending. Closing shuts the worker pool down immediately.
 */
private static class ParallelRequester implements Closeable {
  private final BatchedPermitsRequester container;
  private final ExecutorService executorService;
  public ParallelRequester(BatchedPermitsRequester container) {
    this.container = container;
    this.executorService = Executors.newCachedThreadPool(
        ExecutorsUtils.newThreadFactory(Optional.<Logger>absent(), Optional.of("parallel-requester-%d")));
  }
  /** Asynchronously asks for {@code permits}; the future resolves to whether they were granted. */
  public Future<Boolean> request(final long permits) {
    Callable<Boolean> permitCall = new Callable<Boolean>() {
      @Override
      public Boolean call() throws Exception {
        return container.getPermits(permits);
      }
    };
    return this.executorService.submit(permitCall);
  }
  @Override
  public void close() throws IOException {
    ExecutorService service = this.executorService;
    if (service != null) {
      service.shutdownNow();
    }
  }
}
/**
 * Polls {@code condition} every 50 ms until it is true, failing with a
 * {@link RuntimeException} once the {@code millis} budget is exhausted.
 */
private void await(Callable<Boolean> condition, int millis) throws Exception {
  int remainingMillis = millis;
  while (!condition.call()) {
    remainingMillis -= 50;
    if (remainingMillis < 0) {
      throw new RuntimeException("Await failed");
    }
    Thread.sleep(50);
  }
}
@AllArgsConstructor
private class QueueSize implements Callable<Boolean> {
private final Queue queue;
private final int size;
@Override
public Boolean call() throws Exception {
return queue.size() == size;
}
}
}
| 1,674 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-client/src/test/java/org/apache/gobblin/restli | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-client/src/test/java/org/apache/gobblin/restli/throttling/ThrottlingClientTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.restli.throttling;
import java.util.Collections;
import java.util.Map;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.collect.Lists;
import com.google.inject.Injector;
import com.linkedin.r2.transport.common.Client;
import com.linkedin.r2.transport.common.bridge.client.TransportClientAdapter;
import com.linkedin.r2.transport.http.client.HttpClientFactory;
import com.linkedin.restli.client.Request;
import com.linkedin.restli.client.Response;
import com.linkedin.restli.client.ResponseFuture;
import com.linkedin.restli.client.RestClient;
import com.linkedin.restli.client.RestLiResponseException;
import com.linkedin.restli.common.ComplexResourceKey;
import com.linkedin.restli.common.EmptyRecord;
import com.linkedin.restli.common.HttpStatus;
import com.linkedin.restli.server.resources.BaseResource;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.broker.BrokerConfigurationKeyGenerator;
import org.apache.gobblin.restli.EmbeddedRestliServer;
import org.apache.gobblin.util.limiter.broker.SharedLimiterKey;
import com.google.common.collect.ImmutableMap;
/**
 * Integration test: starts an embedded throttling server and exercises the
 * Rest.li permits endpoint, including over-budget and unknown-resource rejections.
 */
public class ThrottlingClientTest {
  @Test
  public void test() throws Exception {
    ThrottlingPolicyFactory factory = new ThrottlingPolicyFactory();
    SharedLimiterKey res1key = new SharedLimiterKey("res1");
    // res1 is count-limited to 50 permits; unknown resource ids must be rejected.
    Map<String, String> configMap = ImmutableMap.<String, String>builder()
        .put(BrokerConfigurationKeyGenerator.generateKey(factory, res1key, null, ThrottlingPolicyFactory.POLICY_KEY),
            CountBasedPolicy.FACTORY_ALIAS)
        .put(BrokerConfigurationKeyGenerator.generateKey(factory, res1key, null, CountBasedPolicy.COUNT_KEY), "50")
        .put(BrokerConfigurationKeyGenerator.generateKey(factory, null, null, ThrottlingPolicyFactory.FAIL_ON_UNKNOWN_RESOURCE_ID),
            "true")
        .build();
    ThrottlingGuiceServletConfig guiceServletConfig = new ThrottlingGuiceServletConfig();
    guiceServletConfig.initialize(ConfigFactory.parseMap(configMap));
    Injector injector = guiceServletConfig.getInjector();
    EmbeddedRestliServer server = EmbeddedRestliServer.builder().resources(
        Lists.<Class<? extends BaseResource>>newArrayList(LimiterServerResource.class)).injector(injector).build();
    try {
      server.startAsync();
      server.awaitRunning();
      final HttpClientFactory http = new HttpClientFactory();
      final Client r2Client = new TransportClientAdapter(http.getClient(Collections.<String, String>emptyMap()));
      RestClient restClient = new RestClient(r2Client, server.getURIPrefix());
      PermitsGetRequestBuilder getBuilder = new PermitsRequestBuilders().get();
      PermitRequest res1request = new PermitRequest();
      res1request.setPermits(20);
      res1request.setResource(res1key.getResourceLimitedPath());
      // Two requests of 20 permits fit within the 50-permit budget.
      // (Long.valueOf replaces the deprecated `new Long(...)` boxing constructor.)
      PermitAllocation allocation = getPermitAllocation(res1request, restClient, getBuilder);
      Assert.assertEquals(allocation.getPermits(), Long.valueOf(20L));
      allocation = getPermitAllocation(res1request, restClient, getBuilder);
      Assert.assertEquals(allocation.getPermits(), Long.valueOf(20L));
      // out of permits: the third request exceeds the budget and must be refused with 403.
      try {
        getPermitAllocation(res1request, restClient, getBuilder);
        Assert.fail();
      } catch (RestLiResponseException exc) {
        Assert.assertEquals(exc.getStatus(), HttpStatus.S_403_FORBIDDEN.getCode());
      }
      PermitRequest invalidRequest = new PermitRequest();
      invalidRequest.setPermits(20);
      invalidRequest.setResource("invalidkey");
      // Unknown resources are rejected with 422 (FAIL_ON_UNKNOWN_RESOURCE_ID is set above);
      // the named HttpStatus constant replaces the magic number 422.
      try {
        getPermitAllocation(invalidRequest, restClient, getBuilder);
        Assert.fail();
      } catch (RestLiResponseException exc) {
        Assert.assertEquals(exc.getStatus(), HttpStatus.S_422_UNPROCESSABLE_ENTITY.getCode());
      }
    } finally {
      if (server.isRunning()) {
        server.stopAsync();
        server.awaitTerminated();
      }
    }
  }
  /** Sends a single permits GET request and returns the allocation entity. */
  private PermitAllocation getPermitAllocation(PermitRequest permitRequest, RestClient restClient,
      PermitsGetRequestBuilder getBuilder) throws Exception {
    Request<PermitAllocation> request = getBuilder.id(new ComplexResourceKey<>(permitRequest, new EmptyRecord())).build();
    ResponseFuture<PermitAllocation> responseFuture = restClient.sendRequest(request);
    Response<PermitAllocation> response = responseFuture.getResponse();
    return response.getEntity();
  }
}
| 1,675 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-client/src/test/java/org/apache/gobblin/restli | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-client/src/test/java/org/apache/gobblin/restli/throttling/LocalStressTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.restli.throttling;
import java.util.LinkedList;
import java.util.Map;
import java.util.Queue;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.apache.commons.math3.stat.descriptive.DescriptiveStatistics;
import org.apache.hadoop.conf.Configuration;
import com.google.common.collect.Maps;
import com.google.inject.Key;
import com.google.inject.name.Names;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.broker.BrokerConfigurationKeyGenerator;
import org.apache.gobblin.broker.iface.SharedResourcesBroker;
import org.apache.gobblin.util.limiter.Limiter;
import org.apache.gobblin.util.limiter.MockRequester;
import org.apache.gobblin.util.limiter.RestliServiceBasedLimiter;
import org.apache.gobblin.util.limiter.broker.SharedLimiterKey;
import org.apache.gobblin.util.limiter.stressTest.RateComputingLimiterContainer;
import org.apache.gobblin.util.limiter.stressTest.StressTestUtils;
import org.apache.gobblin.util.limiter.stressTest.Stressor;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
/**
* A stress test for throttling service. It creates a number of threads, each one running a stressor using a mock
* {@link RestliServiceBasedLimiter}.
*
* The mock {@link RestliServiceBasedLimiter} sends requests to an embedded {@link LimiterServerResource}, adding an
* artificial latency to the requests representing the network latency.
*
* The stress test prints permit granting statistics every 15 seconds.
*/
@Slf4j
public class LocalStressTest {
  public static final Option STRESSOR_THREADS =
      new Option("stressorThreads", true, "Number of stressor threads");
  public static final Option PROCESSOR_THREADS =
      new Option("processorThreads", true, "Number of request processor threads.");
  public static final Option ARTIFICIAL_LATENCY =
      new Option("latency", true, "Artificial request latency in millis.");
  public static final Option QPS =
      new Option("qps", true, "Target qps.");
  // Register ALL options used below; previously ARTIFICIAL_LATENCY and QPS were parsed
  // from the command line but never added here, so commons-cli could not accept them.
  public static final Options OPTIONS = StressTestUtils.OPTIONS.addOption(STRESSOR_THREADS)
      .addOption(PROCESSOR_THREADS).addOption(ARTIFICIAL_LATENCY).addOption(QPS);
  public static final int DEFAULT_STRESSOR_THREADS = 10;
  public static final int DEFAULT_PROCESSOR_THREADS = 10;
  public static final int DEFAULT_ARTIFICIAL_LATENCY = 100;
  public static final int DEFAULT_TARGET_QPS = 100;
  /**
   * Entry point: starts an embedded throttling server with a QPS policy, runs the
   * configured number of stressor threads against it, reports statistics every 15 s,
   * and exits with the number of failed stressor threads as the status code.
   */
  public static void main(String[] args) throws Exception {
    CommandLine cli = StressTestUtils.parseCommandLine(OPTIONS, args);
    int stressorThreads = Integer.parseInt(cli.getOptionValue(STRESSOR_THREADS.getOpt(), Integer.toString(
        DEFAULT_STRESSOR_THREADS)));
    int processorThreads = Integer.parseInt(cli.getOptionValue(PROCESSOR_THREADS.getOpt(), Integer.toString(
        DEFAULT_PROCESSOR_THREADS)));
    int artificialLatency = Integer.parseInt(cli.getOptionValue(ARTIFICIAL_LATENCY.getOpt(), Integer.toString(
        DEFAULT_ARTIFICIAL_LATENCY)));
    // Parse as long directly (previously Integer.parseInt, which would overflow for qps > 2^31-1).
    long targetQps = Long.parseLong(cli.getOptionValue(QPS.getOpt(), Long.toString(
        DEFAULT_TARGET_QPS)));
    Configuration configuration = new Configuration();
    StressTestUtils.populateConfigFromCli(configuration, cli);
    String resourceLimited = LocalStressTest.class.getSimpleName();
    // Configure a QPS throttling policy for the single stressed resource.
    Map<String, String> configMap = Maps.newHashMap();
    ThrottlingPolicyFactory factory = new ThrottlingPolicyFactory();
    SharedLimiterKey res1key = new SharedLimiterKey(resourceLimited);
    configMap.put(BrokerConfigurationKeyGenerator.generateKey(factory, res1key, null, ThrottlingPolicyFactory.POLICY_KEY),
        QPSPolicy.FACTORY_ALIAS);
    configMap.put(BrokerConfigurationKeyGenerator.generateKey(factory, res1key, null, QPSPolicy.QPS),
        Long.toString(targetQps));
    ThrottlingGuiceServletConfig guiceServletConfig = new ThrottlingGuiceServletConfig();
    guiceServletConfig.initialize(ConfigFactory.parseMap(configMap));
    LimiterServerResource limiterServer = guiceServletConfig.getInjector().getInstance(LimiterServerResource.class);
    RateComputingLimiterContainer limiterContainer = new RateComputingLimiterContainer();
    Class<? extends Stressor> stressorClass =
        configuration.getClass(StressTestUtils.STRESSOR_CLASS, StressTestUtils.DEFAULT_STRESSOR_CLASS, Stressor.class);
    ExecutorService executorService = Executors.newFixedThreadPool(stressorThreads);
    SharedResourcesBroker broker =
        guiceServletConfig.getInjector().getInstance(Key.get(SharedResourcesBroker.class, Names.named(LimiterServerResource.BROKER_INJECT_NAME)));
    ThrottlingPolicy policy = (ThrottlingPolicy) broker.getSharedResource(new ThrottlingPolicyFactory(),
        new SharedLimiterKey(resourceLimited));
    // Print permit-granting statistics every 15 seconds for the duration of the test.
    ScheduledExecutorService reportingThread = Executors.newSingleThreadScheduledExecutor();
    reportingThread.scheduleAtFixedRate(new Reporter(limiterContainer, policy), 0, 15, TimeUnit.SECONDS);
    Queue<Future<?>> futures = new LinkedList<>();
    // Requester simulating network latency and a bounded server-side processor pool.
    MockRequester requester = new MockRequester(limiterServer, artificialLatency, processorThreads);
    requester.start();
    for (int i = 0; i < stressorThreads; i++) {
      RestliServiceBasedLimiter restliLimiter = RestliServiceBasedLimiter.builder().resourceLimited(resourceLimited)
          .requestSender(requester)
          .serviceIdentifier("stressor" + i).build();
      Stressor stressor = stressorClass.newInstance();
      stressor.configure(configuration);
      futures.add(executorService.submit(new StressorRunner(limiterContainer.decorateLimiter(restliLimiter),
          stressor)));
    }
    // Wait for all stressors, counting failures instead of aborting on the first one.
    int stressorFailures = 0;
    for (Future<?> future : futures) {
      try {
        future.get();
      } catch (ExecutionException ee) {
        stressorFailures++;
      }
    }
    requester.stop();
    executorService.shutdownNow();
    if (stressorFailures > 0) {
      log.error("There were " + stressorFailures + " failed stressor threads.");
    }
    System.exit(stressorFailures);
  }
  /** Runs a single stressor against its limiter, honoring the Limiter start/stop lifecycle. */
  @RequiredArgsConstructor
  private static class StressorRunner implements Runnable {
    private final Limiter limiter;
    private final Stressor stressor;
    @Override
    public void run() {
      try {
        this.limiter.start();
        this.stressor.run(this.limiter);
        this.limiter.stop();
      } catch (InterruptedException ie) {
        log.error("Error: ", ie);
      }
    }
  }
  /** Periodically logs request-rate and unused-permit statistics plus token-bucket state. */
  @RequiredArgsConstructor
  private static class Reporter implements Runnable {
    private final RateComputingLimiterContainer limiter;
    private final ThrottlingPolicy policy;
    @Override
    public void run() {
      DescriptiveStatistics stats = limiter.getRateStatsSinceLastReport();
      if (stats != null) {
        log.info(String.format("Requests rate stats: count: %d, min: %f, max: %f, mean: %f, std: %f, sum: %f", stats.getN(),
            stats.getMin(), stats.getMax(), stats.getMean(), stats.getStandardDeviation(), stats.getSum()));
      }
      stats = limiter.getUnusedPermitsSinceLastReport();
      if (stats != null) {
        log.info(String.format("Unused permits rate stats: count: %d, min: %f, max: %f, mean: %f, std: %f, sum: %f", stats.getN(),
            stats.getMin(), stats.getMax(), stats.getMean(), stats.getStandardDeviation(), stats.getSum()));
      }
      if (this.policy instanceof QPSPolicy) {
        QPSPolicy qpsPolicy = (QPSPolicy) this.policy;
        DynamicTokenBucket dynamicTokenBucket = qpsPolicy.getTokenBucket();
        TokenBucket tokenBucket = dynamicTokenBucket.getTokenBucket();
        log.info("Stored tokens: " + tokenBucket.getStoredTokens());
      }
    }
  }
}
| 1,676 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-client/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-client/src/main/java/org/apache/gobblin/util/limiter/RestliServiceBasedLimiter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.limiter;
import com.codahale.metrics.Meter;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.util.NoopCloseable;
import java.io.Closeable;
import lombok.Builder;
import lombok.Getter;
/**
* A {@link Limiter} that forwards permit requests to a Rest.li throttling service endpoint.
*/
public class RestliServiceBasedLimiter implements Limiter {
  public static final String PERMITS_REQUESTED_METER_NAME = "limiter.restli.permitsRequested";
  public static final String PERMITS_GRANTED_METER_NAME = "limiter.restli.permitsGranted";
  // NOTE(review): field name keeps its historical typo ("bached") because Lombok derives
  // the public getter name from it; renaming would break getBachedPermitsContainer() callers.
  @Getter @VisibleForTesting
  private final BatchedPermitsRequester bachedPermitsContainer;
  private final Optional<MetricContext> metricContext;
  private final Optional<Meter> permitsRequestedMeter;
  private final Optional<Meter> permitsGrantedMeter;
  /**
   * @param resourceLimited id of the throttled resource.
   * @param serviceIdentifier identifier of the requesting service, sent to the server.
   * @param metricContext optional context for request/grant meters; null disables metrics.
   * @param requestSender transport used to reach the throttling server (required).
   * @param permitRequestTimeoutMillis max time to wait for a permit request; 0 for default.
   */
  @Builder
  private RestliServiceBasedLimiter(String resourceLimited, String serviceIdentifier,
      MetricContext metricContext, RequestSender requestSender, long permitRequestTimeoutMillis) {
    Preconditions.checkNotNull(requestSender, "Request sender cannot be null.");
    this.bachedPermitsContainer = BatchedPermitsRequester.builder()
        .resourceId(resourceLimited).requestorIdentifier(serviceIdentifier).requestSender(requestSender)
        .maxTimeoutMillis(permitRequestTimeoutMillis).build();
    this.metricContext = Optional.fromNullable(metricContext);
    if (this.metricContext.isPresent()) {
      this.permitsRequestedMeter = Optional.of(this.metricContext.get().meter(PERMITS_REQUESTED_METER_NAME));
      this.permitsGrantedMeter = Optional.of(this.metricContext.get().meter(PERMITS_GRANTED_METER_NAME));
    } else {
      this.permitsRequestedMeter = Optional.absent();
      this.permitsGrantedMeter = Optional.absent();
    }
  }
  @Override
  public void start() {
    // Do nothing
  }
  /**
   * Requests {@code permits} from the throttling server, blocking until granted or denied.
   * @return a no-op {@link Closeable} when permits were granted, or null when denied.
   */
  @Override
  public Closeable acquirePermits(long permits) throws InterruptedException {
    Instrumented.markMeter(this.permitsRequestedMeter, permits);
    boolean permitsGranted = this.bachedPermitsContainer.getPermits(permits);
    if (permitsGranted) {
      // Only count permits that were actually granted. Previously this meter was marked
      // unconditionally, inflating the granted metric on denied requests.
      Instrumented.markMeter(this.permitsGrantedMeter, permits);
    }
    return permitsGranted ? NoopCloseable.INSTANCE : null;
  }
  @Override
  public void stop() {
    // Do nothing
  }
  /**
   * @return the number of permits acquired from the server and not yet used.
   */
  @VisibleForTesting
  public long getUnusedPermits() {
    return this.bachedPermitsContainer.getPermitBatchContainer().getTotalAvailablePermits();
  }
  /** Discards all locally cached permits (test hook). */
  @VisibleForTesting
  public void clearAllStoredPermits() {
    this.bachedPermitsContainer.clearAllStoredPermits();
  }
}
| 1,677 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-client/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-client/src/main/java/org/apache/gobblin/util/limiter/RestliLimiterFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.limiter;
import com.google.common.collect.ImmutableMap;
import com.linkedin.restli.client.RestClient;
import org.apache.gobblin.broker.ResourceCoordinate;
import org.apache.gobblin.broker.ResourceInstance;
import org.apache.gobblin.broker.iface.ConfigView;
import org.apache.gobblin.broker.iface.NotConfiguredException;
import org.apache.gobblin.broker.iface.ScopeType;
import org.apache.gobblin.broker.iface.ScopedConfigView;
import org.apache.gobblin.broker.iface.SharedResourceFactory;
import org.apache.gobblin.broker.iface.SharedResourceFactoryResponse;
import org.apache.gobblin.broker.iface.SharedResourcesBroker;
import org.apache.gobblin.metrics.broker.MetricContextFactory;
import org.apache.gobblin.metrics.broker.MetricContextKey;
import org.apache.gobblin.metrics.broker.SubTaggedMetricContextKey;
import org.apache.gobblin.restli.SharedRestClientKey;
import org.apache.gobblin.util.limiter.broker.SharedLimiterKey;
import lombok.extern.slf4j.Slf4j;
/**
* A {@link org.apache.gobblin.util.limiter.broker.SharedLimiterFactory} that creates {@link RestliServiceBasedLimiter}s. It
* automatically acquires a {@link RestClient} from the broker for restli service name {@link #RESTLI_SERVICE_NAME}.
*/
@Slf4j
public class RestliLimiterFactory<S extends ScopeType<S>>
    implements SharedResourceFactory<RestliServiceBasedLimiter, SharedLimiterKey, S> {
  public static final String FACTORY_NAME = "limiter.restli";
  public static final String RESTLI_SERVICE_NAME = "throttling";
  public static final String SERVICE_IDENTIFIER_KEY = "serviceId";
  public static final String PERMIT_REQUEST_TIMEOUT = "permitRequestTimeoutMillis";
  @Override
  public String getName() {
    return FACTORY_NAME;
  }
  /**
   * Builds a {@link RestliServiceBasedLimiter} for the given key, wiring in a metric
   * context and a throttling-service request sender obtained from the broker.
   */
  @Override
  public SharedResourceFactoryResponse<RestliServiceBasedLimiter> createResource(SharedResourcesBroker<S> broker,
      ScopedConfigView<S, SharedLimiterKey> config) throws NotConfiguredException {
    S scope = config.getScope();
    // Limiters are shared process-wide: delegate creation to the root scope.
    if (scope != scope.rootScope()) {
      return new ResourceCoordinate<>(this, config.getKey(), scope.rootScope());
    }
    String serviceIdentifier = "UNKNOWN";
    if (config.getConfig().hasPath(SERVICE_IDENTIFIER_KEY)) {
      serviceIdentifier = config.getConfig().getString(SERVICE_IDENTIFIER_KEY);
    }
    String resourceLimited = config.getKey().getResourceLimitedPath();
    // Per-resource metric context tagged with the limited resource's id.
    MetricContextKey metricContextKey =
        new SubTaggedMetricContextKey(RestliServiceBasedLimiter.class.getSimpleName() + "_" + resourceLimited,
            ImmutableMap.of("resourceLimited", resourceLimited));
    long permitRequestTimeout = 0L;
    if (config.getConfig().hasPath(PERMIT_REQUEST_TIMEOUT)) {
      permitRequestTimeout = config.getConfig().getLong(PERMIT_REQUEST_TIMEOUT);
    }
    RestliServiceBasedLimiter limiter = RestliServiceBasedLimiter.builder()
        .resourceLimited(resourceLimited)
        .serviceIdentifier(serviceIdentifier)
        .metricContext(broker.getSharedResource(new MetricContextFactory<S>(), metricContextKey))
        .requestSender(broker.getSharedResource(new RedirectAwareRestClientRequestSender.Factory<S>(), new SharedRestClientKey(RESTLI_SERVICE_NAME)))
        .permitRequestTimeoutMillis(permitRequestTimeout)
        .build();
    return new ResourceInstance<>(limiter);
  }
  @Override
  public S getAutoScope(SharedResourcesBroker<S> broker, ConfigView<S, SharedLimiterKey> config) {
    // All limiters live at the root scope of the broker's scope hierarchy.
    return broker.selfScope().getType().rootScope();
  }
}
| 1,678 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-client/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-client/src/main/java/org/apache/gobblin/util/limiter/RequestSender.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.limiter;
import com.linkedin.common.callback.Callback;
import com.linkedin.restli.client.Response;
import org.apache.gobblin.restli.throttling.PermitAllocation;
import org.apache.gobblin.restli.throttling.PermitRequest;
/**
* Used to send a {@link PermitRequest}s to a Throttling server.
*/
public interface RequestSender {
  /** Sends the permit request asynchronously; the outcome is delivered through {@code callback}. */
  void sendRequest(PermitRequest request, Callback<Response<PermitAllocation>> callback);
  /** Signals a failure that must not be retried (e.g. a permanent client or server error). */
  class NonRetriableException extends Exception {
    public NonRetriableException(String message, Throwable cause) {
      super(message, cause);
    }
    public NonRetriableException(String message) {
      super(message);
    }
  }
}
| 1,679 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-client/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-client/src/main/java/org/apache/gobblin/util/limiter/RedirectAwareRestClientRequestSender.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.limiter;
import java.net.ConnectException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.List;
import java.util.Random;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;
import com.google.common.annotations.VisibleForTesting;
import com.linkedin.common.callback.Callback;
import com.linkedin.r2.RemoteInvocationException;
import com.linkedin.r2.RetriableRequestException;
import com.linkedin.restli.client.Response;
import com.linkedin.restli.client.RestClient;
import com.linkedin.restli.client.RestLiResponseException;
import com.linkedin.restli.common.HttpStatus;
import org.apache.gobblin.broker.ResourceInstance;
import org.apache.gobblin.broker.iface.ConfigView;
import org.apache.gobblin.broker.iface.NotConfiguredException;
import org.apache.gobblin.broker.iface.ScopeType;
import org.apache.gobblin.broker.iface.ScopedConfigView;
import org.apache.gobblin.broker.iface.SharedResourceFactory;
import org.apache.gobblin.broker.iface.SharedResourceFactoryResponse;
import org.apache.gobblin.broker.iface.SharedResourcesBroker;
import org.apache.gobblin.restli.SharedRestClientFactory;
import org.apache.gobblin.restli.SharedRestClientKey;
import org.apache.gobblin.restli.UriRestClientKey;
import org.apache.gobblin.restli.throttling.PermitAllocation;
import org.apache.gobblin.restli.throttling.PermitRequest;
import org.apache.gobblin.util.ExponentialBackoff;
import lombok.Getter;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
/**
* A {@link RequestSender} that handles redirects and unreachable uris transparently.
*/
@Slf4j
public class RedirectAwareRestClientRequestSender extends RestClientRequestSender {
private static final int MIN_RETRIES = 3;
/**
* A {@link SharedResourceFactory} that creates {@link RedirectAwareRestClientRequestSender}s.
* @param <S>
*/
public static class Factory<S extends ScopeType<S>> implements SharedResourceFactory<RequestSender, SharedRestClientKey, S> {
  @Override
  public String getName() {
    // Reuses the plain rest-client factory name so this factory can substitute for it in broker config.
    return SharedRestClientFactory.FACTORY_NAME;
  }
  @Override
  public SharedResourceFactoryResponse<RequestSender> createResource(
      SharedResourcesBroker<S> broker, ScopedConfigView<S, SharedRestClientKey> config)
      throws NotConfiguredException {
    try {
      // All server uri prefixes configured for this key; the sender rotates among them on failure.
      List<String> connectionPrefixes = SharedRestClientFactory.parseConnectionPrefixes(config.getConfig(), config.getKey());
      return new ResourceInstance<>(
          new RedirectAwareRestClientRequestSender(broker, connectionPrefixes));
    } catch (URISyntaxException use) {
      // A malformed prefix is a configuration error, not a recoverable condition.
      throw new RuntimeException(use);
    }
  }
  @Override
  public S getAutoScope(SharedResourcesBroker<S> broker, ConfigView<S, SharedRestClientKey> config) {
    // Request senders are shared process-wide (root scope).
    return broker.selfScope().getType().rootScope();
  }
}
private final SharedResourcesBroker<?> broker;
// Uri prefixes of all known throttling servers; the sender rotates among them.
private final List<String> connectionPrefixes;
// Index into connectionPrefixes of the last server tried; -1 until the first selection.
private volatile int lastPrefixAttempted = -1;
private volatile RestClient restClient;
@Getter
private volatile String currentServerPrefix;
// State for rate-limited aggregated request logging (see logRequest / logAggregatedRequests).
private String lastLogPrefix = "";
private AtomicInteger requestsSinceLastLog = new AtomicInteger(0);
private long lastLogTimeNanos = 0;
/**
 * @param broker {@link SharedResourcesBroker} used to create {@link RestClient}s.
 * @param connectionPrefixes List of uri prefixes of available servers.
 * @throws NotConfiguredException if a {@link RestClient} cannot be built for the chosen prefix.
 */
public RedirectAwareRestClientRequestSender(SharedResourcesBroker<?> broker, List<String> connectionPrefixes)
    throws NotConfiguredException {
  this.broker = broker;
  this.connectionPrefixes = connectionPrefixes;
  // Pick an initial server (random start, then round robin) and build a client for it.
  updateRestClient(getNextConnectionPrefix(), "service start", null);
}
/**
 * Round-robin selection of the next server prefix; the starting index is randomized so
 * multiple clients do not all contact the same server first.
 * NOTE(review): the read-increment-write on {@code lastPrefixAttempted} is not atomic, so
 * concurrent switches could occasionally pick the same prefix twice — confirm acceptable.
 */
private String getNextConnectionPrefix() {
  if (this.lastPrefixAttempted < 0) {
    this.lastPrefixAttempted = new Random().nextInt(this.connectionPrefixes.size());
  }
  this.lastPrefixAttempted = (this.lastPrefixAttempted + 1) % this.connectionPrefixes.size();
  log.info("Round robin: " + this.lastPrefixAttempted);
  return this.connectionPrefixes.get(this.lastPrefixAttempted);
}
@Override
public void sendRequest(PermitRequest request, Callback<Response<PermitAllocation>> callback) {
  // Record the request for aggregated logging, then delegate to the rest-client sender.
  logRequest();
  super.sendRequest(request, callback);
}
/**
 * Rate-limited logging of outgoing requests: logs once at INFO when the target server changes,
 * otherwise emits an aggregated count at most once per minute.
 */
private void logRequest() {
  String prefix = getCurrentServerPrefix();
  if (!prefix.equals(this.lastLogPrefix)) {
    // Server changed: flush the counts accumulated for the previous server.
    logAggregatedRequests(this.lastLogPrefix);
    log.info("Sending request to " + prefix);
    this.lastLogPrefix = prefix;
    return;
  }
  this.requestsSinceLastLog.incrementAndGet();
  log.debug("Sending request to {}", prefix);
  if (TimeUnit.SECONDS.convert(System.nanoTime() - this.lastLogTimeNanos, TimeUnit.NANOSECONDS) > 60) { // 1 minute
    logAggregatedRequests(prefix);
  }
}
/**
 * Emit (and reset) the count of requests sent since the last aggregated log line.
 */
private void logAggregatedRequests(String prefix) {
  int requestCount = this.requestsSinceLastLog.getAndSet(0);
  long nowNanos = System.nanoTime();
  long elapsedMillis = TimeUnit.MILLISECONDS.convert(nowNanos - this.lastLogTimeNanos, TimeUnit.NANOSECONDS);
  this.lastLogTimeNanos = nowNanos;
  if (requestCount > 0) {
    log.info(String.format("Made %d requests to %s over the last %d millis.", requestCount, prefix, elapsedMillis));
  }
}
@Override
protected RestClient getRestClient() {
  // Volatile read: returns the client for the most recently selected server prefix.
  return this.restClient;
}
/**
 * Wrap the callback so redirects and connection failures are retried transparently.
 * Already-decorated callbacks are returned unchanged to avoid double wrapping on retries.
 */
@Override
protected Callback<Response<PermitAllocation>> decorateCallback(PermitRequest request,
    Callback<Response<PermitAllocation>> callback) {
  return callback instanceof CallbackDecorator ? callback : new CallbackDecorator(request, callback);
}
/**
 * Point this sender at a new server prefix, obtaining the corresponding shared {@link RestClient}
 * from the broker.
 *
 * @param uri the new server uri prefix.
 * @param reason human-readable reason for the switch (logged).
 * @param errorCause if non-null, the failure that triggered the switch (logged at ERROR).
 * @throws NotConfiguredException if the broker cannot create a client for the prefix.
 */
@VisibleForTesting
void updateRestClient(String uri, String reason, Throwable errorCause) throws NotConfiguredException {
  if (errorCause == null) {
    log.info(String.format("Switching to server prefix %s due to: %s", uri, reason));
  } else {
    log.error(String.format("Switching to server prefix %s due to: %s", uri, reason), errorCause);
  }
  this.currentServerPrefix = uri;
  this.restClient = (RestClient) this.broker.getSharedResource(new SharedRestClientFactory(),
      new UriRestClientKey(RestliLimiterFactory.RESTLI_SERVICE_NAME, uri));
}
/**
 * A {@link Callback} decorator that intercepts certain errors (301 redirects and {@link ConnectException}s) and
 * retries transparently. After at most 5 redirects, or one connection failure per configured prefix
 * (plus {@code MIN_RETRIES}), the failure is surfaced to the underlying callback as non-retriable.
 */
@RequiredArgsConstructor
private class CallbackDecorator implements Callback<Response<PermitAllocation>> {
  private final PermitRequest originalRequest;
  private final Callback<Response<PermitAllocation>> underlying;
  // Backoff between retries so a flapping server is not hammered.
  private final ExponentialBackoff exponentialBackoff = ExponentialBackoff.builder().maxDelay(10000L).initialDelay(500L).build();
  private int redirects = 0;
  private int retries = 0;

  @Override
  public void onError(Throwable error) {
    try {
      if (error instanceof RestLiResponseException &&
          ((RestLiResponseException) error).getStatus() == HttpStatus.S_301_MOVED_PERMANENTLY.getCode()) {
        this.redirects++;
        if (this.redirects >= 5) {
          this.underlying.onError(new NonRetriableException("Too many redirects."));
          // Terminal failure already delivered; must not fall through and retry anyway.
          return;
        }
        RestLiResponseException responseExc = (RestLiResponseException) error;
        String newUri = (String) responseExc.getErrorDetails().get("Location");
        RedirectAwareRestClientRequestSender.this.updateRestClient(
            SharedRestClientFactory.resolveUriPrefix(new URI(newUri)), "301 redirect", null);
        this.exponentialBackoff.awaitNextRetry();
        sendRequest(this.originalRequest, this);
      } else if (error instanceof RemoteInvocationException
          && shouldCatchExceptionAndSwitchUrl((RemoteInvocationException) error)) {
        this.retries++;
        if (this.retries > RedirectAwareRestClientRequestSender.this.connectionPrefixes.size() + MIN_RETRIES) {
          this.underlying.onError(new NonRetriableException("Failed to connect to all available connection prefixes.", error));
          // Terminal failure already delivered; must not fall through and retry anyway.
          return;
        }
        updateRestClient(getNextConnectionPrefix(), "Failed to communicate with " + getCurrentServerPrefix(), error);
        this.exponentialBackoff.awaitNextRetry();
        sendRequest(this.originalRequest, this);
      } else {
        // Not an error we know how to recover from: surface it to the caller.
        this.underlying.onError(error);
      }
    } catch (Throwable t) {
      // Failures while handling the error (bad redirect uri, interrupted backoff, ...) are surfaced as-is.
      this.underlying.onError(t);
    }
  }

  @Override
  public void onSuccess(Response<PermitAllocation> result) {
    this.underlying.onSuccess(result);
  }
}
/**
 * @return whether the given failure indicates a server communication problem that should be handled
 *         by switching to a different server prefix and retrying.
 */
public boolean shouldCatchExceptionAndSwitchUrl(RemoteInvocationException exc) {
  Throwable cause = exc.getCause();
  return cause instanceof RetriableRequestException
      || cause instanceof ConnectException
      || cause instanceof TimeoutException;
}
}
| 1,680 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-client/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-client/src/main/java/org/apache/gobblin/util/limiter/BatchedPermitsRequester.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.limiter;
import java.io.Closeable;
import java.io.IOException;
import java.util.Collection;
import java.util.Comparator;
import java.util.Iterator;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import com.codahale.metrics.Histogram;
import com.codahale.metrics.Timer;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterables;
import com.google.common.collect.Ordering;
import com.google.common.collect.TreeMultimap;
import com.linkedin.common.callback.Callback;
import com.linkedin.data.template.GetMode;
import com.linkedin.restli.client.Response;
import com.linkedin.restli.client.RestLiResponseException;
import com.linkedin.restli.common.HttpStatus;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.restli.throttling.PermitAllocation;
import org.apache.gobblin.restli.throttling.PermitRequest;
import org.apache.gobblin.restli.throttling.ThrottlingProtocolVersion;
import org.apache.gobblin.util.ClosableTimerContext;
import org.apache.gobblin.util.ExecutorsUtils;
import org.apache.gobblin.util.NoopCloseable;
import org.apache.gobblin.util.Sleeper;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import javax.annotation.Nullable;
import javax.annotation.concurrent.NotThreadSafe;
import lombok.AccessLevel;
import lombok.Builder;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
/**
* An object that requests batches of permits from an external throttling server. It tries to hide the latency of doing
* external permit requests by requesting them in batches and preemptively requesting permits before the current ones
* are exhausted.
*/
@Slf4j
class BatchedPermitsRequester {
public static final String REST_REQUEST_TIMER = "limiter.restli.restRequestTimer";
public static final String REST_REQUEST_PERMITS_HISTOGRAM = "limiter.restli.restRequestPermitsHistogram";
/** These status codes are considered non-retriable. */
public static final ImmutableSet<Integer> NON_RETRIABLE_ERRORS = ImmutableSet.of(HttpStatus.S_403_FORBIDDEN.getCode(),
    HttpStatus.S_422_UNPROCESSABLE_ENTITY.getCode());
/** Target frequency at which external requests are performed. */
public static final long DEFAULT_TARGET_MILLIS_BETWEEN_REQUESTS = 10000;
/** Maximum number of retries to communicate with the server. */
protected static final int MAX_RETRIES = 5;
/** How long retries stay blocked after a non-retriable failure. */
private static final long RETRY_DELAY_ON_NON_RETRIABLE_EXCEPTION = 60000; // 1 minute
/** Depletion rate reported for batches with no usable rate yet (effectively "infinite"). */
private static final double MAX_DEPLETION_RATE = 1e20;
/** A new permit request may grow at most this many times the current batch size. */
public static final int MAX_GROWTH_REQUEST = 2;
/** Upper bound on a single await() inside getPermits(), so the deadline is re-checked regularly. */
private static final long GET_PERMITS_MAX_SLEEP_MILLIS = 1000;
/** Shared scheduler used to re-trigger permit requests once a retry-block expires. */
private static final ScheduledExecutorService SCHEDULE_EXECUTOR_SERVICE =
    Executors.newScheduledThreadPool(1, ExecutorsUtils.newDaemonThreadFactory(Optional.of(log),
        Optional.of(BatchedPermitsRequester.class.getName() + "-schedule-%d")));
@Getter(AccessLevel.PROTECTED) @VisibleForTesting
private final PermitBatchContainer permitBatchContainer;
// Guards the permit container and retry bookkeeping; newPermitsAvailable is its condition.
private final Lock lock;
private final Condition newPermitsAvailable;
// Ensures at most one in-flight server request (see maybeSendNewPermitRequest / clearSemaphore).
private final Semaphore requestSemaphore;
// Template request carrying the resource id and requestor identifier; copied per request.
private final PermitRequest basePermitRequest;
private final RequestSender requestSender;
// Metrics; null when no MetricContext was supplied.
private final Timer restRequestTimer;
private final Histogram restRequestHistogram;
// Consecutive failed server calls; reset to 0 on success.
private volatile AtomicInteger retries = new AtomicInteger(0);
private final RetryStatus retryStatus;
// Tracks the number and average size of not-yet-satisfied getPermits() calls.
private final SynchronizedAverager permitsOutstanding;
private final long targetMillisBetweenRequests;
// Incremented on every server response; lets getPermits() detect synchronous callbacks.
private final AtomicLong callbackCounter;
/** Permit requests will timeout after this many millis. */
private final long maxTimeout;
/** Any request larger than this is known to be impossible to satisfy. */
private long knownUnsatisfiablePermits;
// Callback of the in-flight request; null when no request is outstanding.
private volatile AllocationCallback currentCallback;
/**
 * @param resourceId identifier of the throttled resource permits are requested for (required).
 * @param requestorIdentifier identifier of this requestor, reported to the server (required).
 * @param targetMillisBetweenRequests desired interval between server round-trips; non-positive uses the default.
 * @param requestSender transport used to send {@link PermitRequest}s to the server.
 * @param metricContext if non-null, used to report request timing / size metrics.
 * @param maxTimeoutMillis max millis a {@link #getPermits(long)} call may block; non-positive defaults to 120s.
 */
@Builder
private BatchedPermitsRequester(String resourceId, String requestorIdentifier,
    long targetMillisBetweenRequests, RequestSender requestSender, MetricContext metricContext, long maxTimeoutMillis) {
  Preconditions.checkArgument(!Strings.isNullOrEmpty(resourceId), "Must provide a resource id.");
  Preconditions.checkArgument(!Strings.isNullOrEmpty(requestorIdentifier), "Must provide a requestor identifier.");
  this.permitBatchContainer = new PermitBatchContainer();
  this.lock = new ReentrantLock();
  this.newPermitsAvailable = this.lock.newCondition();
  /** Ensures there is only one in-flight request at a time. */
  this.requestSemaphore = new Semaphore(1);
  /** Number of not-yet-satisfied permits. */
  this.permitsOutstanding = new SynchronizedAverager();
  this.targetMillisBetweenRequests = targetMillisBetweenRequests > 0 ? targetMillisBetweenRequests :
      DEFAULT_TARGET_MILLIS_BETWEEN_REQUESTS;
  this.requestSender = requestSender;
  this.retryStatus = new RetryStatus();
  this.basePermitRequest = new PermitRequest();
  this.basePermitRequest.setResource(resourceId);
  this.basePermitRequest.setRequestorIdentifier(requestorIdentifier);
  this.restRequestTimer = metricContext == null ? null : metricContext.timer(REST_REQUEST_TIMER);
  this.restRequestHistogram = metricContext == null ? null : metricContext.histogram(REST_REQUEST_PERMITS_HISTOGRAM);
  this.callbackCounter = new AtomicLong();
  this.maxTimeout = maxTimeoutMillis > 0 ? maxTimeoutMillis : 120000;
  this.knownUnsatisfiablePermits = Long.MAX_VALUE;
}
/**
 * Try to get a number of permits from this requester, blocking until permits are available, the
 * configured timeout elapses, or the request is known to be unsatisfiable.
 *
 * @param permits number of permits desired; non-positive requests succeed immediately.
 * @return true if permits were obtained successfully.
 * @throws InterruptedException if the calling thread is interrupted while waiting.
 */
public boolean getPermits(long permits) throws InterruptedException {
  if (permits <= 0) {
    return true;
  }
  long startTimeNanos = System.nanoTime();
  // Register this pending request so future permit-request sizing accounts for it.
  this.permitsOutstanding.addEntryWithWeight(permits);
  this.lock.lock();
  try {
    while (true) {
      if (permits >= this.knownUnsatisfiablePermits) {
        // We are requesting more permits than the remote policy will ever be able to satisfy, return immediately with no permits
        log.warn(String.format("Server has indicated number of permits is unsatisfiable. "
            + "Permits requested: %d, known unsatisfiable permits: %d ", permits, this.knownUnsatisfiablePermits));
        break;
      }
      if (elapsedMillis(startTimeNanos) > this.maxTimeout) {
        // Max timeout reached, break
        log.warn("Reached timeout waiting for permits. Timeout: " + this.maxTimeout);
        break;
      }
      if (this.permitBatchContainer.tryTake(permits)) {
        this.permitsOutstanding.removeEntryWithWeight(permits);
        return true;
      }
      if (this.retryStatus.canRetryWithinMillis(remainingTime(startTimeNanos, this.maxTimeout))) {
        long callbackCounterSnap = this.callbackCounter.get();
        maybeSendNewPermitRequest();
        if (this.callbackCounter.get() == callbackCounterSnap) {
          // If a callback has happened since we tried to send the new permit request, don't await
          // Since some request senders may be synchronous, we would have missed the notification
          boolean ignore = this.newPermitsAvailable.await(
              Math.min(GET_PERMITS_MAX_SLEEP_MILLIS, remainingTime(startTimeNanos, this.maxTimeout)), TimeUnit.MILLISECONDS);
        }
      } else {
        // Retries are blocked past our deadline (e.g. after a non-retriable failure): give up.
        break;
      }
    }
  } finally {
    this.lock.unlock();
  }
  this.permitsOutstanding.removeEntryWithWeight(permits);
  return false;
}
/** @return millis left of {@code timeout} measured from {@code startTimeNanos}, floored at 0. */
private long remainingTime(long startTimeNanos, long timeout) {
  long remaining = timeout - elapsedMillis(startTimeNanos);
  return remaining > 0 ? remaining : 0;
}
/** @return millis elapsed since {@code startTimeNanos} (a {@link System#nanoTime()} reading). */
private long elapsedMillis(long startTimeNanos) {
  return TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTimeNanos);
}
/**
 * Send a new permit request to the server if one is needed and none is currently in flight.
 * The request semaphore guarantees at most one outstanding request; a request whose callback has
 * not fired within 30s is considered lost and superseded.
 */
private synchronized void maybeSendNewPermitRequest() {
  while (!this.requestSemaphore.tryAcquire()) {
    if (this.currentCallback == null) {
      throw new IllegalStateException("Semaphore is unavailable while callback is null!");
    }
    if (this.currentCallback.elapsedTime() > 30000) {
      // If the previous callback has not returned after 30s, we consider the call lost and try again
      // Note we expect Rest.li to call onError for most failure situations, this logic just handles the edge
      // case were Rest.li fails somehow and we don't want to just hang.
      log.warn("Last request did not return after 30s, considering it lost and retrying.");
      this.currentCallback.clearCallback();
    } else {
      return;
    }
  }
  if (!this.retryStatus.canRetryNow()) {
    // Retries are currently blocked (server-imposed delay or non-retriable failure).
    clearSemaphore();
    return;
  }
  try {
    long permits = computeNextPermitRequest();
    if (permits <= 0) {
      // Nothing worth requesting right now.
      clearSemaphore();
      return;
    }
    PermitRequest permitRequest = this.basePermitRequest.copy();
    permitRequest.setPermits(permits);
    permitRequest.setMinPermits((long) this.permitsOutstanding.getAverageWeightOrZero());
    permitRequest.setVersion(ThrottlingProtocolVersion.WAIT_ON_CLIENT.ordinal());
    if (BatchedPermitsRequester.this.restRequestHistogram != null) {
      BatchedPermitsRequester.this.restRequestHistogram.update(permits);
    }
    log.debug("Sending permit request " + permitRequest);
    this.currentCallback = new AllocationCallback(
        BatchedPermitsRequester.this.restRequestTimer == null ? NoopCloseable.INSTANCE :
        new ClosableTimerContext(BatchedPermitsRequester.this.restRequestTimer.time()), new Sleeper());
    this.requestSender.sendRequest(permitRequest, currentCallback);
  } catch (CloneNotSupportedException cnse) {
    // This should never happen.
    clearSemaphore();
    throw new RuntimeException(cnse);
  }
}
/** Acquire the in-flight-request semaphore without blocking. Exposed for unit tests only. */
@VisibleForTesting
synchronized boolean reserveSemaphore() {
  return this.requestSemaphore.tryAcquire();
}
/** Release the in-flight-request semaphore and forget the current callback. */
private synchronized void clearSemaphore() {
  if (this.requestSemaphore.availablePermits() > 0) {
    throw new IllegalStateException("Semaphore should have 0 permits!");
  }
  this.requestSemaphore.release();
  this.currentCallback = null;
}
/**
 * @return the number of permits we should request in the next request. Requests at least the
 *         currently unsatisfiable permits, and grows the batch based on the observed depletion
 *         rate (capped at {@link #MAX_GROWTH_REQUEST}x the current batch size).
 */
private long computeNextPermitRequest() {
  long candidatePermits = 0;
  long unsatisfiablePermits = this.permitsOutstanding.getTotalWeight() - this.permitBatchContainer.totalAvailablePermits;
  if (unsatisfiablePermits > 0) {
    candidatePermits = unsatisfiablePermits;
  }
  if (this.permitBatchContainer.batches.size() > 1) {
    // If there are multiple batches in the queue, don't create a new request
    return candidatePermits;
  }
  PermitBatch firstBatch = Iterables.getFirst(this.permitBatchContainer.batches.values(), null);
  if (firstBatch != null) {
    // If the current batch has more than 20% permits left, don't create a new request
    if ((double) firstBatch.getPermits() / firstBatch.getInitialPermits() > 0.2) {
      return candidatePermits;
    }
    double averageDepletionRate = firstBatch.getAverageDepletionRate();
    // Request enough for targetMillisBetweenRequests at the observed depletion rate, but never
    // grow by more than MAX_GROWTH_REQUEST times the current batch's initial size.
    long candidatePermitsByDepletion =
        Math.min((long) (averageDepletionRate * this.targetMillisBetweenRequests), MAX_GROWTH_REQUEST *
        firstBatch.getInitialPermits());
    return Math.max(candidatePermits, candidatePermitsByDepletion);
  } else {
    return candidatePermits;
  }
}
/** Create an {@link AllocationCallback} with the given {@link Sleeper}. Exposed for unit tests. */
@VisibleForTesting
AllocationCallback createAllocationCallback(Sleeper sleeper) {
  return new AllocationCallback(new NoopCloseable(), sleeper);
}
/**
* Callback for Rest request.
*/
@VisibleForTesting
class AllocationCallback implements Callback<Response<PermitAllocation>> {
private final Closeable timerContext;
private final Sleeper sleeper;
private final long startTime = System.currentTimeMillis();
private volatile boolean callbackCleared = false;
public AllocationCallback(Closeable timerContext, Sleeper sleeper) {
this.timerContext = timerContext;
this.sleeper = sleeper;
}
@Override
public void onError(Throwable exc) {
BatchedPermitsRequester.this.lock.lock();
try {
if (exc instanceof RequestSender.NonRetriableException) {
nonRetriableFail(exc, "Encountered non retriable error. ");
}
if (exc instanceof RestLiResponseException) {
int errorCode = ((RestLiResponseException) exc).getStatus();
if (NON_RETRIABLE_ERRORS.contains(errorCode)) {
nonRetriableFail(exc, "Encountered non retriable error. HTTP response code: " + errorCode);
}
}
BatchedPermitsRequester.this.retries.incrementAndGet();
if (BatchedPermitsRequester.this.retries.get() >= MAX_RETRIES) {
nonRetriableFail(exc, "Too many failures trying to communicate with throttling service.");
} else {
clearCallback();
// retry
maybeSendNewPermitRequest();
}
} catch (Throwable t) {
log.error("Error on batched permits container.", t);
} finally {
BatchedPermitsRequester.this.lock.unlock();
try {
this.timerContext.close();
} catch (IOException ioe) {
// Do nothing
}
}
}
@Override
public void onSuccess(Response<PermitAllocation> result) {
BatchedPermitsRequester.this.retries.set(0);
BatchedPermitsRequester.this.callbackCounter.incrementAndGet();
BatchedPermitsRequester.this.lock.lock();
try {
PermitAllocation allocation = result.getEntity();
log.debug("Received permit allocation " + allocation);
Long retryDelay = allocation.getMinRetryDelayMillis(GetMode.NULL);
if (retryDelay != null) {
BatchedPermitsRequester.this.retryStatus.blockRetries(retryDelay, null);
}
long waitForUse = allocation.getWaitForPermitUseMillis(GetMode.DEFAULT);
if (waitForUse > 0) {
this.sleeper.sleep(waitForUse);
}
if (allocation.getUnsatisfiablePermits(GetMode.DEFAULT) > 0) {
BatchedPermitsRequester.this.knownUnsatisfiablePermits = allocation.getUnsatisfiablePermits(GetMode.DEFAULT);
}
if (allocation.getPermits() > 0) {
BatchedPermitsRequester.this.permitBatchContainer.addPermitAllocation(allocation);
}
clearCallback();
if (allocation.getPermits() > 0) {
BatchedPermitsRequester.this.newPermitsAvailable.signalAll();
}
} catch (InterruptedException ie) {
// Thread was interrupted while waiting for permits to be usable. Permits are not yet usable, so will not
// add permits to container
} finally {
try {
this.timerContext.close();
} catch (IOException ioe) {
// Do nothing
}
BatchedPermitsRequester.this.lock.unlock();
}
}
public long elapsedTime() {
return System.currentTimeMillis() - this.startTime;
}
public synchronized void clearCallback() {
if (this.callbackCleared) {
return;
}
clearSemaphore();
this.callbackCleared = true;
}
private void nonRetriableFail(Throwable exc, String msg) {
BatchedPermitsRequester.this.retryStatus.blockRetries(RETRY_DELAY_ON_NON_RETRIABLE_EXCEPTION, exc);
BatchedPermitsRequester.this.callbackCounter.incrementAndGet();
clearCallback();
log.error(msg, exc);
// Wake up all threads so they can return false
BatchedPermitsRequester.this.newPermitsAvailable.signalAll();
}
}
/**
 * A batch of permits obtained from the server.
 */
@NotThreadSafe
@Getter
private static class PermitBatch {
  // Source of unique, monotonically increasing keys used to keep batches in FIFO order.
  private static final AtomicLong NEXT_KEY = new AtomicLong(0);
  // Permits remaining in this batch.
  private volatile long permits;
  // Epoch millis at which this batch expires.
  private final long expiration;
  private final long autoIncrementKey;
  private final long initialPermits;
  // Epoch millis of the first / last permit use; 0 until set.
  private long firstUseTime;
  private long lastPermitUsedTime;
  // Number of decrement calls served from this batch.
  private int permitRequests;

  PermitBatch(long permits, long expiration) {
    this.permits = permits;
    this.expiration = expiration;
    this.initialPermits = permits;
    this.autoIncrementKey = NEXT_KEY.getAndIncrement();
  }

  /**
   * Use this number of permits. (Note, this does not check that there are enough permits).
   */
  private void decrementPermits(long value) {
    if (this.firstUseTime == 0) {
      this.firstUseTime = System.currentTimeMillis();
    }
    this.permitRequests++;
    this.permits -= value;
    if (this.permits <= 0) {
      this.lastPermitUsedTime = System.currentTimeMillis();
    }
  }

  /**
   * Get the average rate at which permits in this batch have been used, in permits per millisecond.
   * Returns {@link #MAX_DEPLETION_RATE} when no meaningful rate can be computed yet.
   */
  private double getAverageDepletionRate() {
    if (this.firstUseTime == 0) {
      return MAX_DEPLETION_RATE;
    }
    long endTime = this.lastPermitUsedTime > 0 ? this.lastPermitUsedTime : System.currentTimeMillis();
    if (endTime > this.firstUseTime) {
      return (double) (this.initialPermits - this.permits) / (endTime - this.firstUseTime);
    } else {
      return MAX_DEPLETION_RATE;
    }
  }
}
/**
 * A container for {@link PermitBatch}es obtained from the server.
 */
static class PermitBatchContainer {
  // Batches keyed by expiration time; ties broken FIFO by each batch's auto-increment key.
  private final TreeMultimap<Long, PermitBatch> batches = TreeMultimap.create(Ordering.natural(), new Comparator<PermitBatch>() {
    @Override
    public int compare(PermitBatch o1, PermitBatch o2) {
      return Long.compare(o1.autoIncrementKey, o2.autoIncrementKey);
    }
  });
  // Sum of permits across all non-expired batches; kept in sync with `batches`.
  @Getter
  private volatile long totalAvailablePermits = 0;

  /**
   * Try to consume {@code permits} permits, draining batches closest to expiration first.
   * @return true if the permits were available and consumed.
   */
  private synchronized boolean tryTake(long permits) {
    purgeExpiredBatches();
    if (this.totalAvailablePermits < permits) {
      return false;
    }
    this.totalAvailablePermits -= permits;
    Iterator<PermitBatch> batchesIterator = this.batches.values().iterator();
    while (batchesIterator.hasNext()) {
      PermitBatch batch = batchesIterator.next();
      if (batch.getPermits() < permits) {
        // Batch cannot cover the remainder: consume it entirely and continue with the next one.
        permits -= batch.getPermits();
        batchesIterator.remove();
      } else {
        batch.decrementPermits(permits);
        return true;
      }
    }
    // This can only happen if totalAvailablePermits is not in sync with the actual batches
    throw new RuntimeException("Total permits was unsynced! This is an error in code.");
  }

  /** Print the state of the container. Useful for debugging. */
  private synchronized void printState(String prefix) {
    StringBuilder builder = new StringBuilder(prefix).append("->");
    builder.append("BatchedPermitsRequester state (").append(hashCode()).append("): ");
    builder.append("TotalPermits: ").append(this.totalAvailablePermits).append(" ");
    builder.append("Batches(").append(this.batches.size()).append("): ");
    for (PermitBatch batch : this.batches.values()) {
      builder.append(batch.getPermits()).append(",");
    }
    log.info(builder.toString());
  }

  /** Drop all batches whose expiration time is in the past, discounting their permits. */
  private synchronized void purgeExpiredBatches() {
    long now = System.currentTimeMillis();
    purgeBatches(this.batches.asMap().subMap(Long.MIN_VALUE, now).values().iterator());
  }

  /** Drop all batches regardless of expiration. */
  private synchronized void purgeAll() {
    purgeBatches(this.batches.asMap().values().iterator());
  }

  // Removes the given batches and subtracts their remaining permits from the running total.
  private void purgeBatches(Iterator<Collection<PermitBatch>> iterator) {
    while (iterator.hasNext()) {
      Collection<PermitBatch> batches = iterator.next();
      for (PermitBatch batch : batches) {
        Long permitsExpired = batch.getPermits();
        this.totalAvailablePermits -= permitsExpired;
      }
      iterator.remove();
    }
  }

  /** Store a new allocation received from the server. */
  private synchronized void addPermitAllocation(PermitAllocation allocation) {
    this.batches.put(allocation.getExpiration(),
        new PermitBatch(allocation.getPermits(), allocation.getExpiration()));
    this.totalAvailablePermits += allocation.getPermits();
  }
}
/**
 * Thread-safe tracker of a set of weighted entries, exposing their total weight, count and
 * average weight per entry.
 */
private static class SynchronizedAverager {
  private volatile long weight;
  private volatile long entries;

  /** Record one entry carrying the given weight. */
  @SuppressFBWarnings(value = "VO_VOLATILE_INCREMENT", justification = "All methods updating volatile variables are synchronized")
  public synchronized void addEntryWithWeight(long weight) {
    this.entries += 1;
    this.weight += weight;
  }

  /** Remove one entry carrying the given weight. */
  @SuppressFBWarnings(value = "VO_VOLATILE_INCREMENT", justification = "All methods updating volatile variables are synchronized")
  public synchronized void removeEntryWithWeight(long weight) {
    if (this.entries == 0) {
      throw new IllegalStateException("Cannot have a negative number of entries.");
    }
    this.entries -= 1;
    this.weight -= weight;
  }

  /** @return the average weight per entry, or 0 when there are no entries. */
  public synchronized double getAverageWeightOrZero() {
    return this.entries == 0 ? 0 : (double) this.weight / this.entries;
  }

  public long getTotalWeight() {
    return this.weight;
  }

  public long getNumEntries() {
    return this.entries;
  }
}
/**
 * Stores the retry state of a {@link BatchedPermitsRequester}, e.g. whether it can keep retrying.
 */
private class RetryStatus {
  // Epoch millis before which no new server request should be attempted.
  private long retryAt;
  // Failure that caused the current block, if any; kept for diagnostics.
  @Nullable private Throwable exception;

  public boolean canRetryNow() {
    return canRetryWithinMillis(0);
  }

  /** @return whether retries will be allowed again within the next {@code millis} milliseconds. */
  public boolean canRetryWithinMillis(long millis) {
    return System.currentTimeMillis() + millis >= this.retryAt;
  }

  /**
   * Block retries for {@code millis} milliseconds and schedule a permit request attempt for when
   * the block expires.
   */
  public void blockRetries(long millis, Throwable exception) {
    this.exception = exception;
    this.retryAt = System.currentTimeMillis() + millis;
    SCHEDULE_EXECUTOR_SERVICE.schedule(new Runnable() {
      @Override
      public void run() {
        maybeSendNewPermitRequest();
      }
    }, millis, TimeUnit.MILLISECONDS);
  }
}
/**
 * Clear all stored permits.
 */
@VisibleForTesting
public void clearAllStoredPermits() {
  this.permitBatchContainer.purgeAll();
}
}
| 1,681 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-client/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-client/src/main/java/org/apache/gobblin/util/limiter/RestClientRequestSender.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.limiter;
import com.linkedin.common.callback.Callback;
import com.linkedin.restli.client.Request;
import com.linkedin.restli.client.Response;
import com.linkedin.restli.client.RestClient;
import com.linkedin.restli.common.ComplexResourceKey;
import com.linkedin.restli.common.EmptyRecord;
import org.apache.gobblin.restli.throttling.PermitAllocation;
import org.apache.gobblin.restli.throttling.PermitRequest;
import org.apache.gobblin.restli.throttling.PermitsGetRequestBuilder;
import org.apache.gobblin.restli.throttling.PermitsRequestBuilders;
import lombok.AllArgsConstructor;
/**
 * Sends requests to a server using a {@link RestClient}. Subclasses can decorate the callback to intercept
 * certain response statuses.
 */
@AllArgsConstructor
public abstract class RestClientRequestSender implements RequestSender {

  @Override
  public void sendRequest(PermitRequest request, Callback<Response<PermitAllocation>> callback) {
    // Wrap the PermitRequest in the complex key expected by the rest.li permits resource.
    ComplexResourceKey<PermitRequest, EmptyRecord> key = new ComplexResourceKey<>(request, new EmptyRecord());
    Request<PermitAllocation> restRequest = new PermitsRequestBuilders().get().id(key).build();
    getRestClient().sendRequest(restRequest, decorateCallback(request, callback));
  }

  /**
   * Decorate the callback to intercept some responses. The default implementation returns the
   * callback unchanged.
   */
  protected Callback<Response<PermitAllocation>> decorateCallback(PermitRequest request,
      Callback<Response<PermitAllocation>> callback) {
    return callback;
  }

  /**
   * @return The {@link RestClient} to use to send the request.
   */
  protected abstract RestClient getRestClient();
}
| 1,682 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-client/src/main/java/org/apache/gobblin/util/limiter | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-client/src/main/java/org/apache/gobblin/util/limiter/stressTest/RandomDelayStartStressor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.limiter.stressTest;
import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import org.apache.gobblin.util.limiter.Limiter;
/**
 * A {@link Stressor} that sleeps for a random delay of up to 30 seconds before starting,
 * staggering stressor start times so they don't all hit the service at once.
 */
public abstract class RandomDelayStartStressor implements Stressor {

  @Override
  public void configure(Configuration configuration) {
    // No configuration needed by default; subclasses may override doRun-specific setup elsewhere.
  }

  @Override
  public void run(Limiter limiter) throws InterruptedException {
    int delaySeconds = new Random().nextInt(30);
    Thread.sleep(1000L * delaySeconds);
    doRun(limiter);
  }

  /**
   * Run the actual logic in the {@link Stressor}.
   */
  public abstract void doRun(Limiter limiter) throws InterruptedException;
}
| 1,683 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-client/src/main/java/org/apache/gobblin/util/limiter | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-client/src/main/java/org/apache/gobblin/util/limiter/stressTest/StressTestUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.limiter.stressTest;
import java.util.List;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.hadoop.conf.Configuration;
import com.google.common.base.Splitter;
import lombok.extern.slf4j.Slf4j;
/**
* Utilities for throttling service stress tests.
*/
@Slf4j
public class StressTestUtils {

  public static final Option HELP_OPT = new Option("h", "Print help");
  public static final Option CONFIG_OPT = new Option("conf", true, "Set configuration for the stressor.");
  public static final Option STRESSOR_OPT = new Option("stressor", true, "Stressor class.");

  public static final String STRESSOR_CLASS = "stressTest.stressor.class";
  public static final Class<? extends Stressor> DEFAULT_STRESSOR_CLASS = FixedOperationsStressor.class;

  // STRESSOR_OPT must be registered here: populateConfigFromCli reads it, and without registration
  // the parser rejects a "-stressor" argument when these OPTIONS are used for parsing.
  public static final Options OPTIONS =
      new Options().addOption(HELP_OPT).addOption(CONFIG_OPT).addOption(STRESSOR_OPT);

  /**
   * Parse command line. Prints usage and exits the JVM if the help option is present.
   *
   * @param options the set of recognized options.
   * @param args raw command line arguments.
   * @return the parsed {@link CommandLine}.
   * @throws ParseException if the arguments cannot be parsed against {@code options}.
   */
  public static CommandLine parseCommandLine(Options options, String[] args) throws ParseException {
    CommandLineParser parser = new DefaultParser();
    CommandLine cli = parser.parse(options, args);
    if (cli.hasOption(StressTestUtils.HELP_OPT.getOpt())) {
      HelpFormatter formatter = new HelpFormatter();
      formatter.printHelp(MRStressTest.class.getSimpleName(), OPTIONS);
      System.exit(0);
    }
    return cli;
  }

  /**
   * Add configurations provided with {@link #CONFIG_OPT} to {@link Configuration}, and record the
   * stressor class selected with {@link #STRESSOR_OPT} (defaulting to {@link #DEFAULT_STRESSOR_CLASS}).
   *
   * @throws IllegalArgumentException if a -conf argument is not of the form {@code <key>:<value>}.
   */
  public static void populateConfigFromCli(Configuration configuration, CommandLine cli) {
    String stressorClass = cli.getOptionValue(STRESSOR_OPT.getOpt(), DEFAULT_STRESSOR_CLASS.getName());
    configuration.set(STRESSOR_CLASS, stressorClass);
    if (cli.hasOption(CONFIG_OPT.getOpt())) {
      for (String arg : cli.getOptionValues(CONFIG_OPT.getOpt())) {
        List<String> tokens = Splitter.on(":").limit(2).splitToList(arg);
        if (tokens.size() < 2) {
          throw new IllegalArgumentException("Configurations must be of the form <key>:<value>");
        }
        configuration.set(tokens.get(0), tokens.get(1));
      }
    }
  }
}
| 1,684 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-client/src/main/java/org/apache/gobblin/util/limiter | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-client/src/main/java/org/apache/gobblin/util/limiter/stressTest/RandomRuntimeStressor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.limiter.stressTest;
import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import org.apache.gobblin.util.limiter.Limiter;
import lombok.extern.slf4j.Slf4j;
/**
 * A {@link Stressor} that repeatedly requests 1 permit from the {@link Limiter} for a random number of seconds
 * between 1 and 180.
 */
@Slf4j
public class RandomRuntimeStressor extends RandomDelayStartStressor {

  @Override
  public void configure(Configuration configuration) {
    // No stressor-specific configuration.
    // NOTE(review): intentionally does not call super.configure — confirm the parent's
    // configuration step is not required here.
  }

  @Override
  public void doRun(Limiter limiter) throws InterruptedException {
    // Choose a random whole number of seconds in [1, 180], then request one permit at a
    // time, as fast as the limiter allows, until that much wall-clock time has elapsed.
    long durationMillis = (new Random().nextInt(180) + 1) * 1000L;
    long deadline = System.currentTimeMillis() + durationMillis;
    while (System.currentTimeMillis() < deadline) {
      limiter.acquirePermits(1);
    }
  }
}
| 1,685 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-client/src/main/java/org/apache/gobblin/util/limiter | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-client/src/main/java/org/apache/gobblin/util/limiter/stressTest/MRStressTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.limiter.stressTest;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.apache.commons.math3.stat.descriptive.DescriptiveStatistics;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.MapContext;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.joda.time.DateTime;
import org.slf4j.Logger;
import com.google.common.base.Optional;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.broker.BrokerConfigurationKeyGenerator;
import org.apache.gobblin.broker.SharedResourcesBrokerFactory;
import org.apache.gobblin.broker.SimpleScopeType;
import org.apache.gobblin.broker.iface.NotConfiguredException;
import org.apache.gobblin.broker.iface.SharedResourcesBroker;
import org.apache.gobblin.restli.SharedRestClientFactory;
import org.apache.gobblin.restli.SharedRestClientKey;
import org.apache.gobblin.util.ExecutorsUtils;
import org.apache.gobblin.util.limiter.Limiter;
import org.apache.gobblin.util.limiter.MultiLimiter;
import org.apache.gobblin.util.limiter.NoopLimiter;
import org.apache.gobblin.util.limiter.RateBasedLimiter;
import org.apache.gobblin.util.limiter.RestliLimiterFactory;
import org.apache.gobblin.util.limiter.broker.SharedLimiterKey;
import lombok.Data;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
/**
* An MR job to test the performance of throttling.
*
* Each mapper runs a {@link Stressor}, which uses an {@link AtomicLong} to record its progress, and a {@link Limiter}
 * to throttle its progress. Different {@link Stressor}s might produce different usage patterns.
*
* The mappers emit a report every 15 seconds with the rate at which the {@link Stressor} is making progress (measured by
* the rate at which the {@link AtomicLong} increases).
*
* The reducer computes the aggregate rate at which all {@link Stressor}s make progress.
*/
@Slf4j
public class MRStressTest {

  // Configuration keys read by the driver and the mappers.
  public static final String USE_THROTTLING_SERVER = "stressTest.useThrottlingServer";
  public static final String RESOURCE_ID = "stressTest.resourceLimited";
  public static final String LOCALLY_ENFORCED_QPS = "stressTest.localQps";
  public static final String NUM_MAPPERS = "stressTest.num.mappers";
  public static final String DEFAULT_MAPPERS = "10";

  public static final Option NUM_MAPPERS_OPT = new Option("mappers", true, "Num mappers");
  public static final Option THROTTLING_SERVER_URI = new Option("throttling", true, "Throttling server uri");
  public static final Option RESOURCE_ID_OPT = new Option("resource", true, "Resource id for throttling server");
  public static final Option LOCAL_QPS_OPT = new Option("localQps", true, "Locally enforced QPS");

  // NOTE(review): Options.addOption returns the same instance, so this chain also mutates
  // StressTestUtils.OPTIONS — confirm that shared mutation is intended.
  public static final Options OPTIONS = StressTestUtils.OPTIONS.addOption(NUM_MAPPERS_OPT).addOption(THROTTLING_SERVER_URI)
      .addOption(RESOURCE_ID_OPT).addOption(LOCAL_QPS_OPT);

  /**
   * Parses the command line, translates options into job configuration, and submits the MR job.
   * Exits with status 0 on job success, 1 on failure.
   */
  public static void main(String[] args) throws Exception {
    CommandLine cli = StressTestUtils.parseCommandLine(OPTIONS, args);

    Configuration configuration = new Configuration();
    if (cli.hasOption(THROTTLING_SERVER_URI.getOpt())) {
      // Remote throttling requested: record the resource id and inject the server URI into
      // the broker configuration consumed by SharedRestClientFactory in the mappers.
      configuration.setBoolean(USE_THROTTLING_SERVER, true);
      String resourceLimited = cli.getOptionValue(RESOURCE_ID_OPT.getOpt(), "MRStressTest");
      configuration.set(RESOURCE_ID, resourceLimited);
      configuration.set(
          BrokerConfigurationKeyGenerator.generateKey(new SharedRestClientFactory(),
              new SharedRestClientKey(RestliLimiterFactory.RESTLI_SERVICE_NAME),
              null, SharedRestClientFactory.SERVER_URI_KEY), cli.getOptionValue(THROTTLING_SERVER_URI.getOpt()));
    }
    if (cli.hasOption(LOCAL_QPS_OPT.getOpt())) {
      configuration.set(LOCALLY_ENFORCED_QPS, cli.getOptionValue(LOCAL_QPS_OPT.getOpt()));
    }

    Job job = Job.getInstance(configuration, "ThrottlingStressTest");
    // Prefer user classes over Hadoop's, and disable speculative execution so each split
    // runs exactly one stressor instance.
    job.getConfiguration().setBoolean("mapreduce.job.user.classpath.first", true);
    job.getConfiguration().setBoolean("mapreduce.map.speculative", false);
    job.getConfiguration().set(NUM_MAPPERS, cli.getOptionValue(NUM_MAPPERS_OPT.getOpt(), DEFAULT_MAPPERS));
    StressTestUtils.populateConfigFromCli(job.getConfiguration(), cli);

    job.setJarByClass(MRStressTest.class);
    job.setMapperClass(StresserMapper.class);
    job.setReducerClass(AggregatorReducer.class);
    job.setInputFormatClass(MyInputFormat.class);
    job.setOutputKeyClass(LongWritable.class);
    job.setOutputValueClass(DoubleWritable.class);
    // Unique output path per run; output is the reducer's aggregated rate report.
    FileOutputFormat.setOutputPath(job, new Path("/tmp/MRStressTest" + System.currentTimeMillis()));

    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }

  /**
   * Instantiates a {@link Stressor} and runs it until it exits. It also sets up a {@link Recorder} that computes and
   * records the rate at which the {@link AtomicLong} increases every 15 seconds.
   */
  public static class StresserMapper extends Mapper<Text, NullWritable, LongWritable, DoubleWritable> {

    private SharedResourcesBroker<SimpleScopeType> broker;

    @Override
    protected void setup(Context context) throws IOException, InterruptedException {
      // Build a top-level broker from the broker keys carried in the job configuration;
      // it supplies the (possibly remote) shared limiter in createLimiter.
      Map<String, String> configMap = Maps.newHashMap();
      SharedResourcesBrokerFactory.addBrokerKeys(configMap, context.getConfiguration());
      this.broker = SharedResourcesBrokerFactory.createDefaultTopLevelBroker(ConfigFactory.parseMap(configMap),
          SimpleScopeType.GLOBAL.defaultScopeInstance());
      super.setup(context);
    }

    @Override
    protected void map(Text key, NullWritable value, Context context) throws IOException, InterruptedException {
      try {
        Configuration configuration = context.getConfiguration();

        // Instantiate the configured Stressor (default: FixedOperationsStressor).
        Stressor stressor = context.getConfiguration().getClass(StressTestUtils.STRESSOR_CLASS,
            StressTestUtils.DEFAULT_STRESSOR_CLASS, Stressor.class).newInstance();
        stressor.configure(context.getConfiguration());

        // Wrap the limiter so permit rates can be sampled by the Recorder.
        RateComputingLimiterContainer limiterContainer = new RateComputingLimiterContainer();
        Limiter limiter = limiterContainer.decorateLimiter(createLimiter(configuration, this.broker));

        // Emit a rate sample to the collector every 15 seconds while the stressor runs.
        ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
        ScheduledFuture<?> future = executor.scheduleAtFixedRate(new Recorder(limiterContainer, context, true),
            0, 15, TimeUnit.SECONDS);

        limiter.start();
        stressor.run(limiter);
        limiter.stop();

        future.cancel(false);
        ExecutorsUtils.shutdownExecutorService(executor, Optional.<Logger>absent(), 10, TimeUnit.SECONDS);
      } catch (ReflectiveOperationException roe) {
        // Stressor class could not be instantiated; surface as an IOException to fail the task.
        throw new IOException(roe);
      }
    }
  }

  /**
   * Simply adds up the rates for each key. Emits "totalRate\tactiveMappers" per time bucket.
   */
  public static class AggregatorReducer extends Reducer<LongWritable, DoubleWritable, LongWritable, Text> {

    @Override
    protected void reduce(LongWritable key, Iterable<DoubleWritable> values, Context context)
        throws IOException, InterruptedException {
      double totalRate = 0;
      int activeMappers = 0;
      for (DoubleWritable value : values) {
        totalRate += value.get();
        activeMappers++;
      }
      context.write(key, new Text(String.format("%f\t%d", totalRate, activeMappers)));
    }
  }

  /**
   * Input format that just generates {@link #NUM_MAPPERS} dummy splits.
   */
  public static class MyInputFormat extends InputFormat<Text, NullWritable> {

    @Override
    public List<InputSplit> getSplits(JobContext context) throws IOException, InterruptedException {
      // One dummy split per requested mapper; each split drives one stressor.
      int numMappers = context.getConfiguration().getInt(NUM_MAPPERS, 1);
      List<InputSplit> splits = Lists.newArrayList();
      for (int i = 0; i < numMappers; i++) {
        splits.add(new MySplit());
      }
      return splits;
    }

    @Override
    public RecordReader<Text, NullWritable> createRecordReader(InputSplit split, TaskAttemptContext context)
        throws IOException, InterruptedException {
      return new MyRecordReader((MySplit) split);
    }
  }

  /**
   * A dummy {@link InputSplit}. Carries no real data; serialization writes a fixed marker string.
   */
  @Data
  public static class MySplit extends InputSplit implements Writable {

    @Override
    public long getLength() throws IOException, InterruptedException {
      return 1;
    }

    @Override
    public String[] getLocations() throws IOException, InterruptedException {
      // No locality preference.
      return new String[0];
    }

    @Override
    public void write(DataOutput out) throws IOException {
      Text.writeString(out, "split");
    }

    @Override
    public void readFields(DataInput in) throws IOException {
      Text.readString(in);
    }
  }

  /**
   * A dummy {@link RecordReader} that emits a single key-value.
   */
  @RequiredArgsConstructor
  public static class MyRecordReader extends RecordReader<Text, NullWritable> {

    private final MySplit split;
    // Flipped to false after the single record is consumed.
    boolean keyValueAvailable = true;

    @Override
    public void initialize(InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException {
    }

    @Override
    public boolean nextKeyValue() throws IOException, InterruptedException {
      if (!this.keyValueAvailable) {
        return false;
      }
      this.keyValueAvailable = false;
      return true;
    }

    @Override
    public Text getCurrentKey() throws IOException, InterruptedException {
      return new Text("split");
    }

    @Override
    public NullWritable getCurrentValue() throws IOException, InterruptedException {
      return NullWritable.get();
    }

    @Override
    public float getProgress() throws IOException, InterruptedException {
      return 0;
    }

    @Override
    public void close() throws IOException {
    }
  }

  /**
   * A {@link Runnable} that computes the average rate at which the input {@link AtomicLong} increases and emits it to the
   * mapper collector.
   */
  @RequiredArgsConstructor
  private static class Recorder implements Runnable {

    private final RateComputingLimiterContainer limiter;
    private final MapContext<Text, NullWritable, LongWritable, DoubleWritable> context;
    // If true, keys are seconds relative to this recorder's start (15, 30, ...); otherwise
    // keys are epoch seconds rounded down to the containing 15-second boundary.
    private final boolean relativeKey;

    // Starts at -1 and is incremented before use. The very first run produces no stats
    // (the container has no prior report time), so the first emitted relative key is 15.
    private int runs = -1;

    @Override
    public void run() {
      DescriptiveStatistics stats = this.limiter.getRateStatsSinceLastReport();
      long now = System.currentTimeMillis();
      this.runs++;

      if (stats != null) {
        long key;
        if (this.relativeKey) {
          key = 15 * this.runs;
        } else {
          // Round down to the current 15-second boundary within the minute.
          DateTime nowTime = new DateTime(now).withMillisOfSecond(0);
          DateTime rounded = nowTime.withSecondOfMinute(15 * (nowTime.getSecondOfMinute() / 15));
          key = rounded.getMillis() / 1000;
        }

        try {
          this.context.write(new LongWritable(key), new DoubleWritable(stats.getSum()));
        } catch (IOException | InterruptedException ioe) {
          // Best-effort reporting: log and keep the scheduled task alive.
          log.error("Error: ", ioe);
        }
      }
    }
  }

  /**
   * Builds the limiter stack for a mapper: a {@link NoopLimiter}, optionally combined with a
   * local rate-based limiter ({@link #LOCALLY_ENFORCED_QPS} > 0) and/or the remote
   * throttling-service limiter ({@link #USE_THROTTLING_SERVER}) obtained from the broker.
   */
  static Limiter createLimiter(Configuration configuration, SharedResourcesBroker<SimpleScopeType> broker) {
    try {
      Limiter limiter = new NoopLimiter();

      long localQps = configuration.getLong(LOCALLY_ENFORCED_QPS, 0);
      if (localQps > 0) {
        log.info("Setting up local qps " + localQps);
        limiter = new MultiLimiter(limiter, new RateBasedLimiter(localQps));
      }

      if (configuration.getBoolean(USE_THROTTLING_SERVER, false)) {
        log.info("Setting up remote throttling.");
        String resourceId = configuration.get(RESOURCE_ID);
        Limiter globalLimiter =
            broker.getSharedResource(new RestliLimiterFactory<SimpleScopeType>(), new SharedLimiterKey(resourceId));
        limiter = new MultiLimiter(limiter, globalLimiter);
      }

      return limiter;
    } catch (NotConfiguredException nce) {
      // The broker could not supply the shared limiter; fail the task setup.
      throw new RuntimeException(nce);
    }
  }
}
| 1,686 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-client/src/main/java/org/apache/gobblin/util/limiter | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-client/src/main/java/org/apache/gobblin/util/limiter/stressTest/RateComputingLimiterContainer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.limiter.stressTest;
import java.io.Closeable;
import java.util.Collection;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Queue;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.commons.math3.stat.descriptive.DescriptiveStatistics;
import com.google.common.base.Function;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import javax.annotation.Nullable;
import lombok.AccessLevel;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.util.Decorator;
import org.apache.gobblin.util.limiter.Limiter;
import org.apache.gobblin.util.limiter.RestliServiceBasedLimiter;
/**
* Used to compute statistics on a set of {@link Limiter}s used in a throttling service stress test.
*/
@Slf4j
@RequiredArgsConstructor
public class RateComputingLimiterContainer {

  // One counter per currently-running decorated limiter: added on Limiter.start(),
  // removed on Limiter.stop().
  // NOTE(review): this ArrayList is mutated by stressor threads (start/stop) while the
  // reporting thread reads it via getRateStatsSinceLastReport() — confirm external
  // synchronization, or accept the race for stress-test purposes.
  private final List<AtomicLong> subLimiterPermitCounts = Lists.newArrayList();
  // Unused-permit counts harvested from RestliServiceBasedLimiter instances at stop().
  private final Queue<Long> unusedPermitsCounts = new LinkedList<>();
  // Per-metric timestamp of the previous report, used to normalize counts into rates.
  private Map<String, Long> lastReportTimes = Maps.newHashMap();

  /**
   * Decorate a {@link Limiter} to measure its permit rate for statistics computation.
   */
  public Limiter decorateLimiter(Limiter limiter) {
    AtomicLong localCount = new AtomicLong();
    return new RateComputingLimiterDecorator(limiter, localCount);
  }

  /**
   * A {@link Limiter} decorator that records all permits granted.
   */
  @RequiredArgsConstructor(access = AccessLevel.PRIVATE)
  public class RateComputingLimiterDecorator implements Limiter, Decorator {

    private final Limiter underlying;
    private final AtomicLong localPermitCount;

    @Override
    public Object getDecoratedObject() {
      return this.underlying;
    }

    @Override
    public void start() {
      this.underlying.start();
      // Register this limiter's counter so the container includes it in rate reports.
      RateComputingLimiterContainer.this.subLimiterPermitCounts.add(this.localPermitCount);
    }

    @Override
    public Closeable acquirePermits(long permits) throws InterruptedException {
      // Count the permits after the underlying limiter grants them.
      Closeable closeable = this.underlying.acquirePermits(permits);
      this.localPermitCount.addAndGet(permits);
      return closeable;
    }

    @Override
    public void stop() {
      this.underlying.stop();
      // For remote limiters, also record how many granted permits went unused.
      if (this.underlying instanceof RestliServiceBasedLimiter) {
        RestliServiceBasedLimiter restliLimiter = (RestliServiceBasedLimiter) this.underlying;
        RateComputingLimiterContainer.this.unusedPermitsCounts.add(restliLimiter.getUnusedPermits());
        log.info("Unused permits: " + restliLimiter.getUnusedPermits());
      }
      // Deregister so a stopped limiter no longer contributes to rate statistics.
      RateComputingLimiterContainer.this.subLimiterPermitCounts.remove(this.localPermitCount);
    }
  }

  /**
   * Get a {@link DescriptiveStatistics} object with the rate of permit granting for all {@link Limiter}s decorated
   * with this {@link RateComputingLimiterContainer}. Resets each counter to 0 as it is read.
   * Returns {@code null} on the first call (no prior report time to normalize against).
   */
  public @Nullable DescriptiveStatistics getRateStatsSinceLastReport() {
    return getNormalizedStatistics("seenQPS", Lists.transform(this.subLimiterPermitCounts, new Function<AtomicLong, Double>() {
      @Override
      public Double apply(AtomicLong atomicLong) {
        // getAndSet(0) both reads and resets, making each report cover only the interval
        // since the previous one.
        return (double) atomicLong.getAndSet(0);
      }
    }));
  }

  /**
   * Statistics over the unused-permit counts collected since the last call; {@code null} on the
   * first call. Clears the collected counts.
   */
  public @Nullable DescriptiveStatistics getUnusedPermitsSinceLastReport() {
    DescriptiveStatistics stats = getNormalizedStatistics("unusedPermits", this.unusedPermitsCounts);
    this.unusedPermitsCounts.clear();
    return stats;
  }

  // Normalizes raw counts into per-second rates over the interval since the last report for
  // the given metric key. Returns null when there is no previous report (deltaTime == 0).
  private @Nullable DescriptiveStatistics getNormalizedStatistics(String key, Collection<? extends Number> values) {
    long now = System.currentTimeMillis();

    long deltaTime = 0;
    if (this.lastReportTimes.containsKey(key)) {
      deltaTime = now - this.lastReportTimes.get(key);
    }
    this.lastReportTimes.put(key, now);

    if (deltaTime == 0) {
      return null;
    }

    double[] normalizedValues = new double[values.size()];
    int i = 0;
    for (Number value : values) {
      // count / (deltaTime ms) * 1000 => count per second.
      normalizedValues[i++] = 1000 * value.doubleValue() / deltaTime;
    }
    return new DescriptiveStatistics(normalizedValues);
  }
}
| 1,687 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-client/src/main/java/org/apache/gobblin/util/limiter | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-client/src/main/java/org/apache/gobblin/util/limiter/stressTest/FixedOperationsStressor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.limiter.stressTest;
import org.apache.hadoop.conf.Configuration;
import org.apache.gobblin.util.limiter.Limiter;
/**
* A {@link Stressor} that performs a fixed number of permit requests to the {@link Limiter} without pausing.
*/
public class FixedOperationsStressor extends RandomDelayStartStressor {

  /** Configuration key for the number of permit requests to issue. */
  public static final String OPS_TO_RUN = "fixedOperationsStressor.opsToRun";
  /** Number of requests issued when {@link #OPS_TO_RUN} is not set. */
  public static final int DEFAULT_OPS_TARGET = 200;

  private int opsTarget;

  @Override
  public void configure(Configuration configuration) {
    super.configure(configuration);
    this.opsTarget = configuration.getInt(OPS_TO_RUN, DEFAULT_OPS_TARGET);
  }

  @Override
  public void doRun(Limiter limiter) throws InterruptedException {
    // Issue exactly opsTarget single-permit requests back-to-back, with no pause
    // other than what the limiter itself imposes.
    for (int completed = 0; completed < this.opsTarget; completed++) {
      limiter.acquirePermits(1);
    }
  }
}
| 1,688 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-client/src/main/java/org/apache/gobblin/util/limiter | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-client/src/main/java/org/apache/gobblin/util/limiter/stressTest/Stressor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.limiter.stressTest;
import org.apache.hadoop.conf.Configuration;
import org.apache.gobblin.util.limiter.Limiter;
/**
* An experiment for throttling service stress test. A {@link Stressor} should generate a pattern of requests to the input
* {@link Limiter}.
*/
public interface Stressor {

  /**
   * Configure this stressor from the Hadoop {@link Configuration}; called before
   * {@link #run(Limiter)}.
   */
  void configure(Configuration configuration);

  /**
   * Generate this stressor's request pattern against the input {@link Limiter}, returning
   * when the pattern is complete.
   *
   * @throws InterruptedException if interrupted while waiting on the limiter.
   */
  void run(Limiter limiter) throws InterruptedException;
}
| 1,689 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-api/src/main/java/org/apache/gobblin/restli | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-api/src/main/java/org/apache/gobblin/restli/throttling/ThrottlingProtocolVersion.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.restli.throttling;
/**
* Versions of the Throttling service protocol. Allows the server to know what the client understands, and to adjust
* the response based on the client version. Only add new versions at the end.
*/
public enum ThrottlingProtocolVersion {
  /** Base version of throttling server. */
  BASE,
  /** Clients at this level know to wait before distributing permits allocated to them. */
  WAIT_ON_CLIENT
  // NOTE(review): the enum ordinal appears to serve as the wire value of the version
  // (clients call setVersion(version.ordinal()) on permit requests), which is why new
  // versions must only be appended — confirm against the client/server request handling.
}
| 1,690 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-server/src/test/java/org/apache/gobblin/restli | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-server/src/test/java/org/apache/gobblin/restli/throttling/PoliciesResourceTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.restli.throttling;
import java.util.Map;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.inject.Injector;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.broker.BrokerConfigurationKeyGenerator;
import org.apache.gobblin.util.limiter.broker.SharedLimiterKey;
public class PoliciesResourceTest {

  /**
   * Boots a throttling-server Guice context configured with a single count-based policy
   * (100 permits for resource "res1") and verifies the policies endpoint reports it back.
   */
  @Test
  public void test() {
    ThrottlingGuiceServletConfig guiceServletConfig = new ThrottlingGuiceServletConfig();

    ThrottlingPolicyFactory factory = new ThrottlingPolicyFactory();
    SharedLimiterKey res1key = new SharedLimiterKey("res1");

    // Broker-style configuration: select the count-based policy for res1 and cap it at 100.
    Map<String, String> configMap = com.google.common.collect.ImmutableMap.<String, String>builder()
        .put(BrokerConfigurationKeyGenerator.generateKey(factory, res1key, null, ThrottlingPolicyFactory.POLICY_KEY),
            CountBasedPolicy.FACTORY_ALIAS)
        .put(BrokerConfigurationKeyGenerator.generateKey(factory, res1key, null, CountBasedPolicy.COUNT_KEY), "100")
        .build();
    guiceServletConfig.initialize(ConfigFactory.parseMap(configMap));
    Injector injector = guiceServletConfig.getInjector();

    PoliciesResource policiesResource = injector.getInstance(PoliciesResource.class);

    // The endpoint should echo the configured policy name, resource, and parameters.
    Policy policy = policiesResource.get("res1");

    Assert.assertEquals(policy.getPolicyName(), CountBasedPolicy.class.getSimpleName());
    Assert.assertEquals(policy.getResource(), "res1");
    Assert.assertEquals(policy.getParameters().get("maxPermits"), "100");

    guiceServletConfig.close();
  }
}
| 1,691 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-server/src/test/java/org/apache/gobblin/restli | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-server/src/test/java/org/apache/gobblin/restli/throttling/LimiterServerResourceTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.restli.throttling;
import java.util.Map;
import org.apache.gobblin.broker.iface.SharedResourcesBroker;
import org.apache.gobblin.util.Sleeper;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.codahale.metrics.Timer;
import com.google.inject.Injector;
import com.linkedin.data.template.GetMode;
import com.linkedin.restli.common.ComplexResourceKey;
import com.linkedin.restli.common.EmptyRecord;
import com.linkedin.restli.common.HttpStatus;
import com.linkedin.restli.server.RestLiServiceException;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.broker.BrokerConfigurationKeyGenerator;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.util.limiter.broker.SharedLimiterKey;
import com.google.common.collect.ImmutableMap;
/**
 * Tests for {@link LimiterServerResource}: basic permit granting, delegation of waits to the
 * client depending on the protocol version, exhaustion of count-limited resources, and request
 * metrics.
 */
public class LimiterServerResourceTest {

  @Test
  public void test() {
    ThrottlingGuiceServletConfig guiceServletConfig = new ThrottlingGuiceServletConfig();
    guiceServletConfig.initialize(ConfigFactory.empty());
    Injector injector = guiceServletConfig.getInjector();
    LimiterServerResource limiterServer = injector.getInstance(LimiterServerResource.class);

    PermitRequest request = new PermitRequest();
    request.setPermits(10);
    request.setResource("myResource");

    // With no policy configured for the resource, the server grants at least the requested permits.
    PermitAllocation allocation = limiterServer.getSync(new ComplexResourceKey<>(request, new EmptyRecord()));
    Assert.assertTrue(allocation.getPermits() >= 10);
  }

  @Test
  public void testSleepOnClientDelegation() {
    ThrottlingPolicyFactory factory = new ThrottlingPolicyFactory();
    SharedLimiterKey res1key = new SharedLimiterKey("res1");

    // res1 uses TestWaitPolicy: it requests a 10 millis sleep for any request of more than 10 permits.
    Map<String, String> configMap = ImmutableMap.<String, String>builder()
        .put(BrokerConfigurationKeyGenerator.generateKey(factory, res1key, null, ThrottlingPolicyFactory.POLICY_KEY),
            TestWaitPolicy.class.getName())
        .build();

    ThrottlingGuiceServletConfig guiceServletConfig = new ThrottlingGuiceServletConfig();
    Sleeper.MockSleeper sleeper = guiceServletConfig.mockSleeper();
    guiceServletConfig.initialize(ConfigFactory.parseMap(configMap));
    Injector injector = guiceServletConfig.getInjector();
    LimiterServerResource limiterServer = injector.getInstance(LimiterServerResource.class);

    PermitRequest request = new PermitRequest();
    request.setPermits(5);
    request.setResource(res1key.getResourceLimitedPath());
    request.setVersion(ThrottlingProtocolVersion.BASE.ordinal());

    // policy does not require sleep, verify no sleep happened or is requested from client
    PermitAllocation allocation = limiterServer.getSync(new ComplexResourceKey<>(request, new EmptyRecord()));
    Assert.assertEquals((long) allocation.getPermits(), 5);
    Assert.assertEquals((long) allocation.getWaitForPermitUseMillis(GetMode.DEFAULT), 0);
    Assert.assertTrue(sleeper.getRequestedSleeps().isEmpty());

    // policy requests a sleep of 10 millis, using BASE protocol version, verify server executes the sleep
    request.setPermits(20);
    request.setVersion(ThrottlingProtocolVersion.BASE.ordinal());
    allocation = limiterServer.getSync(new ComplexResourceKey<>(request, new EmptyRecord()));
    Assert.assertEquals((long) allocation.getPermits(), 20);
    Assert.assertEquals((long) allocation.getWaitForPermitUseMillis(GetMode.DEFAULT), 0);
    Assert.assertEquals((long) sleeper.getRequestedSleeps().peek(), 10);
    sleeper.reset();

    // policy requests a sleep of 10 millis, using WAIT_ON_CLIENT protocol version, verify server delegates sleep to client
    request.setVersion(ThrottlingProtocolVersion.WAIT_ON_CLIENT.ordinal());
    request.setPermits(20);
    allocation = limiterServer.getSync(new ComplexResourceKey<>(request, new EmptyRecord()));
    Assert.assertEquals((long) allocation.getPermits(), 20);
    Assert.assertEquals((long) allocation.getWaitForPermitUseMillis(GetMode.DEFAULT), 10);
    Assert.assertTrue(sleeper.getRequestedSleeps().isEmpty());
  }

  @Test
  public void testLimitedRequests() {
    ThrottlingPolicyFactory factory = new ThrottlingPolicyFactory();
    SharedLimiterKey res1key = new SharedLimiterKey("res1");
    SharedLimiterKey res2key = new SharedLimiterKey("res2");

    // res1 allows 100 permits in total, res2 allows 50; res3 is left unconfigured.
    Map<String, String> configMap = ImmutableMap.<String, String>builder()
        .put(BrokerConfigurationKeyGenerator.generateKey(factory, res1key, null, ThrottlingPolicyFactory.POLICY_KEY),
            CountBasedPolicy.FACTORY_ALIAS)
        .put(BrokerConfigurationKeyGenerator.generateKey(factory, res1key, null, CountBasedPolicy.COUNT_KEY), "100")
        .put(BrokerConfigurationKeyGenerator.generateKey(factory, res2key, null, ThrottlingPolicyFactory.POLICY_KEY),
            CountBasedPolicy.FACTORY_ALIAS)
        .put(BrokerConfigurationKeyGenerator.generateKey(factory, res2key, null, CountBasedPolicy.COUNT_KEY), "50")
        .build();

    ThrottlingGuiceServletConfig guiceServletConfig = new ThrottlingGuiceServletConfig();
    guiceServletConfig.initialize(ConfigFactory.parseMap(configMap));
    Injector injector = guiceServletConfig.getInjector();
    LimiterServerResource limiterServer = injector.getInstance(LimiterServerResource.class);

    PermitRequest res1request = new PermitRequest();
    res1request.setPermits(20);
    res1request.setResource(res1key.getResourceLimitedPath());

    PermitRequest res2request = new PermitRequest();
    res2request.setPermits(20);
    res2request.setResource(res2key.getResourceLimitedPath());

    PermitRequest res3request = new PermitRequest();
    res3request.setPermits(100000);
    res3request.setResource("res3");

    // res1 has 100 permits available, so exactly five requests of 20 succeed.
    for (int i = 0; i < 5; i++) {
      Assert.assertEquals(
          limiterServer.getSync(new ComplexResourceKey<>(res1request, new EmptyRecord())).getPermits(),
          Long.valueOf(20));
    }
    try {
      // out of permits
      limiterServer.getSync(new ComplexResourceKey<>(res1request, new EmptyRecord())).getPermits();
      Assert.fail();
    } catch (RestLiServiceException exc) {
      Assert.assertEquals(exc.getStatus(), HttpStatus.S_403_FORBIDDEN);
    }

    // res2 has 50 permits available, so only two requests of 20 succeed.
    for (int i = 0; i < 2; i++) {
      Assert.assertEquals(
          limiterServer.getSync(new ComplexResourceKey<>(res2request, new EmptyRecord())).getPermits(),
          Long.valueOf(20));
    }
    try {
      // out of permits
      limiterServer.getSync(new ComplexResourceKey<>(res2request, new EmptyRecord())).getPermits();
      Assert.fail();
    } catch (RestLiServiceException exc) {
      Assert.assertEquals(exc.getStatus(), HttpStatus.S_403_FORBIDDEN);
    }

    // No limit configured for res3, so even a huge request is granted.
    Assert.assertTrue(
        limiterServer.getSync(new ComplexResourceKey<>(res3request, new EmptyRecord())).getPermits() >= res3request.getPermits());
  }

  @Test
  public void testMetrics() throws Exception {
    ThrottlingGuiceServletConfig guiceServletConfig = new ThrottlingGuiceServletConfig();
    guiceServletConfig.initialize(ConfigFactory.empty());
    Injector injector = guiceServletConfig.getInjector();
    LimiterServerResource limiterServer = injector.getInstance(LimiterServerResource.class);

    PermitRequest request = new PermitRequest();
    request.setPermits(10);
    request.setResource("myResource");

    // Three requests must be reflected in the request timer.
    limiterServer.getSync(new ComplexResourceKey<>(request, new EmptyRecord()));
    limiterServer.getSync(new ComplexResourceKey<>(request, new EmptyRecord()));
    limiterServer.getSync(new ComplexResourceKey<>(request, new EmptyRecord()));

    MetricContext metricContext = limiterServer.metricContext;
    Timer timer = metricContext.timer(LimiterServerResource.REQUEST_TIMER_NAME);
    Assert.assertEquals(timer.getCount(), 3);
  }

  /**
   * Test policy that grants exactly the requested permits and asks for a 10 millis wait whenever
   * more than 10 permits are requested. Acts as its own factory.
   */
  public static class TestWaitPolicy implements ThrottlingPolicy, ThrottlingPolicyFactory.SpecificPolicyFactory {
    @Override
    public PermitAllocation computePermitAllocation(PermitRequest request) {
      PermitAllocation allocation = new PermitAllocation();
      allocation.setPermits(request.getPermits());
      if (request.getPermits() > 10) {
        allocation.setWaitForPermitUseMillis(10);
      }
      return allocation;
    }

    @Override
    public Map<String, String> getParameters() {
      return null;
    }

    @Override
    public String getDescription() {
      return null;
    }

    @Override
    public ThrottlingPolicy createPolicy(SharedLimiterKey sharedLimiterKey,
        SharedResourcesBroker<ThrottlingServerScopes> broker, Config config) {
      return this;
    }
  }
}
| 1,692 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-server/src/test/java/org/apache/gobblin/restli | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-server/src/test/java/org/apache/gobblin/restli/throttling/DynamicTokenBucketTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.restli.throttling;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
 * Unit tests for {@link DynamicTokenBucket}.
 */
public class DynamicTokenBucketTest {

  @Test
  public void test() throws Exception {
    int qps = 10;
    DynamicTokenBucket bucket = new DynamicTokenBucket(qps, 10, 0);

    // Asking for 10 seconds worth of permits with only a 1 second timeout must be rejected.
    Assert.assertEquals(bucket.getPermits(10 * qps, 10 * qps, 1000), 0);

    // 0.2 seconds worth of permits fits comfortably inside a 300 millis timeout.
    long wanted = qps / 5;
    Assert.assertEquals(bucket.getPermits(wanted, wanted, 300), wanted);

    // A full second's worth with a 0.1 second minimum and a 200 millis timeout must
    // yield at least the minimum.
    wanted = qps;
    Assert.assertTrue(bucket.getPermits(wanted, qps / 10, 200) >= qps / 10);
  }

  @Test
  public void testDelegateSleep() throws Exception {
    int qps = 10;
    DynamicTokenBucket bucket = new DynamicTokenBucket(qps, 10, 0);

    long before = System.currentTimeMillis();
    // Ask for 10 seconds worth of permits under a generous 20 second timeout.
    DynamicTokenBucket.PermitsAndDelay permitsAndDelay = bucket.getPermitsAndDelay(10 * qps, 10 * qps, 20000);
    long tookMillis = System.currentTimeMillis() - before;

    // The call itself must not block...
    Assert.assertTrue(tookMillis < 1000);
    // ...instead all permits are granted up front with a delay of roughly 10 seconds
    // for the caller to honor.
    Assert.assertEquals(permitsAndDelay.getPermits(), 10 * qps);
    Assert.assertTrue(permitsAndDelay.getDelay() > 9000 && permitsAndDelay.getDelay() < 11000);
  }

  @Test
  public void testEagerGrantingIfUnderused() throws Exception {
    int qps = 100;
    DynamicTokenBucket bucket = new DynamicTokenBucket(qps, 10, 100);

    Thread.sleep(100); // let the bucket fill up
    // An underused bucket hands out more permits than were actually requested.
    Assert.assertTrue(bucket.getPermits(1, 0, 100) > 4);
  }
}
| 1,693 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-server/src/test/java/org/apache/gobblin/restli | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-server/src/test/java/org/apache/gobblin/restli/throttling/ConfigStoreBasedPolicyTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.restli.throttling;
import java.net.URL;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.broker.SharedResourcesBrokerFactory;
import org.apache.gobblin.broker.iface.SharedResourcesBroker;
import org.apache.gobblin.util.limiter.broker.SharedLimiterKey;
import com.google.common.collect.ImmutableMap;
/**
 * Verifies that {@link ConfigClientBasedPolicyFactory} resolves the correct
 * {@link ThrottlingPolicy} for resources defined in a config store.
 */
public class ConfigStoreBasedPolicyTest {

  @Test
  public void test() throws Exception {
    URL storeLocation = getClass().getResource("/configStore");

    Config factoryConfig = ConfigFactory.parseMap(ImmutableMap.of(
        ConfigClientBasedPolicyFactory.CONFIG_KEY_URI_PREFIX_KEY, "simple-" + storeLocation.toString()
    ));

    SharedResourcesBroker<ThrottlingServerScopes> broker = SharedResourcesBrokerFactory.createDefaultTopLevelBroker(
        ConfigFactory.empty(), ThrottlingServerScopes.GLOBAL.defaultScopeInstance());

    ConfigClientBasedPolicyFactory factory = new ConfigClientBasedPolicyFactory();

    // resource1 is configured in the store as a QPS policy with qps = 100
    ThrottlingPolicy policy =
        factory.createPolicy(new SharedLimiterKey("ConfigBasedPolicyTest/resource1"), broker, factoryConfig);
    Assert.assertEquals(policy.getClass(), QPSPolicy.class);
    Assert.assertEquals(((QPSPolicy) policy).getQps(), 100);

    // resource2 is configured in the store as a count based policy with count = 50
    policy = factory.createPolicy(new SharedLimiterKey("ConfigBasedPolicyTest/resource2"), broker, factoryConfig);
    Assert.assertEquals(policy.getClass(), CountBasedPolicy.class);
    Assert.assertEquals(((CountBasedPolicy) policy).getCount(), 50);
  }
}
| 1,694 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-server/src/test/java/org/apache/gobblin/restli | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-server/src/test/java/org/apache/gobblin/restli/throttling/TokenBucketTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.restli.throttling;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.collect.Lists;
import lombok.AllArgsConstructor;
/**
 * Tests for {@link TokenBucket}, verifying timeout semantics and that concurrent consumers are
 * collectively limited to the configured QPS.
 */
public class TokenBucketTest {

  @Test
  public void testSmallQps() throws Exception {
    testForQps(100);
  }

  @Test
  public void testLargeQps() throws Exception {
    testForQps((long) 1e10);
  }

  @Test
  public void testTimeout() throws Exception {
    TokenBucket tokenBucket = new TokenBucket(100, 0);
    // If it cannot satisfy the request within the timeout, return false immediately
    Assert.assertFalse(tokenBucket.getTokens(100, 1, TimeUnit.MILLISECONDS));
    Assert.assertFalse(tokenBucket.getTokens(100, 10, TimeUnit.MILLISECONDS));
    Assert.assertFalse(tokenBucket.getTokens(100, 100, TimeUnit.MILLISECONDS));
    Assert.assertTrue(tokenBucket.getTokens(10, 101, TimeUnit.MILLISECONDS));
    // Can use stored tokens to satisfy request
    tokenBucket = new TokenBucket(100, 100);
    Thread.sleep(200); // fill up bucket
    Assert.assertTrue(tokenBucket.getTokens(20, 101, TimeUnit.MILLISECONDS));
  }

  /**
   * Submits 10 concurrent consumers, each requesting one tenth of a second's worth of tokens,
   * and checks the observed aggregate rate stays within 20% of {@code qps}.
   */
  private void testForQps(long qps) throws Exception {
    ExecutorService executorService = Executors.newFixedThreadPool(10);
    try {
      TokenBucket tokenBucket = new TokenBucket(qps, 1000);
      List<Future<Boolean>> futures = Lists.newArrayList();
      long permitsPerRequest = qps / 10;
      long start = System.currentTimeMillis();
      // Previously 10 copy-pasted submit calls; a loop is equivalent and clearer.
      for (int i = 0; i < 10; i++) {
        futures.add(executorService.submit(new MyRunnable(tokenBucket, permitsPerRequest, 1000)));
      }
      for (Future<Boolean> future : futures) {
        Assert.assertTrue(future.get());
      }
      long end = System.currentTimeMillis();
      double averageRate = 1000 * (double) (permitsPerRequest * futures.size()) / (end - start);
      // The failure message used to hardcode "expected: 100" regardless of the actual qps.
      Assert.assertTrue(Math.abs(averageRate - qps) / qps < 0.2,
          "Average rate: " + averageRate + " expected: " + qps);
    } finally {
      // Make sure worker threads do not outlive the test.
      executorService.shutdownNow();
    }
  }

  /** Task that blocks for up to {@code timeoutMillis} trying to obtain {@code tokens} tokens. */
  @AllArgsConstructor
  public static class MyRunnable implements Callable<Boolean> {
    private final TokenBucket tokenBucket;
    private final long tokens;
    private final long timeoutMillis;

    @Override
    public Boolean call() {
      try {
        return this.tokenBucket.getTokens(this.tokens, this.timeoutMillis, TimeUnit.MILLISECONDS);
      } catch (InterruptedException ie) {
        // Restore the interrupt status before surfacing the failure to the executor.
        Thread.currentThread().interrupt();
        throw new RuntimeException(ie);
      }
    }
  }
}
| 1,695 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-server/src/test/java/org/apache/gobblin/restli | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-server/src/test/java/org/apache/gobblin/restli/throttling/TestFailover.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.restli.throttling;
import java.net.URI;
import java.util.Map;
import org.apache.curator.test.TestingServer;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
import com.google.common.io.Closer;
import com.linkedin.restli.common.ComplexResourceKey;
import com.linkedin.restli.common.EmptyRecord;
import com.linkedin.restli.server.RestLiServiceException;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
/**
 * Tests leader-based failover between two throttling servers that share a Zookeeper-backed
 * cluster: the leader serves permit requests, the follower redirects to the leader with a
 * 301-style error detail, and the follower takes over once the leader shuts down.
 */
public class TestFailover {

  @Test
  public void test() throws Exception {
    try(Closer closer = Closer.create()) {
      Map<String, String> configMap = Maps.newHashMap();

      TestingServer zkTestingServer = closer.register(new TestingServer(-1));
      configMap.put(ThrottlingGuiceServletConfig.ZK_STRING_KEY, zkTestingServer.getConnectString());
      configMap.put(ThrottlingGuiceServletConfig.HA_CLUSTER_NAME, TestFailover.class.getSimpleName() + "_cluster");
      Config config = ConfigFactory.parseMap(configMap);

      // First server joins the cluster alone and serves requests.
      ThrottlingGuiceServletConfig server2001 = createServerAtPort(config, 2001);
      PermitAllocation allocation = sendRequestToServer(server2001, 10);
      Assert.assertTrue(allocation.getPermits() >= 1);

      // Second server joins; the original leader keeps serving.
      ThrottlingGuiceServletConfig server2002 = createServerAtPort(config, 2002);
      allocation = sendRequestToServer(server2001, 10);
      Assert.assertTrue(allocation.getPermits() >= 1);

      try {
        // The non-leader must redirect to the leader instead of serving the request itself.
        sendRequestToServer(server2002, 10);
        Assert.fail();
      } catch (RestLiServiceException exc) {
        Assert.assertTrue(exc.hasErrorDetails());
        Assert.assertTrue(exc.getErrorDetails().containsKey(LimiterServerResource.LOCATION_301));
        Assert.assertEquals(new URI(exc.getErrorDetails().get(LimiterServerResource.LOCATION_301).toString()).getPort(), 2001);
      }

      // After the leader goes away, the remaining server takes over leadership.
      server2001.close();
      allocation = sendRequestToServer(server2002, 10);
      Assert.assertTrue(allocation.getPermits() >= 1);

      // Previously leaked: the second server was never shut down.
      server2002.close();
    }
  }

  /** Starts a throttling server listening at {@code port} on top of {@code baseConfig}. */
  private ThrottlingGuiceServletConfig createServerAtPort(Config baseConfig, int port) {
    ThrottlingGuiceServletConfig guiceServletConfig = new ThrottlingGuiceServletConfig();
    guiceServletConfig.initialize(baseConfig.withFallback(ConfigFactory.parseMap(
        ImmutableMap.of(ThrottlingGuiceServletConfig.LISTENING_PORT, port))));
    return guiceServletConfig;
  }

  /** Requests {@code permits} permits from the given server. */
  private PermitAllocation sendRequestToServer(ThrottlingGuiceServletConfig guiceServletConfig, long permits) {
    // Fix: the permits argument was previously ignored in favor of a hard-coded 10.
    return guiceServletConfig.getLimiterResource()
        .getSync(new ComplexResourceKey<>(createPermitRequest(permits), new EmptyRecord()));
  }

  /** Builds a {@link PermitRequest} for the shared "test" resource. */
  private PermitRequest createPermitRequest(long permits) {
    PermitRequest request = new PermitRequest();
    request.setPermits(permits);
    request.setRequestorIdentifier("requestor");
    request.setResource("test");
    return request;
  }
}
| 1,696 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-server/src/main/java/org/apache/gobblin/restli | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-server/src/main/java/org/apache/gobblin/restli/throttling/ZookeeperLeaderElection.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.restli.throttling;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.retry.ExponentialBackoffRetry;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.data.Stat;
import com.google.common.util.concurrent.AbstractIdleService;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.util.SerializationUtils;
/**
 * A {@link LeaderFinder} using Zookeeper.
 *
 * Follows the standard Zookeeper leader-election recipe: each participant creates an ephemeral
 * sequential node under a shared election path; the participant owning the lowest sequence number
 * is the leader and publishes its metadata to a well-known leader node, while every other
 * participant watches its immediate predecessor so that leadership is re-evaluated when that
 * predecessor disappears.
 */
@Slf4j
public class ZookeeperLeaderElection<T extends LeaderFinder.Metadata> extends AbstractIdleService implements LeaderFinder<T> {
  // Path under which each participant creates its ephemeral sequential node.
  private final String leaderElectionNode;
  // Well-known node where the current leader publishes its serialized metadata.
  private final String leaderNode;
  private final T localMetadata;
  private final String zkConnectString;
  // Recreated from scratch by reset() whenever a KeeperException is encountered.
  private CuratorFramework zooKeeper;
  // Sequence-suffixed name of this participant's ephemeral node (assigned in reset()).
  private String nodeId;
  // Guarded by readWriteLock.
  private boolean isLeader;
  // Guarded by readWriteLock.
  private T leaderMetadata;
  // Set on unrecoverable errors; all subsequent queries throw IllegalStateException.
  private volatile boolean fatalFailure = false;
  private ReentrantReadWriteLock readWriteLock = new ReentrantReadWriteLock();
  /**
   * @param zkConnectString Zookeeper connect string.
   * @param clusterName Cluster name. Processes in the same cluster are identified by the cluster name.
   * @param localMetadata {@link org.apache.gobblin.restli.throttling.LeaderFinder.Metadata} for the local process.
   * @throws IOException
   */
  public ZookeeperLeaderElection(String zkConnectString, String clusterName, T localMetadata) throws IOException {
    this.zkConnectString = zkConnectString;
    this.localMetadata = localMetadata;
    // Normalize the cluster name into an absolute ZK path.
    if (!clusterName.startsWith("/")) {
      clusterName = "/" + clusterName;
    }
    this.leaderElectionNode = clusterName + "/leaderElection";
    this.leaderNode = clusterName + "/leader";
  }
  /**
   * Whether this process currently believes it is the leader.
   *
   * @throws IllegalStateException if this instance has failed fatally.
   */
  @Override
  public boolean isLeader() {
    if (this.fatalFailure) {
      throw new IllegalStateException(ZookeeperLeaderElection.class.getSimpleName() + " has failed fatally.");
    }
    ReentrantReadWriteLock.ReadLock lock = this.readWriteLock.readLock();
    lock.lock();
    try {
      return this.isLeader;
    } finally {
      lock.unlock();
    }
  }
  /**
   * Metadata published by the current cluster leader (may be this process's own metadata).
   *
   * @throws IllegalStateException if this instance has failed fatally.
   */
  @Override
  public T getLeaderMetadata() {
    if (this.fatalFailure) {
      throw new IllegalStateException(ZookeeperLeaderElection.class.getSimpleName() + " has failed fatally.");
    }
    ReentrantReadWriteLock.ReadLock lock = this.readWriteLock.readLock();
    lock.lock();
    try {
      return this.leaderMetadata;
    } finally {
      lock.unlock();
    }
  }
  @Override
  public T getLocalMetadata() {
    return this.localMetadata;
  }
  @Override
  protected void startUp() throws Exception {
    // Establishes the ZK session, creates the election node and determines initial leadership.
    reset();
  }
  @Override
  protected void shutDown() throws Exception {
    // Closing the Curator client also removes the ephemeral election node for this participant.
    if (this.zooKeeper != null) {
      this.zooKeeper.close();
    }
  }
  private byte[] serializeMetadata(T metadata) throws IOException {
    return SerializationUtils.serializeIntoBytes(metadata);
  }
  private T deserializeMetadata(byte[] bytes) throws IOException, ClassNotFoundException {
    // Unchecked cast: the payload is written by serializeMetadata(T), so it is expected to hold.
    return (T) SerializationUtils.deserializeFromBytes(bytes, Metadata.class);
  }
  /**
   * (Re)creates the Zookeeper session and this participant's ephemeral sequential election node,
   * then re-evaluates leadership. Any failure here is considered unrecoverable and is rethrown
   * wrapped in a RuntimeException.
   */
  private void reset() {
    ReentrantReadWriteLock.WriteLock lock = this.readWriteLock.writeLock();
    lock.lock();
    try {
      if (this.zooKeeper != null) {
        this.zooKeeper.close();
      }
      this.zooKeeper = CuratorFrameworkFactory.builder().retryPolicy(new ExponentialBackoffRetry(100, 3))
          .connectString(this.zkConnectString).build();
      this.zooKeeper.start();
      if (!this.zooKeeper.blockUntilConnected(1, TimeUnit.SECONDS)) {
        throw new RuntimeException("Could not connect to Zookeeper.");
      }
      // The server assigns a monotonically increasing sequence suffix to the "p_" prefix;
      // the suffix ordering decides leadership.
      String nodePath = this.zooKeeper.create().creatingParentsIfNeeded().withMode(CreateMode.EPHEMERAL_SEQUENTIAL)
          .forPath(this.leaderElectionNode + "/p_");
      this.nodeId = nodePath.substring(nodePath.lastIndexOf("/") + 1);
      determineLeadership();
    } catch (Throwable exc) {
      throw new RuntimeException(exc);
    } finally {
      lock.unlock();
    }
  }
  /**
   * Decides whether this participant is the leader: if its election node sorts first it publishes
   * its metadata to the leader node; otherwise it watches its immediate predecessor so leadership
   * is re-evaluated when that node goes away. A KeeperException triggers a full session reset;
   * any other throwable marks this instance fatally failed.
   */
  private void determineLeadership() {
    ReentrantReadWriteLock.WriteLock lock = this.readWriteLock.writeLock();
    lock.lock();
    try {
      List<String> children = this.zooKeeper.getChildren().forPath(this.leaderElectionNode);
      Collections.sort(children);
      int idx = children.indexOf(this.nodeId);
      if (idx == 0) {
        // Lowest sequence number: this participant is the leader. Publish local metadata.
        Stat stat = this.zooKeeper.checkExists().forPath(this.leaderNode);
        if (stat == null) {
          this.zooKeeper.create().forPath(this.leaderNode, serializeMetadata(this.localMetadata));
        } else {
          this.zooKeeper.setData().forPath(this.leaderNode, serializeMetadata(this.localMetadata));
        }
        this.isLeader = true;
      } else {
        // Watch only the immediate predecessor (avoids the herd effect of watching the leader).
        this.isLeader = false;
        String watchedNode = this.leaderElectionNode + "/" + children.get(idx - 1);
        this.zooKeeper.checkExists().usingWatcher(new DetermineLeadershipWatcher()).forPath(watchedNode);
      }
      findLeader();
    } catch (KeeperException exc) {
      reset();
    } catch (Throwable exc) {
      log.error("Fatal failure.", exc);
      this.fatalFailure = true;
    } finally {
      lock.unlock();
    }
  }
  /**
   * Reads the current leader's metadata from the leader node, re-registering a watcher so the
   * cached metadata is refreshed whenever the leader node changes. Falls back to re-running the
   * election if the leader node does not exist.
   */
  private void findLeader() {
    ReentrantReadWriteLock.WriteLock lock = this.readWriteLock.writeLock();
    lock.lock();
    try {
      if (this.zooKeeper.checkExists().usingWatcher(new FindLeaderWatcher()).forPath(this.leaderNode) == null) {
        determineLeadership();
      }
      byte[] leaderData = this.zooKeeper.getData().usingWatcher(new FindLeaderWatcher()).forPath(this.leaderNode);
      this.leaderMetadata = deserializeMetadata(leaderData);
    } catch (KeeperException exc) {
      reset();
    } catch (Throwable exc) {
      log.error("Fatal failure.", exc);
      this.fatalFailure = true;
    } finally {
      lock.unlock();
    }
  }
  /** Refreshes the cached leader metadata whenever the leader node changes. */
  public class FindLeaderWatcher implements Watcher {
    @Override
    public void process(WatchedEvent event) {
      findLeader();
    }
  }
  /** Re-runs the election when the watched predecessor node changes (e.g. disappears). */
  public class DetermineLeadershipWatcher implements Watcher {
    @Override
    public void process(WatchedEvent event) {
      determineLeadership();
    }
  }
}
| 1,697 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-server/src/main/java/org/apache/gobblin/restli | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-server/src/main/java/org/apache/gobblin/restli/throttling/TokenBucket.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.restli.throttling;
import java.util.concurrent.TimeUnit;
import com.google.common.base.Preconditions;
import lombok.AccessLevel;
import lombok.Getter;
/**
 * An implementation of Token Bucket (https://en.wikipedia.org/wiki/Token_bucket).
 *
 * This class is intended to limit the rate at which tokens are used to a given QPS. It can store
 * tokens for future usage up to a configurable bucket size, and it can also "borrow" tokens from
 * the future by scheduling a reservation at {@link #nextTokenAvailableMillis} and making callers
 * wait until that time.
 */
public class TokenBucket {
  // Refill rate: qps / 1000. Lombok generates a protected getter.
  @Getter(AccessLevel.PROTECTED)
  private double tokensPerMilli;
  // Maximum number of tokens that can be banked (tokensPerMilli * maxBucketSizeInMillis).
  private double maxBucketSizeInTokens;
  // Point in time up to which all generated tokens are already committed to past reservations.
  private volatile long nextTokenAvailableMillis;
  // Tokens currently banked and immediately available (may be fractional).
  private volatile double tokensStored;
  public TokenBucket(long qps, long maxBucketSizeInMillis) {
    this.nextTokenAvailableMillis = System.currentTimeMillis();
    resetQPS(qps, maxBucketSizeInMillis);
  }
  /**
   * Atomically change the sustained rate and the bucket capacity, accounting for tokens already
   * earned — or already promised into the future — at the old rate before the switch.
   */
  public void resetQPS(long qps, long maxBucketSizeInMillis) {
    Preconditions.checkArgument(qps > 0, "QPS must be positive.");
    Preconditions.checkArgument(maxBucketSizeInMillis >= 0, "Max bucket size must be non-negative.");
    long now = System.currentTimeMillis();
    synchronized (this) {
      updateTokensStored(now);
      // If tokens are promised into the future, convert that debt into a (possibly negative)
      // stored-token balance at the old rate before switching rates.
      if (this.nextTokenAvailableMillis > now) {
        this.tokensStored -= (this.nextTokenAvailableMillis - now) * this.tokensPerMilli;
      }
      this.tokensPerMilli = (double) qps / 1000;
      this.maxBucketSizeInTokens = this.tokensPerMilli * maxBucketSizeInMillis;
    }
  }
  /**
   * Attempt to get the specified amount of tokens within the specified timeout. If the tokens cannot be retrieved in the
   * specified timeout, the call will return false immediately, otherwise, the call will block until the tokens are available.
   *
   * @return true if the tokens are granted.
   * @throws InterruptedException
   */
  public boolean getTokens(long tokens, long timeout, TimeUnit timeoutUnit) throws InterruptedException {
    long timeoutMillis = timeoutUnit.toMillis(timeout);
    // Reserve first (cheap, synchronized), then sleep outside any lock until the reservation matures.
    long wait = tryReserveTokens(tokens, timeoutMillis);
    if (wait < 0) {
      return false;
    }
    if (wait == 0) {
      return true;
    }
    Thread.sleep(wait);
    return true;
  }
  /**
   * Get the current number of stored tokens. Note this is a snapshot of the object, and there is no guarantee that those
   * tokens will be available at any point in the future.
   */
  public long getStoredTokens() {
    synchronized (this) {
      updateTokensStored(System.currentTimeMillis());
    }
    return (long) this.tokensStored;
  }
  /**
   * Reserve {@code tokens} tokens, possibly scheduling part of the reservation in the future by
   * advancing {@link #nextTokenAvailableMillis}.
   *
   * @return the wait in millis until the reserved tokens become usable, or negative if the
   *         reservation cannot be satisfied within {@code maxWaitMillis} (in which case no
   *         tokens are consumed).
   */
  synchronized long tryReserveTokens(long tokens, long maxWaitMillis) {
    long now = System.currentTimeMillis();
    long waitUntilNextTokenAvailable = Math.max(0, this.nextTokenAvailableMillis - now);
    updateTokensStored(now);
    // Fast path: the banked tokens alone cover the request.
    if (tokens <= this.tokensStored) {
      this.tokensStored -= tokens;
      return waitUntilNextTokenAvailable;
    }
    double additionalNeededTokens = tokens - this.tokensStored;
    // casting to long will round towards 0
    long additionalWaitForEnoughTokens = (long) (additionalNeededTokens / this.tokensPerMilli) + 1;
    long totalWait = waitUntilNextTokenAvailable + additionalWaitForEnoughTokens;
    if (totalWait > maxWaitMillis) {
      return -1;
    }
    // The extra wait over-generates slightly (due to rounding up); keep the remainder banked.
    this.tokensStored = this.tokensPerMilli * additionalWaitForEnoughTokens - additionalNeededTokens;
    this.nextTokenAvailableMillis = this.nextTokenAvailableMillis + additionalWaitForEnoughTokens;
    return totalWait;
  }
  /**
   * Credits tokens generated since {@link #nextTokenAvailableMillis}, capping the balance at the
   * bucket size (but never reducing an already-above-cap balance).
   *
   * Note: this method should only be called while holding the class lock. For performance, the lock is not explicitly
   * acquired.
   */
  private void updateTokensStored(long now) {
    // Nothing accrues while we are still inside a previously promised reservation window.
    if (now <= this.nextTokenAvailableMillis) {
      return;
    }
    long millisUnaccounted = now - this.nextTokenAvailableMillis;
    double newTokens = millisUnaccounted * this.tokensPerMilli;
    this.nextTokenAvailableMillis = now;
    this.tokensStored = Math.min(this.tokensStored + newTokens, Math.max(this.tokensStored, this.maxBucketSizeInTokens));
  }
}
| 1,698 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-server/src/main/java/org/apache/gobblin/restli | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-server/src/main/java/org/apache/gobblin/restli/throttling/ThrottlingPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.restli.throttling;
import java.util.Map;
/**
 * A throttling policy.
 *
 * Implementations decide how many permits to grant for each incoming {@link PermitRequest}.
 */
public interface ThrottlingPolicy {
  /**
   * Compute the {@link PermitAllocation} for the given {@link PermitRequest}.
   */
  PermitAllocation computePermitAllocation(PermitRequest request);
  /**
   * @return The parameters this policy is using. Used for description of the policy.
   */
  Map<String, String> getParameters();
  /**
   * @return A human readable description of the policy.
   */
  String getDescription();
}
| 1,699 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.