index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/hadoop/OldApiHadoopFileInputSource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.hadoop;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.util.ReflectionUtils;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.source.extractor.Extractor;
import org.apache.gobblin.source.extractor.extract.AbstractSource;
import org.apache.gobblin.source.workunit.Extract;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.util.HadoopUtils;
/**
 * An implementation of {@link org.apache.gobblin.source.Source} that uses a Hadoop {@link FileInputFormat} to get a
 * {@link FileSplit} per {@link Extractor} returned by {@link #getExtractor(WorkUnitState)} and a
 * {@link RecordReader} to read the {@link FileSplit}.
 *
 * <p>
 * This class is equivalent to {@link HadoopFileInputSource} in terms of functionality except that it uses
 * the old ({@code org.apache.hadoop.mapred}) Hadoop API.
 * </p>
 *
 * <p>
 * This class can read either keys of type {@code K} or values of type {@code V} supported by the
 * given {@link FileInputFormat}, configurable by {@link HadoopFileInputSource#FILE_INPUT_READ_KEYS_KEY}.
 * It will read keys if the property is set to {@code true}, otherwise it will read values. By default,
 * it will read values through the given {@link FileInputFormat}.
 * </p>
 *
 * <p>
 * A concrete implementation of this class should implement {@link #getFileInputFormat(State, JobConf)}
 * and {@link #getExtractor(WorkUnitState, RecordReader, FileSplit, boolean)}, which returns a
 * {@link OldApiHadoopFileInputExtractor} that needs a concrete implementation.
 * </p>
 *
 * @param <S> output schema type
 * @param <D> output data record type
 * @param <K> key type expected by the {@link FileInputFormat}
 * @param <V> value type expected by the {@link FileInputFormat}
 *
 * @author Yinan Li
 */
public abstract class OldApiHadoopFileInputSource<S, D, K, V> extends AbstractSource<S, D> {

  @Override
  public List<WorkUnit> getWorkunits(SourceState state) {
    // Copy every Gobblin property into the Hadoop JobConf so the FileInputFormat
    // sees the same configuration when computing splits.
    JobConf jobConf = new JobConf(new Configuration());
    for (String key : state.getPropertyNames()) {
      jobConf.set(key, state.getProp(key));
    }
    // Register the configured input paths (comma-separated list), if any, with the JobConf.
    if (state.contains(HadoopFileInputSource.FILE_INPUT_PATHS_KEY)) {
      for (String inputPath : state.getPropAsList(HadoopFileInputSource.FILE_INPUT_PATHS_KEY)) {
        FileInputFormat.addInputPath(jobConf, new Path(inputPath));
      }
    }
    try {
      FileInputFormat<K, V> fileInputFormat = getFileInputFormat(state, jobConf);
      // The second argument of getSplits is only a hint on the desired number of splits;
      // the InputFormat decides the actual split count.
      InputSplit[] fileSplits = fileInputFormat.getSplits(jobConf, state.getPropAsInt(
          HadoopFileInputSource.FILE_SPLITS_DESIRED_KEY, HadoopFileInputSource.DEFAULT_FILE_SPLITS_DESIRED));
      if (fileSplits == null || fileSplits.length == 0) {
        // Nothing to pull: return an empty (immutable) work unit list.
        return ImmutableList.of();
      }
      // Table metadata for the Extract; table type is optional and may be null.
      Extract.TableType tableType = state.contains(ConfigurationKeys.EXTRACT_TABLE_TYPE_KEY) ?
          Extract.TableType.valueOf(state.getProp(ConfigurationKeys.EXTRACT_TABLE_TYPE_KEY).toUpperCase()) : null;
      String tableNamespace = state.getProp(ConfigurationKeys.EXTRACT_NAMESPACE_NAME_KEY);
      String tableName = state.getProp(ConfigurationKeys.EXTRACT_TABLE_NAME_KEY);
      List<WorkUnit> workUnits = Lists.newArrayListWithCapacity(fileSplits.length);
      for (InputSplit inputSplit : fileSplits) {
        // Create one WorkUnit per InputSplit. The split is serialized into the WorkUnit so the
        // extractor side can reconstruct it without recomputing splits.
        FileSplit fileSplit = (FileSplit) inputSplit;
        Extract extract = createExtract(tableType, tableNamespace, tableName);
        WorkUnit workUnit = WorkUnit.create(extract);
        workUnit.setProp(HadoopFileInputSource.FILE_SPLIT_BYTES_STRING_KEY, HadoopUtils.serializeToString(fileSplit));
        workUnit.setProp(HadoopFileInputSource.FILE_SPLIT_PATH_KEY, fileSplit.getPath().toString());
        workUnits.add(workUnit);
      }
      return workUnits;
    } catch (IOException ioe) {
      throw new RuntimeException("Failed to get workunits", ioe);
    }
  }

  @Override
  public Extractor<S, D> getExtractor(WorkUnitState workUnitState) throws IOException {
    // The serialized FileSplit written by getWorkunits() is mandatory; fail fast without it.
    if (!workUnitState.contains(HadoopFileInputSource.FILE_SPLIT_BYTES_STRING_KEY)) {
      throw new IOException("No serialized FileSplit found in WorkUnitState " + workUnitState.getId());
    }
    // Mirror the work-unit configuration into the JobConf used to build the RecordReader.
    JobConf jobConf = new JobConf(new Configuration());
    for (String key : workUnitState.getPropertyNames()) {
      jobConf.set(key, workUnitState.getProp(key));
    }
    // Rehydrate the FileSplit this work unit is responsible for.
    String fileSplitBytesStr = workUnitState.getProp(HadoopFileInputSource.FILE_SPLIT_BYTES_STRING_KEY);
    FileSplit fileSplit = (FileSplit) HadoopUtils.deserializeFromString(FileSplit.class, fileSplitBytesStr);
    FileInputFormat<K, V> fileInputFormat = getFileInputFormat(workUnitState, jobConf);
    RecordReader<K, V> recordReader = fileInputFormat.getRecordReader(fileSplit, jobConf, Reporter.NULL);
    // Whether to emit keys (true) or values (false, the default) from the reader.
    boolean readKeys = workUnitState.getPropAsBoolean(
        HadoopFileInputSource.FILE_INPUT_READ_KEYS_KEY, HadoopFileInputSource.DEFAULT_FILE_INPUT_READ_KEYS);
    return getExtractor(workUnitState, recordReader, fileSplit, readKeys);
  }

  @Override
  public void shutdown(SourceState state) {
    // No resources held at the source level; nothing to clean up.
  }

  /**
   * Get a {@link FileInputFormat} instance used to get {@link FileSplit}s and a {@link RecordReader}
   * for every {@link FileSplit}.
   *
   * <p>
   * This default implementation simply creates a new instance of a {@link FileInputFormat} class
   * specified using the configuration property {@link HadoopFileInputSource#FILE_INPUT_FORMAT_CLASS_KEY}.
   * </p>
   *
   * @param state a {@link State} object carrying configuration properties
   * @param jobConf a Hadoop {@link JobConf} object carrying Hadoop configurations
   * @return a {@link FileInputFormat} instance
   */
  @SuppressWarnings("unchecked")
  protected FileInputFormat<K, V> getFileInputFormat(State state, JobConf jobConf) {
    Preconditions.checkArgument(state.contains(HadoopFileInputSource.FILE_INPUT_FORMAT_CLASS_KEY));
    try {
      // NOTE(review): the instance is created with a fresh Configuration rather than the passed-in
      // jobConf — presumably intentional since the jobConf is handed to getSplits separately.
      return (FileInputFormat<K, V>) ReflectionUtils.newInstance(
          Class.forName(state.getProp(HadoopFileInputSource.FILE_INPUT_FORMAT_CLASS_KEY)), new Configuration());
    } catch (ClassNotFoundException cnfe) {
      throw new RuntimeException(cnfe);
    }
  }

  /**
   * Get a {@link OldApiHadoopFileInputExtractor} instance.
   *
   * @param workUnitState a {@link WorkUnitState} object carrying Gobblin configuration properties
   * @param recordReader a Hadoop {@link RecordReader} object used to read input records
   * @param fileSplit the {@link FileSplit} to read input records from
   * @param readKeys whether the {@link OldApiHadoopFileInputExtractor} should read keys of type {@code K};
   *                 by default values of type {@code V} are read
   * @return a {@link OldApiHadoopFileInputExtractor} instance
   */
  protected abstract OldApiHadoopFileInputExtractor<S, D, K, V> getExtractor(WorkUnitState workUnitState,
      RecordReader<K, V> recordReader, FileSplit fileSplit, boolean readKeys);
}
| 3,000 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/hadoop/OldApiWritableFileExtractor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.hadoop;
import java.io.IOException;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapred.RecordReader;
/**
 * An extension of {@link OldApiHadoopFileInputExtractor} for extracting {@link Writable} records.
 *
 * @author Ziyang Liu
 */
public class OldApiWritableFileExtractor extends OldApiHadoopFileInputExtractor<Object, Writable, Object, Writable> {

  /**
   * @param recordReader the Hadoop {@link RecordReader} to pull {@link Writable}s from;
   *                     this extractor takes ownership and closes it on {@link #close()}
   * @param readKeys {@code true} to emit keys, {@code false} to emit values
   */
  public OldApiWritableFileExtractor(RecordReader<Object, Writable> recordReader, boolean readKeys) {
    super(recordReader, readKeys);
  }

  /**
   * {@inheritDoc}
   *
   * <p>Writable records carry no separate schema, so this extractor reports none.</p>
   */
  @Override
  public Object getSchema() throws IOException {
    return null;
  }
}
| 3,001 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/hadoop/OldApiWritableFileSource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.hadoop;
import java.util.List;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.hive.HiveSerDeWrapper;
import org.apache.gobblin.source.workunit.WorkUnit;
/**
 * An extension of {@link OldApiHadoopFileInputSource} for sources in {@link Writable} format using a
 * {@link org.apache.hadoop.mapred.FileInputFormat}.
 *
 * The {@link org.apache.hadoop.mapred.FileInputFormat} can either be specified using
 * {@link HadoopFileInputSource#FILE_INPUT_FORMAT_CLASS_KEY}, or by specifying a deserializer via
 * {@link HiveSerDeWrapper#SERDE_DESERIALIZER_TYPE}.
 *
 * @author Ziyang Liu
 */
public class OldApiWritableFileSource extends OldApiHadoopFileInputSource<Object, Writable, Object, Writable> {

  @Override
  public List<WorkUnit> getWorkunits(SourceState state) {
    // If no input format class was configured explicitly, derive one from the
    // configured Hive deserializer before delegating to the generic implementation.
    boolean inputFormatConfigured = state.contains(HadoopFileInputSource.FILE_INPUT_FORMAT_CLASS_KEY);
    if (!inputFormatConfigured) {
      String inputFormatClassName = HiveSerDeWrapper.getDeserializer(state).getInputFormatClassName();
      state.setProp(HadoopFileInputSource.FILE_INPUT_FORMAT_CLASS_KEY, inputFormatClassName);
    }
    return super.getWorkunits(state);
  }

  /**
   * Build the concrete {@link OldApiWritableFileExtractor} that reads this source's records.
   */
  @Override
  protected OldApiHadoopFileInputExtractor<Object, Writable, Object, Writable> getExtractor(WorkUnitState workUnitState,
      RecordReader<Object, Writable> recordReader, FileSplit fileSplit, boolean readKeys) {
    return new OldApiWritableFileExtractor(recordReader, readKeys);
  }
}
| 3,002 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/hadoop/HadoopTextInputSource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.hadoop;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
/**
 * An extension to {@link HadoopFileInputSource} that uses a {@link TextInputFormat}.
 *
 * <p>
 * A concrete implementation of this class should at least implement the
 * {@link #getExtractor(WorkUnitState, RecordReader, FileSplit, boolean)} method.
 * </p>
 *
 * @param <S> output schema type
 *
 * @author Yinan Li
 */
public abstract class HadoopTextInputSource<S> extends HadoopFileInputSource<S, Text, LongWritable, Text> {

  /**
   * Always supplies a {@link TextInputFormat}, ignoring any configured input-format class.
   */
  @Override
  protected FileInputFormat<LongWritable, Text> getFileInputFormat(State state, Configuration configuration) {
    FileInputFormat<LongWritable, Text> textInputFormat =
        ReflectionUtils.newInstance(TextInputFormat.class, configuration);
    return textInputFormat;
  }
}
| 3,003 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/hadoop/AvroFileSource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.hadoop;
import java.io.IOException;
import java.util.List;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.source.extractor.Extractor;
import org.apache.gobblin.source.extractor.filebased.FileBasedHelperException;
import org.apache.gobblin.source.extractor.filebased.FileBasedSource;
/**
 * A {@link FileBasedSource} for Avro files, producing {@link GenericRecord}s keyed by their
 * Avro {@link Schema}.
 */
@Slf4j
public class AvroFileSource extends FileBasedSource<Schema, GenericRecord> {

  /** Creates a per-work-unit {@link AvroFileExtractor}. */
  @Override
  public Extractor<Schema, GenericRecord> getExtractor(WorkUnitState state) throws IOException {
    return new AvroFileExtractor(state);
  }

  /** Initializes and connects the Avro-aware file-system helper. */
  @Override
  public void initFileSystemHelper(State state) throws FileBasedHelperException {
    this.fsHelper = new AvroFsHelper(state);
    this.fsHelper.connect();
  }

  /**
   * Lists the configured source data directory to obtain the current file-system snapshot.
   *
   * @throws RuntimeException wrapping the underlying {@link FileBasedHelperException}
   *         if the listing fails
   */
  @Override
  public List<String> getcurrentFsSnapshot(State state) {
    String dataDir = state.getProp(ConfigurationKeys.SOURCE_FILEBASED_DATA_DIRECTORY);
    log.info("Running ls command with input " + dataDir);
    try {
      return this.fsHelper.ls(dataDir);
    } catch (FileBasedHelperException e) {
      String errMsg = String.format(
          "Not able to run ls command due to %s. Will not pull any files", e.getMessage());
      log.error(errMsg, e);
      throw new RuntimeException(errMsg, e);
    }
  }
}
| 3,004 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/hadoop/HadoopFileInputSource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.hadoop;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.util.ReflectionUtils;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.source.extractor.Extractor;
import org.apache.gobblin.source.extractor.extract.AbstractSource;
import org.apache.gobblin.source.workunit.Extract;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.util.HadoopUtils;
/**
 * An implementation of {@link org.apache.gobblin.source.Source} that uses a Hadoop {@link FileInputFormat} to get a
 * {@link FileSplit} per {@link Extractor} returned by {@link #getExtractor(WorkUnitState)} and a
 * {@link RecordReader} to read the {@link FileSplit}.
 *
 * <p>
 * This class can read either keys of type K or values of type V supported by the
 * given {@link FileInputFormat}, through the property {@link #FILE_INPUT_READ_KEYS_KEY}. It will read keys
 * if the property is set to {@code true}, otherwise it will read values. By default, it will read values
 * through the given {@link FileInputFormat}.
 * </p>
 *
 * <p>
 * A concrete implementation of this class should implement {@link #getFileInputFormat(State, Configuration)}
 * and {@link #getExtractor(WorkUnitState, RecordReader, FileSplit, boolean)}, which returns a
 * {@link HadoopFileInputExtractor} that needs a concrete implementation.
 * </p>
 *
 * @param <S> output schema type
 * @param <D> output data record type
 * @param <K> key type expected by the {@link FileInputFormat}
 * @param <V> value type expected by the {@link FileInputFormat}
 *
 * @author Yinan Li
 */
public abstract class HadoopFileInputSource<S, D, K, V> extends AbstractSource<S, D> {

  // Common prefix for all configuration keys understood by this source.
  private static final String HADOOP_SOURCE_KEY_PREFIX = "source.hadoop.";
  // Fully-qualified class name of the FileInputFormat to instantiate reflectively.
  public static final String FILE_INPUT_FORMAT_CLASS_KEY = HADOOP_SOURCE_KEY_PREFIX + "file.input.format.class";
  // Desired number of file splits (a hint; only honored by the old-API source).
  public static final String FILE_SPLITS_DESIRED_KEY = HADOOP_SOURCE_KEY_PREFIX + "file.splits.desired";
  public static final int DEFAULT_FILE_SPLITS_DESIRED = 1;
  // Comma-separated list of input paths to read.
  public static final String FILE_INPUT_PATHS_KEY = HADOOP_SOURCE_KEY_PREFIX + "file.input.paths";
  // Optional min/max split sizes in bytes, forwarded to FileInputFormat.
  public static final String FILE_INPUT_SPLIT_MINSIZE = HADOOP_SOURCE_KEY_PREFIX + "file.input.split.minsize";
  public static final String FILE_INPUT_SPLIT_MAXSIZE = HADOOP_SOURCE_KEY_PREFIX + "file.input.split.maxsize";
  // Whether extractors should emit keys (true) instead of values (false, default).
  public static final String FILE_INPUT_READ_KEYS_KEY = HADOOP_SOURCE_KEY_PREFIX + "file.read.keys";
  public static final boolean DEFAULT_FILE_INPUT_READ_KEYS = false;
  // Per-work-unit property holding the path of the assigned split.
  public static final String FILE_SPLIT_PATH_KEY = HADOOP_SOURCE_KEY_PREFIX + "file.split.path";
  // Per-work-unit property holding the FileSplit serialized to a String (package-visible).
  static final String FILE_SPLIT_BYTES_STRING_KEY = HADOOP_SOURCE_KEY_PREFIX + "file.split.bytes.string";

  @Override
  public List<WorkUnit> getWorkunits(SourceState state) {
    try {
      Job job = Job.getInstance(new Configuration());

      // Register the configured input paths, if any.
      if (state.contains(FILE_INPUT_PATHS_KEY)) {
        for (String inputPath : state.getPropAsList(FILE_INPUT_PATHS_KEY)) {
          FileInputFormat.addInputPath(job, new Path(inputPath));
        }
      }

      // Forward optional split-size bounds to the FileInputFormat.
      if (state.contains(FILE_INPUT_SPLIT_MINSIZE)) {
        FileInputFormat.setMinInputSplitSize(job, state.getPropAsLong(FILE_INPUT_SPLIT_MINSIZE));
      }

      if (state.contains(FILE_INPUT_SPLIT_MAXSIZE)) {
        FileInputFormat.setMaxInputSplitSize(job, state.getPropAsLong(FILE_INPUT_SPLIT_MAXSIZE));
      }

      FileInputFormat<K, V> fileInputFormat = getFileInputFormat(state, job.getConfiguration());
      List<InputSplit> fileSplits = fileInputFormat.getSplits(job);
      if (fileSplits == null || fileSplits.isEmpty()) {
        // Nothing to pull: return an empty (immutable) work unit list.
        return ImmutableList.of();
      }

      // Table metadata for the Extract; table type is optional and may be null.
      Extract.TableType tableType = state.contains(ConfigurationKeys.EXTRACT_TABLE_TYPE_KEY)
          ? Extract.TableType.valueOf(state.getProp(ConfigurationKeys.EXTRACT_TABLE_TYPE_KEY).toUpperCase()) : null;
      String tableNamespace = state.getProp(ConfigurationKeys.EXTRACT_NAMESPACE_NAME_KEY);
      String tableName = state.getProp(ConfigurationKeys.EXTRACT_TABLE_NAME_KEY);

      List<WorkUnit> workUnits = Lists.newArrayListWithCapacity(fileSplits.size());
      for (InputSplit inputSplit : fileSplits) {
        // Create one WorkUnit per InputSplit. The split is serialized into the WorkUnit so the
        // extractor side can reconstruct it without recomputing splits.
        FileSplit fileSplit = (FileSplit) inputSplit;
        Extract extract = createExtract(tableType, tableNamespace, tableName);
        WorkUnit workUnit = WorkUnit.create(extract);
        workUnit.setProp(FILE_SPLIT_BYTES_STRING_KEY, HadoopUtils.serializeToString(fileSplit));
        workUnit.setProp(FILE_SPLIT_PATH_KEY, fileSplit.getPath().toString());
        workUnits.add(workUnit);
      }

      return workUnits;
    } catch (IOException ioe) {
      throw new RuntimeException("Failed to get workunits", ioe);
    }
  }

  @Override
  public Extractor<S, D> getExtractor(WorkUnitState workUnitState) throws IOException {
    // The serialized FileSplit written by getWorkunits() is mandatory; fail fast without it.
    if (!workUnitState.contains(FILE_SPLIT_BYTES_STRING_KEY)) {
      throw new IOException("No serialized FileSplit found in WorkUnitState " + workUnitState.getId());
    }

    Configuration configuration = new Configuration();
    FileInputFormat<K, V> fileInputFormat = getFileInputFormat(workUnitState, configuration);

    // Rehydrate the FileSplit this work unit is responsible for.
    String fileSplitBytesStr = workUnitState.getProp(FILE_SPLIT_BYTES_STRING_KEY);
    FileSplit fileSplit = (FileSplit) HadoopUtils.deserializeFromString(FileSplit.class, fileSplitBytesStr);

    // The new MapReduce API requires a TaskAttemptContext; fabricate one with a dummy attempt ID.
    TaskAttemptContext taskAttemptContext =
        getTaskAttemptContext(configuration, DummyTaskAttemptIDFactory.newTaskAttemptID());
    try {
      RecordReader<K, V> recordReader = fileInputFormat.createRecordReader(fileSplit, taskAttemptContext);
      // New-API readers must be initialized before use.
      recordReader.initialize(fileSplit, taskAttemptContext);
      // Whether to emit keys (true) or values (false, the default) from the reader.
      boolean readKeys = workUnitState.getPropAsBoolean(FILE_INPUT_READ_KEYS_KEY, DEFAULT_FILE_INPUT_READ_KEYS);
      return getExtractor(workUnitState, recordReader, fileSplit, readKeys);
    } catch (InterruptedException ie) {
      // NOTE(review): the interrupt flag is not re-set here before wrapping — callers relying on
      // Thread.interrupted() state should confirm this is intended.
      throw new IOException(ie);
    }
  }

  @Override
  public void shutdown(SourceState state) {
    // No resources held at the source level; nothing to clean up.
  }

  /**
   * Get a {@link FileInputFormat} instance used to get {@link FileSplit}s and a {@link RecordReader}
   * for every {@link FileSplit}.
   *
   * <p>
   * This default implementation simply creates a new instance of a {@link FileInputFormat} class
   * specified using the configuration property {@link #FILE_INPUT_FORMAT_CLASS_KEY}.
   * </p>
   *
   * @param state a {@link State} object carrying configuration properties
   * @param configuration a Hadoop {@link Configuration} object carrying Hadoop configurations
   * @return a {@link FileInputFormat} instance
   */
  @SuppressWarnings("unchecked")
  protected FileInputFormat<K, V> getFileInputFormat(State state, Configuration configuration) {
    Preconditions.checkArgument(state.contains(FILE_INPUT_FORMAT_CLASS_KEY));
    try {
      return (FileInputFormat<K, V>) ReflectionUtils
          .newInstance(Class.forName(state.getProp(FILE_INPUT_FORMAT_CLASS_KEY)), configuration);
    } catch (ClassNotFoundException cnfe) {
      throw new RuntimeException(cnfe);
    }
  }

  /**
   * Get a {@link HadoopFileInputExtractor} instance.
   *
   * @param workUnitState a {@link WorkUnitState} object carrying Gobblin configuration properties
   * @param recordReader a Hadoop {@link RecordReader} object used to read input records
   * @param fileSplit the {@link FileSplit} to read input records from
   * @param readKeys whether the {@link HadoopFileInputExtractor} should read keys of type K;
   *                 by default values of type V are read
   * @return a {@link HadoopFileInputExtractor} instance
   */
  protected abstract HadoopFileInputExtractor<S, D, K, V> getExtractor(WorkUnitState workUnitState,
      RecordReader<K, V> recordReader, FileSplit fileSplit, boolean readKeys);

  /**
   * Reflectively construct a {@link TaskAttemptContext}.
   *
   * <p>
   * The implementation class ({@code TaskAttemptContextImpl}) lives in different places across
   * Hadoop versions, hence the lookup by name instead of a direct reference.
   * </p>
   */
  private static TaskAttemptContext getTaskAttemptContext(Configuration configuration, TaskAttemptID taskAttemptID) {
    Class<?> taskAttemptContextClass;
    try {
      // For Hadoop 2.x
      taskAttemptContextClass = Class.forName("org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl");
    } catch (ClassNotFoundException cnfe) {
      throw new RuntimeException(cnfe);
    }
    try {
      return (TaskAttemptContext) taskAttemptContextClass
          .getDeclaredConstructor(Configuration.class, TaskAttemptID.class).newInstance(configuration, taskAttemptID);
    } catch (Exception e) {
      throw new RuntimeException(e);
    }
  }

  /**
   * A factory class for creating new dummy {@link TaskAttemptID}s.
   *
   * <p>
   * This class extends {@link TaskAttemptID} so it has access to some protected string constants
   * in {@link TaskAttemptID}.
   * </p>
   */
  private static class DummyTaskAttemptIDFactory extends TaskAttemptID {

    /**
     * Create a new {@link TaskAttemptID} instance.
     *
     * <p>
     * Builds an attempt-ID string of the form {@code attempt_<millis>_0_m_0_0} — the current
     * time is used as the job-tracker ID component to keep IDs distinct across invocations.
     * </p>
     *
     * @return a new {@link TaskAttemptID} instance
     */
    public static TaskAttemptID newTaskAttemptID() {
      return TaskAttemptID.forName(ATTEMPT + SEPARATOR + Long.toString(System.currentTimeMillis()) + SEPARATOR + 0
          + SEPARATOR + 'm' + SEPARATOR + 0 + SEPARATOR + 0);
    }
  }
}
| 3,005 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/hadoop/OldApiHadoopFileInputExtractor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.hadoop;
import java.io.IOException;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.gobblin.source.extractor.DataRecordException;
import org.apache.gobblin.source.extractor.Extractor;
/**
 * An implementation of {@link Extractor} that uses a Hadoop {@link RecordReader} to read records
 * from a {@link org.apache.hadoop.mapred.FileSplit}.
 *
 * <p>
 * This class is equivalent to {@link HadoopFileInputExtractor} in terms of functionality except that
 * it uses the old Hadoop API.
 * </p>
 *
 * <p>
 * This class can read either keys of type {@code K} or values of type {@code V} using the
 * given {@link RecordReader}, depending on the value of the second argument of the constructor
 * {@link #OldApiHadoopFileInputExtractor(RecordReader, boolean)}. It will read keys if the argument
 * is {@code true}, otherwise it will read values. Normally, this is specified using the property
 * {@link HadoopFileInputSource#FILE_INPUT_READ_KEYS_KEY}, which is {@code false} by default.
 * </p>
 *
 * <p>
 * This class provides a default implementation of {@link #readRecord(Object)} that simply casts
 * the keys or values read by the {@link RecordReader} into type {@code D}. It is required
 * that type {@code K} or {@code V} can be safely cast to type {@code D}.
 * </p>
 *
 * <p>
 * The Hadoop {@link RecordReader} is passed into this class, which is responsible for closing
 * it by calling {@link RecordReader#close()} in {@link #close()}.
 * </p>
 *
 * <p>
 * A concrete implementation of this class should at least implement the {@link #getSchema()}
 * method.
 * </p>
 *
 * @param <S> output schema type
 * @param <D> output data record type that MUST be compatible with either {@code K} or {@code V}
 * @param <K> key type expected by the {@link RecordReader}
 * @param <V> value type expected by the {@link RecordReader}
 *
 * @author Yinan Li
 */
public abstract class OldApiHadoopFileInputExtractor<S, D, K, V> implements Extractor<S, D> {

  private final RecordReader<K, V> recordReader;
  private final boolean readKeys;

  public OldApiHadoopFileInputExtractor(RecordReader<K, V> recordReader, boolean readKeys) {
    this.recordReader = recordReader;
    this.readKeys = readKeys;
  }

  /**
   * {@inheritDoc}.
   *
   * This method will throw a {@link ClassCastException} if type {@code D} is not compatible
   * with type {@code K} if keys are supposed to be read, or if it is not compatible with type
   * {@code V} if values are supposed to be read.
   */
  @Override
  @SuppressWarnings("unchecked")
  public D readRecord(@Deprecated D reuse) throws DataRecordException, IOException {
    // Fresh key/value holders each call; the old-API reader fills them in on next().
    K key = this.recordReader.createKey();
    V value = this.recordReader.createValue();
    if (this.recordReader.next(key, value)) {
      return this.readKeys ? (D) key : (D) value;
    }
    // End of the split: null signals no more records.
    return null;
  }

  /** Record count is unknown up front for a generic FileSplit. */
  @Override
  public long getExpectedRecordCount() {
    // Fixed: was `-1l` — lowercase 'l' suffix is easily misread as the digit 1.
    return -1L;
  }

  /** Watermarks are not tracked by this extractor. */
  @Override
  public long getHighWatermark() {
    return -1L;
  }

  @Override
  public void close() throws IOException {
    // This extractor owns the reader and must release its underlying stream.
    this.recordReader.close();
  }
}
| 3,006 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/hadoop/AvroFsHelper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.hadoop;
import java.io.IOException;
import java.io.InputStream;
import org.apache.avro.Schema;
import org.apache.avro.file.DataFileReader;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.mapred.FsInput;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.source.extractor.filebased.FileBasedHelperException;
import org.apache.gobblin.source.extractor.filebased.SizeAwareFileBasedHelper;
import org.apache.gobblin.source.extractor.utils.ProxyFsInput;
import org.apache.gobblin.util.HadoopUtils;
public class AvroFsHelper extends HadoopFsHelper implements SizeAwareFileBasedHelper {
  private static final Logger LOGGER = LoggerFactory.getLogger(AvroFsHelper.class);

  public AvroFsHelper(State state) {
    this(state, HadoopUtils.newConfiguration());
  }

  public AvroFsHelper(State state, Configuration configuration) {
    super(state, configuration);
  }

  /**
   * Reads the schema embedded in the header of the given avro file. The reader opened to
   * inspect the file is always closed before returning; close failures are logged.
   *
   * @param file path to the avro file
   * @return the file's embedded {@link Schema}
   * @throws FileBasedHelperException if the file cannot be opened or read
   */
  public Schema getAvroSchema(String file) throws FileBasedHelperException {
    DataFileReader<GenericRecord> dfr = null;
    try {
      dfr = createDataFileReader(file);
      return dfr.getSchema();
    } catch (IOException e) {
      throw new FileBasedHelperException("Failed to open avro file " + file + " due to error " + e.getMessage(), e);
    } finally {
      if (dfr != null) {
        try {
          dfr.close();
        } catch (IOException e) {
          LOGGER.error("Failed to close avro file " + file, e);
        }
      }
    }
  }

  /**
   * Opens a {@link DataFileReader} for the given file, going through a proxy-user
   * {@link ProxyFsInput} when {@link ConfigurationKeys#SHOULD_FS_PROXY_AS_USER} is set.
   * Shared by {@link #getAvroSchema(String)} and {@link #getAvroFile(String)} so the
   * proxy-vs-direct decision lives in exactly one place.
   */
  private DataFileReader<GenericRecord> createDataFileReader(String file) throws IOException {
    if (this.getState().getPropAsBoolean(ConfigurationKeys.SHOULD_FS_PROXY_AS_USER,
        ConfigurationKeys.DEFAULT_SHOULD_FS_PROXY_AS_USER)) {
      return new DataFileReader<>(new ProxyFsInput(new Path(file), this.getFileSystem()),
          new GenericDatumReader<GenericRecord>());
    }
    return new DataFileReader<>(new FsInput(new Path(file), this.getFileSystem().getConf()),
        new GenericDatumReader<GenericRecord>());
  }

  /**
   * Returns an {@link DataFileReader} to the specified avro file.
   * <p>
   * Note: It is the caller's responsibility to close the returned {@link DataFileReader}.
   * </p>
   *
   * @param file The path to the avro file to open.
   * @return A {@link DataFileReader} for the specified avro file, or {@code null} if the file
   *         does not exist.
   * @throws FileBasedHelperException if there is a problem opening the {@link InputStream} for the specified file.
   */
  public DataFileReader<GenericRecord> getAvroFile(String file) throws FileBasedHelperException {
    try {
      if (!this.getFileSystem().exists(new Path(file))) {
        LOGGER.warn(file + " does not exist.");
        return null;
      }
      return createDataFileReader(file);
    } catch (IOException e) {
      throw new FileBasedHelperException("Failed to open avro file " + file + " due to error " + e.getMessage(), e);
    }
  }

  /**
   * @param filePath path of the file to size
   * @return the length of the file in bytes
   * @throws FileBasedHelperException if the file status cannot be fetched
   */
  @Override
  public long getFileSize(String filePath) throws FileBasedHelperException {
    try {
      return this.getFileSystem().getFileStatus(new Path(filePath)).getLen();
    } catch (IOException e) {
      throw new FileBasedHelperException(
          String.format("Failed to get size for file at path %s due to error %s", filePath, e.getMessage()), e);
    }
  }
}
| 3,007 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/hadoop/AvroFileExtractor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.hadoop;
import java.io.IOException;
import java.util.Iterator;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import com.google.common.base.Throwables;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.source.extractor.filebased.FileBasedExtractor;
import org.apache.gobblin.source.extractor.filebased.FileBasedHelperException;
/**
 * A custom type of {@link FileBasedExtractor}s for extracting data from Avro files.
 */
public class AvroFileExtractor extends FileBasedExtractor<Schema, GenericRecord> {

  // Schema read from the first file to pull, cached so it is computed at most once per extractor
  private Schema extractorCachedSchema;

  public AvroFileExtractor(WorkUnitState workUnitState) {
    super(workUnitState, new AvroFsHelper(workUnitState));
  }

  /**
   * Opens the given avro file and returns an iterator over its records. The underlying reader
   * is registered with {@link #closer} so it is closed when the extractor is closed.
   *
   * @param file path of the avro file to download
   * @return an iterator over the file's {@link GenericRecord}s
   */
  @Override
  public Iterator<GenericRecord> downloadFile(String file)
      throws IOException {
    try {
      return this.closer.register(((AvroFsHelper) this.fsHelper).getAvroFile(file));
    } catch (FileBasedHelperException e) {
      // Rethrow as unchecked; avoids the dead "return null" the swallow-and-fall-through form needed
      throw Throwables.propagate(e);
    }
  }

  /**
   * Assumption is that all files in the input directory have the same schema.
   * This method is being invoked in org.apache.gobblin.runtime.Task#runSynchronousModel()
   *
   * @return the configured {@link ConfigurationKeys#SOURCE_SCHEMA} if present, otherwise the
   *         schema of the first file to pull (cached), or {@code null} if there are no files
   */
  @Override
  public Schema getSchema() {
    if (this.workUnit.contains(ConfigurationKeys.SOURCE_SCHEMA)) {
      return new Schema.Parser().parse(this.workUnit.getProp(ConfigurationKeys.SOURCE_SCHEMA));
    }

    if (this.extractorCachedSchema != null) {
      return this.extractorCachedSchema;
    }

    if (this.filesToPull.isEmpty()) {
      return null;
    }

    try {
      this.extractorCachedSchema = ((AvroFsHelper) this.fsHelper).getAvroSchema(this.filesToPull.get(0));
    } catch (FileBasedHelperException e) {
      throw Throwables.propagate(e);
    }
    return this.extractorCachedSchema;
  }
}
| 3,008 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/hadoop/HadoopFileInputExtractor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.hadoop;
import java.io.IOException;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.gobblin.source.extractor.DataRecordException;
import org.apache.gobblin.source.extractor.Extractor;
/**
 * An implementation of {@link Extractor} that uses a Hadoop {@link RecordReader} to read records
 * from a {@link org.apache.hadoop.mapreduce.lib.input.FileSplit}.
 *
 * <p>
 * This class can read either keys of type K or values of type V using the
 * given {@link RecordReader}, depending on the value of the second argument of the constructor
 * {@link #HadoopFileInputExtractor(RecordReader, boolean)}. It will read keys if the argument
 * is {@code true}, otherwise it will read values. Normally, this is specified using the property
 * {@link HadoopFileInputSource#FILE_INPUT_READ_KEYS_KEY}, which is {@code false} by default.
 * </p>
 *
 * <p>
 * This class provides a default implementation of {@link #readRecord(Object)} that simply casts
 * the keys or values read by the {@link RecordReader} into type D. It is required
 * that type K or V can be safely casted to type D.
 * </p>
 *
 * <p>
 * The Hadoop {@link RecordReader} is passed into this class, which is responsible for closing
 * it by calling {@link RecordReader#close()} in {@link #close()}.
 * </p>
 *
 * <p>
 * A concrete implementation of this class should at least implement the {@link #getSchema()}
 * method.
 * </p>
 *
 * @param <S> output schema type
 * @param <D> output data record type that MUST be compatible with either K or V
 * @param <K> key type expected by the {@link RecordReader}
 * @param <V> value type expected by the {@link RecordReader}
 *
 * @author Yinan Li
 */
public abstract class HadoopFileInputExtractor<S, D, K, V> implements Extractor<S, D> {

  private final RecordReader<K, V> recordReader;
  private final boolean readKeys;

  public HadoopFileInputExtractor(RecordReader<K, V> recordReader, boolean readKeys) {
    this.recordReader = recordReader;
    this.readKeys = readKeys;
  }

  /**
   * {@inheritDoc}.
   *
   * This method will throw a {@link ClassCastException} if type {@code D} is not compatible
   * with type {@code K} if keys are supposed to be read, or if it is not compatible with type
   * {@code V} if values are supposed to be read.
   */
  @Override
  @SuppressWarnings("unchecked")
  public D readRecord(@Deprecated D reuse) throws DataRecordException, IOException {
    try {
      if (this.recordReader.nextKeyValue()) {
        return this.readKeys ? (D) this.recordReader.getCurrentKey() : (D) this.recordReader.getCurrentValue();
      }
    } catch (InterruptedException ie) {
      throw new IOException(ie);
    }
    // End of the input split
    return null;
  }

  @Override
  public long getExpectedRecordCount() {
    // Record counts are not known up front for file-based input
    return -1L;
  }

  @Override
  public long getHighWatermark() {
    // Watermarks are not applicable to file-based input
    return -1L;
  }

  @Override
  public void close() throws IOException {
    this.recordReader.close();
  }
}
| 3,009 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/extract/SourceSpecificLayer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract;
import org.apache.gobblin.source.extractor.DataRecordException;
import org.apache.gobblin.source.extractor.exception.HighWatermarkException;
import org.apache.gobblin.source.extractor.watermark.Predicate;
import java.io.IOException;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import org.apache.gobblin.source.extractor.exception.RecordCountException;
import org.apache.gobblin.source.extractor.exception.SchemaException;
import org.apache.gobblin.source.workunit.WorkUnit;
/**
 * An interface for source-specific extractor operations.
 *
 * @param <S> type of schema
 * @param <D> type of data record
 */
public interface SourceSpecificLayer<S, D> {
  /**
   * Builds the metadata (e.g. url, query) needed to extract the raw schema.
   *
   * @param schema source schema (database) name
   * @param entity source entity (table) name
   * @return list of commands to get schema
   * @throws SchemaException if there is anything wrong in building metadata for schema extraction
   */
  public List<Command> getSchemaMetadata(String schema, String entity)
      throws SchemaException;

  /**
   * Extracts the raw schema from the source response.
   *
   * @param response the output from a source call
   * @return S representation of the schema
   * @throws SchemaException if there is anything wrong in getting raw schema
   */
  public S getSchema(CommandOutput<?, ?> response)
      throws SchemaException, IOException;

  /**
   * Builds the metadata (e.g. url, query) needed to fetch the high watermark.
   *
   * @param schema source schema (database) name
   * @param entity source entity (table) name
   * @param watermarkColumn watermark column name
   * @param predicateList all predicates that need to be applied
   * @return list of commands to get the high watermark
   * @throws org.apache.gobblin.source.extractor.exception.HighWatermarkException if there is anything wrong in building metadata to get high watermark
   */
  public List<Command> getHighWatermarkMetadata(String schema, String entity, String watermarkColumn,
      List<Predicate> predicateList)
      throws HighWatermarkException;

  /**
   * Extracts the high watermark from the source response.
   *
   * @param response the output from a source call
   * @param watermarkColumn watermark column name
   * @param predicateColumnFormat format of the watermark column values
   * @return high water mark from source
   * @throws HighWatermarkException if there is anything wrong in extracting the high watermark
   */
  public long getHighWatermark(CommandOutput<?, ?> response, String watermarkColumn, String predicateColumnFormat)
      throws HighWatermarkException;

  /**
   * Builds the metadata (e.g. url, query) needed to fetch the record count.
   *
   * @param schema source schema (database) name
   * @param entity source entity (table) name
   * @param workUnit work unit carrying the pull properties
   * @param predicateList all predicates that need to be applied
   * @return list of commands to get the count
   * @throws RecordCountException if there is anything wrong in building metadata for record counts
   */
  public List<Command> getCountMetadata(String schema, String entity, WorkUnit workUnit, List<Predicate> predicateList)
      throws RecordCountException;

  /**
   * Extracts the record count from the source response.
   *
   * @param response the output from a source call
   * @return record count
   * @throws RecordCountException if there is anything wrong in getting record count
   */
  public long getCount(CommandOutput<?, ?> response)
      throws RecordCountException;

  /**
   * Builds the metadata (e.g. url, query) needed to fetch the data records.
   *
   * @param schema source schema (database) name
   * @param entity source entity (table) name
   * @param workUnit work unit carrying the pull properties
   * @param predicateList all predicates that need to be applied
   * @return list of commands to get the data
   * @throws org.apache.gobblin.source.extractor.DataRecordException if there is anything wrong in building metadata for data records
   */
  public List<Command> getDataMetadata(String schema, String entity, WorkUnit workUnit, List<Predicate> predicateList)
      throws DataRecordException;

  /**
   * Extracts the set of data records from the source response.
   *
   * @param response the output from a source call
   * @return Iterator over objects of type D
   * @throws DataRecordException if there is anything wrong in getting data records
   */
  public Iterator<D> getData(CommandOutput<?, ?> response)
      throws DataRecordException, IOException;

  /**
   * Maps source data types to target data types.
   *
   * @return Map of source and target data types
   */
  public Map<String, String> getDataTypeMap();

  /**
   * Gets records using a source-specific api (Example: bulk api in salesforce source).
   *
   * @param schema source schema (database) name
   * @param entity source entity (table) name
   * @param workUnit work unit carrying the pull properties
   * @param predicateList all predicates that need to be applied
   * @return iterator over the set of records
   * @throws IOException if there is anything wrong in getting data records
   */
  public Iterator<D> getRecordSetFromSourceApi(String schema, String entity, WorkUnit workUnit,
      List<Predicate> predicateList)
      throws IOException;
}
| 3,010 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/extract/QueryBasedExtractor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.Iterator;
import java.util.List;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.MDC;
import com.google.common.annotations.VisibleForTesting;
import com.google.gson.Gson;
import com.google.gson.JsonObject;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.source.extractor.DataRecordException;
import org.apache.gobblin.source.extractor.Extractor;
import org.apache.gobblin.source.extractor.exception.ExtractPrepareException;
import org.apache.gobblin.source.extractor.exception.HighWatermarkException;
import org.apache.gobblin.source.extractor.exception.RecordCountException;
import org.apache.gobblin.source.extractor.exception.SchemaException;
import org.apache.gobblin.source.extractor.partition.Partition;
import org.apache.gobblin.source.extractor.schema.ArrayDataType;
import org.apache.gobblin.source.extractor.schema.DataType;
import org.apache.gobblin.source.extractor.schema.EnumDataType;
import org.apache.gobblin.source.extractor.schema.MapDataType;
import org.apache.gobblin.source.extractor.utils.Utils;
import org.apache.gobblin.source.extractor.watermark.Predicate;
import org.apache.gobblin.source.extractor.watermark.WatermarkPredicate;
import org.apache.gobblin.source.extractor.watermark.WatermarkType;
import org.apache.gobblin.source.workunit.WorkUnit;
/**
* An implementation of common extractor for query based sources.
*
* @param <D> type of data record
* @param <S> type of schema
*/
@Slf4j
public abstract class QueryBasedExtractor<S, D> implements Extractor<S, D>, ProtocolSpecificLayer<S, D> {
  private static final Gson GSON = new Gson();

  protected final WorkUnitState workUnitState;
  protected final WorkUnit workUnit;
  // Source entity (table) and schema (database) names for this pull
  private final String entity;
  private final String schema;
  // The watermark partition this work unit covers
  private final Partition partition;

  // Whether there is (potentially) more data to fetch from the source
  private boolean fetchStatus = true;
  private S outputSchema;
  private long sourceRecordCount = 0;
  private long highWatermark;

  // Iterator over the current batch of records; null until the initial pull happens
  private Iterator<D> iterator;
  protected final List<String> columnList = new ArrayList<>();
  @VisibleForTesting
  protected final List<Predicate> predicateList = new ArrayList<>();
  private S getOutputSchema() {
    return this.outputSchema;
  }

  protected void setOutputSchema(S outputSchema) {
    this.outputSchema = outputSchema;
  }

  private long getSourceRecordCount() {
    return this.sourceRecordCount;
  }

  // Whether a (further) pull from the source is still expected to yield records
  public boolean getFetchStatus() {
    return this.fetchStatus;
  }

  public void setFetchStatus(boolean fetchStatus) {
    this.fetchStatus = fetchStatus;
  }

  public void setHighWatermark(long highWatermark) {
    this.highWatermark = highWatermark;
  }

  private boolean isPullRequired() {
    return getFetchStatus();
  }

  // The first pull is detected by the iterator not having been created yet
  protected boolean isInitialPull() {
    return this.iterator == null;
  }
  /**
   * Creates a query-based extractor for the given work unit state, deserializing the watermark
   * {@link Partition} from the work unit and tagging the logging context (MDC) with a
   * human-readable work unit name.
   *
   * @param workUnitState state of the work unit this extractor serves
   */
  public QueryBasedExtractor(WorkUnitState workUnitState) {
    this.workUnitState = workUnitState;
    this.workUnit = this.workUnitState.getWorkunit();
    this.schema = this.workUnitState.getProp(ConfigurationKeys.SOURCE_QUERYBASED_SCHEMA);
    this.entity = this.workUnitState.getProp(ConfigurationKeys.SOURCE_ENTITY);
    partition = Partition.deserialize(workUnit);
    MDC.put("tableName", getWorkUnitName());
  }
private String getWorkUnitName() {
StringBuilder sb = new StringBuilder();
sb.append("[");
sb.append(StringUtils.stripToEmpty(this.workUnitState.getProp(ConfigurationKeys.SOURCE_QUERYBASED_SCHEMA)));
sb.append("_");
sb.append(StringUtils.stripToEmpty(this.workUnitState.getProp(ConfigurationKeys.SOURCE_ENTITY)));
sb.append("_");
String id = this.workUnitState.getId();
int seqIndex = id.lastIndexOf("_", id.length());
if (seqIndex > 0) {
String timeSeqStr = id.substring(0, seqIndex);
int timeIndex = timeSeqStr.lastIndexOf("_", timeSeqStr.length());
if (timeIndex > 0) {
sb.append(id.substring(timeIndex + 1));
}
}
sb.append("]");
return sb.toString();
}
  /**
   * {@inheritDoc}
   *
   * <p>Pulls records lazily: the first call builds the initial iterator (optionally removing
   * the data-pull upper bounds for the last work unit), and whenever the current iterator is
   * exhausted the next batch is fetched until the source returns no further iterator.</p>
   *
   * @param reuse ignored (deprecated)
   * @return the next data record, or {@code null} when there is nothing left to read
   * @throws DataRecordException if fetching records from the source fails
   */
  @Override
  public D readRecord(@Deprecated D reuse) throws DataRecordException, IOException {
    if (!this.isPullRequired()) {
      log.info("No more records to read");
      return null;
    }

    D nextElement = null;

    try {
      if (isInitialPull()) {
        log.info("Initial pull");
        if (shouldRemoveDataPullUpperBounds()) {
          this.removeDataPullUpperBounds();
        }
        this.iterator = this.getIterator();
      }

      if (this.iterator.hasNext()) {
        nextElement = this.iterator.next();

        // Eagerly look ahead: when this batch is drained, fetch the next one now so the
        // fetch status can be flipped off as soon as the source is exhausted
        if (!this.iterator.hasNext()) {
          log.debug("Getting next pull");
          this.iterator = this.getIterator();
          if (this.iterator == null) {
            this.setFetchStatus(false);
          }
        }
      }
    } catch (Exception e) {
      throw new DataRecordException("Failed to get records using rest api; error - " + e.getMessage(), e);
    }

    return nextElement;
  }
/**
* Check if it's appropriate to remove data pull upper bounds in the last work unit, fetching as much data as possible
* from the source. As between the time when data query was created and that was executed, there might be some
* new data generated in the source. Removing the upper bounds will help us grab the new data.
*
* Note: It's expected that there might be some duplicate data between runs because of removing the upper bounds
*
* @return should remove or not
*/
private boolean shouldRemoveDataPullUpperBounds() {
if (!this.workUnitState.getPropAsBoolean(ConfigurationKeys.SOURCE_QUERYBASED_ALLOW_REMOVE_UPPER_BOUNDS, true)) {
return false;
}
// Only consider the last work unit
if (!partition.isLastPartition()) {
return false;
}
// Don't remove if user specifies one or is recorded in previous run
if (partition.getHasUserSpecifiedHighWatermark() ||
this.workUnitState.getProp(ConfigurationKeys.WORK_UNIT_STATE_ACTUAL_HIGH_WATER_MARK_KEY) != null) {
return false;
}
return true;
}
/**
* Remove all upper bounds in the predicateList used for pulling data
*/
private void removeDataPullUpperBounds() {
log.info("Removing data pull upper bound for last work unit");
Iterator<Predicate> it = predicateList.iterator();
while (it.hasNext()) {
Predicate predicate = it.next();
if (predicate.getType() == Predicate.PredicateType.HWM) {
log.info("Remove predicate: " + predicate.condition);
it.remove();
}
}
}
/**
* Get iterator from protocol specific api if is.specific.api.active is false
* Get iterator from source specific api if is.specific.api.active is true
* @return iterator
*/
private Iterator<D> getIterator() throws DataRecordException, IOException {
if (Boolean.valueOf(this.workUnitState.getProp(ConfigurationKeys.SOURCE_QUERYBASED_IS_SPECIFIC_API_ACTIVE))) {
return this.getRecordSetFromSourceApi(this.schema, this.entity, this.workUnit, this.predicateList);
}
return this.getRecordSet(this.schema, this.entity, this.workUnit, this.predicateList);
}
  /**
   * Gets the source record count computed during {@link #build()}.
   *
   * @return expected record count, or -1 if count calculation was skipped
   */
  @Override
  public long getExpectedRecordCount() {
    return this.getSourceRecordCount();
  }

  /**
   * Gets the schema (metadata) corresponding to the data records.
   *
   * @return schema
   */
  @Override
  public S getSchema() {
    return this.getOutputSchema();
  }

  /**
   * Gets the high watermark of the current pull.
   *
   * @return high watermark
   */
  @Override
  public long getHighWatermark() {
    return this.highWatermark;
  }
  /**
   * Persists the actual high watermark of this pull into the work unit state and closes the
   * underlying source connection. Connection-close failures are logged but not rethrown, so
   * the watermark update always takes effect.
   */
  @Override
  public void close() {
    log.info("Updating the current state high water mark with " + this.highWatermark);
    this.workUnitState.setActualHighWatermark(new LongWatermark(this.highWatermark));
    try {
      this.closeConnection();
    } catch (Exception e) {
      log.error("Failed to close the extractor", e);
    }
  }
/**
* @return full dump or not
*/
public boolean isFullDump() {
return Boolean.valueOf(this.workUnitState.getProp(ConfigurationKeys.EXTRACT_IS_FULL_KEY));
}
  /**
   * Prepares this extractor for reading: extracts source metadata/schema, computes the runtime
   * high watermark, builds the watermark range predicates, and (unless skipped via
   * configuration) fetches the expected source record count.
   *
   * @return this extractor, fully initialized
   * @throws ExtractPrepareException if any step of the preparation fails
   */
  public Extractor<S, D> build() throws ExtractPrepareException {
    String watermarkColumn = this.workUnitState.getProp(ConfigurationKeys.EXTRACT_DELTA_FIELDS_KEY);
    long lwm = partition.getLowWatermark();
    long hwm = partition.getHighWatermark();
    log.info("Low water mark: " + lwm + "; and High water mark: " + hwm);

    WatermarkType watermarkType;
    if (StringUtils.isBlank(this.workUnitState.getProp(ConfigurationKeys.SOURCE_QUERYBASED_WATERMARK_TYPE))) {
      watermarkType = null;
    } else {
      watermarkType = WatermarkType
          .valueOf(this.workUnitState.getProp(ConfigurationKeys.SOURCE_QUERYBASED_WATERMARK_TYPE).toUpperCase());
    }

    log.info("Source Entity is " + this.entity);
    try {
      this.setTimeOut(
          this.workUnitState.getPropAsInt(ConfigurationKeys.SOURCE_CONN_TIMEOUT, ConfigurationKeys.DEFAULT_CONN_TIMEOUT));
      this.extractMetadata(this.schema, this.entity, this.workUnit);

      if (StringUtils.isNotBlank(watermarkColumn)) {
        if (partition.isLastPartition()) {
          // Get a more accurate high watermark from the source
          long adjustedHighWatermark = this.getLatestWatermark(watermarkColumn, watermarkType, lwm, hwm);
          log.info("High water mark from source: " + adjustedHighWatermark);
          // If the source reports a finer high watermark, then consider the same as runtime high watermark.
          // Else, consider the low watermark as high water mark(with no delta).i.e, don't move the pointer
          if (adjustedHighWatermark == ConfigurationKeys.DEFAULT_WATERMARK_VALUE) {
            adjustedHighWatermark = getLowWatermarkWithNoDelta(lwm);
          }
          this.highWatermark = adjustedHighWatermark;
        } else {
          this.highWatermark = hwm;
        }

        log.info("High water mark for the current run: " + highWatermark);
        this.setRangePredicates(watermarkColumn, watermarkType, lwm, highWatermark);
      }

      // if it is set to true, skip count calculation and set source count to -1
      if (!Boolean.valueOf(this.workUnitState.getProp(ConfigurationKeys.SOURCE_QUERYBASED_SKIP_COUNT_CALC))) {
        this.sourceRecordCount = this.getSourceCount(this.schema, this.entity, this.workUnit, this.predicateList);
      } else {
        log.info("Skip count calculation");
        this.sourceRecordCount = -1;
      }

      if (this.sourceRecordCount == 0) {
        log.info("Record count is 0; Setting fetch status to false to skip readRecord()");
        this.setFetchStatus(false);
      }
    } catch (SchemaException e) {
      throw new ExtractPrepareException("Failed to get schema for this object; error - " + e.getMessage(), e);
    } catch (HighWatermarkException e) {
      throw new ExtractPrepareException("Failed to get high watermark; error - " + e.getMessage(), e);
    } catch (RecordCountException e) {
      throw new ExtractPrepareException("Failed to get record count; error - " + e.getMessage(), e);
    } catch (Exception e) {
      throw new ExtractPrepareException("Failed to prepare the extract build; error - " + e.getMessage(), e);
    }
    return this;
  }
  /**
   * Reverses the delta that was applied to the low watermark, so the watermark pointer does not
   * advance when the source reports no new high watermark.
   *
   * @param lwm low watermark of this run
   * @return the low watermark with the watermark-type-specific delta removed, or the default
   *         watermark value when the low watermark is itself the default
   */
  private long getLowWatermarkWithNoDelta(long lwm) {
    if (lwm == ConfigurationKeys.DEFAULT_WATERMARK_VALUE) {
      return ConfigurationKeys.DEFAULT_WATERMARK_VALUE;
    }

    String watermarkType = this.workUnitState.getProp(ConfigurationKeys.SOURCE_QUERYBASED_WATERMARK_TYPE, "TIMESTAMP");
    WatermarkType wmType = WatermarkType.valueOf(watermarkType.toUpperCase());
    int deltaNum = new WatermarkPredicate(wmType).getDeltaNumForNextWatermark();

    switch (wmType) {
      case SIMPLE:
        return lwm - deltaNum;
      default:
        // Date-based watermarks: subtract the delta (in seconds) from the yyyyMMddHHmmss value
        Date lowWaterMarkDate = Utils.toDate(lwm, "yyyyMMddHHmmss");
        return Long
            .parseLong(Utils.dateToString(Utils.addSecondsToDate(lowWaterMarkDate, deltaNum * -1), "yyyyMMddHHmmss"));
    }
  }
  /**
   * Queries the source for the actual latest watermark within the estimated range, unless high
   * watermark calculation is configured to be skipped, in which case the estimated high
   * watermark is returned as-is.
   *
   * @param watermarkColumn watermark column name
   * @param watermarkType watermark type
   * @param lwmValue estimated low watermark value
   * @param hwmValue estimated high watermark value
   * @return latest watermark reported by the source, or {@code hwmValue} when skipped
   * @throws HighWatermarkException if the watermark query fails
   * @throws IOException on communication failures with the source
   */
  private long getLatestWatermark(String watermarkColumn, WatermarkType watermarkType, long lwmValue, long hwmValue)
      throws HighWatermarkException, IOException {

    if (!Boolean.valueOf(this.workUnitState.getProp(ConfigurationKeys.SOURCE_QUERYBASED_SKIP_HIGH_WATERMARK_CALC))) {
      log.info("Getting high watermark");
      List<Predicate> list = new ArrayList<>();
      WatermarkPredicate watermark = new WatermarkPredicate(watermarkColumn, watermarkType);
      // Bound operators mirror the partition's inclusiveness; the last partition is always
      // inclusive on the upper end
      String lwmOperator = partition.isLowWatermarkInclusive() ? ">=" : ">";
      String hwmOperator = (partition.isLastPartition() || partition.isHighWatermarkInclusive()) ? "<=" : "<";

      Predicate lwmPredicate = watermark.getPredicate(this, lwmValue, lwmOperator, Predicate.PredicateType.LWM);
      Predicate hwmPredicate = watermark.getPredicate(this, hwmValue, hwmOperator, Predicate.PredicateType.HWM);
      if (lwmPredicate != null) {
        list.add(lwmPredicate);
      }
      if (hwmPredicate != null) {
        list.add(hwmPredicate);
      }

      return this.getMaxWatermark(this.schema, this.entity, watermarkColumn, list,
          watermark.getWatermarkSourceFormat(this));
    }

    return hwmValue;
  }
  /**
   * Builds the range predicates for the watermark column (and, for hourly extracts, the hour
   * column) and adds them to the predicate list.
   *
   * @param watermarkColumn name of the column used as watermark
   * @param watermarkType watermark type
   * @param lwmValue estimated low watermark value
   * @param hwmValue estimated high watermark value
   */
  private void setRangePredicates(String watermarkColumn, WatermarkType watermarkType, long lwmValue, long hwmValue) {
    log.debug("Getting range predicates");
    // Bound operators mirror the partition's inclusiveness; the last partition is always
    // inclusive on the upper end
    String lwmOperator = partition.isLowWatermarkInclusive() ? ">=" : ">";
    String hwmOperator = (partition.isLastPartition() || partition.isHighWatermarkInclusive()) ? "<=" : "<";

    WatermarkPredicate watermark = new WatermarkPredicate(watermarkColumn, watermarkType);
    this.addPredicates(watermark.getPredicate(this, lwmValue, lwmOperator, Predicate.PredicateType.LWM));
    this.addPredicates(watermark.getPredicate(this, hwmValue, hwmOperator, Predicate.PredicateType.HWM));

    if (Boolean.valueOf(this.workUnitState.getProp(ConfigurationKeys.SOURCE_QUERYBASED_IS_HOURLY_EXTRACT))) {
      String hourColumn = this.workUnitState.getProp(ConfigurationKeys.SOURCE_QUERYBASED_HOUR_COLUMN);
      if (StringUtils.isNotBlank(hourColumn)) {
        WatermarkPredicate hourlyWatermark = new WatermarkPredicate(hourColumn, WatermarkType.HOUR);
        this.addPredicates(hourlyWatermark.getPredicate(this, lwmValue, lwmOperator, Predicate.PredicateType.LWM));
        this.addPredicates(hourlyWatermark.getPredicate(this, hwmValue, hwmOperator, Predicate.PredicateType.HWM));
      }
    }
  }
/**
* add predicate to the predicate list
* @param predicate watermark predicate(watermark column,type,format and condition)
*/
private void addPredicates(Predicate predicate) {
if (predicate != null) {
this.predicateList.add(predicate);
}
}
/**
* @param watermarkColumn list of watermark columns
* @param columnName name to search for
* @return true, if column name is part of water mark columns. otherwise, return false
*/
protected boolean isWatermarkColumn(String watermarkColumn, String columnName) {
if (columnName != null) {
columnName = columnName.toLowerCase();
}
if (StringUtils.isNotBlank(watermarkColumn)) {
List<String> waterMarkColumnList = Arrays.asList(watermarkColumn.toLowerCase().split(","));
if (waterMarkColumnList.contains(columnName)) {
return true;
}
}
return false;
}
/**
* @param watermarkColumn list of watermark columns
* @return true, if there are multiple water mark columns. otherwise, return false
*/
protected boolean hasMultipleWatermarkColumns(String watermarkColumn) {
if (StringUtils.isBlank(watermarkColumn)) {
return false;
}
return Arrays.asList(watermarkColumn.toLowerCase().split(",")).size() > 1;
}
/**
 * Looks up the 1-based position of a column within the comma-separated primary
 * key column list (case-insensitive, tokens not trimmed).
 *
 * @param primarykeyColumn comma-separated list of primary key columns
 * @param columnName column name to search for; may be null
 * @return 1-based index of the column if present; 0 if absent or the list is blank
 */
protected int getPrimarykeyIndex(String primarykeyColumn, String columnName) {
  if (StringUtils.isBlank(primarykeyColumn)) {
    return 0;
  }
  String needle = (columnName == null) ? null : columnName.toLowerCase();
  String[] keys = primarykeyColumn.toLowerCase().split(",");
  for (int i = 0; i < keys.length; i++) {
    if (keys[i].equals(needle)) {
      // First match wins, mirroring List.indexOf semantics.
      return i + 1;
    }
  }
  return 0;
}
/**
 * Checks whether a column is part of the known metadata columns.
 * When the metadata column check is disabled via configuration, every column
 * is treated as a metadata column.
 *
 * @param columnName column name to search for; must be non-null (it is trimmed and lower-cased)
 * @param columnList list of metadata columns, expected to be lower-cased
 * @return true if the check is disabled or the column is in columnList, false otherwise
 */
protected boolean isMetadataColumn(String columnName, List<String> columnList) {
  boolean columnCheckEnabled = Boolean.valueOf(
      this.workUnitState.getProp(ConfigurationKeys.SOURCE_QUERYBASED_IS_METADATA_COLUMN_CHECK_ENABLED,
          ConfigurationKeys.DEFAULT_SOURCE_QUERYBASED_IS_METADATA_COLUMN_CHECK_ENABLED));
  // Disabled check => accept everything.
  return !columnCheckEnabled || columnList.contains(columnName.trim().toLowerCase());
}
/**
 * Converts a source data type into its target representation as a JsonObject.
 * Unknown source types fall back to "string".
 *
 * @param columnName column name (used only for enum types)
 * @param type source data type, looked up in {@link #getDataTypeMap()}
 * @param elementType element type for map/array types
 * @param enumSymbols enum symbols for enum types
 * @return the converted data type serialized to a JsonObject
 */
protected JsonObject convertDataType(String columnName, String type, String elementType, List<String> enumSymbols) {
  String dataType = this.getDataTypeMap().get(type);
  if (dataType == null) {
    dataType = "string";
  }
  DataType convertedDataType;
  switch (dataType) {
    case "map":
      convertedDataType = new MapDataType(dataType, elementType);
      break;
    case "array":
      convertedDataType = new ArrayDataType(dataType, elementType);
      break;
    case "enum":
      convertedDataType = new EnumDataType(dataType, columnName, enumSymbols);
      break;
    default:
      convertedDataType = new DataType(dataType);
      break;
  }
  // Round-trip through GSON to obtain the JSON form of the typed object.
  return GSON.fromJson(GSON.toJson(convertedDataType), JsonObject.class).getAsJsonObject();
}
/**
 * Reports whether the given predicate list contains any predicates.
 *
 * @param predicateList predicate list; may be null
 * @return true if the list is non-null and non-empty, false otherwise
 */
protected boolean isPredicateExists(List<Predicate> predicateList) {
  return predicateList != null && !predicateList.isEmpty();
}
}
| 3,011 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/extract/Command.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract;
import java.util.Collection;
import java.util.List;
/**
 * Interface for a source command (e.g. REST, SFTP, etc.)
 * Specifies a Command along with a CommandType and a list of parameters
 * @author stakiar
 */
public interface Command {
  /** @return the list of parameters associated with this command */
  public List<String> getParams();

  /** @return the source-specific type of this command (e.g. GET/PUT for REST) */
  public CommandType getCommandType();

  /**
   * Populates this command with the given parameters and command type.
   *
   * @param params parameters for the command
   * @param cmd the command type
   * @return the built command — presumably {@code this}, for chaining; TODO(review): confirm with implementations
   */
  public Command build(Collection<String> params, CommandType cmd);
}
| 3,012 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/extract/QueryBasedSource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.regex.Pattern;
import org.apache.commons.lang3.StringUtils;
import org.apache.gobblin.metrics.event.TimingEvent;
import org.slf4j.MDC;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import org.apache.gobblin.config.client.ConfigClient;
import org.apache.gobblin.config.client.ConfigClientCache;
import org.apache.gobblin.config.client.api.ConfigStoreFactoryDoesNotExistsException;
import org.apache.gobblin.config.client.api.VersionStabilityPolicy;
import org.apache.gobblin.config.store.api.ConfigStoreCreationException;
import org.apache.gobblin.config.store.api.VersionDoesNotExistException;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.configuration.WorkUnitState.WorkingState;
import org.apache.gobblin.metrics.event.lineage.LineageInfo;
import org.apache.gobblin.source.extractor.JobCommitPolicy;
import org.apache.gobblin.source.extractor.partition.Partition;
import org.apache.gobblin.source.extractor.partition.Partitioner;
import org.apache.gobblin.source.extractor.utils.Utils;
import org.apache.gobblin.source.workunit.Extract;
import org.apache.gobblin.source.workunit.Extract.TableType;
import org.apache.gobblin.source.workunit.MultiWorkUnit;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.DatasetFilterUtils;
import org.apache.gobblin.util.PathUtils;
import org.apache.gobblin.util.dataset.DatasetUtils;
import lombok.Data;
import lombok.extern.slf4j.Slf4j;
/**
 * A base implementation of {@link org.apache.gobblin.source.Source} for
 * query-based sources.
 */
@Slf4j
public abstract class QueryBasedSource<S, D> extends AbstractSource<S, D> {

  public static final String ENTITY_BLACKLIST = "entity.blacklist";
  public static final String ENTITY_WHITELIST = "entity.whitelist";

  public static final String SOURCE_OBTAIN_TABLE_PROPS_FROM_CONFIG_STORE =
      "source.obtain_table_props_from_config_store";
  public static final boolean DEFAULT_SOURCE_OBTAIN_TABLE_PROPS_FROM_CONFIG_STORE = false;
  private static final String QUERY_BASED_SOURCE = "query_based_source";

  public static final String WORK_UNIT_STATE_VERSION_KEY = "source.querybased.workUnitState.version";
  /**
   * WorkUnit Version 3:
   * SOURCE_ENTITY = as specified in job config
   * EXTRACT_TABLE_NAME_KEY = as specified in job config or sanitized version of SOURCE_ENTITY
   * WorkUnit Version 2 (implicit):
   * SOURCE_ENTITY = sanitized version of SOURCE_ENTITY in job config
   * EXTRACT_TABLE_NAME_KEY = as specified in job config
   * WorkUnit Version 1 (implicit):
   * SOURCE_ENTITY = as specified in job config
   * EXTRACT_TABLE_NAME_KEY = as specified in job config
   */
  public static final Integer CURRENT_WORK_UNIT_STATE_VERSION = 3;

  protected Optional<LineageInfo> lineageInfo;

  /** A class that encapsulates a source entity (aka dataset) to be processed */
  @Data
  public static final class SourceEntity {
    /**
     * The name of the source entity (as specified in the source) to be processed. For example,
     * this can be a table name.
     */
    private final String sourceEntityName;
    /**
     * The destination table name. This is explicitly specified in the config or is derived from
     * the sourceEntityName.
     */
    private final String destTableName;

    /** A string that identifies the source entity */
    public String getDatasetName() {
      return sourceEntityName;
    }

    static String sanitizeEntityName(String entity) {
      return Utils.escapeSpecialCharacters(entity, ConfigurationKeys.ESCAPE_CHARS_IN_TABLE_NAME, "_");
    }

    public static SourceEntity fromSourceEntityName(String sourceEntityName) {
      return new SourceEntity(sourceEntityName, sanitizeEntityName(sourceEntityName));
    }

    /**
     * Builds a SourceEntity from state, preferring SOURCE_ENTITY over
     * EXTRACT_TABLE_NAME_KEY; absent if neither property is set.
     */
    public static Optional<SourceEntity> fromState(State state) {
      String sourceEntityName;
      String destTableName;
      if (state.contains(ConfigurationKeys.SOURCE_ENTITY)) {
        sourceEntityName = state.getProp(ConfigurationKeys.SOURCE_ENTITY);
        destTableName = state.getProp(ConfigurationKeys.EXTRACT_TABLE_NAME_KEY,
            sanitizeEntityName(sourceEntityName));
      } else if (state.contains(ConfigurationKeys.EXTRACT_TABLE_NAME_KEY)) {
        destTableName = state.getProp(ConfigurationKeys.EXTRACT_TABLE_NAME_KEY);
        sourceEntityName = destTableName;
      } else {
        return Optional.absent();
      }
      return Optional.of(new SourceEntity(sourceEntityName, destTableName));
    }

    // Equality is deliberately based solely on the dataset name (not destTableName);
    // hashCode below is kept consistent with this.
    @Override
    public boolean equals(Object obj) {
      if (this == obj) {
        return true;
      }
      if (obj == null) {
        return false;
      }
      if (getClass() != obj.getClass()) {
        return false;
      }
      SourceEntity other = (SourceEntity) obj;
      if (getDatasetName() == null) {
        if (other.getDatasetName() != null) {
          return false;
        }
      } else if (!getDatasetName().equals(other.getDatasetName())) {
        return false;
      }
      return true;
    }

    @Override
    public int hashCode() {
      final int prime = 31;
      int result = 1;
      result = prime * result + ((getDatasetName() == null) ? 0 : getDatasetName().hashCode());
      return result;
    }
  }

  @Override
  public List<WorkUnit> getWorkunits(SourceState state) {
    initLogger(state);
    lineageInfo = LineageInfo.getLineageInfo(state.getBroker());

    List<WorkUnit> workUnits = Lists.newArrayList();

    Set<SourceEntity> entities = getFilteredSourceEntities(state);

    Map<SourceEntity, State> tableSpecificPropsMap = shouldObtainTablePropsFromConfigStore(state)
        ? getTableSpecificPropsFromConfigStore(entities, state)
        : getTableSpecificPropsFromState(entities, state);
    Map<SourceEntity, Long> prevWatermarksByTable = getPreviousWatermarksForAllTables(state);

    // Iterate over the union so that entities dropped from the current config but present in the
    // previous state still get an (empty) workunit that carries their watermark forward.
    for (SourceEntity sourceEntity : Sets.union(entities, prevWatermarksByTable.keySet())) {
      log.info("Source entity to be processed: {}, carry-over from previous state: {} ",
          sourceEntity, !entities.contains(sourceEntity));

      SourceState combinedState = getCombinedState(state, tableSpecificPropsMap.get(sourceEntity));
      long previousWatermark = prevWatermarksByTable.containsKey(sourceEntity) ?
          prevWatermarksByTable.get(sourceEntity)
          : ConfigurationKeys.DEFAULT_WATERMARK_VALUE;

      // If a table name exists in prevWatermarksByTable (i.e., it has a previous watermark) but does not exist
      // in tableNameToEntityMap, create an empty workunit for it, so that its previous watermark is preserved.
      // This is done by overriding the high watermark to be the same as the previous watermark.
      if (!entities.contains(sourceEntity)) {
        combinedState.setProp(ConfigurationKeys.SOURCE_QUERYBASED_END_VALUE, previousWatermark);
      }
      workUnits.addAll(generateWorkUnits(sourceEntity, combinedState, previousWatermark));
    }

    log.info("Total number of workunits for the current run: " + workUnits.size());
    List<WorkUnit> previousWorkUnits = this.getPreviousWorkUnitsForRetry(state);
    log.info("Total number of incomplete tasks from the previous run: " + previousWorkUnits.size());
    workUnits.addAll(previousWorkUnits);

    int numOfMultiWorkunits =
        state.getPropAsInt(ConfigurationKeys.MR_JOB_MAX_MAPPERS_KEY, ConfigurationKeys.DEFAULT_MR_JOB_MAX_MAPPERS);

    return pack(workUnits, numOfMultiWorkunits);
  }

  /**
   * Generates one {@link WorkUnit} per partition of the given source entity, and records the
   * overall low/high watermarks of the run as flow event list properties.
   */
  protected List<WorkUnit> generateWorkUnits(SourceEntity sourceEntity, SourceState state, long previousWatermark) {
    List<WorkUnit> workUnits = Lists.newArrayList();

    String nameSpaceName = state.getProp(ConfigurationKeys.EXTRACT_NAMESPACE_NAME_KEY);
    TableType tableType =
        TableType.valueOf(state.getProp(ConfigurationKeys.EXTRACT_TABLE_TYPE_KEY).toUpperCase());

    List<Partition> partitions = new Partitioner(state).getPartitionList(previousWatermark);
    Collections.sort(partitions, Partitioner.ascendingComparator);

    // {@link ConfigurationKeys.EXTRACT_TABLE_NAME_KEY} specify the output path for Extract
    String outputTableName = sourceEntity.getDestTableName();

    log.info("Create extract output with table name is " + outputTableName);
    Extract extract = createExtract(tableType, nameSpaceName, outputTableName);

    // Setting current time for the full extract
    if (Boolean.valueOf(state.getProp(ConfigurationKeys.EXTRACT_IS_FULL_KEY))) {
      extract.setFullTrue(System.currentTimeMillis());
    }

    // Track the extreme watermarks across all partitions for flow event reporting.
    Optional<Long> highestWaterMark = Optional.absent();
    Optional<Long> lowestWaterMark = Optional.absent();
    for (Partition partition : partitions) {
      WorkUnit workunit = WorkUnit.create(extract);
      workunit.setProp(ConfigurationKeys.SOURCE_ENTITY, sourceEntity.getSourceEntityName());
      workunit.setProp(ConfigurationKeys.EXTRACT_TABLE_NAME_KEY, sourceEntity.getDestTableName());
      workunit.setProp(WORK_UNIT_STATE_VERSION_KEY, CURRENT_WORK_UNIT_STATE_VERSION);
      addLineageSourceInfo(state, sourceEntity, workunit);
      partition.serialize(workunit);
      workUnits.add(workunit);
      highestWaterMark = highestWaterMark.isPresent() ?
          highestWaterMark.transform(hw -> Math.max(hw, partition.getHighWatermark())) : Optional.of(partition.getHighWatermark());
      lowestWaterMark = lowestWaterMark.isPresent() ?
          lowestWaterMark.transform(lw -> Math.min(lw, partition.getLowWatermark())) : Optional.of(partition.getLowWatermark());
    }

    if (highestWaterMark.isPresent() && lowestWaterMark.isPresent()) {
      state.appendToListProp(TimingEvent.FlowEventConstants.HIGH_WATERMARK_FIELD, String.format("%s.%s: %s", sourceEntity.getDatasetName(), sourceEntity.destTableName, highestWaterMark.get()));
      state.appendToListProp(TimingEvent.FlowEventConstants.LOW_WATERMARK_FIELD, String.format("%s.%s: %s", sourceEntity.getDatasetName(), sourceEntity.destTableName, lowestWaterMark.get()));
    }

    return workUnits;
  }

  /** Hook for subclasses to attach lineage source information to a workunit. */
  protected void addLineageSourceInfo(SourceState sourceState, SourceEntity entity, WorkUnit workUnit) {
    // Does nothing by default
  }

  /** @return source entities that survive the configured blacklist/whitelist filters */
  protected Set<SourceEntity> getFilteredSourceEntities(SourceState state) {
    Set<SourceEntity> unfilteredEntities = getSourceEntities(state);
    return getFilteredSourceEntitiesHelper(state, unfilteredEntities);
  }

  static Set<SourceEntity> getFilteredSourceEntitiesHelper(SourceState state, Iterable<SourceEntity> unfilteredEntities) {
    Set<SourceEntity> entities = new HashSet<>();
    List<Pattern> blacklist = DatasetFilterUtils.getPatternList(state, ENTITY_BLACKLIST);
    List<Pattern> whitelist = DatasetFilterUtils.getPatternList(state, ENTITY_WHITELIST);
    for (SourceEntity entity : unfilteredEntities) {
      if (DatasetFilterUtils.survived(entity.getSourceEntityName(), blacklist, whitelist)) {
        entities.add(entity);
      }
    }
    return entities;
  }

  /** Resolves per-dataset property overrides from the job state for each entity. */
  public static Map<SourceEntity, State> getTableSpecificPropsFromState(
      Iterable<SourceEntity> entities,
      SourceState state) {
    Map<String, SourceEntity> sourceEntityByName = new HashMap<>();
    for (SourceEntity entity : entities) {
      sourceEntityByName.put(entity.getDatasetName(), entity);
    }
    Map<String, State> datasetProps =
        DatasetUtils.getDatasetSpecificProps(sourceEntityByName.keySet(), state);
    Map<SourceEntity, State> res = new HashMap<>();
    for (Map.Entry<String, State> entry : datasetProps.entrySet()) {
      res.put(sourceEntityByName.get(entry.getKey()), entry.getValue());
    }
    return res;
  }

  protected Set<SourceEntity> getSourceEntities(State state) {
    return getSourceEntitiesHelper(state);
  }

  /**
   * Derives the set of source entities from configuration, preferring SOURCE_ENTITIES
   * over the single SOURCE_ENTITY / EXTRACT_TABLE_NAME_KEY properties.
   *
   * @throws IllegalStateException if none of the relevant properties is set
   */
  static Set<SourceEntity> getSourceEntitiesHelper(State state) {
    if (state.contains(ConfigurationKeys.SOURCE_ENTITIES)) {
      log.info("Using entity names in " + ConfigurationKeys.SOURCE_ENTITIES);
      HashSet<SourceEntity> res = new HashSet<>();
      for (String sourceEntityName : state.getPropAsList(ConfigurationKeys.SOURCE_ENTITIES)) {
        res.add(SourceEntity.fromSourceEntityName(sourceEntityName));
      }
      return res;
    } else if (state.contains(ConfigurationKeys.SOURCE_ENTITY) ||
               state.contains(ConfigurationKeys.EXTRACT_TABLE_NAME_KEY)) {
      Optional<SourceEntity> sourceEntity = SourceEntity.fromState(state);
      // Guaranteed to be present
      log.info("Using entity name in " + sourceEntity.get());
      return ImmutableSet.of(sourceEntity.get());
    }
    throw new IllegalStateException(String.format("One of the following properties must be specified: %s, %s.",
        ConfigurationKeys.SOURCE_ENTITIES, ConfigurationKeys.SOURCE_ENTITY));
  }

  private static boolean shouldObtainTablePropsFromConfigStore(SourceState state) {
    return state.getPropAsBoolean(SOURCE_OBTAIN_TABLE_PROPS_FROM_CONFIG_STORE,
        DEFAULT_SOURCE_OBTAIN_TABLE_PROPS_FROM_CONFIG_STORE);
  }

  /** Resolves per-table property overrides from the config store for each table. */
  private static Map<SourceEntity, State> getTableSpecificPropsFromConfigStore(
      Collection<SourceEntity> tables, State state) {
    ConfigClient client = ConfigClientCache.getClient(VersionStabilityPolicy.STRONG_LOCAL_STABILITY);
    String configStoreUri = state.getProp(ConfigurationKeys.CONFIG_MANAGEMENT_STORE_URI);
    Preconditions.checkNotNull(configStoreUri);

    Map<SourceEntity, State> result = Maps.newHashMap();
    for (SourceEntity table : tables) {
      try {
        result.put(table, ConfigUtils.configToState(
            client.getConfig(PathUtils.combinePaths(configStoreUri, QUERY_BASED_SOURCE, table.getDatasetName()).toUri())));
      } catch (VersionDoesNotExistException | ConfigStoreFactoryDoesNotExistsException
          | ConfigStoreCreationException e) {
        throw new RuntimeException("Unable to get table config for " + table, e);
      }
    }
    return result;
  }

  /** Overlays table-specific properties on top of the job-level state (no-op when null). */
  private static SourceState getCombinedState(SourceState state, State tableSpecificState) {
    if (tableSpecificState == null) {
      return state;
    }
    SourceState combinedState =
        new SourceState(state, state.getPreviousDatasetStatesByUrns(), state.getPreviousWorkUnitStates());
    combinedState.addAll(tableSpecificState);
    return combinedState;
  }

  /**
   * Pack the list of {@code WorkUnit}s into {@code MultiWorkUnit}s.
   *
   * TODO: this is currently a simple round-robin packing. More sophisticated bin packing may be necessary
   * if the round-robin approach leads to mapper skew.
   */
  private static List<WorkUnit> pack(List<WorkUnit> workUnits, int numOfMultiWorkunits) {
    Preconditions.checkArgument(numOfMultiWorkunits > 0);

    if (workUnits.size() <= numOfMultiWorkunits) {
      return workUnits;
    }
    List<WorkUnit> result = Lists.newArrayListWithCapacity(numOfMultiWorkunits);
    for (int i = 0; i < numOfMultiWorkunits; i++) {
      result.add(MultiWorkUnit.createEmpty());
    }
    for (int i = 0; i < workUnits.size(); i++) {
      ((MultiWorkUnit) result.get(i % numOfMultiWorkunits)).addWorkUnit(workUnits.get(i));
    }
    return result;
  }

  @Override
  public void shutdown(SourceState state) {}

  /**
   * For each table, if job commit policy is to commit on full success, and the table has failed tasks in the
   * previous run, return the lowest low watermark among all previous {@code WorkUnitState}s of the table.
   * Otherwise, return the highest high watermark among all previous {@code WorkUnitState}s of the table.
   */
  static Map<SourceEntity, Long> getPreviousWatermarksForAllTables(SourceState state) {
    Map<SourceEntity, Long> result = Maps.newHashMap();
    Map<SourceEntity, Long> prevLowWatermarksByTable = Maps.newHashMap();
    Map<SourceEntity, Long> prevActualHighWatermarksByTable = Maps.newHashMap();
    Set<SourceEntity> tablesWithFailedTasks = Sets.newHashSet();
    Set<SourceEntity> tablesWithNoUpdatesOnPreviousRun = Sets.newHashSet();
    boolean commitOnFullSuccess = JobCommitPolicy.getCommitPolicy(state) == JobCommitPolicy.COMMIT_ON_FULL_SUCCESS;

    for (WorkUnitState previousWus : state.getPreviousWorkUnitStates()) {
      Optional<SourceEntity> sourceEntity = SourceEntity.fromState(previousWus);
      if (!sourceEntity.isPresent()) {
        log.warn("Missing source entity for WorkUnit state: " + previousWus);
        continue;
      }
      SourceEntity table = sourceEntity.get();

      long lowWm = ConfigurationKeys.DEFAULT_WATERMARK_VALUE;
      LongWatermark waterMarkObj = previousWus.getWorkunit().getLowWatermark(LongWatermark.class);
      // new job state file(version 0.2.1270) , water mark format:
      // "watermark.interval.value": "{\"low.watermark.to.json\":{\"value\":20160101000000},\"expected.watermark.to.json\":{\"value\":20160715230234}}",
      if (waterMarkObj != null) {
        lowWm = waterMarkObj.getValue();
      }
      // job state file(version 0.2.805)
      // "workunit.low.water.mark": "20160711000000",
      // "workunit.state.runtime.high.water.mark": "20160716140338",
      else if (previousWus.getProperties().containsKey(ConfigurationKeys.WORK_UNIT_LOW_WATER_MARK_KEY)) {
        lowWm = Long.parseLong(previousWus.getProperties().getProperty(ConfigurationKeys.WORK_UNIT_LOW_WATER_MARK_KEY));
        log.warn("can not find low water mark in json format, getting value from " + ConfigurationKeys.WORK_UNIT_LOW_WATER_MARK_KEY + " low water mark " + lowWm);
      }
      if (!prevLowWatermarksByTable.containsKey(table)) {
        prevLowWatermarksByTable.put(table, lowWm);
      } else {
        prevLowWatermarksByTable.put(table, Math.min(prevLowWatermarksByTable.get(table), lowWm));
      }

      long highWm = ConfigurationKeys.DEFAULT_WATERMARK_VALUE;
      waterMarkObj = previousWus.getActualHighWatermark(LongWatermark.class);
      if (waterMarkObj != null) {
        highWm = waterMarkObj.getValue();
      } else if (previousWus.getProperties().containsKey(ConfigurationKeys.WORK_UNIT_STATE_RUNTIME_HIGH_WATER_MARK)) {
        highWm = Long.parseLong(previousWus.getProperties().getProperty(ConfigurationKeys.WORK_UNIT_STATE_RUNTIME_HIGH_WATER_MARK));
        log.warn("can not find high water mark in json format, getting value from " + ConfigurationKeys.WORK_UNIT_STATE_RUNTIME_HIGH_WATER_MARK + " high water mark " + highWm);
      }
      if (!prevActualHighWatermarksByTable.containsKey(table)) {
        prevActualHighWatermarksByTable.put(table, highWm);
      } else {
        prevActualHighWatermarksByTable.put(table, Math.max(prevActualHighWatermarksByTable.get(table), highWm));
      }

      if (commitOnFullSuccess && !isSuccessfulOrCommitted(previousWus)) {
        tablesWithFailedTasks.add(table);
      }

      if (!isAnyDataProcessed(previousWus)) {
        tablesWithNoUpdatesOnPreviousRun.add(table);
      }
    }

    for (Map.Entry<SourceEntity, Long> entry : prevLowWatermarksByTable.entrySet()) {
      if (tablesWithFailedTasks.contains(entry.getKey())) {
        log.info("Resetting low watermark to {} because previous run failed.", entry.getValue());
        result.put(entry.getKey(), entry.getValue());
      } else if (tablesWithNoUpdatesOnPreviousRun.contains(entry.getKey())
          && state.getPropAsBoolean(ConfigurationKeys.SOURCE_QUERYBASED_RESET_EMPTY_PARTITION_WATERMARK,
              ConfigurationKeys.DEFAULT_SOURCE_QUERYBASED_RESET_EMPTY_PARTITION_WATERMARK)) {
        // Fixed typo in log message ("watermakr" -> "watermark").
        log.info("Resetting low watermark to {} because previous run processed no data.", entry.getValue());
        result.put(entry.getKey(), entry.getValue());
      } else {
        result.put(entry.getKey(), prevActualHighWatermarksByTable.get(entry.getKey()));
      }
    }

    return result;
  }

  private static boolean isSuccessfulOrCommitted(WorkUnitState wus) {
    return wus.getWorkingState() == WorkingState.SUCCESSFUL || wus.getWorkingState() == WorkingState.COMMITTED;
  }

  private static boolean isAnyDataProcessed(WorkUnitState wus) {
    return wus.getPropAsLong(ConfigurationKeys.EXTRACTOR_ROWS_EXPECTED, 0) > 0;
  }

  /**
   * Initialize the logger.
   *
   * @param state
   *          Source state
   */
  private static void initLogger(SourceState state) {
    StringBuilder sb = new StringBuilder();
    sb.append("[");
    sb.append(StringUtils.stripToEmpty(state.getProp(ConfigurationKeys.SOURCE_QUERYBASED_SCHEMA)));
    sb.append("_");
    sb.append(StringUtils.stripToEmpty(state.getProp(ConfigurationKeys.SOURCE_ENTITY)));
    sb.append("]");
    MDC.put("sourceInfo", sb.toString());
  }
}
| 3,013 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/extract/ExtractType.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract;
/**
 * Different extract types
 */
public enum ExtractType {
  SNAPSHOT, // Used iff user wants highwatermark to be set to latest.
  APPEND_DAILY, // Used iff user wants highwatermark to be set to a fixed point, like CURRENTDATE - <backoff days>.
  APPEND_HOURLY, // Used iff user wants highwatermark to be set to a fixed point, like CURRENTHOUR - <backoff hours>.
  APPEND_BATCH // Append extract processed in batches; semantics left undocumented upstream — TODO(review): confirm and document.
}
| 3,014 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/extract/CommandOutput.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract;
import java.util.Map;
/**
 * Stores the output of a Command into a Map object
 * with types K and V
 * @author stakiar
 *
 * @param <K> the key type of the Map (a {@link Command} subtype)
 * @param <V> the value type of the Map
 */
public interface CommandOutput<K extends Command, V> {
  /** Stores the given command-to-output map as this object's results. */
  public void storeResults(Map<K, V> results);

  /** @return the stored command-to-output results */
  public Map<K, V> getResults();

  /** Stores the output value for a single command. */
  public void put(K key, V value);
}
| 3,015 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/extract/CommandType.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract;
/**
 * Marker interface implemented by each source to
 * track a list of their CommandTypes
 * (e.g. CD, LS for SFTP or
 * GET, PUT for REST)
 * @author stakiar
 */
public interface CommandType {
}
| 3,016 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/extract/ProtocolSpecificLayer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract;
import org.apache.gobblin.source.extractor.DataRecordException;
import org.apache.gobblin.source.extractor.watermark.WatermarkType;
import java.io.IOException;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import org.apache.gobblin.source.extractor.watermark.Predicate;
import org.apache.gobblin.source.extractor.exception.HighWatermarkException;
import org.apache.gobblin.source.extractor.exception.RecordCountException;
import org.apache.gobblin.source.extractor.exception.SchemaException;
import org.apache.gobblin.source.workunit.WorkUnit;
/**
* An interface for protocol extractors
*
* @param <D> type of data record
* @param <S> type of schema
*/
public interface ProtocolSpecificLayer<S, D> {
/**
* Extract metadata(schema) from the source
*
* @param source schema name
* @param source entity name
* @param work unit
* @throws SchemaException if there is anything wrong in extracting metadata
*/
public void extractMetadata(String schema, String entity, WorkUnit workUnit)
throws SchemaException, IOException;
/**
* High water mark for the snapshot pull
* @param watermarkSourceFormat
*
* @param source schema name
* @param source entity name
* @param watermark column
* @param watermark column format
* @param list of all predicates that needs to be applied
* @return high water mark
* @throws SchemaException if there is anything wrong in getting high water mark
*/
public long getMaxWatermark(String schema, String entity, String watermarkColumn,
List<Predicate> snapshotPredicateList, String watermarkSourceFormat)
throws HighWatermarkException;
/**
* Source record count
*
* @param source schema name
* @param source entity name
* @param work unit: properties
* @param list of all predicates that needs to be applied
* @return record count
* @throws RecordCountException if there is anything wrong in getting record count
*/
public long getSourceCount(String schema, String entity, WorkUnit workUnit, List<Predicate> predicateList)
throws RecordCountException;
/**
* record set: data records with an iterator
*
* @param source schema name
* @param source entity name
* @param work unit: properties
* @param list of all predicates that needs to be applied
* @return iterator with set of records
* @throws SchemaException if there is anything wrong in getting data records
*/
public Iterator<D> getRecordSet(String schema, String entity, WorkUnit workUnit, List<Predicate> predicateList)
throws DataRecordException, IOException;
/**
* water mark source format of water mark type
* @return water mark source format(yyyyMMddHHmmss, yyyyMMdd etc.)
*/
public String getWatermarkSourceFormat(WatermarkType watermarkType);
/**
* date predicate condition for types like timestamp and date
* @return predicate condition (LastModifiedHour >= 10 and LastModifiedHour <= 20)
*/
public String getHourPredicateCondition(String column, long value, String valueFormat, String operator);
/**
* Date predicate condition for types like timestamp and date.
*
* @param column watermark column name
* @param value watermark value
* @param valueFormat format of the watermark value
* @param operator comparison operator to use in the condition
* @return predicate condition (LastModifiedDate >= 2014-01-01 and LastModifiedDate <= 2014-01-01)
*/
public String getDatePredicateCondition(String column, long value, String valueFormat, String operator);
/**
* Timestamp predicate condition for types like timestamp.
*
* @param column watermark column name
* @param value watermark value
* @param valueFormat format of the watermark value
* @param operator comparison operator to use in the condition
* @return predicate condition (LastModifiedTimestamp >= 2014-01-01T00:00:00.000Z and LastModifiedTimestamp <= 2014-01-10T15:05:00.000Z)
*/
public String getTimestampPredicateCondition(String column, long value, String valueFormat, String operator);
/**
* Set the timeout for the source connection.
*
* @param timeOut connection timeout; presumably in milliseconds — TODO confirm unit against implementations
*/
public void setTimeOut(int timeOut);
/**
* Data type mapping of the source.
*
* @return Map of source and target data types
*/
public Map<String, String> getDataTypeMap();
/**
* Close the connection after the completion of the extract, whether it succeeded or failed.
*
* @throws Exception if closing the connection fails
*/
public void closeConnection()
throws Exception;
/**
* Get records using a source specific api (Example: bulk api in salesforce source).
* Record set: data records with an iterator.
*
* @param schema source schema name
* @param entity source entity name
* @param workUnit work unit carrying the job properties
* @param predicateList list of all predicates that need to be applied
* @return iterator over the set of records
* @throws IOException if reading from the source fails
*/
public Iterator<D> getRecordSetFromSourceApi(String schema, String entity, WorkUnit workUnit,
List<Predicate> predicateList)
throws IOException;
}
| 3,017 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/extract | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/extract/sftp/SftpExtractor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.sftp;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.source.extractor.filebased.FileBasedExtractor;
/**
* Abstract class that implements the SFTP
* protocol for connecting to source
* and downloading files
* @author stakiar
*/
public class SftpExtractor<S, D> extends FileBasedExtractor<S, D> {
/**
* Creates an extractor that downloads files over SFTP, delegating all file
* listing and reading to a {@link SftpFsHelper} built from the given state.
*
* @param workUnitState state of the work unit, carrying the SFTP connection properties
*/
public SftpExtractor(WorkUnitState workUnitState) {
super(workUnitState, new SftpFsHelper(workUnitState));
}
}
| 3,018 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/extract | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/extract/sftp/SftpFsHelper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.sftp;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;
import java.util.Vector;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableList;
import com.jcraft.jsch.Channel;
import com.jcraft.jsch.ChannelExec;
import com.jcraft.jsch.ChannelSftp;
import com.jcraft.jsch.ChannelSftp.LsEntry;
import com.jcraft.jsch.JSch;
import com.jcraft.jsch.JSchException;
import com.jcraft.jsch.ProxyHTTP;
import com.jcraft.jsch.Session;
import com.jcraft.jsch.SftpException;
import com.jcraft.jsch.SftpProgressMonitor;
import com.jcraft.jsch.UserInfo;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.password.PasswordManager;
import org.apache.gobblin.source.extractor.filebased.FileBasedHelperException;
import org.apache.gobblin.source.extractor.filebased.TimestampAwareFileBasedHelper;
import org.apache.gobblin.util.io.SeekableFSInputStream;
/**
* Connects to a source via SFTP and executes a given list of SFTP commands
* @author stakiar
*/
@Slf4j
public class SftpFsHelper implements TimestampAwareFileBasedHelper {

  /** JSch session shared by all channels; created in {@link #connect()}, torn down in {@link #close()}. */
  private Session session;
  private final State state;

  private static final String SFTP_CONNECTION_TIMEOUT_KEY = "sftpConn.timeout";
  private static final int DEFAULT_SFTP_CONNECTION_TIMEOUT = 3000;

  public SftpFsHelper(State state) {
    this.state = state;
  }

  /**
   * The method returns a new {@link ChannelSftp} without throwing an exception. Returns a null if any exception occurs
   * trying to get a new channel. The method exists for backward compatibility.
   *
   * @deprecated use {@link #getSftpChannel()} instead.
   *
   * @return a new sftp channel, or {@code null} if one could not be opened
   */
  @Deprecated
  public ChannelSftp getSftpConnection() {
    try {
      return this.getSftpChannel();
    } catch (SftpException e) {
      log.error("Failed to get new sftp channel", e);
      return null;
    }
  }

  /**
   * Create new channel every time a command needs to be executed. This is required to support execution of multiple
   * commands in parallel. All created channels are cleaned up when the session is closed.
   *
   * @return a new, connected {@link ChannelSftp}
   * @throws SftpException if the channel could not be opened or connected
   */
  public ChannelSftp getSftpChannel() throws SftpException {
    try {
      ChannelSftp channelSftp = (ChannelSftp) this.session.openChannel("sftp");
      // Connection timeout, in milliseconds
      int connTimeout = this.state.getPropAsInt(SFTP_CONNECTION_TIMEOUT_KEY, DEFAULT_SFTP_CONNECTION_TIMEOUT);
      channelSftp.connect(connTimeout);
      return channelSftp;
    } catch (JSchException e) {
      throw new SftpException(0, "Cannot open a channel to SFTP server", e);
    }
  }

  /**
   * Create a new sftp channel to execute commands.
   *
   * @param command to execute on the remote machine
   * @return a new execution channel
   * @throws SftpException if a channel could not be opened
   */
  public ChannelExec getExecChannel(String command) throws SftpException {
    ChannelExec channelExec;
    try {
      channelExec = (ChannelExec) this.session.openChannel("exec");
      channelExec.setCommand(command);
      channelExec.connect();
      return channelExec;
    } catch (JSchException e) {
      throw new SftpException(0, "Cannot open a channel to SFTP server", e);
    }
  }

  /**
   * Opens up a connection to specified host using the username. Connects to the source using a private key without
   * prompting for a password. This method does not support connecting to a source using a password, only by private
   * key.
   *
   * @throws FileBasedHelperException if the session cannot be established
   */
  @Override
  public void connect() throws FileBasedHelperException {
    String privateKey = PasswordManager.getInstance(this.state)
        .readPassword(this.state.getProp(ConfigurationKeys.SOURCE_CONN_PRIVATE_KEY));
    String password = PasswordManager.getInstance(this.state)
        .readPassword(this.state.getProp(ConfigurationKeys.SOURCE_CONN_PASSWORD));
    String knownHosts = this.state.getProp(ConfigurationKeys.SOURCE_CONN_KNOWN_HOSTS);
    String userName = this.state.getProp(ConfigurationKeys.SOURCE_CONN_USERNAME);
    String hostName = this.state.getProp(ConfigurationKeys.SOURCE_CONN_HOST_NAME);
    int port = this.state.getPropAsInt(ConfigurationKeys.SOURCE_CONN_PORT, ConfigurationKeys.SOURCE_CONN_DEFAULT_PORT);

    String proxyHost = this.state.getProp(ConfigurationKeys.SOURCE_CONN_USE_PROXY_URL);
    int proxyPort = this.state.getPropAsInt(ConfigurationKeys.SOURCE_CONN_USE_PROXY_PORT, -1);

    JSch.setLogger(new JSchLogger());
    JSch jsch = new JSch();

    log.info("Attempting to connect to source via SFTP with" + " privateKey: " + privateKey + " knownHosts: "
        + knownHosts + " userName: " + userName + " hostName: " + hostName + " port: " + port + " proxyHost: "
        + proxyHost + " proxyPort: " + proxyPort);
    try {
      if (!Strings.isNullOrEmpty(privateKey)) {
        // Try identity sources in order: local file, distributed cache, HDFS; first success wins.
        List<IdentityStrategy> identityStrategies = ImmutableList.of(new LocalFileIdentityStrategy(),
            new DistributedCacheIdentityStrategy(), new HDFSIdentityStrategy());
        for (IdentityStrategy identityStrategy : identityStrategies) {
          if (identityStrategy.setIdentity(privateKey, jsch)) {
            break;
          }
        }
      }

      this.session = jsch.getSession(userName, hostName, port);
      this.session.setConfig("PreferredAuthentications", "publickey,password");

      if (Strings.isNullOrEmpty(knownHosts)) {
        log.info("Known hosts path is not set, StrictHostKeyChecking will be turned off");
        this.session.setConfig("StrictHostKeyChecking", "no");
      } else {
        jsch.setKnownHosts(knownHosts);
      }

      if (!Strings.isNullOrEmpty(password)) {
        this.session.setPassword(password);
      }

      if (proxyHost != null && proxyPort >= 0) {
        this.session.setProxy(new ProxyHTTP(proxyHost, proxyPort));
      }

      UserInfo ui = new MyUserInfo();
      this.session.setUserInfo(ui);
      this.session.setDaemonThread(true);
      this.session.connect();

      log.info("Finished connecting to source");
    } catch (JSchException e) {
      if (this.session != null) {
        this.session.disconnect();
      }
      log.error(e.getMessage(), e);
      throw new FileBasedHelperException("Cannot connect to SFTP source", e);
    }
  }

  /**
   * Executes an SFTP GET and returns an input stream to the file. On success the returned stream owns
   * the underlying channel and disconnects it when the stream is closed.
   *
   * @param file path of the remote file to download
   * @return stream over the remote file's contents
   * @throws FileBasedHelperException if the file cannot be downloaded
   */
  @Override
  public InputStream getFileStream(String file) throws FileBasedHelperException {
    SftpGetMonitor monitor = new SftpGetMonitor();
    ChannelSftp channel = null;
    try {
      channel = getSftpChannel();
      return new SftpFsFileInputStream(channel.get(file, monitor), channel);
    } catch (SftpException e) {
      // The stream was never created, so nothing else will disconnect the channel.
      if (channel != null) {
        channel.disconnect();
      }
      throw new FileBasedHelperException("Cannot download file " + file + " due to " + e.getMessage(), e);
    }
  }

  /**
   * Lists the names of the entries under the given remote path.
   *
   * @param path remote directory (or glob) to list
   * @return file names as reported by the server (may include "." and "..")
   * @throws FileBasedHelperException if the ls command fails
   */
  @Override
  public List<String> ls(String path) throws FileBasedHelperException {
    ChannelSftp channel = null;
    try {
      channel = getSftpChannel();
      List<String> list = new ArrayList<>();
      Vector<LsEntry> vector = channel.ls(path);
      for (LsEntry entry : vector) {
        list.add(entry.getFilename());
      }
      return list;
    } catch (SftpException e) {
      throw new FileBasedHelperException("Cannot execute ls command on sftp connection", e);
    } finally {
      // Always release the per-call channel, even when ls() throws.
      if (channel != null) {
        channel.disconnect();
      }
    }
  }

  /** Disconnects the session; this also tears down any channels still attached to it. */
  @Override
  public void close() {
    if (this.session != null) {
      this.session.disconnect();
    }
  }

  /**
   * Returns the size, in bytes, of the remote file.
   *
   * @param filePath remote file path
   * @return file size in bytes
   * @throws FileBasedHelperException if the stat fails
   */
  @Override
  public long getFileSize(String filePath) throws FileBasedHelperException {
    ChannelSftp channelSftp = null;
    try {
      channelSftp = getSftpChannel();
      return channelSftp.lstat(filePath).getSize();
    } catch (SftpException e) {
      throw new FileBasedHelperException(
          String.format("Failed to get size for file at path %s due to error %s", filePath, e.getMessage()), e);
    } finally {
      // Always release the per-call channel, even when lstat() throws.
      if (channelSftp != null) {
        channelSftp.disconnect();
      }
    }
  }

  /**
   * Implementation of an SftpProgressMonitor to monitor the progress of file downloads using the ChannelSftp.GET
   * methods
   * @author stakiar
   */
  public static class SftpGetMonitor implements SftpProgressMonitor {

    private int op;
    private String src;
    private String dest;
    // Total bytes transferred so far.
    private long totalCount;
    // Countdown used to throttle progress logging to once every 1000 callbacks.
    private long logFrequency;
    private long startime;

    @Override
    public void init(int op, String src, String dest, long max) {
      this.op = op;
      this.src = src;
      this.dest = dest;
      this.startime = System.currentTimeMillis();
      this.logFrequency = 0L;
      log.info("Operation GET (" + op + ") has started with src: " + src + " dest: " + dest + " and file length: "
          + (max / 1000000L) + " mb");
    }

    @Override
    public boolean count(long count) {
      this.totalCount += count;
      if (this.logFrequency == 0L) {
        this.logFrequency = 1000L;
        log.info(
            "Transfer is in progress for file: " + this.src + ". Finished transferring " + this.totalCount + " bytes ");
        long mb = this.totalCount / 1000000L;
        log.info("Transferd " + mb + " Mb. Speed " + getMbps() + " Mbps");
      }
      this.logFrequency--;
      // Returning true tells JSch to continue the transfer.
      return true;
    }

    @Override
    public void end() {
      long secs = (System.currentTimeMillis() - this.startime) / 1000L;
      log.info("Transfer finished " + this.op + " src: " + this.src + " dest: " + this.dest + " in " + secs + " at "
          + getMbps());
    }

    /** Average throughput since {@link #init}, formatted with two decimals. */
    private String getMbps() {
      long mb = this.totalCount / 1000000L;
      long secs = (System.currentTimeMillis() - this.startime) / 1000L;
      double mbps = secs == 0L ? 0.0D : mb * 1.0D / secs;
      return String.format("%.2f", new Object[] { Double.valueOf(mbps) });
    }
  }

  /**
   * Basic implementation of jsch.Logger that logs the output from the JSch commands to slf4j
   * @author stakiar
   */
  public static class JSchLogger implements com.jcraft.jsch.Logger {
    @Override
    public boolean isEnabled(int level) {
      switch (level) {
        case DEBUG:
          return log.isDebugEnabled();
        case INFO:
          return log.isInfoEnabled();
        case WARN:
          return log.isWarnEnabled();
        case ERROR:
          return log.isErrorEnabled();
        case FATAL:
          // slf4j has no FATAL level; map it to ERROR.
          return log.isErrorEnabled();
        default:
          return false;
      }
    }

    @Override
    public void log(int level, String message) {
      switch (level) {
        case DEBUG:
          log.debug(message);
          break;
        case INFO:
          log.info(message);
          break;
        case WARN:
          log.warn(message);
          break;
        case ERROR:
          log.error(message);
          break;
        case FATAL:
          // slf4j has no FATAL level; map it to ERROR.
          log.error(message);
          break;
        default:
          log.info(message);
          break;
      }
    }
  }

  /**
   * Implementation of UserInfo class for JSch which allows for password-less login via keys
   * @author stakiar
   */
  public static class MyUserInfo implements UserInfo {

    // The passphrase used to access the private key
    @Override
    public String getPassphrase() {
      return null;
    }

    // The password to login to the client server
    @Override
    public String getPassword() {
      return null;
    }

    @Override
    public boolean promptPassword(String message) {
      return true;
    }

    @Override
    public boolean promptPassphrase(String message) {
      return true;
    }

    @Override
    public boolean promptYesNo(String message) {
      return true;
    }

    @Override
    public void showMessage(String message) {
      log.info(message);
    }
  }

  /**
   * Interface for multiple identity setter strategies
   */
  private interface IdentityStrategy {
    public boolean setIdentity(String privateKey, JSch jsch);
  }

  /**
   * Sets identity using a file on HDFS
   */
  private static class HDFSIdentityStrategy implements IdentityStrategy {
    @Override
    public boolean setIdentity(String privateKey, JSch jsch) {

      FileSystem fs;
      try {
        fs = FileSystem.get(new Configuration());
      } catch (Exception e) {
        log.warn("Failed to set identity using HDFS file. Will attempt next strategy. " + e.getMessage());
        return false;
      }

      Preconditions.checkNotNull(fs, "FileSystem cannot be null");
      try (FSDataInputStream privateKeyStream = fs.open(new Path(privateKey))) {
        byte[] bytes = IOUtils.toByteArray(privateKeyStream);
        jsch.addIdentity("sftpIdentityKey", bytes, (byte[]) null, (byte[]) null);
        log.info("Successfully set identity using HDFS file");
        return true;
      } catch (Exception e) {
        log.warn("Failed to set identity using HDFS file. Will attempt next strategy. " + e.getMessage());
        return false;
      }
    }
  }

  /**
   * Sets identity using a local file
   */
  private static class LocalFileIdentityStrategy implements IdentityStrategy {
    @Override
    public boolean setIdentity(String privateKey, JSch jsch) {
      try {
        jsch.addIdentity(privateKey);
        log.info("Successfully set identity using local file " + privateKey);
        return true;
      } catch (Exception e) {
        log.warn("Failed to set identity using local file. Will attempt next strategy. " + e.getMessage());
      }
      return false;
    }
  }

  /**
   * Sets identity using a file on distributed cache
   */
  private static class DistributedCacheIdentityStrategy extends LocalFileIdentityStrategy {
    @Override
    public boolean setIdentity(String privateKey, JSch jsch) {
      // Files in the distributed cache are localized to the working directory under their base name.
      return super.setIdentity(new File(privateKey).getName(), jsch);
    }
  }

  /**
   * Returns the modification time of the remote file.
   * NOTE(review): jsch's {@code getMTime()} returns seconds since the epoch as an int —
   * callers expecting milliseconds should confirm.
   *
   * @param filePath remote file path
   * @return modification time as reported by the server
   * @throws FileBasedHelperException if the stat fails
   */
  @Override
  public long getFileMTime(String filePath) throws FileBasedHelperException {
    ChannelSftp channelSftp = null;
    try {
      channelSftp = getSftpChannel();
      int modificationTime = channelSftp.lstat(filePath).getMTime();
      return modificationTime;
    } catch (SftpException e) {
      throw new FileBasedHelperException(
          String.format("Failed to get modified timestamp for file at path %s due to error %s", filePath,
              e.getMessage()),
          e);
    } finally {
      if (channelSftp != null) {
        channelSftp.disconnect();
      }
    }
  }

  /**
   * A {@link SeekableFSInputStream} that holds a handle on the Sftp {@link Channel} used to open the
   * {@link InputStream}. The {@link Channel} is disconnected when {@link InputStream#close()} is called.
   */
  static class SftpFsFileInputStream extends SeekableFSInputStream {

    private final Channel channel;

    public SftpFsFileInputStream(InputStream in, Channel channel) {
      super(in);
      this.channel = channel;
    }

    @Override
    public void close() throws IOException {
      super.close();
      this.channel.disconnect();
    }
  }
}
| 3,019 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/extract | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/extract/sftp/SftpSource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.sftp;
import org.apache.gobblin.source.extractor.Extractor;
import org.apache.gobblin.source.extractor.filebased.FileBasedHelperException;
import java.io.IOException;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.source.extractor.filebased.FileBasedSource;
/**
* A {@link FileBasedSource} that reads files from an SFTP server via {@link SftpFsHelper}.
*/
public class SftpSource<S, D> extends FileBasedSource<S, D> {
/**
* Returns a new {@link SftpExtractor} for the given work unit state.
*/
@Override
public Extractor<S, D> getExtractor(WorkUnitState state) throws IOException {
return new SftpExtractor<>(state);
}
/**
* Creates and connects the {@link SftpFsHelper} used to list files on the SFTP server.
*
* @throws FileBasedHelperException if the connection to the SFTP server cannot be established
*/
@Override
public void initFileSystemHelper(State state) throws FileBasedHelperException {
this.fsHelper = new SftpFsHelper(state);
this.fsHelper.connect();
}
}
| 3,020 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/extract | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/extract/sftp/SftpLightWeightFileSystem.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.sftp;
import java.io.IOException;
import java.io.InputStream;
import java.net.URI;
import java.util.List;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BufferedFSInputStream;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.util.Progressable;
import com.google.common.collect.Lists;
import com.jcraft.jsch.Channel;
import com.jcraft.jsch.ChannelExec;
import com.jcraft.jsch.ChannelSftp;
import com.jcraft.jsch.SftpATTRS;
import com.jcraft.jsch.SftpException;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.source.extractor.extract.sftp.SftpFsHelper.SftpGetMonitor;
import org.apache.gobblin.source.extractor.filebased.FileBasedHelperException;
import org.apache.gobblin.util.HadoopUtils;
/**
* A {@link FileSystem} implementation that provides the {@link FileSystem} interface for an SFTP server. Uses
* {@link SftpFsHelper} internally to connect to the SFPT server. {@link HadoopUtils#newConfiguration()}
* <ul>
* <li>It is the caller's responsibility to call {@link #close()} on this {@link FileSystem} to disconnect the session.
* <li>Use {@link HadoopUtils#newConfiguration()} when creating a {@link FileSystem} with
* {@link FileSystem#get(Configuration)}. It creates a new {@link SftpLightWeightFileSystem} everytime instead of cached
* copy
* </ul>
*/
public class SftpLightWeightFileSystem extends FileSystem {
private static final URI NAME = URI.create("sftp:///");
private SftpFsHelper fsHelper;
private static final int DEFAULT_BUFFER_SIZE = 32 * 1024;
private static final PathFilter VALID_PATH_FILTER = new PathFilter() {
@Override
public boolean accept(Path path) {
if (path == null) {
return false;
}
if (StringUtils.isBlank(path.toString())) {
return false;
}
if (path.toString().equals(".")) {
return false;
}
if (path.toString().equals("..")) {
return false;
}
return true;
}
};
@Override
public void initialize(URI name, Configuration conf) throws IOException {
super.initialize(name, conf);
State state = HadoopUtils.getStateFromConf(conf);
this.fsHelper = new SftpFsHelper(state);
try {
this.fsHelper.connect();
} catch (FileBasedHelperException e) {
throw new IOException(e);
}
}
@Override
public boolean delete(Path path) throws IOException {
ChannelSftp channel = null;
try {
channel = this.fsHelper.getSftpChannel();
if (getFileStatus(path).isDirectory()) {
channel.rmdir(HadoopUtils.toUriPath(path));
} else {
channel.rm(HadoopUtils.toUriPath(path));
}
} catch (SftpException e) {
throw new IOException(e);
} finally {
safeDisconnect(channel);
}
return true;
}
@Override
public boolean delete(Path path, boolean recursive) throws IOException {
return delete(path);
}
@Override
public FileStatus getFileStatus(Path path) throws IOException {
ChannelSftp channelSftp = null;
ChannelExec channelExec1 = null;
ChannelExec channelExec2 = null;
try {
channelSftp = this.fsHelper.getSftpChannel();
SftpATTRS sftpAttrs = channelSftp.stat(HadoopUtils.toUriPath(path));
FsPermission permission = new FsPermission((short) sftpAttrs.getPermissions());
channelExec1 = this.fsHelper.getExecChannel("id " + sftpAttrs.getUId());
String userName = IOUtils.toString(channelExec1.getInputStream());
channelExec2 = this.fsHelper.getExecChannel("id " + sftpAttrs.getGId());
String groupName = IOUtils.toString(channelExec2.getInputStream());
FileStatus fs =
new FileStatus(sftpAttrs.getSize(), sftpAttrs.isDir(), 1, 0l, sftpAttrs.getMTime(), sftpAttrs.getATime(),
permission, StringUtils.trimToEmpty(userName), StringUtils.trimToEmpty(groupName), path);
return fs;
} catch (SftpException e) {
throw new IOException(e);
} finally {
safeDisconnect(channelSftp);
safeDisconnect(channelExec1);
safeDisconnect(channelExec2);
}
}
@Override
public URI getUri() {
return NAME;
}
@Override
public Path getWorkingDirectory() {
ChannelSftp channelSftp = null;
try {
channelSftp = this.fsHelper.getSftpChannel();
Path workingDir = new Path(channelSftp.pwd());
return workingDir;
} catch (SftpException e) {
return null;
} finally {
safeDisconnect(channelSftp);
}
}
@Override
public FileStatus[] listStatus(Path path) throws IOException {
try {
List<String> fileNames = this.fsHelper.ls(HadoopUtils.toUriPath(path));
List<FileStatus> status = Lists.newArrayListWithCapacity(fileNames.size());
for (String name : fileNames) {
Path filePath = new Path(name);
if (VALID_PATH_FILTER.accept(filePath)) {
status.add(getFileStatus(new Path(path, filePath)));
}
}
return status.toArray(new FileStatus[status.size()]);
} catch (FileBasedHelperException e) {
throw new IOException(e);
}
}
@Override
public boolean mkdirs(Path path, FsPermission permission) throws IOException {
ChannelSftp channel = null;
try {
channel = this.fsHelper.getSftpChannel();
channel.mkdir(HadoopUtils.toUriPath(path));
channel.chmod(permission.toShort(), HadoopUtils.toUriPath(path));
} catch (SftpException e) {
throw new IOException(e);
} finally {
safeDisconnect(channel);
}
return true;
}
@Override
public FSDataInputStream open(Path path, int bufferSize) throws IOException {
SftpGetMonitor monitor = new SftpGetMonitor();
try {
ChannelSftp channelSftp = this.fsHelper.getSftpChannel();
InputStream is = channelSftp.get(HadoopUtils.toUriPath(path), monitor);
return new FSDataInputStream(new BufferedFSInputStream(new SftpFsHelper.SftpFsFileInputStream(is, channelSftp), bufferSize));
} catch (SftpException e) {
throw new IOException(e);
}
}
@Override
public FSDataInputStream open(Path path) throws IOException {
return open(path, DEFAULT_BUFFER_SIZE);
}
@Override
public boolean rename(Path oldPath, Path newPath) throws IOException {
ChannelSftp channelSftp = null;
try {
channelSftp = this.fsHelper.getSftpChannel();
channelSftp.rename(HadoopUtils.toUriPath(oldPath), HadoopUtils.toUriPath(newPath));
} catch (SftpException e) {
throw new IOException(e);
} finally {
safeDisconnect(channelSftp);
}
return true;
}
@Override
public void setWorkingDirectory(Path path) {
ChannelSftp channelSftp = null;
try {
channelSftp = this.fsHelper.getSftpChannel();
channelSftp.lcd(HadoopUtils.toUriPath(path));
} catch (SftpException e) {
throw new RuntimeException("Failed to set working directory", e);
} finally {
safeDisconnect(channelSftp);
}
}
@Override
public void close() {
this.fsHelper.close();
}
@Override
public FSDataOutputStream append(Path arg0, int arg1, Progressable arg2) throws IOException {
throw new UnsupportedOperationException("Not implemented");
}
@Override
public FSDataOutputStream create(Path arg0, FsPermission arg1, boolean arg2, int arg3, short arg4, long arg5,
Progressable arg6) throws IOException {
throw new UnsupportedOperationException("Not implemented");
}
/**
* Null safe disconnect
*/
private static void safeDisconnect(Channel channel) {
if (channel != null) {
channel.disconnect();
}
}
}
| 3,021 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/extract | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/extract/restapi/RestApiExtractor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.restapi;
import com.google.common.collect.ImmutableList;
import org.apache.gobblin.source.extractor.exception.RestApiConnectionException;
import org.apache.gobblin.source.extractor.exception.RestApiProcessingException;
import org.apache.gobblin.source.extractor.utils.Utils;
import java.io.IOException;
import java.util.Iterator;
import java.util.List;
import com.google.common.base.Joiner;
import com.google.common.base.Strings;
import com.google.gson.Gson;
import com.google.gson.JsonArray;
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
import com.google.common.base.Splitter;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.source.extractor.watermark.Predicate;
import org.apache.gobblin.source.extractor.DataRecordException;
import org.apache.gobblin.source.extractor.exception.HighWatermarkException;
import org.apache.gobblin.source.extractor.exception.RecordCountException;
import org.apache.gobblin.source.extractor.exception.SchemaException;
import org.apache.gobblin.source.extractor.extract.QueryBasedExtractor;
import org.apache.gobblin.source.extractor.extract.Command;
import org.apache.gobblin.source.extractor.extract.CommandOutput;
import org.apache.gobblin.source.extractor.extract.SourceSpecificLayer;
import org.apache.gobblin.source.extractor.schema.Schema;
import org.apache.gobblin.source.workunit.WorkUnit;
import lombok.extern.slf4j.Slf4j;
/**
* An implementation of rest api extractor for the sources that are using rest api
*
* @param <D> type of data record
* @param <S> type of schema
*/
@Slf4j
public abstract class RestApiExtractor extends QueryBasedExtractor<JsonArray, JsonElement>
implements SourceSpecificLayer<JsonArray, JsonElement>, RestApiSpecificLayer {
private static final Gson GSON = new Gson();
// Presumably the base URL of the REST instance, set by concrete connectors — TODO confirm
protected String instanceUrl;
// Presumably holds the query after column-list substitution — TODO confirm against subclasses
protected String updatedQuery;
// Connector used to talk to the concrete REST source; created once in the constructor.
protected final RestApiConnector connector;
/**
* Creates the extractor and obtains its {@link RestApiConnector} from the subclass.
* NOTE(review): this invokes the overridable {@link #getConnector(WorkUnitState)} from the
* constructor, so subclass implementations must not rely on their own constructor state.
*
* @param state state of the work unit, carrying the source connection properties
*/
public RestApiExtractor(WorkUnitState state) {
super(state);
this.connector = getConnector(state);
}
protected abstract RestApiConnector getConnector(WorkUnitState state);
/**
 * Builds the data query to issue against the source.
 * <p>
 * If no input query is configured, a query is synthesized from the metadata column list.
 * Otherwise the column list in the input query is replaced with the intersection of
 * columns resolved from metadata; a query without a recognizable
 * {@code select ... from ...} shape is passed through unchanged.
 *
 * @param inputQuery the configured source query, possibly {@code null}
 * @param entity source entity (table) name, used when synthesizing a query
 * @return the query to execute, or {@code null} when neither an input query nor
 *         metadata columns are available
 */
protected String buildDataQuery(String inputQuery, String entity) {
  String dataQuery = null;
  if (inputQuery == null && this.columnList.size() != 0) {
    // if input query is null, build the query from metadata
    dataQuery = "SELECT " + Joiner.on(",").join(this.columnList) + " FROM " + entity;
  } else if (inputQuery != null) {
    // if input query is not null, build the query with intersection of columns from input query and columns from Metadata
    String queryLowerCase = inputQuery.toLowerCase();
    // indexOf returns -1 when the keyword is absent; the original code added 7 first,
    // which turned "not found" into a bogus positive offset of 6.
    int selectIndex = queryLowerCase.indexOf("select ");
    int columnsEndIndex = queryLowerCase.indexOf(" from ");
    if (selectIndex >= 0 && columnsEndIndex > selectIndex + 7) {
      int columnsStartIndex = selectIndex + 7;
      String givenColumnList = inputQuery.substring(columnsStartIndex, columnsEndIndex);
      dataQuery = inputQuery.replace(givenColumnList, Joiner.on(",").join(this.columnList));
    } else {
      // Query is not in the expected shape; use it as-is rather than corrupting it.
      dataQuery = inputQuery;
    }
  }
  log.info("Updated data query: " + dataQuery);
  return dataQuery;
}
/**
* Extracts the list of column names appearing in the given query's select clause.
*
* @param query input query to parse
* @return column names selected by the query
*/
protected List<String> extractColumnListInQuery(String query) {
return Utils.getColumnListFromQuery(query);
}
@Override
public void extractMetadata(String schema, String entity, WorkUnit workUnit) throws SchemaException {
log.info("Extract Metadata using Rest Api");
JsonArray columnArray = new JsonArray();
String inputQuery = workUnitState.getProp(ConfigurationKeys.SOURCE_QUERYBASED_QUERY);
List<String> columnListInQuery = null;
JsonArray array = null;
if (!Strings.isNullOrEmpty(inputQuery)) {
columnListInQuery = extractColumnListInQuery(inputQuery);
}
String excludedColumns = workUnitState.getProp(ConfigurationKeys.SOURCE_QUERYBASED_EXCLUDED_COLUMNS);
List<String> columnListExcluded = ImmutableList.<String> of();
if (Strings.isNullOrEmpty(inputQuery) && !Strings.isNullOrEmpty(excludedColumns)) {
Splitter splitter = Splitter.on(",").omitEmptyStrings().trimResults();
columnListExcluded = splitter.splitToList(excludedColumns.toLowerCase());
}
try {
boolean success = this.connector.connect();
if (!success) {
throw new SchemaException("Failed to connect.");
}
log.debug("Connected successfully.");
List<Command> cmds = this.getSchemaMetadata(schema, entity);
CommandOutput<?, ?> response = this.connector.getResponse(cmds);
array = this.getSchema(response);
for (JsonElement columnElement : array) {
Schema obj = GSON.fromJson(columnElement, Schema.class);
String columnName = obj.getColumnName();
obj.setWaterMark(this.isWatermarkColumn(workUnitState.getProp("extract.delta.fields"), columnName));
if (this.isWatermarkColumn(workUnitState.getProp("extract.delta.fields"), columnName)) {
obj.setNullable(false);
} else if (this.getPrimarykeyIndex(workUnitState.getProp("extract.primary.key.fields"), columnName) == 0) {
// set all columns as nullable except primary key and watermark columns
obj.setNullable(true);
}
obj.setPrimaryKey(this.getPrimarykeyIndex(workUnitState.getProp("extract.primary.key.fields"), columnName));
String jsonStr = GSON.toJson(obj);
JsonObject jsonObject = GSON.fromJson(jsonStr, JsonObject.class).getAsJsonObject();
// If input query is null or provided '*' in the query select all columns.
// Else, consider only the columns mentioned in the column list
if (inputQuery == null || columnListInQuery == null
|| (columnListInQuery.size() == 1 && columnListInQuery.get(0).equals("*"))
|| (columnListInQuery.size() >= 1 && this.isMetadataColumn(columnName, columnListInQuery))) {
if (!columnListExcluded.contains(columnName.trim().toLowerCase())) {
this.columnList.add(columnName);
columnArray.add(jsonObject);
}
}
}
this.updatedQuery = buildDataQuery(inputQuery, entity);
log.info("Schema:" + columnArray);
this.setOutputSchema(columnArray);
} catch (RuntimeException | RestApiProcessingException | RestApiConnectionException | IOException
| SchemaException e) {
throw new SchemaException("Failed to get schema using rest api; error - " + e.getMessage(), e);
}
}
@Override
public long getMaxWatermark(String schema, String entity, String watermarkColumn, List<Predicate> predicateList,
String watermarkSourceFormat) throws HighWatermarkException {
log.info("Get high watermark using Rest Api");
long CalculatedHighWatermark = -1;
try {
boolean success = this.connector.connect();
if (!success) {
throw new HighWatermarkException("Failed to connect.");
}
log.debug("Connected successfully.");
List<Command> cmds = this.getHighWatermarkMetadata(schema, entity, watermarkColumn, predicateList);
CommandOutput<?, ?> response = this.connector.getResponse(cmds);
CalculatedHighWatermark = this.getHighWatermark(response, watermarkColumn, watermarkSourceFormat);
log.info("High watermark:" + CalculatedHighWatermark);
return CalculatedHighWatermark;
} catch (Exception e) {
throw new HighWatermarkException("Failed to get high watermark using rest api; error - " + e.getMessage(), e);
}
}
@Override
public long getSourceCount(String schema, String entity, WorkUnit workUnit, List<Predicate> predicateList)
throws RecordCountException {
log.info("Get source record count using Rest Api");
long count = 0;
try {
boolean success = this.connector.connect();
if (!success) {
throw new RecordCountException("Failed to connect.");
}
log.debug("Connected successfully.");
List<Command> cmds = this.getCountMetadata(schema, entity, workUnit, predicateList);
CommandOutput<?, ?> response = this.connector.getResponse(cmds);
count = getCount(response);
log.info("Source record count:" + count);
return count;
} catch (Exception e) {
throw new RecordCountException("Failed to get record count using rest api; error - " + e.getMessage(), e);
}
}
@Override
public Iterator<JsonElement> getRecordSet(String schema, String entity, WorkUnit workUnit,
List<Predicate> predicateList) throws DataRecordException {
log.debug("Get data records using Rest Api");
Iterator<JsonElement> rs = null;
List<Command> cmds;
try {
boolean success = true;
if (this.connector.isConnectionClosed()) {
success = this.connector.connect();
}
if (!success) {
throw new DataRecordException("Failed to connect.");
}
log.debug("Connected successfully.");
if (this.getPullStatus() == false) {
return null;
}
if (this.getNextUrl() == null) {
cmds = this.getDataMetadata(schema, entity, workUnit, predicateList);
} else {
cmds = RestApiConnector.constructGetCommand(this.getNextUrl());
}
CommandOutput<?, ?> response = this.connector.getResponse(cmds);
rs = this.getData(response);
return rs;
} catch (Exception e) {
throw new DataRecordException("Failed to get records using rest api; error - " + e.getMessage(), e);
}
}
@Override
public void setTimeOut(int timeOut) {
this.connector.setAuthTokenTimeout(timeOut);
}
@Override
public void closeConnection() throws Exception {
this.connector.close();
}
}
| 3,022 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/extract | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/extract/restapi/RestApiCommandOutput.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.restapi;
import java.util.HashMap;
import java.util.Map;
import org.apache.gobblin.source.extractor.extract.CommandOutput;
/**
 * A {@link CommandOutput} implementation that maps each executed {@link RestApiCommand}
 * to the raw response string it produced.
 */
public class RestApiCommandOutput implements CommandOutput<RestApiCommand, String> {

  private Map<RestApiCommand, String> commandResults = new HashMap<>();

  @Override
  public void storeResults(Map<RestApiCommand, String> results) {
    this.commandResults = results;
  }

  @Override
  public Map<RestApiCommand, String> getResults() {
    return this.commandResults;
  }

  @Override
  public void put(RestApiCommand key, String value) {
    this.commandResults.put(key, value);
  }
}
| 3,023 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/extract | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/extract/restapi/RestApiConnector.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.restapi;
import com.google.common.io.Closer;
import java.io.Closeable;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.TimeUnit;
import org.apache.commons.io.IOUtils;
import org.apache.http.HttpEntity;
import org.apache.http.HttpResponse;
import org.apache.http.StatusLine;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpRequestBase;
import org.apache.http.util.EntityUtils;
import com.google.common.base.Charsets;
import com.google.gson.Gson;
import com.google.gson.JsonArray;
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.http.HttpClientConfiguratorLoader;
import org.apache.gobblin.source.extractor.exception.RestApiConnectionException;
import org.apache.gobblin.source.extractor.exception.RestApiProcessingException;
import org.apache.gobblin.source.extractor.extract.Command;
import org.apache.gobblin.source.extractor.extract.CommandOutput;
import org.apache.gobblin.source.extractor.extract.restapi.RestApiCommand.RestApiCommandType;
import lombok.Setter;
import lombok.extern.slf4j.Slf4j;
/**
* A class for connecting to Rest APIs, construct queries and getting responses.
*/
@Slf4j
public abstract class RestApiConnector implements Closeable {
public static final String REST_API_CONNECTOR_CLASS = "rest.api.connector.class";
protected static final Gson GSON = new Gson();
protected HttpClient httpClient = null;
protected boolean autoEstablishAuthToken = false;
@Setter
protected long authTokenTimeout;
protected String accessToken = null;
protected long createdAt;
protected String instanceUrl;
protected String updatedQuery;
protected Closer closer = Closer.create();
protected final State state;
public RestApiConnector(State state) {
this.state = state;
this.authTokenTimeout =
state.getPropAsInt(ConfigurationKeys.SOURCE_CONN_TIMEOUT, ConfigurationKeys.DEFAULT_CONN_TIMEOUT);
}
@Override
public void close() throws IOException {
// This is to close any idle connections opening by the httpClient
this.closer.close();
if (this.getHttpClient() != null && !(this.getHttpClient() instanceof Closeable)) {
log.warn("httpClient is not closable, we will only close the idle connections");
this.getHttpClient().getConnectionManager().closeIdleConnections(0, TimeUnit.MILLISECONDS);
}
}
/**
* get http connection
* @return true if the connection is success else false
*/
public boolean connect() throws RestApiConnectionException {
if (this.autoEstablishAuthToken) {
if (this.authTokenTimeout <= 0) {
return false;
} else if ((System.currentTimeMillis() - this.createdAt) > this.authTokenTimeout) {
return false;
}
}
HttpEntity httpEntity = null;
try {
httpEntity = getAuthentication();
if (httpEntity != null) {
JsonElement json = GSON.fromJson(EntityUtils.toString(httpEntity), JsonObject.class);
if (json == null) {
log.error("Http entity: " + httpEntity);
log.error("entity class: " + httpEntity.getClass().getName());
log.error("entity string size: " + EntityUtils.toString(httpEntity).length());
log.error("content length: " + httpEntity.getContentLength());
log.error("content: " + IOUtils.toString(httpEntity.getContent(), Charsets.UTF_8));
throw new RestApiConnectionException(
"JSON is NULL ! Failed on authentication with the following HTTP response received:\n"
+ EntityUtils.toString(httpEntity));
}
JsonObject jsonRet = json.getAsJsonObject();
log.info("jsonRet: " + jsonRet.toString());
parseAuthenticationResponse(jsonRet);
}
} catch (IOException e) {
throw new RestApiConnectionException("Failed to get rest api connection; error - " + e.getMessage(), e);
} finally {
if (httpEntity != null) {
try {
EntityUtils.consume(httpEntity);
} catch (IOException e) {
throw new RestApiConnectionException("Failed to consume httpEntity; error - " + e.getMessage(), e);
}
}
}
return true;
}
protected HttpClient getHttpClient() {
if (this.httpClient == null) {
HttpClientConfiguratorLoader configuratorLoader = new HttpClientConfiguratorLoader(this.state);
this.httpClient = configuratorLoader.getConfigurator()
.setStatePropertiesPrefix(ConfigurationKeys.SOURCE_CONN_PREFIX)
.configure(this.state)
.createClient();
if (httpClient instanceof Closeable) {
this.closer.register((Closeable)httpClient);
}
}
return this.httpClient;
}
private static boolean hasId(JsonObject json) {
if (json.has("id") || json.has("Id") || json.has("ID") || json.has("iD")) {
return true;
}
return false;
}
/**
* get http response in json format using url
* @return json string with the response
*/
public CommandOutput<?, ?> getResponse(List<Command> cmds) throws RestApiProcessingException {
String url = cmds.get(0).getParams().get(0);
log.info("URL: " + url);
String jsonStr = null;
HttpRequestBase httpRequest = new HttpGet(url);
addHeaders(httpRequest);
HttpEntity httpEntity = null;
HttpResponse httpResponse = null;
try {
httpResponse = this.httpClient.execute(httpRequest);
StatusLine status = httpResponse.getStatusLine();
httpEntity = httpResponse.getEntity();
if (httpEntity != null) {
jsonStr = EntityUtils.toString(httpEntity);
}
if (status.getStatusCode() >= 400) {
log.info("Unable to get response using {} with status code {}: {}", url, status.getStatusCode(), jsonStr);
JsonElement jsonRet = GSON.fromJson(jsonStr, JsonArray.class);
throw new RestApiProcessingException(getFirstErrorMessage("Failed to retrieve response from ", jsonRet));
}
} catch (Exception e) {
throw new RestApiProcessingException("Failed to process rest api request; error - " + e.getMessage(), e);
} finally {
try {
if (httpEntity != null) {
EntityUtils.consume(httpEntity);
}
if (httpResponse instanceof Closeable) {
this.closer.register((Closeable)httpResponse);
}
} catch (Exception e) {
throw new RestApiProcessingException("Failed to consume httpEntity; error - " + e.getMessage(), e);
}
}
CommandOutput<RestApiCommand, String> output = new RestApiCommandOutput();
output.put((RestApiCommand) cmds.get(0), jsonStr);
return output;
}
protected void addHeaders(HttpRequestBase httpRequest) {
if (this.accessToken != null) {
httpRequest.addHeader("Authorization", "OAuth " + this.accessToken);
}
httpRequest.addHeader("Content-Type", "application/json");
//httpRequest.addHeader("Accept-Encoding", "zip");
//httpRequest.addHeader("Content-Encoding", "gzip");
//httpRequest.addHeader("Connection", "Keep-Alive");
//httpRequest.addHeader("Keep-Alive", "timeout=60000");
}
/**
* get error message while executing http url
* @return error message
*/
private static String getFirstErrorMessage(String defaultMessage, JsonElement json) {
if (json == null) {
return defaultMessage;
}
JsonObject jsonObject = null;
if (!json.isJsonArray()) {
jsonObject = json.getAsJsonObject();
} else {
JsonArray jsonArray = json.getAsJsonArray();
if (jsonArray.size() != 0) {
jsonObject = jsonArray.get(0).getAsJsonObject();
}
}
if (jsonObject != null) {
if (jsonObject.has("error_description")) {
defaultMessage = defaultMessage + jsonObject.get("error_description").getAsString();
} else if (jsonObject.has("message")) {
defaultMessage = defaultMessage + jsonObject.get("message").getAsString();
}
}
return defaultMessage;
}
/**
* Build a list of {@link Command}s given a String Rest query.
*/
public static List<Command> constructGetCommand(String restQuery) {
return Arrays.asList(new RestApiCommand().build(Arrays.asList(restQuery), RestApiCommandType.GET));
}
public boolean isConnectionClosed() {
return this.httpClient == null;
}
/**
* To be overridden by subclasses that require authentication.
*/
public abstract HttpEntity getAuthentication() throws RestApiConnectionException;
protected void parseAuthenticationResponse(JsonObject jsonRet) throws RestApiConnectionException {
if (!hasId(jsonRet)) {
throw new RestApiConnectionException("Failed on authentication with the following HTTP response received:"
+ jsonRet.toString());
}
this.instanceUrl = jsonRet.get("instance_url").getAsString();
this.accessToken = jsonRet.get("access_token").getAsString();
this.createdAt = System.currentTimeMillis();
}
}
| 3,024 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/extract | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/extract/restapi/RestApiSpecificLayer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.restapi;
import org.apache.gobblin.source.extractor.exception.RestApiConnectionException;
import org.apache.http.HttpEntity;
/**
 * The REST-API-specific portion of an extractor: authentication plus the
 * pagination/pull-status hooks consulted while fetching record batches.
 */
public interface RestApiSpecificLayer {

  /**
   * Builds the authentication response entity used to establish a connection.
   *
   * @throws RestApiConnectionException if authentication fails
   */
  HttpEntity getAuthentication() throws RestApiConnectionException;

  /** Returns true while there is still data to pull from the source. */
  boolean getPullStatus();

  /** Returns the URL of the next page of results, or null if there is none. */
  String getNextUrl();
}
| 3,025 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/extract | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/extract/restapi/RestApiCommand.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.restapi;
import org.apache.gobblin.source.extractor.extract.Command;
import org.apache.gobblin.source.extractor.extract.CommandType;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import com.google.common.base.Joiner;
/**
 * A {@link Command} describing a single REST API call: an HTTP verb plus its
 * parameters (typically just the request URL).
 */
public class RestApiCommand implements Command {

  /** HTTP verbs supported by REST API commands. */
  public enum RestApiCommandType implements CommandType {
    GET,
    PUT,
    POST
  }

  private final List<String> params = new ArrayList<>();
  private RestApiCommandType cmd;

  @Override
  public List<String> getParams() {
    return this.params;
  }

  @Override
  public CommandType getCommandType() {
    return this.cmd;
  }

  @Override
  public Command build(Collection<String> params, CommandType cmd) {
    this.params.addAll(params);
    this.cmd = (RestApiCommandType) cmd;
    return this;
  }

  @Override
  public String toString() {
    // Render as "VERB:param1:param2", skipping null parameters.
    return this.cmd.toString() + ":" + Joiner.on(":").skipNulls().join(this.params);
  }
}
| 3,026 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/partition/AppendMaxLimitType.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.partition;
/**
 * The time granularities at which an append extract's maximum watermark limit may be capped.
 */
public enum AppendMaxLimitType {
  CURRENTDATE,
  CURRENTHOUR,
  CURRENTMINUTE,
  CURRENTSECOND
}
| 3,027 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/partition/Partition.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.partition;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.source.Source;
import org.apache.gobblin.source.extractor.WatermarkInterval;
import org.apache.gobblin.source.extractor.extract.LongWatermark;
import org.apache.gobblin.source.workunit.WorkUnit;
import lombok.EqualsAndHashCode;
import lombok.Getter;
/**
 * This class encapsulates the two ends, {@link #lowWatermark} and {@link #highWatermark}, of a partition and some
 * metadata, e.g. {@link #hasUserSpecifiedHighWatermark}, to describe the partition.
 *
 * <p>
 * A {@link Source} partitions its data into a collection of {@link Partition}, each of which will be used to create
 * a {@link WorkUnit}.
 *
 * Currently, the {@link #lowWatermark} of a partition in Gobblin is inclusive and its {@link #highWatermark} is
 * exclusive unless it is the last partition.
 * </p>
 *
 * @author zhchen
 */
@EqualsAndHashCode
public class Partition {
  /** Work-unit property marking the last partition of a run. */
  public static final String IS_LAST_PARTITION = "partition.isLastPartition";

  /**
   * @deprecated misspelled alias of {@link #IS_LAST_PARTITION}, kept for backward compatibility
   *             with existing callers; both constants share the same property value, so
   *             previously serialized work units remain readable.
   */
  @Deprecated
  public static final String IS_LAST_PARTIITON = IS_LAST_PARTITION;

  /** Work-unit property marking that the high watermark was user-specified, not computed. */
  public static final String HAS_USER_SPECIFIED_HIGH_WATERMARK = "partition.hasUserSpecifiedHighWatermark";

  @Getter
  private final long lowWatermark;
  @Getter
  private final boolean isLowWatermarkInclusive;
  @Getter
  private final long highWatermark;
  @Getter
  private final boolean isHighWatermarkInclusive;
  @Getter
  private final boolean isLastPartition;

  /**
   * Indicate if the Partition highWatermark is set as user specifies, not computed on the fly
   */
  private final boolean hasUserSpecifiedHighWatermark;

  public Partition(long lowWatermark, long highWatermark, boolean isLastPartition,
      boolean hasUserSpecifiedHighWatermark) {
    this.lowWatermark = lowWatermark;
    this.highWatermark = highWatermark;
    // The low watermark is always inclusive; the high watermark is inclusive only for the
    // last partition of a run.
    this.isLowWatermarkInclusive = true;
    this.isHighWatermarkInclusive = isLastPartition;
    this.isLastPartition = isLastPartition;
    this.hasUserSpecifiedHighWatermark = hasUserSpecifiedHighWatermark;
  }

  public Partition(long lowWatermark, long highWatermark, boolean hasUserSpecifiedHighWatermark) {
    this(lowWatermark, highWatermark, false, hasUserSpecifiedHighWatermark);
  }

  public Partition(long lowWatermark, long highWatermark) {
    this(lowWatermark, highWatermark, false);
  }

  public boolean getHasUserSpecifiedHighWatermark() {
    return hasUserSpecifiedHighWatermark;
  }

  /**
   * Writes this partition's watermark interval and flags into the given {@link WorkUnit}.
   */
  public void serialize(WorkUnit workUnit) {
    workUnit.setWatermarkInterval(
        new WatermarkInterval(new LongWatermark(lowWatermark), new LongWatermark(highWatermark)));
    if (hasUserSpecifiedHighWatermark) {
      workUnit.setProp(Partition.HAS_USER_SPECIFIED_HIGH_WATERMARK, true);
    }
    if (isLastPartition) {
      workUnit.setProp(Partition.IS_LAST_PARTITION, true);
    }
  }

  /**
   * Reconstructs a {@link Partition} from the watermark interval and flags stored in a
   * {@link WorkUnit}; falls back to the default watermark value when no interval is present.
   */
  public static Partition deserialize(WorkUnit workUnit) {
    long lowWatermark = ConfigurationKeys.DEFAULT_WATERMARK_VALUE;
    long highWatermark = ConfigurationKeys.DEFAULT_WATERMARK_VALUE;

    if (workUnit.getProp(ConfigurationKeys.WATERMARK_INTERVAL_VALUE_KEY) != null) {
      lowWatermark = workUnit.getLowWatermark(LongWatermark.class).getValue();
      highWatermark = workUnit.getExpectedHighWatermark(LongWatermark.class).getValue();
    }

    return new Partition(lowWatermark, highWatermark, workUnit.getPropAsBoolean(Partition.IS_LAST_PARTITION),
        workUnit.getPropAsBoolean(Partition.HAS_USER_SPECIFIED_HIGH_WATERMARK));
  }
}
| 3,028 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/partition/Partitioner.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.partition;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.joda.time.DateTime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Strings;
import com.google.common.collect.Maps;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.source.extractor.extract.ExtractType;
import org.apache.gobblin.source.extractor.utils.Utils;
import org.apache.gobblin.source.extractor.watermark.DateWatermark;
import org.apache.gobblin.source.extractor.watermark.HourWatermark;
import org.apache.gobblin.source.extractor.watermark.SimpleWatermark;
import org.apache.gobblin.source.extractor.watermark.TimestampWatermark;
import org.apache.gobblin.source.extractor.watermark.WatermarkPredicate;
import org.apache.gobblin.source.extractor.watermark.WatermarkType;
/**
* An implementation of default partitioner for all types of sources
*/
public class Partitioner {
private static final Logger LOG = LoggerFactory.getLogger(Partitioner.class);
public static final String WATERMARKTIMEFORMAT = "yyyyMMddHHmmss";
public static final String HAS_USER_SPECIFIED_PARTITIONS = "partitioner.hasUserSpecifiedPartitions";
public static final String USER_SPECIFIED_PARTITIONS = "partitioner.userSpecifiedPartitions";
public static final String IS_EARLY_STOPPED = "partitioner.isEarlyStopped";
public static final String ALLOW_EQUAL_WATERMARK_BOUNDARY = "partitioner.allowEqualWatermarkBoundary";
// Orders partitions by ascending low watermark; null partitions sort first.
public static final Comparator<Partition> ascendingComparator = new Comparator<Partition>() {
  @Override
  public int compare(Partition p1, Partition p2) {
    if (p1 == null) {
      return (p2 == null) ? 0 : -1;
    }
    if (p2 == null) {
      return 1;
    }
    return Long.compare(p1.getLowWatermark(), p2.getLowWatermark());
  }
};
// Job/source configuration this partitioner reads its watermark settings from.
private SourceState state;

/**
 * Indicate if the user specifies a high watermark for the current run
 */
@VisibleForTesting
protected boolean hasUserSpecifiedHighWatermark;

public Partitioner(SourceState state) {
  // Note: the redundant explicit super() call was removed; Object's constructor is implicit.
  this.state = state;
  this.hasUserSpecifiedHighWatermark = false;
}
/**
 * Get the global partition of the whole data set, which has the global low and high watermarks
 *
 * @param previousWatermark previous watermark for computing the low watermark of current run
 * @return a Partition instance
 */
public Partition getGlobalPartition(long previousWatermark) {
  String extractTypeProp = state.getProp(ConfigurationKeys.SOURCE_QUERYBASED_EXTRACT_TYPE);
  ExtractType extractType = ExtractType.valueOf(extractTypeProp.toUpperCase());

  String watermarkTypeProp = state.getProp(ConfigurationKeys.SOURCE_QUERYBASED_WATERMARK_TYPE,
      ConfigurationKeys.DEFAULT_WATERMARK_TYPE);
  WatermarkType watermarkType = WatermarkType.valueOf(watermarkTypeProp.toUpperCase());

  // The delta is the smallest increment of the watermark type, used to make the new low
  // watermark start just past the previous run's high watermark.
  int deltaForNextWatermark = new WatermarkPredicate(null, watermarkType).getDeltaNumForNextWatermark();

  long globalLowWatermark = getLowWatermark(extractType, watermarkType, previousWatermark, deltaForNextWatermark);
  long globalHighWatermark = getHighWatermark(extractType, watermarkType);
  return new Partition(globalLowWatermark, globalHighWatermark, true, hasUserSpecifiedHighWatermark);
}
/**
 * Get partitions with low and high water marks
 *
 * @param previousWatermark previous water mark from metadata
 * @return map of partition intervals.
 *         map's key is interval begin time (in format {@link Partitioner#WATERMARKTIMEFORMAT})
 *         map's value is interval end time (in format {@link Partitioner#WATERMARKTIMEFORMAT})
 * @deprecated use {@link #getPartitionList(long)} instead
 */
@Deprecated
public HashMap<Long, Long> getPartitions(long previousWatermark) {
  HashMap<Long, Long> defaultPartition = Maps.newHashMap();
  // No watermark column/type configured: nothing to partition on, so emit a single
  // [DEFAULT, DEFAULT] interval and let the caller build one work unit from it.
  if (!isWatermarkExists()) {
    defaultPartition.put(ConfigurationKeys.DEFAULT_WATERMARK_VALUE, ConfigurationKeys.DEFAULT_WATERMARK_VALUE);
    LOG.info("Watermark column or type not found - Default partition with low watermark and high watermark as "
        + ConfigurationKeys.DEFAULT_WATERMARK_VALUE);
    return defaultPartition;
  }
  ExtractType extractType =
      ExtractType.valueOf(this.state.getProp(ConfigurationKeys.SOURCE_QUERYBASED_EXTRACT_TYPE).toUpperCase());
  WatermarkType watermarkType = WatermarkType.valueOf(
      this.state.getProp(ConfigurationKeys.SOURCE_QUERYBASED_WATERMARK_TYPE, ConfigurationKeys.DEFAULT_WATERMARK_TYPE)
          .toUpperCase());
  // Partition interval (configured value possibly adjusted for the extract/watermark type) and
  // the cap on how many partitions one run may produce (0 config means use the default cap).
  int interval =
      getUpdatedInterval(this.state.getPropAsInt(ConfigurationKeys.SOURCE_QUERYBASED_PARTITION_INTERVAL, 0),
          extractType, watermarkType);
  int sourceMaxAllowedPartitions = this.state.getPropAsInt(ConfigurationKeys.SOURCE_MAX_NUMBER_OF_PARTITIONS, 0);
  int maxPartitions = (sourceMaxAllowedPartitions != 0 ? sourceMaxAllowedPartitions
      : ConfigurationKeys.DEFAULT_MAX_NUMBER_OF_PARTITIONS);
  WatermarkPredicate watermark = new WatermarkPredicate(null, watermarkType);
  int deltaForNextWatermark = watermark.getDeltaNumForNextWatermark();
  LOG.info("is watermark override: " + this.isWatermarkOverride());
  LOG.info("is full extract: " + this.isFullDump());
  long lowWatermark = this.getLowWatermark(extractType, watermarkType, previousWatermark, deltaForNextWatermark);
  long highWatermark = this.getHighWatermark(extractType, watermarkType);
  // Either boundary unresolved: sub-partitions cannot be computed, so fall back to a single
  // interval carrying whatever values were found.
  if (lowWatermark == ConfigurationKeys.DEFAULT_WATERMARK_VALUE
      || highWatermark == ConfigurationKeys.DEFAULT_WATERMARK_VALUE) {
    LOG.info(
        "Low watermark or high water mark is not found. Hence cannot generate partitions - Default partition with low watermark: "
            + lowWatermark + " and high watermark: " + highWatermark);
    defaultPartition.put(lowWatermark, highWatermark);
    return defaultPartition;
  }
  LOG.info("Generate partitions with low watermark: " + lowWatermark + "; high watermark: " + highWatermark
      + "; partition interval in hours: " + interval + "; Maximum number of allowed partitions: " + maxPartitions);
  return watermark.getPartitions(lowWatermark, highWatermark, interval, maxPartitions);
}
/**
 * Get an unordered list of partition with lowWatermark, highWatermark, and hasUserSpecifiedHighWatermark.
 *
 * @param previousWatermark previous water mark from metadata
 * @return an unordered list of partition
 */
public List<Partition> getPartitionList(long previousWatermark) {
  if (state.getPropAsBoolean(HAS_USER_SPECIFIED_PARTITIONS)) {
    return createUserSpecifiedPartitions();
  }

  List<Partition> partitions = new ArrayList<>();

  /*
   * Use the deprecated getPartitions(long) as a helper function, avoid duplicating logic. When it can be removed, its
   * logic will be put here.
   */
  HashMap<Long, Long> partitionMap = getPartitions(previousWatermark);
  if (partitionMap.size() == 0) {
    return partitions;
  }

  if (partitionMap.size() == 1) {
    Map.Entry<Long, Long> entry = partitionMap.entrySet().iterator().next();
    // Unbox to primitives before comparing: the previous code compared the boxed Longs with
    // "==", which is reference equality and only held for values inside the Long cache
    // (e.g. -1), so equal watermarks outside that range were never detected.
    long lwm = entry.getKey();
    long hwm = entry.getValue();
    if (lwm == hwm) {
      if (lwm != -1) { // we always allow [-1, -1] interval due to some test cases relies on this logic.
        boolean allowEqualBoundary = state.getPropAsBoolean(ALLOW_EQUAL_WATERMARK_BOUNDARY, false);
        LOG.info("Single partition with LWM = HWM and allowEqualBoundary=" + allowEqualBoundary);
        if (!allowEqualBoundary) {
          return partitions;
        }
      }
    }
  }

  /*
   * Can't use highWatermark directly, as the partitionMap may have different precision. For example, highWatermark
   * may be specified to seconds, but partitionMap could be specified to hour or date.
   */
  Long highestWatermark = Collections.max(partitionMap.values());

  for (Map.Entry<Long, Long> entry : partitionMap.entrySet()) {
    Long partitionHighWatermark = entry.getValue();
    // Apply hasUserSpecifiedHighWatermark to the last partition, which has highestWatermark
    if (partitionHighWatermark.equals(highestWatermark)) {
      partitions.add(new Partition(entry.getKey(), partitionHighWatermark, true, hasUserSpecifiedHighWatermark));
    } else {
      // The partitionHighWatermark was computed on the fly not what user specifies
      partitions.add(new Partition(entry.getKey(), partitionHighWatermark, false));
    }
  }
  return partitions;
}
/**
 * Generate the partitions based on the lists specified by the user in job config.
 *
 * <p>The configured points are treated as consecutive boundaries: point[i] closes partition i
 * and opens partition i+1 (the low watermark of each middle partition is the previous point).
 */
private List<Partition> createUserSpecifiedPartitions() {
  List<Partition> partitions = new ArrayList<>();
  List<String> watermarkPoints = state.getPropAsList(USER_SPECIFIED_PARTITIONS);
  boolean isEarlyStopped = state.getPropAsBoolean(IS_EARLY_STOPPED);
  if (watermarkPoints == null || watermarkPoints.size() == 0 ) {
    // No points configured: fall back to a single default partition so downstream still gets one work unit.
    LOG.info("There should be some partition points");
    long defaultWatermark = ConfigurationKeys.DEFAULT_WATERMARK_VALUE;
    partitions.add(new Partition(defaultWatermark, defaultWatermark, true, true));
    return partitions;
  }
  WatermarkType watermarkType = WatermarkType.valueOf(
      state.getProp(ConfigurationKeys.SOURCE_QUERYBASED_WATERMARK_TYPE, ConfigurationKeys.DEFAULT_WATERMARK_TYPE)
          .toUpperCase());
  long lowWatermark = adjustWatermark(watermarkPoints.get(0), watermarkType);
  long highWatermark = ConfigurationKeys.DEFAULT_WATERMARK_VALUE;
  // Only one partition point specified
  if (watermarkPoints.size() == 1) {
    if (watermarkType != WatermarkType.SIMPLE) {
      // Time-based watermark: cap the single partition at the current time in the source time zone.
      String timeZone = this.state.getProp(ConfigurationKeys.SOURCE_TIMEZONE);
      String currentTime = Utils.dateTimeToString(getCurrentTime(timeZone), WATERMARKTIMEFORMAT, timeZone);
      highWatermark = adjustWatermark(currentTime, watermarkType);
    }
    partitions.add(new Partition(lowWatermark, highWatermark, true, false));
    return partitions;
  }
  int i;
  // Middle partitions: each point closes the previous partition and becomes the low bound of the next.
  for (i = 1; i < watermarkPoints.size() - 1; i++) {
    highWatermark = adjustWatermark(watermarkPoints.get(i), watermarkType);
    partitions.add(new Partition(lowWatermark, highWatermark, true));
    lowWatermark = highWatermark;
  }
  // Last partition
  highWatermark = adjustWatermark(watermarkPoints.get(i), watermarkType);
  ExtractType extractType =
      ExtractType.valueOf(this.state.getProp(ConfigurationKeys.SOURCE_QUERYBASED_EXTRACT_TYPE).toUpperCase());
  // If it is early stop, we should not remove upper bounds
  if ((isFullDump() || isSnapshot(extractType)) && !isEarlyStopped) {
    // The upper bounds can be removed for last work unit
    partitions.add(new Partition(lowWatermark, highWatermark, true, false));
  } else {
    // The upper bounds can not be removed for last work unit
    partitions.add(new Partition(lowWatermark, highWatermark, true, true));
  }
  return partitions;
}
/**
 * Adjust a watermark based on watermark type.
 *
 * @param baseWatermark the original watermark
 * @param watermarkType Watermark Type
 * @return the adjusted watermark value
 */
private static long adjustWatermark(String baseWatermark, WatermarkType watermarkType) {
  // Dispatch directly to the watermark implementation matching the type; anything
  // unrecognized yields the default sentinel (same outcome as the old accumulator form).
  switch (watermarkType) {
    case SIMPLE:
      return SimpleWatermark.adjustWatermark(baseWatermark, 0);
    case DATE:
      return DateWatermark.adjustWatermark(baseWatermark, 0);
    case HOUR:
      return HourWatermark.adjustWatermark(baseWatermark, 0);
    case TIMESTAMP:
      return TimestampWatermark.adjustWatermark(baseWatermark, 0);
    default:
      return ConfigurationKeys.DEFAULT_WATERMARK_VALUE;
  }
}
/**
 * Calculate interval in hours with the given interval.
 *
 * @param inputInterval input interval
 * @param extractType Extract type
 * @param watermarkType Watermark type
 * @return interval in range
 */
private static int getUpdatedInterval(int inputInterval, ExtractType extractType, WatermarkType watermarkType) {
  LOG.debug("Getting updated interval");
  if (extractType == ExtractType.SNAPSHOT && watermarkType == WatermarkType.DATE) {
    // Date-grained snapshot: the configured interval counts days, convert to hours.
    return inputInterval * 24;
  }
  if (extractType == ExtractType.APPEND_DAILY) {
    // Daily append: at least one day, expressed in hours.
    return Math.max(inputInterval, 1) * 24;
  }
  return inputInterval;
}
/**
 * Get low water mark:
 * (1) Use {@link ConfigurationKeys#SOURCE_QUERYBASED_START_VALUE} iff it is a full dump (or watermark override is enabled)
 * (2) Otherwise use previous watermark (fallback to {@link ConfigurationKeys#SOURCE_QUERYBASED_START_VALUE} iff previous watermark is unavailable)
 *
 * @param extractType Extract type
 * @param watermarkType Watermark type
 * @param previousWatermark Previous water mark
 * @param deltaForNextWatermark delta number for next water mark
 * @return low water mark in {@link Partitioner#WATERMARKTIMEFORMAT}
 */
@VisibleForTesting
protected long getLowWatermark(ExtractType extractType, WatermarkType watermarkType, long previousWatermark,
    int deltaForNextWatermark) {
  long lowWatermark = ConfigurationKeys.DEFAULT_WATERMARK_VALUE;
  if (this.isFullDump() || this.isWatermarkOverride()) {
    String timeZone =
        this.state.getProp(ConfigurationKeys.SOURCE_TIMEZONE, ConfigurationKeys.DEFAULT_SOURCE_TIMEZONE);
    /*
     * SOURCE_QUERYBASED_START_VALUE could be:
     * - a simple string, e.g. "12345"
     * - a timestamp string, e.g. "20140101000000"
     * - a string with a time directive, e.g. "CURRENTDAY-X", "CURRENTHOUR-X", (X is a number)
     */
    lowWatermark =
        Utils.getLongWithCurrentDate(this.state.getProp(ConfigurationKeys.SOURCE_QUERYBASED_START_VALUE), timeZone);
    LOG.info("Overriding low water mark with the given start value: " + lowWatermark);
  } else {
    // Incremental run: derive the low watermark from the previous run's watermark,
    // with snapshot and append extracts using different advance/backup rules.
    if (isSnapshot(extractType)) {
      lowWatermark = this.getSnapshotLowWatermark(watermarkType, previousWatermark, deltaForNextWatermark);
    } else {
      lowWatermark = this.getAppendLowWatermark(watermarkType, previousWatermark, deltaForNextWatermark);
    }
  }
  // A computed value of 0 is treated as "no watermark" and normalized to the default sentinel.
  return (lowWatermark == 0 ? ConfigurationKeys.DEFAULT_WATERMARK_VALUE : lowWatermark);
}
/**
 * Get snapshot low water mark.
 *
 * @param watermarkType Watermark type
 * @param previousWatermark Previous water mark
 * @param deltaForNextWatermark delta number for next water mark
 * @return Previous watermark (fallback to {@link ConfigurationKeys#SOURCE_QUERYBASED_START_VALUE} iff previous watermark is unavailable)
 */
private long getSnapshotLowWatermark(WatermarkType watermarkType, long previousWatermark, int deltaForNextWatermark) {
  LOG.debug("Getting snapshot low water mark");
  String timeZone = this.state.getProp(ConfigurationKeys.SOURCE_TIMEZONE, ConfigurationKeys.DEFAULT_SOURCE_TIMEZONE);
  if (isPreviousWatermarkExists(previousWatermark)) {
    if (isSimpleWatermark(watermarkType)) {
      // NOTE(review): for SIMPLE watermarks the "backup secs" config is subtracted as a raw
      // number from the numeric watermark, not as seconds — confirm this is intended.
      return previousWatermark + deltaForNextWatermark - this.state
          .getPropAsInt(ConfigurationKeys.SOURCE_QUERYBASED_LOW_WATERMARK_BACKUP_SECS, 0);
    }
    // Time-based watermark: shift the previous watermark by (delta - backup seconds)
    // in the source time zone, then re-serialize in WATERMARKTIMEFORMAT.
    DateTime wm = Utils.toDateTime(previousWatermark, WATERMARKTIMEFORMAT, timeZone).plusSeconds(
        (deltaForNextWatermark - this.state
            .getPropAsInt(ConfigurationKeys.SOURCE_QUERYBASED_LOW_WATERMARK_BACKUP_SECS, 0)));
    return Long.parseLong(Utils.dateTimeToString(wm, WATERMARKTIMEFORMAT, timeZone));
  }
  // If previous watermark is not found, override with the start value
  // (irrespective of source.is.watermark.override flag)
  long startValue =
      Utils.getLongWithCurrentDate(this.state.getProp(ConfigurationKeys.SOURCE_QUERYBASED_START_VALUE), timeZone);
  LOG.info("Overriding low water mark with the given start value: " + startValue);
  return startValue;
}
/**
 * Get append low water mark.
 *
 * @param watermarkType Watermark type
 * @param previousWatermark Previous water mark
 * @param deltaForNextWatermark delta number for next water mark
 * @return Previous watermark (fallback to {@link ConfigurationKeys#SOURCE_QUERYBASED_START_VALUE} iff previous watermark is unavailable)
 */
private long getAppendLowWatermark(WatermarkType watermarkType, long previousWatermark, int deltaForNextWatermark) {
  LOG.debug("Getting append low water mark");
  // NOTE(review): unlike getSnapshotLowWatermark, no DEFAULT_SOURCE_TIMEZONE fallback is applied
  // here — confirm whether a missing SOURCE_TIMEZONE is intentional for append extracts.
  String timeZone = this.state.getProp(ConfigurationKeys.SOURCE_TIMEZONE);
  if (isPreviousWatermarkExists(previousWatermark)) {
    if (isSimpleWatermark(watermarkType)) {
      // Simple (numeric) watermark: just advance past the previous value.
      return previousWatermark + deltaForNextWatermark;
    }
    // Time-based watermark: advance by deltaForNextWatermark seconds in the source time zone.
    DateTime wm =
        Utils.toDateTime(previousWatermark, WATERMARKTIMEFORMAT, timeZone).plusSeconds(deltaForNextWatermark);
    return Long.parseLong(Utils.dateTimeToString(wm, WATERMARKTIMEFORMAT, timeZone));
  }
  // No previous watermark: fall back to the configured start value.
  long startValue =
      Utils.getLongWithCurrentDate(this.state.getProp(ConfigurationKeys.SOURCE_QUERYBASED_START_VALUE), timeZone);
  // Fix: log the resolved start value. The previous message concatenated the configuration key
  // constant (the literal key name string), not the watermark actually being used.
  LOG.info("Overriding low water mark with start value: " + startValue);
  return startValue;
}
/**
 * Get high water mark.
 *
 * <p>Side effect: may set {@code hasUserSpecifiedHighWatermark} — directly when the user
 * configured {@link ConfigurationKeys#SOURCE_QUERYBASED_END_VALUE} under watermark override,
 * or via the snapshot/append helpers on the other paths.
 *
 * @param extractType Extract type
 * @param watermarkType Watermark type
 * @return high water mark in {@link Partitioner#WATERMARKTIMEFORMAT}
 */
@VisibleForTesting
protected long getHighWatermark(ExtractType extractType, WatermarkType watermarkType) {
  LOG.debug("Getting high watermark");
  String timeZone = this.state.getProp(ConfigurationKeys.SOURCE_TIMEZONE);
  long highWatermark = ConfigurationKeys.DEFAULT_WATERMARK_VALUE;
  if (this.isWatermarkOverride()) {
    highWatermark = this.state.getPropAsLong(ConfigurationKeys.SOURCE_QUERYBASED_END_VALUE, 0);
    if (highWatermark == 0) {
      // No explicit end value configured: use "now" in the source time zone.
      highWatermark = Long.parseLong(Utils.dateTimeToString(getCurrentTime(timeZone), WATERMARKTIMEFORMAT, timeZone));
    } else {
      // User specifies SOURCE_QUERYBASED_END_VALUE
      hasUserSpecifiedHighWatermark = true;
    }
    LOG.info("Overriding high water mark with the given end value:" + highWatermark);
  } else {
    if (isSnapshot(extractType)) {
      highWatermark = this.getSnapshotHighWatermark(watermarkType);
    } else {
      highWatermark = this.getAppendHighWatermark(extractType);
    }
  }
  // Normalize a computed value of 0 ("no watermark") to the default sentinel.
  return (highWatermark == 0 ? ConfigurationKeys.DEFAULT_WATERMARK_VALUE : highWatermark);
}
/**
 * Get snapshot high water mark.
 *
 * @param watermarkType Watermark type
 * @return snapshot high water mark (default sentinel for simple watermarks, otherwise the current time)
 */
private long getSnapshotHighWatermark(WatermarkType watermarkType) {
  LOG.debug("Getting snapshot high water mark");
  if (!isSimpleWatermark(watermarkType)) {
    // Time-based watermark: a snapshot extract runs up to "now" in the source time zone.
    String sourceTimeZone = this.state.getProp(ConfigurationKeys.SOURCE_TIMEZONE);
    return Long.parseLong(Utils.dateTimeToString(getCurrentTime(sourceTimeZone), WATERMARKTIMEFORMAT, sourceTimeZone));
  }
  // Simple (numeric) watermarks have no meaningful "current time" upper bound.
  return ConfigurationKeys.DEFAULT_WATERMARK_VALUE;
}
/**
 * Get append high water mark.
 *
 * <p>Side effect: sets {@code hasUserSpecifiedHighWatermark} when the user configured a
 * non-zero {@link ConfigurationKeys#SOURCE_QUERYBASED_END_VALUE} for a full dump.
 *
 * @param extractType Extract type
 * @return append high water mark
 */
private long getAppendHighWatermark(ExtractType extractType) {
  LOG.debug("Getting append high water mark");
  if (this.isFullDump()) {
    long highWatermark = this.state.getPropAsLong(ConfigurationKeys.SOURCE_QUERYBASED_END_VALUE, 0);
    // Fix: log the resolved end value. The previous message concatenated the configuration key
    // constant (the literal key name string), not the watermark actually being used.
    LOG.info("Overriding high water mark with end value:" + highWatermark);
    if (highWatermark != 0) {
      // User specifies SOURCE_QUERYBASED_END_VALUE
      hasUserSpecifiedHighWatermark = true;
    }
    return highWatermark;
  }
  return this.getAppendWatermarkCutoff(extractType);
}
/**
 * Get cutoff for high water mark.
 *
 * <p>Side effect: sets {@code hasUserSpecifiedHighWatermark} when an offset
 * (e.g. "CURRENTDATE-1") was configured in SOURCE_QUERYBASED_APPEND_MAX_WATERMARK_LIMIT.
 *
 * @param extractType Extract type
 * @return cutoff
 */
private long getAppendWatermarkCutoff(ExtractType extractType) {
  LOG.debug("Getting append water mark cutoff");
  long highWatermark = ConfigurationKeys.DEFAULT_WATERMARK_VALUE;
  String timeZone = this.state.getProp(ConfigurationKeys.SOURCE_TIMEZONE);
  AppendMaxLimitType limitType = getAppendLimitType(extractType,
      this.state.getProp(ConfigurationKeys.SOURCE_QUERYBASED_APPEND_MAX_WATERMARK_LIMIT));
  if (limitType == null) {
    // No limit applies (e.g. non-daily/hourly append with no configured limit): no cutoff.
    LOG.debug("Limit type is not found");
    return highWatermark;
  }
  int limitDelta =
      getAppendLimitDelta(this.state.getProp(ConfigurationKeys.SOURCE_QUERYBASED_APPEND_MAX_WATERMARK_LIMIT));
  // if it is CURRENTDATE or CURRENTHOUR then high water mark is current time
  if (limitDelta == 0) {
    highWatermark = Long.parseLong(Utils.dateTimeToString(getCurrentTime(timeZone), WATERMARKTIMEFORMAT, timeZone));
  }
  // if CURRENTDATE or CURRENTHOUR has offset then high water mark is end of day of the given offset
  else {
    // For each unit: convert the configured delta to seconds, and pick the "end of unit"
    // remainder (seconds) to append after truncating to the unit's format below.
    int seconds = 3599; // x:59:59
    String format = null;
    switch (limitType) {
      case CURRENTDATE:
        format = "yyyyMMdd";
        limitDelta = limitDelta * 24 * 60 * 60;
        seconds = 86399; // 23:59:59
        break;
      case CURRENTHOUR:
        format = "yyyyMMddHH";
        limitDelta = limitDelta * 60 * 60;
        seconds = 3599; // x:59:59
        break;
      case CURRENTMINUTE:
        format = "yyyyMMddHHmm";
        limitDelta = limitDelta * 60;
        seconds = 59;
        break;
      case CURRENTSECOND:
        format = "yyyyMMddHHmmss";
        seconds = 0;
        break;
      default:
        break;
    }
    // Step back limitDelta seconds from now, truncate to the unit boundary via the format
    // round-trip, then add the "end of unit" seconds to land on the last instant of that unit.
    DateTime deltaTime = getCurrentTime(timeZone).minusSeconds(limitDelta);
    DateTime previousTime =
        Utils.toDateTime(Utils.dateTimeToString(deltaTime, format, timeZone), format, timeZone).plusSeconds(seconds);
    highWatermark = Long.parseLong(Utils.dateTimeToString(previousTime, WATERMARKTIMEFORMAT, timeZone));
    // User specifies SOURCE_QUERYBASED_APPEND_MAX_WATERMARK_LIMIT
    hasUserSpecifiedHighWatermark = true;
  }
  return highWatermark;
}
/**
 * Get append max limit type from the input.
 *
 * @param extractType Extract type
 * @param maxLimit user-configured limit string (e.g. "CURRENTDATE-1"); may be null or empty
 * @return Max limit type
 */
private static AppendMaxLimitType getAppendLimitType(ExtractType extractType, String maxLimit) {
  LOG.debug("Getting append limit type");
  // An explicit config value wins over the extract-type default.
  if (!Strings.isNullOrEmpty(maxLimit)) {
    LOG.debug("Getting append limit type from the config");
    String[] limitParams = maxLimit.split("-");
    if (limitParams.length >= 1) {
      return AppendMaxLimitType.valueOf(limitParams[0]);
    }
  }
  // Fall back to a default limit derived from the extract type.
  switch (extractType) {
    case APPEND_DAILY:
      return AppendMaxLimitType.CURRENTDATE;
    case APPEND_HOURLY:
      return AppendMaxLimitType.CURRENTHOUR;
    default:
      return null;
  }
}
/**
 * Get append max limit delta num.
 *
 * @param maxLimit user-configured limit string (e.g. "CURRENTDATE-1"); may be null or empty
 * @return Max limit delta number (0 when no offset is present)
 */
private static int getAppendLimitDelta(String maxLimit) {
  LOG.debug("Getting append limit delta");
  if (Strings.isNullOrEmpty(maxLimit)) {
    return 0;
  }
  // The delta is the numeric portion after the first '-', when present.
  String[] limitParams = maxLimit.split("-");
  return (limitParams.length >= 2) ? Integer.parseInt(limitParams[1]) : 0;
}
/**
 * Check whether a real previous water mark is available.
 *
 * @param previousWatermark previous water mark
 * @return true if previous water mark exists (i.e. differs from the default sentinel)
 */
private static boolean isPreviousWatermarkExists(long previousWatermark) {
  // Idiom: return the comparison directly instead of the if/return-true/return-false form.
  return previousWatermark != ConfigurationKeys.DEFAULT_WATERMARK_VALUE;
}
/**
 * True if both water mark columns and water mark type are provided in configuration.
 *
 * @return true if water mark exists
 */
private boolean isWatermarkExists() {
  // Idiom: return the boolean expression directly instead of the if/return-true/return-false form.
  return !Strings.isNullOrEmpty(this.state.getProp(ConfigurationKeys.EXTRACT_DELTA_FIELDS_KEY))
      && !Strings.isNullOrEmpty(this.state.getProp(ConfigurationKeys.SOURCE_QUERYBASED_WATERMARK_TYPE));
}
/** @return true iff the extract type is {@link ExtractType#SNAPSHOT} */
private static boolean isSnapshot(ExtractType extractType) {
  // Idiom: return the comparison directly instead of the if/return-true/return-false form.
  return extractType == ExtractType.SNAPSHOT;
}
/** @return true iff the watermark type is {@link WatermarkType#SIMPLE} */
private static boolean isSimpleWatermark(WatermarkType watermarkType) {
  // Idiom: return the comparison directly instead of the if/return-true/return-false form.
  return watermarkType == WatermarkType.SIMPLE;
}
/**
 * If full dump is true, the low watermark will be based on {@link ConfigurationKeys#SOURCE_QUERYBASED_START_VALUE}
 * Otherwise it will base on the previous watermark. Please refer to {@link Partitioner#getLowWatermark(ExtractType, WatermarkType, long, int)}
 * @return full dump or not
 */
public boolean isFullDump() {
  // Boolean.parseBoolean avoids the boxing of Boolean.valueOf + auto-unboxing, and treats a
  // null/absent property the same way (false).
  return Boolean.parseBoolean(this.state.getProp(ConfigurationKeys.EXTRACT_IS_FULL_KEY));
}
/**
 * Whether the watermark override flag is enabled.
 * (Javadoc fixed: the previous doc said "full dump or not", copied from {@link #isFullDump()}.)
 *
 * @return true iff {@link ConfigurationKeys#SOURCE_QUERYBASED_IS_WATERMARK_OVERRIDE} is set to "true"
 */
public boolean isWatermarkOverride() {
  // Boolean.parseBoolean avoids the boxing of Boolean.valueOf + auto-unboxing, and treats a
  // null/absent property the same way (false).
  return Boolean.parseBoolean(this.state.getProp(ConfigurationKeys.SOURCE_QUERYBASED_IS_WATERMARK_OVERRIDE));
}
/**
 * This thin function is introduced to facilitate testing, a way to mock current time.
 *
 * @param timeZone time zone id in which "now" should be resolved
 * @return current time in the given timeZone
 */
@VisibleForTesting
public DateTime getCurrentTime(String timeZone) {
  return Utils.getCurrentTime(timeZone);
}
}
| 3,029 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/exception/RecordCountException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.exception;
/**
 * Checked exception indicating a record-count related failure.
 */
public class RecordCountException extends Exception {

  private static final long serialVersionUID = 1L;

  /**
   * @param message detail message describing the failure
   */
  public RecordCountException(String message) {
    super(message);
  }

  /**
   * @param message detail message describing the failure
   * @param e underlying cause; widened from {@code Exception} to {@code Throwable} so any cause
   *          can be chained (source-compatible with existing callers)
   */
  public RecordCountException(String message, Throwable e) {
    super(message, e);
  }
}
| 3,030 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/exception/RestApiConnectionException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.exception;
/**
 * Checked exception indicating a failure to establish a REST API connection.
 */
public class RestApiConnectionException extends Exception {

  private static final long serialVersionUID = 1L;

  /**
   * @param message detail message describing the failure
   */
  public RestApiConnectionException(String message) {
    super(message);
  }

  /**
   * @param message detail message describing the failure
   * @param e underlying cause; widened from {@code Exception} to {@code Throwable} so any cause
   *          can be chained (source-compatible with existing callers)
   */
  public RestApiConnectionException(String message, Throwable e) {
    super(message, e);
  }
}
| 3,031 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/exception/SchemaException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.exception;
/**
 * Checked exception indicating a schema-related failure.
 */
public class SchemaException extends Exception {

  private static final long serialVersionUID = 1L;

  /**
   * @param message detail message describing the failure
   */
  public SchemaException(String message) {
    super(message);
  }

  /**
   * @param message detail message describing the failure
   * @param e underlying cause; widened from {@code Exception} to {@code Throwable} so any cause
   *          can be chained (source-compatible with existing callers)
   */
  public SchemaException(String message, Throwable e) {
    super(message, e);
  }
}
| 3,032 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/exception/MetadataException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.exception;
/**
 * Checked exception indicating a metadata-related failure.
 */
public class MetadataException extends Exception {

  private static final long serialVersionUID = 1L;

  /**
   * @param message detail message describing the failure
   */
  public MetadataException(String message) {
    super(message);
  }

  /**
   * @param message detail message describing the failure
   * @param e underlying cause; widened from {@code Exception} to {@code Throwable} so any cause
   *          can be chained (source-compatible with existing callers)
   */
  public MetadataException(String message, Throwable e) {
    super(message, e);
  }
}
| 3,033 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/exception/RestApiProcessingException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.exception;
/**
 * Checked exception indicating a failure while processing a REST API request or response.
 */
public class RestApiProcessingException extends Exception {

  private static final long serialVersionUID = 1L;

  /**
   * @param message detail message describing the failure
   */
  public RestApiProcessingException(String message) {
    super(message);
  }

  /**
   * @param message detail message describing the failure
   * @param e underlying cause; widened from {@code Exception} to {@code Throwable} so any cause
   *          can be chained (source-compatible with existing callers)
   */
  public RestApiProcessingException(String message, Throwable e) {
    super(message, e);
  }
}
| 3,034 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/exception/ExtractPrepareException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.exception;
/**
 * Checked exception indicating a failure while preparing an extract.
 */
public class ExtractPrepareException extends Exception {

  private static final long serialVersionUID = 1L;

  /**
   * @param message detail message describing the failure
   */
  public ExtractPrepareException(String message) {
    super(message);
  }

  /**
   * @param message detail message describing the failure
   * @param e underlying cause; widened from {@code Exception} to {@code Throwable} so any cause
   *          can be chained (source-compatible with existing callers)
   */
  public ExtractPrepareException(String message, Throwable e) {
    super(message, e);
  }
}
| 3,035 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/exception/RestApiClientException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.exception;
/**
 * Checked exception indicating a REST API client failure.
 */
public class RestApiClientException extends Exception {

  private static final long serialVersionUID = 1L;

  /**
   * @param message detail message describing the failure
   */
  public RestApiClientException(String message) {
    super(message);
  }

  /**
   * @param message detail message describing the failure
   * @param e underlying cause; widened from {@code Exception} to {@code Throwable} so any cause
   *          can be chained (source-compatible with existing callers)
   */
  public RestApiClientException(String message, Throwable e) {
    super(message, e);
  }
}
| 3,036 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/exception/HighWatermarkException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.exception;
/**
 * Checked exception indicating a failure related to computing or retrieving a high watermark.
 */
public class HighWatermarkException extends Exception {

  private static final long serialVersionUID = 1L;

  /**
   * @param message detail message describing the failure
   */
  public HighWatermarkException(String message) {
    super(message);
  }

  /**
   * @param message detail message describing the failure
   * @param e underlying cause; widened from {@code Exception} to {@code Throwable} so any cause
   *          can be chained (source-compatible with existing callers)
   */
  public HighWatermarkException(String message, Throwable e) {
    super(message, e);
  }
}
| 3,037 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/async/DispatchException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.async;
/**
 * Exception for dispatching failures. By default, it is a fatal exception
 */
public class DispatchException extends Exception {

  /** Whether the dispatch failure should be treated as fatal; defaults to true. */
  private final boolean isFatalException;

  /**
   * @param message detail message
   * @param cause underlying exception
   * @param fatal whether the failure is fatal
   */
  public DispatchException(String message, Exception cause, boolean fatal) {
    super(message, cause);
    this.isFatalException = fatal;
  }

  /**
   * @param message detail message
   * @param fatal whether the failure is fatal
   */
  public DispatchException(String message, boolean fatal) {
    super(message);
    this.isFatalException = fatal;
  }

  /** Creates a fatal dispatch exception with a cause. */
  public DispatchException(String message, Exception cause) {
    this(message, cause, true);
  }

  /** Creates a fatal dispatch exception without a cause. */
  public DispatchException(String message) {
    this(message, true);
  }

  /** @return whether this failure is fatal */
  public boolean isFatal() {
    return isFatalException;
  }
}
| 3,038 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/async/AsyncRequestBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.async;
import java.util.Queue;
/**
 * An interface to build an async request from a buffer of records
 *
 * @param <D> type of record
 * @param <RQ> type of request
 */
public interface AsyncRequestBuilder<D, RQ> {
  /**
   * Build a single {@link AsyncRequest} from the given buffer of records.
   *
   * @param buffer buffered records to batch into one request
   *               (NOTE(review): whether implementations consume/drain the queue is not
   *               visible here — confirm against implementations)
   * @return the constructed async request
   */
  AsyncRequest<D, RQ> buildRequest(Queue<BufferedRecord<D>> buffer);
}
| 3,039 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/async/AsyncRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.async;
import java.util.ArrayList;
import java.util.List;
import com.google.common.collect.ImmutableList;
import lombok.Getter;
import lombok.Setter;
import org.apache.gobblin.net.Request;
/**
 * A type of write request which may batch several records at a time. It encapsulates the
 * raw request, batch level statistics, and callback of each record
 *
 * <p>Thread-safety: {@link #markRecord} already synchronized on {@code this}, implying
 * concurrent marking; the accessors are now synchronized as well so record counts, byte
 * totals (a {@code long}, not atomically readable otherwise) and the thunk snapshot are
 * consistent with in-flight writes.
 *
 * @param <D> type of data record
 * @param <RQ> type of raw request
 */
public class AsyncRequest<D, RQ> implements Request<RQ> {
  @Getter @Setter
  private RQ rawRequest;
  /** Per-record descriptors accumulated by {@link #markRecord}; guarded by {@code this}. */
  protected final List<Thunk<D>> thunks = new ArrayList<>();
  /** Total bytes written across all marked records; guarded by {@code this}. */
  private long byteSize = 0;

  /**
   * Get the total number of records processed in the request
   */
  public synchronized int getRecordCount() {
    return thunks.size();
  }

  /**
   * Get the total bytes processed in the request
   */
  public synchronized long getBytesWritten() {
    // Synchronized to pair with markRecord: guarantees visibility and avoids a torn read
    // of the long on platforms where 64-bit reads are not atomic.
    return this.byteSize;
  }

  /**
   * Get all records information in the request
   */
  public synchronized List<Thunk<D>> getThunks() {
    // Copy under the lock so a concurrent markRecord cannot trigger a
    // ConcurrentModificationException during ImmutableList.copyOf.
    return ImmutableList.copyOf(thunks);
  }

  /**
   * Mark the record associated with this request
   *
   * @param record buffered record
   * @param bytesWritten bytes of the record written into the request
   */
  public void markRecord(BufferedRecord<D> record, int bytesWritten) {
    synchronized (this) {
      thunks.add(new Thunk<>(record, bytesWritten));
      byteSize += bytesWritten;
    }
  }

  /**
   * A descriptor that represents a record in the request
   */
  public static final class Thunk<D> {
    /**
     * @deprecated Use {@link #record}
     */
    @Deprecated
    public final Callback callback;
    public final int sizeInBytes;
    public final BufferedRecord<D> record;

    /**
     * @deprecated Use {@link #Thunk(BufferedRecord, int)}
     */
    @Deprecated
    Thunk(Callback callback, int sizeInBytes) {
      this.callback = callback;
      this.sizeInBytes = sizeInBytes;
      this.record = null;
    }

    Thunk(BufferedRecord<D> record, int sizeInBytes) {
      this.callback = record.getCallback();
      this.sizeInBytes = sizeInBytes;
      this.record = record;
    }
  }
}
| 3,040 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/async/AsyncDataDispatcher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.async;
import java.util.Queue;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.util.concurrent.AbstractExecutionThreadService;
import javax.annotation.concurrent.NotThreadSafe;
/**
* Base class with skeleton logic to dispatch a record asynchronously. It buffers the records and consumes
* them by {@link #run()}
*
* <p>
* However the records are consumed depends on the actual implementation of {@link #dispatch(Queue)}, which
* may process one record or a batch at a time
* </p>
*
* @param <D> type of record
*/
@NotThreadSafe
public abstract class AsyncDataDispatcher<D> extends AbstractExecutionThreadService {
  private static final Logger LOG = LoggerFactory.getLogger(AsyncDataDispatcher.class);

  // Queue to buffer records
  private final BlockingQueue<D> buffer;
  // Lock for isBufferEmpty condition
  private final Lock lock;
  // Signaled by the consumer thread each time it observes an empty buffer
  private final Condition isBufferEmpty;

  /**
   * Construct the dispatcher with a bounded record buffer and start the consumer thread.
   *
   * <p>
   * NOTE(review): startAsync()/awaitRunning() here hand {@code this} to the service thread
   * before any subclass constructor body has run; subclass state used by
   * {@link #dispatch(Queue)} may not be initialized at that point — confirm subclasses
   * tolerate this.
   * </p>
   *
   * @param capacity maximum number of records the buffer holds; {@link #put(Object)} blocks when full
   */
  public AsyncDataDispatcher(int capacity) {
    super();
    buffer = new ArrayBlockingQueue<>(capacity);
    // Fair lock: waiting threads are granted the lock in FIFO order
    lock = new ReentrantLock(true);
    isBufferEmpty = lock.newCondition();
    startAsync();
    awaitRunning();
  }

  /**
   * Synchronously dispatch records in the buffer. Retries should be done if necessary. Every record
   * consumed from the buffer must have its callback called if any.
   *
   * @param buffer the buffer which contains a collection of records
   * @throws DispatchException if dispatch failed
   */
  protected abstract void dispatch(Queue<D> buffer)
      throws DispatchException;

  /**
   * Enqueue a record for asynchronous dispatch. Blocks while the buffer is full.
   *
   * @param record the record to buffer
   * @throws RuntimeException if the dispatcher is not running, stops during the blocking put,
   *         or the wait is interrupted
   */
  protected void put(D record) {
    // Accept new record only if dispatcher is running
    checkRunning("put");
    try {
      buffer.put(record);
      // Check after a blocking put
      if (!isRunning()) {
        // Purge out the record which was just put into the buffer
        buffer.clear();
        RuntimeException e = new RuntimeException("Attempt to operate when writer is " + state().name());
        LOG.error("put", e);
        throw e;
      }
    } catch (InterruptedException e) {
      throw new RuntimeException("Waiting to put a record interrupted", e);
    }
  }

  /**
   * Consumer loop: repeatedly drains the buffer via {@link #dispatch(Queue)} until the service
   * stops. A fatal {@link DispatchException} stops the service, drains the buffer, and wakes
   * any threads blocked in {@link #waitForBufferEmpty()} before propagating.
   */
  @Override
  protected void run()
      throws Exception {
    LOG.info("Start processing records");
    // A main loop to process records
    while (true) {
      while (buffer.isEmpty()) {
        // Buffer is empty
        notifyBufferEmptyOccurrence();
        if (!isRunning()) {
          // Clean return
          return;
        }
        // Waiting for some time to get some records
        try {
          Thread.sleep(300);
        } catch (InterruptedException e) {
          LOG.warn("Dispatcher sleep interrupted", e);
          break;
        }
      }
      // Dispatch records
      try {
        dispatch(buffer);
      } catch (DispatchException e) {
        LOG.error("Dispatch incurs an exception", e);
        if (e.isFatal()) {
          // Mark stopping
          stopAsync();
          // Drain the buffer
          buffer.clear();
          // Wake up the threads waiting on buffer empty occurrence
          notifyBufferEmptyOccurrence();
          throw e;
        }
        // Non-fatal failures fall through and the loop keeps consuming
      }
    }
  }

  /**
   * A blocking terminate
   */
  public void terminate() {
    stopAsync().awaitTerminated();
  }

  /**
   * Throw if the service is not in the RUNNING state.
   *
   * @param forWhat name of the attempted operation, used only for logging
   */
  protected void checkRunning(String forWhat) {
    if (!isRunning()) {
      RuntimeException e = new RuntimeException("Attempt to operate when writer is " + state().name());
      LOG.error(forWhat, e);
      throw e;
    }
  }

  /**
   * Block the calling thread until the consumer loop observes an empty buffer. Re-checks that
   * the dispatcher is still running afterwards (in the finally block), since a fatal dispatch
   * error also drains the buffer and signals this condition.
   */
  protected void waitForBufferEmpty() {
    checkRunning("waitForBufferEmpty");
    try {
      lock.lock();
      // Waiting buffer empty
      while (!buffer.isEmpty()) {
        try {
          isBufferEmpty.await();
        } catch (InterruptedException e) {
          throw new RuntimeException("Waiting for buffer flush interrupted", e);
        }
      }
    } finally {
      lock.unlock();
      checkRunning("waitForBufferEmpty");
    }
  }

  // Wake every thread blocked in waitForBufferEmpty()
  private void notifyBufferEmptyOccurrence() {
    try {
      lock.lock();
      isBufferEmpty.signalAll();
    } finally {
      lock.unlock();
    }
  }
}
| 3,041 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/async/BufferedRecord.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.async;
import lombok.AllArgsConstructor;
import lombok.Getter;
/**
 * This class represents a record in a buffer: the data record paired with the callback
 * associated with it. Instances are immutable.
 */
public class BufferedRecord<D> {
  private final D record;
  private final Callback callback;

  public BufferedRecord(D record, Callback callback) {
    this.record = record;
    this.callback = callback;
  }

  public D getRecord() {
    return this.record;
  }

  public Callback getCallback() {
    return this.callback;
  }
}
| 3,042 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/destination/DestinationDatasetHandler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.destination;
import java.io.Closeable;
import java.io.IOException;
import java.util.Collection;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.source.workunit.WorkUnitStream;
/**
 * Performs work related to initializing the target environment before the files are written and published.
 * Implementations should be aware that a {@link WorkUnitStream} may be of streaming type.
 */
public interface DestinationDatasetHandler extends Closeable {
  /**
   * Handle destination setup before workunits are sent to writer and publisher
   * This method is deprecated in favor of {@link #handle(WorkUnitStream)}.
   * @param workUnits work units whose destinations should be prepared
   * @throws IOException if destination setup fails
   */
  @Deprecated
  default void handle(Collection<WorkUnit> workUnits) throws IOException {}

  /**
   * Handle destination setup for a stream of work units. The default implementation is a
   * no-op that returns the stream unchanged.
   *
   * @param workUnitStream incoming stream of work units; may be unbounded (streaming)
   * @return the (possibly transformed) work unit stream
   * @throws IOException if destination setup fails
   */
  default WorkUnitStream handle(WorkUnitStream workUnitStream) throws IOException {
    return workUnitStream;
  }

  /**
   * Perform cleanup if needed
   * @throws IOException if cleanup fails
   */
  void close() throws IOException;
}
| 3,043 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/destination/DestinationDatasetHandlerService.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.destination;
import java.io.Closeable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.metrics.event.EventSubmitter;
import org.apache.gobblin.source.workunit.WorkUnitStream;
/**
 * Initializes and runs handlers on workunits before writers are initialized
 * Reads {@link ConfigurationKeys#DESTINATION_DATASET_HANDLER_CLASS} as a list
 * of classes, separated by comma to initialize the handlers
 */
public class DestinationDatasetHandlerService implements Closeable {
  List<DestinationDatasetHandler> handlers;

  /**
   * Instantiate every handler named in {@link ConfigurationKeys#DESTINATION_DATASET_HANDLER_CLASS}.
   *
   * @param jobState job/source state holding the handler class list and handler configuration
   * @param canCleanUp whether handlers may perform destructive cleanup
   * @param eventSubmitter event submitter (not referenced here; kept for constructor compatibility)
   */
  public DestinationDatasetHandlerService(SourceState jobState, Boolean canCleanUp, EventSubmitter eventSubmitter) {
    this.handlers = new ArrayList<>();
    if (jobState.contains(ConfigurationKeys.DESTINATION_DATASET_HANDLER_CLASS)) {
      List<String> handlerList = jobState.getPropAsList(ConfigurationKeys.DESTINATION_DATASET_HANDLER_CLASS);
      for (String handlerClass : handlerList) {
        this.handlers.add(DestinationDatasetHandlerFactory.newInstance(handlerClass, jobState, canCleanUp));
      }
    }
  }

  /**
   * Executes handlers in configuration order, threading the work unit stream through each.
   *
   * @param workUnitStream incoming stream of work units
   * @return the stream after all handlers have been applied
   * @throws RuntimeException wrapping the {@link IOException} of the first handler that fails
   */
  public WorkUnitStream executeHandlers(WorkUnitStream workUnitStream) {
    for (DestinationDatasetHandler handler : this.handlers) {
      try {
        workUnitStream = handler.handle(workUnitStream);
      } catch (IOException e) {
        throw new RuntimeException(String.format("Handler %s failed to execute", handler.getClass().getName()), e);
      }
    }
    return workUnitStream;
  }

  /**
   * Close every handler. Previously the first close failure aborted the loop and left the
   * remaining handlers unclosed; now all handlers are attempted, and the first failure is
   * rethrown with any later failures attached as suppressed exceptions.
   *
   * @throws IOException the first close failure encountered, if any
   */
  @Override
  public void close() throws IOException {
    IOException firstFailure = null;
    for (DestinationDatasetHandler handler : this.handlers) {
      try {
        handler.close();
      } catch (IOException e) {
        if (firstFailure == null) {
          firstFailure = e;
        } else {
          firstFailure.addSuppressed(e);
        }
      }
    }
    if (firstFailure != null) {
      throw firstFailure;
    }
  }
}
| 3,044 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/destination/DestinationDatasetHandlerFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.destination;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.util.ClassAliasResolver;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
/**
 * Factory that resolves a handler alias or class name and constructs the
 * {@link DestinationDatasetHandler} via its longest matching constructor.
 */
public class DestinationDatasetHandlerFactory {

  /**
   * @param handlerTypeName alias or fully qualified class name of the handler
   * @param state job state passed to the handler's constructor
   * @param canCleanUp whether the handler may perform destructive cleanup
   * @return the constructed handler
   * @throws RuntimeException if the class cannot be resolved or instantiated
   */
  public static DestinationDatasetHandler newInstance(String handlerTypeName, SourceState state, Boolean canCleanUp) {
    try {
      ClassAliasResolver<DestinationDatasetHandler> resolver =
          new ClassAliasResolver<>(DestinationDatasetHandler.class);
      return GobblinConstructorUtils.invokeLongestConstructor(
          resolver.resolveClass(handlerTypeName), state, canCleanUp);
    } catch (ReflectiveOperationException roe) {
      throw new RuntimeException("Could not construct DestinationDatasetHandler " + handlerTypeName, roe);
    }
  }
}
| 3,045 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/state/ConstructState.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.state;
import java.lang.reflect.Type;
import java.util.Map;
import java.util.Properties;
import com.google.common.base.Optional;
import com.google.common.collect.Maps;
import com.google.gson.Gson;
import com.google.gson.reflect.TypeToken;
import org.apache.gobblin.Constructs;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
/**
 * Contains the state of a Gobblin construct at the end of a task. It can be merged with the {@link WorkUnitState},
 * allowing constructs to mutate the {@link WorkUnitState} (for example for a changed effective watermark), or add
 * values to report as metadata of task success/failure events.
 */
public class ConstructState extends State {
  private static final Gson GSON = new Gson();
  // final: the type token is immutable and shared across instances
  private static final Type TYPE_OF_HASHMAP = new TypeToken<Map<String, String>>() { }.getType();
  // Key under which the JSON-serialized map of overwrite properties is stored in this state
  private static final String OVERWRITE_PROPS_KEY = "gobblin.util.final.state.overwrite.props";
  // Prefix applied to non-overwrite keys when merging into a WorkUnitState
  private static final String FINAL_CONSTRUCT_STATE_PREFIX = "construct.final.state.";

  public ConstructState() {
  }

  public ConstructState(Properties properties) {
    super(properties);
  }

  public ConstructState(State otherState) {
    super(otherState);
  }

  /**
   * Add a set of properties that will overwrite properties in the {@link WorkUnitState}.
   * @param properties Properties to override.
   */
  public void addOverwriteProperties(Map<String, String> properties) {
    Map<String, String> previousOverwriteProps = getOverwritePropertiesMap();
    previousOverwriteProps.putAll(properties);
    setProp(OVERWRITE_PROPS_KEY, serializeMap(previousOverwriteProps));
  }

  /**
   * Add a set of properties that will overwrite properties in the {@link WorkUnitState}.
   * @param state Properties to override.
   */
  public void addOverwriteProperties(State state) {
    Map<String, String> propsMap = Maps.newHashMap();
    for (String key : state.getPropertyNames()) {
      propsMap.put(key, state.getProp(key));
    }
    addOverwriteProperties(propsMap);
  }

  /**
   * See {@link #addConstructState(Constructs, ConstructState, Optional)}. This method uses no infix.
   */
  public void addConstructState(Constructs construct, ConstructState constructState) {
    addConstructState(construct, constructState, Optional.<String>absent());
  }

  /**
   * See {@link #addConstructState(Constructs, ConstructState, Optional)}. This is a convenience method to pass a
   * String infix.
   */
  public void addConstructState(Constructs construct, ConstructState constructState, String infix) {
    addConstructState(construct, constructState, Optional.of(infix));
  }

  /**
   * Merge a {@link ConstructState} for a child construct into this {@link ConstructState}.
   *
   * <p>
   * Non-override property names will be mutated as follows: key -> construct.name() + infix + key
   * </p>
   *
   * @param construct type of the child construct.
   * @param constructState {@link ConstructState} to merge.
   * @param infix infix added to each non-override key (for example converter number if there are multiple converters).
   */
  public void addConstructState(Constructs construct, ConstructState constructState, Optional<String> infix) {
    addOverwriteProperties(constructState.getOverwritePropertiesMap());
    constructState.removeProp(OVERWRITE_PROPS_KEY);
    for (String key : constructState.getPropertyNames()) {
      setProp(construct.name() + "." + (infix.isPresent() ? infix.get() + "." : "") + key, constructState.getProp(key));
    }
    addAll(constructState);
  }

  /**
   * Merge this {@link ConstructState} into a {@link WorkUnitState}. All override properties will be added as-is to the
   * {@link WorkUnitState}, and possibly override already present properties. All other properties have their keys
   * mutated key -> {@link #FINAL_CONSTRUCT_STATE_PREFIX} + key, and added to the {@link WorkUnitState}.
   */
  public void mergeIntoWorkUnitState(WorkUnitState state) {
    Properties overwriteProperties = getOverwriteProperties();
    state.addAll(overwriteProperties);
    removeProp(OVERWRITE_PROPS_KEY);
    for (String key : getPropertyNames()) {
      state.setProp(FINAL_CONSTRUCT_STATE_PREFIX + key, getProp(key));
    }
  }

  /**
   * @return a {@link Map} of all override properties.
   */
  public Map<String, String> getOverwritePropertiesMap() {
    return contains(OVERWRITE_PROPS_KEY) ?
        deserializeMap(getProp(OVERWRITE_PROPS_KEY)) :
        Maps.<String, String>newHashMap();
  }

  /**
   * @return a {@link Properties} object of all override properties.
   */
  public Properties getOverwriteProperties() {
    Properties props = new Properties();
    props.putAll(getOverwritePropertiesMap());
    return props;
  }

  // Serialize the overwrite-properties map as JSON for storage under OVERWRITE_PROPS_KEY
  private static String serializeMap(Map<String, String> map) {
    return GSON.toJson(map);
  }

  // Inverse of serializeMap
  private static Map<String, String> deserializeMap(String string) {
    return GSON.fromJson(string, TYPE_OF_HASHMAP);
  }
}
| 3,046 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/policies | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/policies/count/RowCountRangePolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.policies.count;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.qualitychecker.task.TaskLevelPolicy;
/**
 * A {@link TaskLevelPolicy} that passes when the relative difference between rows read and
 * rows written, |written - read| / read, is within the configured range.
 */
public class RowCountRangePolicy extends TaskLevelPolicy {
  private static final Logger LOG = LoggerFactory.getLogger(RowCountRangePolicy.class);

  private final long rowsRead;
  private final long rowsWritten;
  // Maximum allowed relative deviation between rows read and written
  private final double range;

  public RowCountRangePolicy(State state, Type type) {
    super(state, type);
    this.rowsRead = state.getPropAsLong(ConfigurationKeys.EXTRACTOR_ROWS_EXPECTED);
    this.rowsWritten = state.getPropAsLong(ConfigurationKeys.WRITER_ROWS_WRITTEN);
    this.range = state.getPropAsDouble(ConfigurationKeys.ROW_COUNT_RANGE);
  }

  @Override
  public Result executePolicy() {
    if (this.rowsRead == 0) {
      // Previously 0 rows read produced 0/0 = NaN, and NaN <= range is always false, so an
      // empty (0 read / 0 written) task failed with a misleading message. Treat it as a match.
      if (this.rowsWritten == 0) {
        return Result.PASSED;
      }
      LOG.error(String.format(
          "RowCountRangePolicy check failed. Rows read %s, Rows written %s, computed range undefined, expected range %s ",
          this.rowsRead, this.rowsWritten, this.range));
      return Result.FAILED;
    }
    double computedRange = Math.abs((this.rowsWritten - this.rowsRead) / (double) this.rowsRead);
    if (computedRange <= this.range) {
      return Result.PASSED;
    }
    LOG.error(String.format(
        "RowCountRangePolicy check failed. Rows read %s, Rows written %s, computed range %s, expected range %s ",
        this.rowsRead, this.rowsWritten, computedRange, this.range));
    return Result.FAILED;
  }
}
| 3,047 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/policies | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/policies/count/RowCountPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.policies.count;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.qualitychecker.task.TaskLevelPolicy;
/**
 * A {@link TaskLevelPolicy} that passes only when the number of rows read equals the number
 * of rows written.
 */
public class RowCountPolicy extends TaskLevelPolicy {
  private static final Logger LOG = LoggerFactory.getLogger(RowCountPolicy.class);

  private final long rowsRead;
  private final long rowsWritten;

  public RowCountPolicy(State state, TaskLevelPolicy.Type type) {
    super(state, type);
    this.rowsRead = state.getPropAsLong(ConfigurationKeys.EXTRACTOR_ROWS_EXPECTED);
    this.rowsWritten = state.getPropAsLong(ConfigurationKeys.WRITER_ROWS_WRITTEN);
  }

  /** Passes when read and write counts match; logs a warning and fails otherwise. */
  @Override
  public Result executePolicy() {
    boolean countsMatch = this.rowsRead == this.rowsWritten;
    if (!countsMatch) {
      LOG.warn(this.getClass().getSimpleName() + " fails as read count and write count mismatch: " + this);
      return Result.FAILED;
    }
    return Result.PASSED;
  }

  @Override
  public String toString() {
    return String.format("RowCountPolicy [rowsRead=%s, rowsWritten=%s]", this.rowsRead, this.rowsWritten);
  }
}
| 3,048 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/policies | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/policies/time/RecordTimestampLowerBoundPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.policies.time;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.Period;
import org.joda.time.format.PeriodFormatter;
import org.joda.time.format.PeriodFormatterBuilder;
import com.google.common.base.Optional;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.qualitychecker.row.RowLevelPolicy;
import org.apache.gobblin.writer.partitioner.TimeBasedWriterPartitioner;
/**
 * An abstract {@link RowLevelPolicy} for checking a record's timestamp against the earliest allowed timestamp.
 * Records whose timestamps are earlier than the earliest allowed timestamp will fail.
 *
 * <p>
 * The lower bound is "now minus {@link #RECORD_MAX_ALLOWED_TIME_AGO}", computed once at
 * construction time in the configured quality-checker time zone. If that property is absent,
 * every record passes.
 * </p>
 *
 * @author Ziyang Liu
 */
public abstract class RecordTimestampLowerBoundPolicy extends RowLevelPolicy {
  public static final String RECORD_MAX_ALLOWED_TIME_AGO = "record.max.allowed.time.ago";

  // Parses values such as "1m2d3h": months ("m"), days ("d"), hours ("h")
  public static final PeriodFormatter PERIOD_FORMATTER = new PeriodFormatterBuilder().appendMonths().appendSuffix("m")
      .appendDays().appendSuffix("d").appendHours().appendSuffix("h").toFormatter();

  @SuppressWarnings("rawtypes")
  protected final TimeBasedWriterPartitioner partitioner;
  protected final DateTimeZone timeZone;
  // Epoch millis of the earliest allowed record timestamp; absent when no bound is configured
  protected final Optional<Long> earliestAllowedTimestamp;

  /**
   * NOTE(review): this calls the subclass-overridable {@link #getPartitioner()} during
   * construction, before the subclass constructor body has run; subclasses must not rely on
   * their own fields inside getPartitioner() — confirm with implementations.
   */
  public RecordTimestampLowerBoundPolicy(State state, Type type) {
    super(state, type);
    this.partitioner = getPartitioner();
    this.timeZone = DateTimeZone.forID(
        state.getProp(ConfigurationKeys.QUALITY_CHECKER_TIMEZONE, ConfigurationKeys.DEFAULT_QUALITY_CHECKER_TIMEZONE));
    this.earliestAllowedTimestamp = getEarliestAllowedTimestamp();
  }

  // Returns "now - RECORD_MAX_ALLOWED_TIME_AGO" as epoch millis, or absent when unconfigured
  private Optional<Long> getEarliestAllowedTimestamp() {
    if (!this.state.contains(RECORD_MAX_ALLOWED_TIME_AGO)) {
      return Optional.<Long> absent();
    }
    DateTime currentTime = new DateTime(this.timeZone);
    String maxTimeAgoStr = this.state.getProp(RECORD_MAX_ALLOWED_TIME_AGO);
    Period maxTimeAgo = PERIOD_FORMATTER.parsePeriod(maxTimeAgoStr);
    return Optional.of(currentTime.minus(maxTimeAgo).getMillis());
  }

  /** @return the partitioner used to extract a timestamp from each record */
  protected abstract TimeBasedWriterPartitioner<?> getPartitioner();

  /**
   * Fail the record when its timestamp (as extracted by the partitioner; presumably epoch
   * millis, to be consistent with the bound computed above — TODO confirm) is older than the
   * earliest allowed timestamp. Passes when no bound is configured.
   */
  @Override
  public Result executePolicy(Object record) {
    @SuppressWarnings("unchecked")
    long recordTimestamp = this.partitioner.getRecordTimestamp(record);
    if (this.earliestAllowedTimestamp.isPresent() && recordTimestamp < this.earliestAllowedTimestamp.get()) {
      return RowLevelPolicy.Result.FAILED;
    }
    return RowLevelPolicy.Result.PASSED;
  }
}
| 3,049 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/policies | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/policies/schema/SchemaRowCheckPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.policies.schema;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.qualitychecker.row.RowLevelPolicy;
/**
 * A no-op {@link RowLevelPolicy}: every record passes unconditionally.
 */
public class SchemaRowCheckPolicy extends RowLevelPolicy {

  public SchemaRowCheckPolicy(State state, Type type) {
    super(state, type);
  }

  /** Always passes; no per-row schema validation is performed. */
  @Override
  public Result executePolicy(Object record) {
    return Result.PASSED;
  }
}
| 3,050 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/policies | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/policies/schema/SchemaCompatibilityPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.policies.schema;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.qualitychecker.task.TaskLevelPolicy;
/**
 * A {@link TaskLevelPolicy} that passes when the current task's extract schema equals the
 * schema recorded in the previous table state, or when the previous state has no schema.
 */
public class SchemaCompatibilityPolicy extends TaskLevelPolicy {
  private static final Logger log = LoggerFactory.getLogger(SchemaCompatibilityPolicy.class);

  private final State state;
  private final State previousState;

  public SchemaCompatibilityPolicy(State state, Type type) {
    super(state, type);
    this.state = state;
    this.previousState = this.getPreviousTableState();
  }

  @Override
  public Result executePolicy() {
    // TODO how do you test for backwards compatibility?
    String previousSchema = this.previousState.getProp(ConfigurationKeys.EXTRACT_SCHEMA);
    if (previousSchema == null) {
      log.info("Previous Task State does not contain a schema");
      return Result.PASSED;
    }
    // Compare with the known-non-null previous schema as the receiver: the old code called
    // equals() on the current schema, throwing an NPE when the current state lacked one.
    if (previousSchema.equals(this.state.getProp(ConfigurationKeys.EXTRACT_SCHEMA))) {
      return Result.PASSED;
    }
    return Result.FAILED;
  }
}
| 3,051 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/policies | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/policies/avro/AvroHeaderTimestampPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.policies.avro;
import org.apache.avro.generic.GenericRecord;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.qualitychecker.row.RowLevelPolicy;
/**
 * A class that checks whether an Avro record has header.time or header.timestamp field.
 *
 * @author Ziyang Liu
 */
public class AvroHeaderTimestampPolicy extends RowLevelPolicy {

  public AvroHeaderTimestampPolicy(State state, Type type) {
    super(state, type);
  }

  /**
   * Return PASS if the record has either header.time or header.timestamp field.
   */
  @Override
  public Result executePolicy(Object record) {
    if (!(record instanceof GenericRecord)) {
      return Result.FAILED;
    }
    GenericRecord header = (GenericRecord) ((GenericRecord) record).get("header");
    if (header == null) {
      return Result.FAILED;
    }
    boolean hasTimestamp = header.get("time") != null || header.get("timestamp") != null;
    return hasTimestamp ? Result.PASSED : Result.FAILED;
  }
}
| 3,052 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/policies | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/policies/avro/AvroRecordTimestampLowerBoundPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.policies.avro;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.policies.time.RecordTimestampLowerBoundPolicy;
import org.apache.gobblin.writer.partitioner.TimeBasedAvroWriterPartitioner;
import org.apache.gobblin.writer.partitioner.TimeBasedWriterPartitioner;
/**
 * Avro-specific {@link RecordTimestampLowerBoundPolicy}, which extracts record timestamps using a
 * {@link TimeBasedAvroWriterPartitioner}.
 *
 * @author Ziyang Liu
 */
public class AvroRecordTimestampLowerBoundPolicy extends RecordTimestampLowerBoundPolicy {

  public AvroRecordTimestampLowerBoundPolicy(State state, Type type) {
    super(state, type);
  }

  /** Builds the Avro partitioner used to pull the timestamp out of each record. */
  @Override
  protected TimeBasedWriterPartitioner<?> getPartitioner() {
    TimeBasedAvroWriterPartitioner avroPartitioner = new TimeBasedAvroWriterPartitioner(this.state);
    return avroPartitioner;
  }
}
| 3,053 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/policies | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/policies/avro/AvroHeaderGuidPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.policies.avro;
import org.apache.avro.generic.GenericRecord;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.qualitychecker.row.RowLevelPolicy;
/**
 * A {@link RowLevelPolicy} that passes an Avro record only when its {@code header} sub-record
 * contains a non-null {@code guid} field.
 *
 * @author Ziyang Liu
 */
public class AvroHeaderGuidPolicy extends RowLevelPolicy {

  public AvroHeaderGuidPolicy(State state, Type type) {
    super(state, type);
  }

  /**
   * Returns {@link RowLevelPolicy.Result#PASSED} iff the record is a {@link GenericRecord} with a
   * non-null {@code header.guid} field; {@link RowLevelPolicy.Result#FAILED} otherwise.
   */
  @Override
  public Result executePolicy(Object record) {
    if (!(record instanceof GenericRecord)) {
      return Result.FAILED;
    }
    GenericRecord header = (GenericRecord) ((GenericRecord) record).get("header");
    if (header != null && header.get("guid") != null) {
      return Result.PASSED;
    }
    return Result.FAILED;
  }
}
| 3,054 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer/FsDataWriterBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import java.util.Collections;
import java.util.List;
import org.apache.avro.Schema;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.fs.Path;
import org.apache.gobblin.codec.StreamCodec;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.util.AvroUtils;
import org.apache.gobblin.util.ForkOperatorUtils;
import org.apache.gobblin.util.WriterUtils;
/**
 * An abstract {@link DataWriterBuilder} for building {@link DataWriter}s that write to
 * {@link org.apache.hadoop.fs.FileSystem}s.
 *
 * @param <S> schema type
 * @param <D> data record type
 *
 * @author Ziyang Liu
 */
public abstract class FsDataWriterBuilder<S, D> extends PartitionAwareDataWriterBuilder<S, D> {

  // Branch-aware config key: when true, partitioner field names are embedded alongside the
  // partition values in the serialized partition path.
  public static final String WRITER_INCLUDE_PARTITION_IN_FILE_NAMES =
      ConfigurationKeys.WRITER_PREFIX + ".include.partition.in.file.names";
  // Branch-aware config key: when true, path separators appearing inside partition values are
  // replaced so they do not create unintended sub-directories.
  public static final String WRITER_REPLACE_PATH_SEPARATORS_IN_PARTITIONS =
      ConfigurationKeys.WRITER_PREFIX + ".replace.path.separators.in.partitions";

  // Cached encoder list, built lazily by getEncoders() (synchronized access).
  private List<StreamCodec> encoders;

  /**
   * Get the file name to be used by the writer. If a
   * {@link org.apache.gobblin.writer.partitioner.WriterPartitioner} is used,
   * the partition will be added as part of the file name.
   */
  public String getFileName(State properties) {
    // WriterOutputFormat.OTHER carries no fixed extension; fall back to the configured one.
    String extension =
        this.format.equals(WriterOutputFormat.OTHER) ? getExtension(properties) : this.format.getExtension();
    String fileName = WriterUtils.getWriterFileName(properties, this.branches, this.branch, this.writerId, extension);
    if (this.partition.isPresent()) {
      fileName = getPartitionedFileName(properties, fileName);
    }
    // Append one tag per configured encoder (e.g. ".gz.aes") so the applied codecs can be
    // recognized from the file name alone.
    List<StreamCodec> encoders = getEncoders();
    if (!encoders.isEmpty()) {
      StringBuilder filenameBuilder = new StringBuilder(fileName);
      for (StreamCodec codec : encoders) {
        filenameBuilder.append('.');
        filenameBuilder.append(codec.getTag());
      }
      fileName = filenameBuilder.toString();
    }
    return fileName;
  }

  /** Returns the configured writer output format extension, or the empty string when unset. */
  private static String getExtension(State properties) {
    return properties.getProp(ConfigurationKeys.WRITER_OUTPUT_FORMAT_KEY, StringUtils.EMPTY);
  }

  /**
   * Serializes the current partition (if any) into a relative path string, honoring the
   * branch-scoped include/replace options; returns {@code null} when no partition is present.
   */
  protected String getPartitionPath(State properties) {
    if (this.partition.isPresent()) {
      boolean includePartitionerFieldNames = properties.getPropAsBoolean(ForkOperatorUtils
          .getPropertyNameForBranch(WRITER_INCLUDE_PARTITION_IN_FILE_NAMES, this.branches, this.branch), false);
      boolean removePathSeparators = properties.getPropAsBoolean(ForkOperatorUtils
          .getPropertyNameForBranch(WRITER_REPLACE_PATH_SEPARATORS_IN_PARTITIONS, this.branches, this.branch), false);
      return AvroUtils.serializeAsPath(this.partition.get(), includePartitionerFieldNames, removePathSeparators).toString();
    } else {
      return null;
    }
  }

  /** Prefixes the original file name with the serialized partition path. */
  protected String getPartitionedFileName(State properties, String originalFileName) {
    return new Path(
        getPartitionPath(properties),
        originalFileName).toString();
  }

  /** This base implementation accepts any partition schema; subclasses may restrict it. */
  @Override
  public boolean validatePartitionSchema(Schema partitionSchema) {
    return true;
  }

  /**
   * Get list of encoders configured for the writer.
   */
  public synchronized List<StreamCodec> getEncoders() {
    if (encoders == null) {
      encoders = buildEncoders();
    }
    return encoders;
  }

  /**
   * Build and cache encoders for the writer based on configured options as encoder
   * construction can potentially be expensive.
   */
  protected List<StreamCodec> buildEncoders() {
    // Should be overridden by subclasses if their associated writers are
    // encoder aware
    return Collections.emptyList();
  }
}
| 3,055 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer/HiveWritableHdfsDataWriterBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import java.io.IOException;
import org.apache.avro.Schema;
import org.apache.hadoop.hive.serde2.Serializer;
import org.apache.hadoop.io.Writable;
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.hive.HiveSerDeWrapper;
/**
 * A {@link DataWriterBuilder} for building {@link HiveWritableHdfsDataWriter}.
 *
 * If properties {@link #WRITER_WRITABLE_CLASS}, {@link #WRITER_OUTPUT_FORMAT_CLASS} are both specified, their values
 * will be used to create {@link HiveWritableHdfsDataWriter}. Otherwise, property
 * {@link HiveSerDeWrapper#SERDE_SERIALIZER_TYPE} is required, which will be used to create a
 * {@link HiveSerDeWrapper} that contains the information needed to create {@link HiveWritableHdfsDataWriter}.
 *
 * @author Ziyang Liu
 */
public class HiveWritableHdfsDataWriterBuilder<S> extends FsDataWriterBuilder<S, Writable> {

  public static final String WRITER_WRITABLE_CLASS = "writer.writable.class";
  public static final String WRITER_OUTPUT_FORMAT_CLASS = "writer.output.format.class";

  /** Builds a {@link HiveWritableHdfsDataWriter}, deriving missing writer classes from the Hive SerDe. */
  @SuppressWarnings("deprecation")
  @Override
  public DataWriter<Writable> build() throws IOException {
    Preconditions.checkNotNull(this.destination);
    Preconditions.checkArgument(!Strings.isNullOrEmpty(this.writerId));
    State properties = this.destination.getProperties();
    boolean writerClassesConfigured =
        properties.contains(WRITER_WRITABLE_CLASS) && properties.contains(WRITER_OUTPUT_FORMAT_CLASS);
    if (!writerClassesConfigured) {
      // Derive both writer classes from the configured Hive serializer when not given explicitly.
      HiveSerDeWrapper serializer = HiveSerDeWrapper.getSerializer(properties);
      Serializer serDe = (Serializer) serializer.getSerDe();
      properties.setProp(WRITER_WRITABLE_CLASS, serDe.getSerializedClass().getName());
      properties.setProp(WRITER_OUTPUT_FORMAT_CLASS, serializer.getOutputFormatClassName());
    }
    return new HiveWritableHdfsDataWriter(this, properties);
  }

  /** Any partition schema is acceptable for this writer. */
  @Override
  public boolean validatePartitionSchema(Schema partitionSchema) {
    return true;
  }
}
| 3,056 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer/PartitionedDataWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.function.Supplier;
import org.apache.avro.SchemaBuilder;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;
import org.apache.commons.lang3.reflect.ConstructorUtils;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.google.common.cache.RemovalListener;
import com.google.common.cache.RemovalNotification;
import com.google.common.collect.Lists;
import com.google.common.io.Closer;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.commit.SpeculativeAttemptAwareConstruct;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.dataset.Descriptor;
import org.apache.gobblin.dataset.PartitionDescriptor;
import org.apache.gobblin.exception.NonTransientException;
import org.apache.gobblin.instrumented.writer.InstrumentedDataWriterDecorator;
import org.apache.gobblin.instrumented.writer.InstrumentedPartitionedDataWriterDecorator;
import org.apache.gobblin.records.ControlMessageHandler;
import org.apache.gobblin.stream.ControlMessage;
import org.apache.gobblin.stream.FlushControlMessage;
import org.apache.gobblin.stream.MetadataUpdateControlMessage;
import org.apache.gobblin.stream.RecordEnvelope;
import org.apache.gobblin.stream.StreamEntity;
import org.apache.gobblin.util.AvroUtils;
import org.apache.gobblin.util.ExecutorsUtils;
import org.apache.gobblin.util.FinalState;
import org.apache.gobblin.writer.partitioner.WriterPartitioner;
/**
 * {@link DataWriter} that partitions data using a partitioner, instantiates appropriate writers, and sends records to
 * the chosen writer.
 * @param <S> schema type.
 * @param <D> record type.
 */
@Slf4j
public class PartitionedDataWriter<S, D> extends WriterWrapper<D> implements FinalState, SpeculativeAttemptAwareConstruct, WatermarkAwareWriter<D> {
  public static final String WRITER_LATEST_SCHEMA = "writer.latest.schema";

  //Config to control when a writer is evicted from Partitioned data writer cache.
  // NOTE: this config must be set only in streaming mode. For batch mode, setting this config will result
  // in incorrect behavior.
  public static final String PARTITIONED_WRITER_CACHE_TTL_SECONDS = "partitionedDataWriter.cache.ttl.seconds";
  public static final Long DEFAULT_PARTITIONED_WRITER_CACHE_TTL_SECONDS = Long.MAX_VALUE;
  public static final String PARTITIONED_WRITER_WRITE_TIMEOUT_SECONDS = "partitionedDataWriter.write.timeout.seconds";
  public static final Long DEFAULT_PARTITIONED_WRITER_WRITE_TIMEOUT_SECONDS = Long.MAX_VALUE;
  public static final String CURRENT_PARTITIONED_WRITERS_COUNTER = "partitionedDataWriter.counter";

  // Cache key used when no partitioner is configured: every record goes to a single writer.
  private static final GenericRecord NON_PARTITIONED_WRITER_KEY =
      new GenericData.Record(SchemaBuilder.record("Dummy").fields().endRecord());

  // Monotonically increasing suffix so every writer created by this instance has a unique id.
  private int writerIdSuffix = 0;
  private final String baseWriterId;
  private final State state;
  private final int branchId;
  private final Optional<WriterPartitioner> partitioner;
  @Getter
  @VisibleForTesting
  private final LoadingCache<GenericRecord, DataWriter<D>> partitionWriters;
  private final Optional<PartitionAwareDataWriterBuilder> builder;
  private final DataWriterBuilder writerBuilder;
  private final boolean shouldPartition;
  private final Closer closer;
  private final ControlMessageHandler controlMessageHandler;
  private boolean isSpeculativeAttemptSafe;
  private boolean isWatermarkCapable;
  private long writeTimeoutInterval;
  private ScheduledExecutorService cacheCleanUpExecutor;

  //Counters to keep track of records and bytes of writers which have been evicted from cache.
  @Getter
  @VisibleForTesting
  private long totalRecordsFromEvictedWriters;
  @Getter
  @VisibleForTesting
  private long totalBytesFromEvictedWriters;

  // Single-threaded pool used to serialize (and time-bound) partition writer creation.
  private ExecutorService createWriterPool;

  /**
   * Constructs the writer. When {@link ConfigurationKeys#WRITER_PARTITIONER_CLASS} is configured, a
   * {@link WriterPartitioner} is instantiated and a per-partition {@link DataWriter} is created
   * lazily through a {@link LoadingCache}; otherwise a single non-partitioned writer is used.
   *
   * @throws IOException if the partitioner cannot be instantiated reflectively
   */
  public PartitionedDataWriter(DataWriterBuilder<S, D> builder, final State state)
      throws IOException {
    this.state = state;
    this.branchId = builder.branch;
    this.isSpeculativeAttemptSafe = true;
    this.isWatermarkCapable = true;
    this.baseWriterId = builder.getWriterId();
    this.createWriterPool = Executors.newSingleThreadExecutor();
    this.closer = Closer.create();
    this.writerBuilder = builder;
    this.controlMessageHandler = new PartitionDataWriterMessageHandler();
    if (builder.schema != null) {
      this.state.setProp(WRITER_LATEST_SCHEMA, builder.getSchema());
    }
    long cacheExpiryInterval = this.state.getPropAsLong(PARTITIONED_WRITER_CACHE_TTL_SECONDS, DEFAULT_PARTITIONED_WRITER_CACHE_TTL_SECONDS);
    this.writeTimeoutInterval = this.state.getPropAsLong(PARTITIONED_WRITER_WRITE_TIMEOUT_SECONDS,
        DEFAULT_PARTITIONED_WRITER_WRITE_TIMEOUT_SECONDS);
    // Bound the timeout to 2/3 of the cache TTL so a slow write fails before the cache evicts
    // (and closes) the writer underneath it, which would cause data loss.
    this.writeTimeoutInterval = Math.min(this.writeTimeoutInterval, cacheExpiryInterval / 3 * 2);
    log.debug("PartitionedDataWriter: Setting cache expiry interval to {} seconds", cacheExpiryInterval);
    this.partitionWriters = CacheBuilder.newBuilder()
        .expireAfterAccess(cacheExpiryInterval, TimeUnit.SECONDS)
        .removalListener(new RemovalListener<GenericRecord, DataWriter<D>>() {
          @Override
          public void onRemoval(RemovalNotification<GenericRecord, DataWriter<D>> notification) {
            synchronized (PartitionedDataWriter.this) {
              if (notification.getValue() != null) {
                try {
                  // Fold the evicted writer's totals into the running counters so
                  // recordsWritten()/bytesWritten() stay accurate after eviction.
                  DataWriter<D> writer = notification.getValue();
                  totalRecordsFromEvictedWriters += writer.recordsWritten();
                  totalBytesFromEvictedWriters += writer.bytesWritten();
                  writer.close();
                } catch (IOException e) {
                  // Pass the exception as the Throwable argument (not a "{}" placeholder)
                  // so SLF4J logs the full stack trace.
                  log.error("Exception encountered when closing data writer on cache eviction", e);
                  //Should propagate the exception to avoid committing/publishing corrupt files.
                  throw new RuntimeException(e);
                }
              }
            }
          }
        }).build(new CacheLoader<GenericRecord, DataWriter<D>>() {
          @Override
          public DataWriter<D> load(final GenericRecord key)
              throws Exception {
            /* wrap the data writer to allow the option to close the writer on flush */
            return new InstrumentedPartitionedDataWriterDecorator<>(
                new CloseOnFlushWriterWrapper<D>(new Supplier<DataWriter<D>>() {
                  @Override
                  public DataWriter<D> get() {
                    try {
                      log.info(String.format("Adding one more writer to loading cache of existing writer "
                          + "with size = %d", partitionWriters.size()));
                      // Create the writer on the dedicated pool so creation itself can be
                      // bounded by the write timeout.
                      Future<DataWriter<D>> future = createWriterPool.submit(() -> createPartitionWriter(key));
                      state.setProp(CURRENT_PARTITIONED_WRITERS_COUNTER, partitionWriters.size() + 1);
                      return future.get(writeTimeoutInterval, TimeUnit.SECONDS);
                    } catch (ExecutionException | InterruptedException e) {
                      throw new RuntimeException("Error creating writer", e);
                    } catch (TimeoutException e) {
                      throw new RuntimeException(String.format("Failed to create writer due to timeout. The operation timed out after %s seconds.", writeTimeoutInterval), e);
                    }
                  }
                }, state), state, key);
          }
        });
    //Schedule a DataWriter cache clean up operation, since LoadingCache may keep the object
    // in memory even after it has been evicted from the cache.
    if (cacheExpiryInterval < Long.MAX_VALUE) {
      this.cacheCleanUpExecutor = Executors.newSingleThreadScheduledExecutor(
          ExecutorsUtils.newThreadFactory(Optional.of(log), Optional.of("CacheCleanupExecutor")));
      this.cacheCleanUpExecutor.scheduleAtFixedRate(new Runnable() {
        @Override
        public void run() {
          PartitionedDataWriter.this.partitionWriters.cleanUp();
        }
      }, 0, cacheExpiryInterval, TimeUnit.SECONDS);
    }
    if (state.contains(ConfigurationKeys.WRITER_PARTITIONER_CLASS)) {
      Preconditions.checkArgument(builder instanceof PartitionAwareDataWriterBuilder, String
          .format("%s was specified but the writer %s does not support partitioning.",
              ConfigurationKeys.WRITER_PARTITIONER_CLASS, builder.getClass().getCanonicalName()));
      try {
        this.shouldPartition = true;
        this.builder = Optional.of(PartitionAwareDataWriterBuilder.class.cast(builder));
        this.partitioner = Optional.of(WriterPartitioner.class.cast(ConstructorUtils
            .invokeConstructor(Class.forName(state.getProp(ConfigurationKeys.WRITER_PARTITIONER_CLASS)), state,
                builder.getBranches(), builder.getBranch())));
        // Report the partitioner's class (not the Optional wrapper's) in the error message.
        Preconditions
            .checkArgument(this.builder.get().validatePartitionSchema(this.partitioner.get().partitionSchema()), String
                .format("Writer %s does not support schema from partitioner %s",
                    builder.getClass().getCanonicalName(), this.partitioner.get().getClass().getCanonicalName()));
      } catch (ReflectiveOperationException roe) {
        throw new IOException(roe);
      }
    } else {
      this.shouldPartition = false;
      // Support configuration to close the DataWriter on flush to allow publishing intermediate results in a task
      CloseOnFlushWriterWrapper closeOnFlushWriterWrapper =
          new CloseOnFlushWriterWrapper<D>(new Supplier<DataWriter<D>>() {
            @Override
            public DataWriter<D> get() {
              try {
                return builder.withWriterId(PartitionedDataWriter.this.baseWriterId + "_"
                    + PartitionedDataWriter.this.writerIdSuffix++).build();
              } catch (IOException e) {
                throw new RuntimeException("Error creating writer", e);
              }
            }
          }, state);
      DataWriter<D> dataWriter = (DataWriter) closeOnFlushWriterWrapper.getDecoratedObject();
      InstrumentedDataWriterDecorator<D> writer =
          this.closer.register(new InstrumentedDataWriterDecorator<>(closeOnFlushWriterWrapper, state));
      this.isSpeculativeAttemptSafe = this.isDataWriterForPartitionSafe(dataWriter);
      this.isWatermarkCapable = this.isDataWriterWatermarkCapable(dataWriter);
      this.partitionWriters.put(NON_PARTITIONED_WRITER_KEY, writer);
      this.partitioner = Optional.absent();
      this.builder = Optional.absent();
    }
  }

  /** Returns true iff the delegate writer supports and enables watermarks. */
  private boolean isDataWriterWatermarkCapable(DataWriter<D> dataWriter) {
    return (dataWriter instanceof WatermarkAwareWriter) && (((WatermarkAwareWriter) dataWriter).isWatermarkCapable());
  }

  /**
   * Routes the record to the writer for its partition and writes it, failing the writer if the
   * write exceeds the bounded timeout.
   */
  @Override
  public void writeEnvelope(RecordEnvelope<D> recordEnvelope) throws IOException {
    try {
      GenericRecord partition = getPartitionForRecord(recordEnvelope.getRecord());
      DataWriter<D> writer = this.partitionWriters.get(partition);
      long startTime = System.currentTimeMillis();
      writer.writeEnvelope(recordEnvelope);
      long timeForWriting = System.currentTimeMillis() - startTime;
      // If the write exceeds writeTimeoutInterval (bounded in the constructor to at most 2/3 of
      // the cache expiration time), fail the writer to avoid data loss and further slowness on
      // the same HDFS block.
      if (timeForWriting / 1000 > this.writeTimeoutInterval) {
        //Use NonTransientException to avoid writer retry, in this case, retry will also cause data loss
        throw new NonTransientException(String.format("Write record took %s s, but threshold is %s s",
            timeForWriting / 1000, writeTimeoutInterval));
      }
    } catch (ExecutionException ee) {
      throw new IOException(ee);
    }
  }

  /** Returns the partition key for a record, or the shared dummy key when not partitioning. */
  private GenericRecord getPartitionForRecord(D record) {
    return this.shouldPartition ? this.partitioner.get().partitionForRecord(record) : NON_PARTITIONED_WRITER_KEY;
  }

  /**
   * Commits every cached partition writer; throws if any commit failed so no partial output is
   * published.
   */
  @Override
  public synchronized void commit()
      throws IOException {
    int writersCommitted = 0;
    for (Map.Entry<GenericRecord, DataWriter<D>> entry : this.partitionWriters.asMap().entrySet()) {
      try {
        entry.getValue().commit();
        writersCommitted++;
      } catch (Throwable throwable) {
        log.error(String.format("Failed to commit writer for partition %s.", entry.getKey()), throwable);
      }
    }
    if (writersCommitted < this.partitionWriters.asMap().size()) {
      throw new IOException("Failed to commit all writers.");
    }
  }

  /** Cleans up every cached partition writer; throws if any cleanup failed. */
  @Override
  public synchronized void cleanup()
      throws IOException {
    int writersCleanedUp = 0;
    for (Map.Entry<GenericRecord, DataWriter<D>> entry : this.partitionWriters.asMap().entrySet()) {
      try {
        entry.getValue().cleanup();
        writersCleanedUp++;
      } catch (Throwable throwable) {
        log.error(String.format("Failed to cleanup writer for partition %s.", entry.getKey()), throwable);
      }
    }
    if (writersCleanedUp < this.partitionWriters.asMap().size()) {
      throw new IOException("Failed to clean up all writers.");
    }
  }

  /** Total records written, including totals from writers already evicted from the cache. */
  @Override
  public synchronized long recordsWritten() {
    long totalRecords = 0;
    for (Map.Entry<GenericRecord, DataWriter<D>> entry : this.partitionWriters.asMap().entrySet()) {
      totalRecords += entry.getValue().recordsWritten();
    }
    return totalRecords + this.totalRecordsFromEvictedWriters;
  }

  /** Total bytes written, including totals from writers already evicted from the cache. */
  @Override
  public synchronized long bytesWritten()
      throws IOException {
    long totalBytes = 0;
    for (Map.Entry<GenericRecord, DataWriter<D>> entry : this.partitionWriters.asMap().entrySet()) {
      totalBytes += entry.getValue().bytesWritten();
    }
    return totalBytes + this.totalBytesFromEvictedWriters;
  }

  /**
   * Serializes partition lineage info to state, then closes all cached writers, the creation
   * pool, and every resource registered with the closer.
   */
  @Override
  public synchronized void close()
      throws IOException {
    try {
      serializePartitionInfoToState();
    } finally {
      closeWritersInCache();
      this.createWriterPool.shutdown();
      this.closer.close();
    }
  }

  /** Closes every writer currently present in the cache. */
  private void closeWritersInCache() throws IOException {
    for (Map.Entry<GenericRecord, DataWriter<D>> entry : this.partitionWriters.asMap().entrySet()) {
      entry.getValue().close();
    }
  }

  /**
   * Builds a new writer for the given partition and updates the aggregate speculative-safety and
   * watermark-capability flags accordingly.
   */
  private DataWriter<D> createPartitionWriter(GenericRecord partition)
      throws IOException {
    if (!this.builder.isPresent()) {
      throw new IOException("Writer builder not found. This is an error in the code.");
    }
    DataWriter dataWriter = this.builder.get().forPartition(partition).withWriterId(this.baseWriterId + "_" + this.writerIdSuffix++)
        .build();
    this.isSpeculativeAttemptSafe = this.isSpeculativeAttemptSafe && this.isDataWriterForPartitionSafe(dataWriter);
    this.isWatermarkCapable = this.isWatermarkCapable && this.isDataWriterWatermarkCapable(dataWriter);
    return dataWriter;
  }

  /**
   * Aggregates the final state of all partition writers. When partitioning, each property key is
   * suffixed with the serialized partition to avoid cross-writer collisions.
   */
  @Override
  public synchronized State getFinalState() {
    State state = new State();
    try {
      for (Map.Entry<GenericRecord, DataWriter<D>> entry : this.partitionWriters.asMap().entrySet()) {
        if (entry.getValue() instanceof FinalState) {
          State partitionFinalState = ((FinalState) entry.getValue()).getFinalState();
          if (this.shouldPartition) {
            for (String key : partitionFinalState.getPropertyNames()) {
              // Prevent overwriting final state across writers
              partitionFinalState.setProp(key + "_" + AvroUtils.serializeAsPath(entry.getKey(), false, true),
                  partitionFinalState.getProp(key));
            }
          }
          state.addAll(partitionFinalState);
        }
      }
      state.setProp("RecordsWritten", recordsWritten());
      state.setProp("BytesWritten", bytesWritten());
    } catch (Exception exception) {
      log.warn("Failed to get final state.", exception);
      // If Writer fails to return bytesWritten, it might not be implemented, or implemented incorrectly.
      // Omit property instead of failing.
    }
    return state;
  }

  @Override
  public boolean isSpeculativeAttemptSafe() {
    return this.isSpeculativeAttemptSafe;
  }

  /** Returns true iff the delegate writer declares itself safe for speculative attempts. */
  private boolean isDataWriterForPartitionSafe(DataWriter dataWriter) {
    return dataWriter instanceof SpeculativeAttemptAwareConstruct
        && ((SpeculativeAttemptAwareConstruct) dataWriter).isSpeculativeAttemptSafe();
  }

  @Override
  public boolean isWatermarkCapable() {
    return this.isWatermarkCapable;
  }

  @Override
  public ControlMessageHandler getMessageHandler() {
    return this.controlMessageHandler;
  }

  /**
   * A {@link ControlMessageHandler} that clones the message and lets each writer handle it.
   */
  private class PartitionDataWriterMessageHandler implements ControlMessageHandler {
    @Override
    public void handleMessage(ControlMessage message) {
      StreamEntity.ForkCloner cloner = message.forkCloner();
      // update the schema used to build writers
      if (message instanceof MetadataUpdateControlMessage) {
        PartitionedDataWriter.this.writerBuilder.withSchema(((MetadataUpdateControlMessage) message)
            .getGlobalMetadata().getSchema());
        state.setProp(WRITER_LATEST_SCHEMA, ((MetadataUpdateControlMessage) message)
            .getGlobalMetadata().getSchema());
      } else if (message instanceof FlushControlMessage) {
        //Add Partition info to state to report partition level lineage events on Flush
        serializePartitionInfoToState();
      }
      synchronized (PartitionedDataWriter.this) {
        for (DataWriter writer : PartitionedDataWriter.this.partitionWriters.asMap().values()) {
          ControlMessage cloned = (ControlMessage) cloner.getClone();
          writer.getMessageHandler().handleMessage(cloned);
        }
      }
      cloner.close();
    }
  }

  /**
   * Get the serialized key to partitions info in {@link #state}
   */
  public static String getPartitionsKey(int branchId) {
    return String.format("writer.%d.partitions", branchId);
  }

  /**
   * Serialize partitions info to {@link #state} if they are any
   */
  private void serializePartitionInfoToState() {
    List<PartitionDescriptor> descriptors = new ArrayList<>();
    for (DataWriter writer : partitionWriters.asMap().values()) {
      Descriptor descriptor = writer.getDataDescriptor();
      if (null == descriptor) {
        log.warn("Drop partition info as writer {} returns a null PartitionDescriptor", writer.toString());
        continue;
      }
      if (!(descriptor instanceof PartitionDescriptor)) {
        log.warn("Drop partition info as writer {} does not return a PartitionDescriptor", writer.toString());
        continue;
      }
      descriptors.add((PartitionDescriptor) descriptor);
    }
    if (descriptors.size() > 0) {
      state.setProp(getPartitionsKey(branchId), PartitionDescriptor.toPartitionJsonList(descriptors));
    } else {
      log.info("Partitions info not available. Will not serialize partitions");
    }
  }

  /**
   * Get the partition info of a work unit from the {@code state}. Then partition info will be removed from the
   * {@code state} to avoid persisting useless information
   *
   * <p>
   * In Gobblin, only the {@link PartitionedDataWriter} knows all partitions written for a work unit. Each partition
   * {@link DataWriter} decides the actual form of a dataset partition
   * </p>
   */
  public static List<PartitionDescriptor> getPartitionInfoAndClean(State state, int branchId) {
    String partitionsKey = getPartitionsKey(branchId);
    String json = state.getProp(partitionsKey);
    if (Strings.isNullOrEmpty(json)) {
      return Lists.newArrayList();
    }
    state.removeProp(partitionsKey);
    return PartitionDescriptor.fromPartitionJsonList(json);
  }
}
| 3,057 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer/SimpleDataWriterBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import com.google.common.base.Preconditions;
import org.apache.gobblin.codec.StreamCodec;
import org.apache.gobblin.compression.CompressionConfigParser;
import org.apache.gobblin.compression.CompressionFactory;
import org.apache.gobblin.crypto.EncryptionConfigParser;
import org.apache.gobblin.crypto.EncryptionFactory;
/**
 * A {@link DataWriterBuilder} for building {@link DataWriter} that writes bytes.
 *
 * @author akshay@nerdwallet.com
 */
public class SimpleDataWriterBuilder extends FsDataWriterBuilder<String, Object> {

  /**
   * Build a {@link org.apache.gobblin.writer.DataWriter}.
   *
   * @return the built {@link org.apache.gobblin.writer.DataWriter}
   * @throws java.io.IOException if there is anything wrong building the writer
   */
  @Override
  public DataWriter<Object> build() throws IOException {
    State properties = this.destination.getProperties();
    SimpleDataWriter simpleWriter = new SimpleDataWriter(this, properties);
    // Wrap so metadata records flowing through the pipeline are handled transparently.
    return new MetadataWriterWrapper<byte[]>(simpleWriter, byte[].class, this.branches, this.branch, properties);
  }

  /** Builds the codec chain from branch-scoped compression and encryption config. */
  @Override
  protected List<StreamCodec> buildEncoders() {
    Preconditions.checkNotNull(this.destination, "Destination must be set before building encoders");
    State properties = this.destination.getProperties();
    List<StreamCodec> codecs = new ArrayList<>();
    // TODO: refactor this when capability support comes back in
    // Compression comes first since compressing encrypted data will give no benefit
    Map<String, Object> compressionConfig =
        CompressionConfigParser.getConfigForBranch(properties, this.branches, this.branch);
    if (compressionConfig != null) {
      codecs.add(CompressionFactory.buildStreamCompressor(compressionConfig));
    }
    Map<String, Object> encryptionConfig = EncryptionConfigParser
        .getConfigForBranch(EncryptionConfigParser.EntityType.WRITER, properties, this.branches, this.branch);
    if (encryptionConfig != null) {
      codecs.add(EncryptionFactory.buildStreamCryptoProvider(encryptionConfig));
    }
    return codecs;
  }
}
| 3,058 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer/SchemaBasedPartitionedDataWriterBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import org.apache.avro.Schema;
import org.apache.hadoop.fs.Path;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.writer.partitioner.SchemaBasedWriterPartitioner;
/**
 * A {@link DataWriterBuilder} that uses the name field of {@link #schema} in the path name and overrides
 * {@link #getSchema()} to lazily resolve the schema from {@link #partition}.
 *
 * Must be used with {@link SchemaBasedWriterPartitioner}, which places the schema string into the
 * partition record under {@link SchemaBasedWriterPartitioner#SCHEMA_STRING}.
 */
public class SchemaBasedPartitionedDataWriterBuilder extends AvroDataWriterBuilder {

  /**
   * Prefix the partitioned file name with the name of the record schema.
   *
   * @param properties writer configuration (unused by this implementation)
   * @param originalFileName the file name chosen by the base builder
   * @return {@code <schemaName>/<originalFileName>} when a schema is available, otherwise the original name
   */
  @Override
  protected String getPartitionedFileName(State properties, String originalFileName) {
    Schema schema = this.getSchema();
    if (schema != null) {
      return new Path(schema.getName(), originalFileName).toString();
    } else {
      return originalFileName;
    }
  }

  /**
   * Get the schema from {@link #partition} since the correct schema is not known at builder-creation time.
   *
   * @return the parsed schema, or {@code null} if no partition is present
   */
  @Override // overrides the base builder's getSchema(), as documented on the class
  public Schema getSchema() {
    if (this.partition.isPresent()) {
      String schemaString = this.partition.get().get(SchemaBasedWriterPartitioner.SCHEMA_STRING).toString();
      this.withSchema(new Schema.Parser().parse(schemaString));
      return this.schema;
    } else {
      return null;
    }
  }

  /**
   * A partition schema is valid only if it carries the {@link SchemaBasedWriterPartitioner#SCHEMA_STRING} field.
   */
  @Override
  public boolean validatePartitionSchema(Schema partitionSchema) {
    return partitionSchema.getField(SchemaBasedWriterPartitioner.SCHEMA_STRING) != null;
  }
}
| 3,059 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer/ConsoleWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import java.io.IOException;
import lombok.extern.slf4j.Slf4j;
/**
 * A simple console writer that prints each record to stdout (and also logs it).
 */
@Slf4j
public class ConsoleWriter<D> implements WatermarkAwareWriter<D> {
  // Count of records written so far; reported by recordsWritten().
  private long recordsWritten;

  public ConsoleWriter() {
    recordsWritten = 0;
  }

  /** Print the record to stdout, mirror it to the log, and bump the counter. */
  @Override
  public void write(D record)
      throws IOException {
    System.out.println(record);
    if (record == null) {
      log.info("null record");
    } else {
      log.info(record.toString());
    }
    recordsWritten++;
  }

  /** Nothing to commit; stdout output is immediate. */
  @Override
  public void commit()
      throws IOException {
    log.debug("Commit called.");
  }

  /** Nothing to clean up. */
  @Override
  public void cleanup()
      throws IOException {
    log.debug("Cleanup called.");
  }

  @Override
  public long recordsWritten() {
    return recordsWritten;
  }

  /** Byte accounting is not tracked for console output. */
  @Override
  public long bytesWritten()
      throws IOException {
    return 0;
  }

  /** No underlying resource to release. */
  @Override
  public void close()
      throws IOException {
    log.debug("Close called");
  }

  /**
   * Flush console output
   */
  @Override
  public void flush() throws IOException {
    System.out.flush();
  }
}
| 3,060 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer/AvroDataWriterBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import java.io.IOException;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
/**
 * A {@link DataWriterBuilder} for building {@link DataWriter} that writes in Avro format.
 *
 * @author Yinan Li
 */
public class AvroDataWriterBuilder extends FsDataWriterBuilder<Schema, GenericRecord> {
  /**
   * Build an Avro {@link DataWriter} for the configured destination.
   *
   * Requires a destination, a non-empty writer id, a schema, and the AVRO output format to have
   * been set on this builder; violations fail fast via the precondition checks below.
   *
   * @return an {@link AvroHdfsDataWriter} for an HDFS destination
   * @throws IOException if the writer cannot be created
   * @throws RuntimeException if the destination type is not HDFS
   */
  @Override
  public DataWriter<GenericRecord> build() throws IOException {
    Preconditions.checkNotNull(this.destination);
    Preconditions.checkArgument(!Strings.isNullOrEmpty(this.writerId));
    Preconditions.checkNotNull(this.schema);
    Preconditions.checkArgument(this.format == WriterOutputFormat.AVRO);
    switch (this.destination.getType()) {
      case HDFS:
        return new AvroHdfsDataWriter(this, this.destination.getProperties());
      default:
        throw new RuntimeException("Unknown destination type: " + this.destination.getType());
    }
  }
}
| 3,061 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer/DataWriterWrapperBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import java.io.IOException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.gobblin.configuration.State;
/**
 * Adds extra behavior (throttling and retrying) around an existing {@link DataWriter}.
 * Retrying is enabled by default; throttling is applied only when explicitly configured.
 */
public class DataWriterWrapperBuilder<D> extends DataWriterBuilder<Void, D> {
  private static final Logger LOG = LoggerFactory.getLogger(DataWriterWrapperBuilder.class);

  private final DataWriter<D> writer;
  private final State state;

  public DataWriterWrapperBuilder(DataWriter<D> writer, State state) {
    this.writer = writer;
    this.state = state;
  }

  /**
   * Build the writer with adding throttling (if requested), and retrying feature on top of the writer.
   * {@inheritDoc}
   * @see org.apache.gobblin.writer.DataWriterBuilder#build()
   */
  @Override
  public DataWriter<D> build() throws IOException {
    DataWriter<D> result = writer;

    // Throttling is opt-in: both the rate limit and the throttle type must be configured.
    boolean throttleRequested = state.contains(ThrottleWriter.WRITER_LIMIT_RATE_LIMIT_KEY)
        && state.contains(ThrottleWriter.WRITER_THROTTLE_TYPE_KEY);
    if (throttleRequested) {
      result = new ThrottleWriter<>(result, state);
    }

    // Retrying wraps everything else and is on unless explicitly disabled.
    if (state.getPropAsBoolean(RetryWriter.RETRY_WRITER_ENABLED, true)) {
      result = new RetryWriter<>(result, state);
    }
    return result;
  }
}
| 3,062 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer/MetadataAwareWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import org.apache.gobblin.metadata.types.GlobalMetadata;
/**
 * Represents a Writer that is metadata aware. Metadata aware writers
 * specify default {@link GlobalMetadata} that will be added to all records that flow
 * through the pipeline they are a part of.
 *
 * This allows a writer to communicate with the MetadataWriterWrapper that surrounds it
 * and is the one responsible for processing any metadata that flows through the conversion pipeline.
 */
public interface MetadataAwareWriter {
  /**
   * Get default metadata that will be attached to records that pass through this writer.
   * For example, if the writer always gzip's data that passes through it, it would implement
   * this method and return a new GlobalMetadata object record with transferEncoding: ['gzip'].
   *
   * This default metadata will be merged with the RecordWithMetadata that is added to each record
   * by the source and converters in the pipeline.
   *
   * @return the writer's default {@link GlobalMetadata}; merged with per-record metadata upstream
   */
  GlobalMetadata getDefaultMetadata();
}
| 3,063 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer/SimpleDataWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.util.Arrays;
import org.apache.commons.lang3.ArrayUtils;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.primitives.Longs;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
/**
 * An implementation of {@link DataWriter} that writes bytes directly to HDFS.
 *
 * This class accepts two new configuration parameters:
 * <ul>
 * <li>{@link ConfigurationKeys#SIMPLE_WRITER_PREPEND_SIZE} is a boolean configuration option. If true, for each record,
 * it will write out a big endian long representing the record size and then write the record. i.e. the file format
 * will be the following:
 * r := >long<>record<
 * file := empty | r file
 * <li>{@link ConfigurationKeys#SIMPLE_WRITER_DELIMITER} accepts a byte value. If specified, this byte will be used
 * as a separator between records. If unspecified, no delimiter will be used between records.
 * </ul>
 * @author akshay@nerdwallet.com
 */
public class SimpleDataWriter extends FsDataWriter<byte[]> {

  private final Optional<Byte> recordDelimiter; // optional byte to place between each record write
  private final boolean prependSize;

  // Counters are long (not int) because the accessors return long and an int bytesWritten
  // would overflow once more than ~2 GiB has been written, yielding a negative value.
  private long recordsWritten;
  private long bytesWritten;

  private final OutputStream stagingFileOutputStream;

  public SimpleDataWriter(SimpleDataWriterBuilder builder, State properties)
      throws IOException {
    super(builder, properties);
    String delim;
    if ((delim = properties.getProp(ConfigurationKeys.SIMPLE_WRITER_DELIMITER, null)) == null || delim.length() == 0) {
      this.recordDelimiter = Optional.absent();
    } else {
      // Only the first byte of the configured delimiter string is used.
      this.recordDelimiter = Optional.of(delim.getBytes(ConfigurationKeys.DEFAULT_CHARSET_ENCODING)[0]);
    }
    this.prependSize = properties.getPropAsBoolean(ConfigurationKeys.SIMPLE_WRITER_PREPEND_SIZE, false);
    this.recordsWritten = 0;
    this.bytesWritten = 0;
    this.stagingFileOutputStream = createStagingFileOutputStream();
    setStagingFileGroup();
  }

  /**
   * Write a source record to the staging file
   *
   * Appends the delimiter byte (if configured) and prepends the big-endian record size
   * (if configured) before writing the bytes to the staging stream.
   *
   * @param record data record to write
   * @throws java.io.IOException if there is anything wrong writing the record
   */
  @Override
  public void write(byte[] record) throws IOException {
    Preconditions.checkNotNull(record);

    byte[] toWrite = record;
    if (this.recordDelimiter.isPresent()) {
      toWrite = Arrays.copyOf(record, record.length + 1);
      toWrite[toWrite.length - 1] = this.recordDelimiter.get();
    }
    if (this.prependSize) {
      // The length prefix includes the delimiter byte, if any.
      long recordSize = toWrite.length;
      ByteBuffer buf = ByteBuffer.allocate(Longs.BYTES);
      buf.putLong(recordSize);
      toWrite = ArrayUtils.addAll(buf.array(), toWrite);
    }
    this.stagingFileOutputStream.write(toWrite);
    this.bytesWritten += toWrite.length;
    this.recordsWritten++;
  }

  /**
   * Get the number of records written.
   *
   * @return number of records written
   */
  @Override
  public long recordsWritten() {
    return this.recordsWritten;
  }

  /**
   * Get the number of bytes written.
   *
   * @return number of bytes written
   */
  @Override
  public long bytesWritten() throws IOException {
    return this.bytesWritten;
  }

  @Override
  public boolean isSpeculativeAttemptSafe() {
    // Only safe when an attempt id is present and this is exactly SimpleDataWriter
    // (a subclass may add unsafe behavior).
    return this.writerAttemptIdOptional.isPresent() && this.getClass() == SimpleDataWriter.class;
  }

  /**
   * Flush the staging file
   * @throws IOException
   */
  @Override
  public void flush() throws IOException {
    this.stagingFileOutputStream.flush();
  }
}
| 3,064 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer/ConsoleWriterBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import java.io.IOException;
public class ConsoleWriterBuilder<D> extends DataWriterBuilder<String, D> {

  /**
   * Build a {@link ConsoleWriter} that prints records to stdout.
   *
   * @return a new {@link ConsoleWriter} typed to this builder's record type
   */
  @Override
  public DataWriter<D> build()
      throws IOException {
    // Use the diamond operator so the writer is ConsoleWriter<D> rather than the raw type,
    // avoiding an unchecked conversion.
    return new ConsoleWriter<>();
  }
}
| 3,065 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer/AvroHdfsDataWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import java.io.IOException;
import java.io.OutputStream;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.avro.Schema;
import org.apache.avro.file.CodecFactory;
import org.apache.avro.file.DataFileWriter;
import org.apache.avro.generic.GenericDatumWriter;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.io.DatumWriter;
import org.apache.hadoop.fs.FileSystem;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.util.ForkOperatorUtils;
import org.apache.gobblin.util.WriterUtils;
/**
 * An extension to {@link FsDataWriter} that writes in Avro format in the form of {@link GenericRecord}s.
 *
 * <p>
 * This implementation allows users to specify the {@link CodecFactory} to use through the configuration
 * property {@link ConfigurationKeys#WRITER_CODEC_TYPE}. By default, the deflate codec is used.
 * </p>
 *
 * @author Yinan Li
 */
public class AvroHdfsDataWriter extends FsDataWriter<GenericRecord> {
  private final Schema schema;
  private final OutputStream stagingFileOutputStream;
  private final DatumWriter<GenericRecord> datumWriter;
  private final DataFileWriter<GenericRecord> writer;
  // When true, null records are silently dropped instead of failing the precondition in write().
  private final boolean skipNullRecord;
  // Number of records successfully written
  protected final AtomicLong count = new AtomicLong(0);
  public AvroHdfsDataWriter(FsDataWriterBuilder<Schema, GenericRecord> builder, State state) throws IOException {
    super(builder, state);
    // Codec type and deflate level are resolved per fork branch; absent values fall back to defaults.
    CodecFactory codecFactory = WriterUtils.getCodecFactory(
        Optional.fromNullable(this.properties.getProp(ForkOperatorUtils
            .getPropertyNameForBranch(ConfigurationKeys.WRITER_CODEC_TYPE, this.numBranches, this.branchId))),
        Optional.fromNullable(this.properties.getProp(ForkOperatorUtils
            .getPropertyNameForBranch(ConfigurationKeys.WRITER_DEFLATE_LEVEL, this.numBranches, this.branchId))));
    this.schema = builder.getSchema();
    // The staging stream must exist before the DataFileWriter is created on top of it.
    this.stagingFileOutputStream = createStagingFileOutputStream();
    this.datumWriter = new GenericDatumWriter<>();
    // Register with the closer so the Avro writer is closed when this FsDataWriter closes.
    this.writer = this.closer.register(createDataFileWriter(codecFactory));
    this.skipNullRecord = state.getPropAsBoolean(ConfigurationKeys.WRITER_SKIP_NULL_RECORD, false);
  }
  public FileSystem getFileSystem() {
    return this.fs;
  }
  /**
   * Append a record to the Avro file. Null records are dropped when skipNullRecord is set,
   * otherwise they fail the not-null precondition.
   */
  @Override
  public void write(GenericRecord record) throws IOException {
    if (skipNullRecord && record == null) {
      return;
    }
    Preconditions.checkNotNull(record);
    this.writer.append(record);
    // Only increment when write is successful
    this.count.incrementAndGet();
  }
  @Override
  public long recordsWritten() {
    return this.count.get();
  }
  /**
   * Create a new {@link DataFileWriter} for writing Avro records.
   *
   * @param codecFactory a {@link CodecFactory} object for building the compression codec
   * @throws IOException if there is something wrong creating a new {@link DataFileWriter}
   */
  private DataFileWriter<GenericRecord> createDataFileWriter(CodecFactory codecFactory) throws IOException {
    @SuppressWarnings("resource")
    DataFileWriter<GenericRecord> writer = new DataFileWriter<>(this.datumWriter);
    writer.setCodec(codecFactory);
    // Open the file and return the DataFileWriter
    return writer.create(this.schema, this.stagingFileOutputStream);
  }
  @Override
  public boolean isSpeculativeAttemptSafe() {
    // Only safe when an attempt id is present and this is exactly AvroHdfsDataWriter.
    return this.writerAttemptIdOptional.isPresent() && this.getClass() == AvroHdfsDataWriter.class;
  }
  /**
   * Flush the writer
   * @throws IOException
   */
  @Override
  public void flush() throws IOException {
    this.writer.flush();
  }
}
| 3,066 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer/CloseOnFlushWriterWrapper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import java.io.IOException;
import java.util.function.Supplier;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.github.rholder.retry.RetryerBuilder;
import com.google.common.base.Preconditions;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.dataset.Descriptor;
import org.apache.gobblin.records.ControlMessageHandler;
import org.apache.gobblin.records.FlushControlMessageHandler;
import org.apache.gobblin.stream.ControlMessage;
import org.apache.gobblin.stream.FlushControlMessage;
import org.apache.gobblin.stream.MetadataUpdateControlMessage;
import org.apache.gobblin.stream.RecordEnvelope;
import org.apache.gobblin.util.Decorator;
import org.apache.gobblin.util.FinalState;
/**
 * The {@link CloseOnFlushWriterWrapper} closes the wrapped writer on flush and creates a new writer using a
 * {@link Supplier} on the next write. After the writer is closed the reference is still available for inspection until
 * a new writer is created on the next write.
 * @param <D> type of record handled by the wrapped writer
 */
public class CloseOnFlushWriterWrapper<D> extends WriterWrapper<D> implements Decorator, FinalState, Retriable {
  // Used internally to enable closing of the writer on flush
  public static final String WRITER_CLOSE_ON_FLUSH_KEY = ConfigurationKeys.WRITER_PREFIX + ".closeOnFlush";
  public static final boolean DEFAULT_WRITER_CLOSE_ON_FLUSH = false;
  // Controls whether the writer is closed when a MetadataUpdateControlMessage arrives (on by default)
  public static final String WRITER_CLOSE_ON_METADATA_UPDATE = ConfigurationKeys.WRITER_PREFIX + ".closeOnMetadataUpdate";
  public static final boolean DEFAULT_CLOSE_ON_METADATA_UPDATE = true;
  private static final Logger LOG = LoggerFactory.getLogger(CloseOnFlushWriterWrapper.class);
  private final State state;
  // Current underlying writer; replaced by a fresh one from the supplier after a close
  private DataWriter<D> writer;
  private final Supplier<DataWriter<D>> writerSupplier;
  // Lifecycle flags for the current underlying writer; reset when a new writer is created
  private boolean closed;
  private boolean committed;
  // is the close functionality enabled?
  private final boolean closeOnFlush;
  private final ControlMessageHandler controlMessageHandler;
  private final boolean closeOnMetadataUpdate;
  public CloseOnFlushWriterWrapper(Supplier<DataWriter<D>> writerSupplier, State state) {
    Preconditions.checkNotNull(state, "State is required.");
    this.state = state;
    this.writerSupplier = writerSupplier;
    this.writer = writerSupplier.get();
    this.closed = false;
    this.closeOnFlush = this.state.getPropAsBoolean(WRITER_CLOSE_ON_FLUSH_KEY,
        DEFAULT_WRITER_CLOSE_ON_FLUSH);
    this.controlMessageHandler = new CloseOnFlushWriterMessageHandler();
    this.closeOnMetadataUpdate = this.state.getPropAsBoolean(WRITER_CLOSE_ON_METADATA_UPDATE,
        DEFAULT_CLOSE_ON_METADATA_UPDATE);
  }
  @Override
  public Object getDecoratedObject() {
    return this.writer;
  }
  @Override
  public void writeEnvelope(RecordEnvelope<D> record) throws IOException {
    // get a new writer if last one was closed
    if (this.closed) {
      this.writer = writerSupplier.get();
      this.closed = false;
      this.committed = false;
    }
    this.writer.writeEnvelope(record);
  }
  // Idempotent: a second close() is a no-op for the current writer
  @Override
  public void close() throws IOException {
    if (!this.closed) {
      writer.close();
      this.closed = true;
    }
  }
  // Idempotent: a second commit() is a no-op for the current writer
  @Override
  public void commit() throws IOException {
    if (!this.committed) {
      writer.commit();
      this.committed = true;
    }
  }
  @Override
  public void cleanup() throws IOException {
    writer.cleanup();
  }
  @Override
  public long recordsWritten() {
    return writer.recordsWritten();
  }
  @Override
  public long bytesWritten() throws IOException {
    return writer.bytesWritten();
  }
  // Delegate retry policy to the wrapped writer when it supports one; otherwise use the default
  @Override
  public RetryerBuilder<Void> getRetryerBuilder() {
    if (writer instanceof Retriable) {
      return ((Retriable) writer).getRetryerBuilder();
    }
    return RetryWriter.createRetryBuilder(state);
  }
  @Override
  public State getFinalState() {
    State state = new State();
    if (this.writer instanceof FinalState) {
      state.addAll(((FinalState)this.writer).getFinalState());
    } else {
      LOG.warn("Wrapped writer does not implement FinalState: " + this.writer.getClass());
    }
    return state;
  }
  @Override
  public Descriptor getDataDescriptor() {
    return writer.getDataDescriptor();
  }
  @Override
  public ControlMessageHandler getMessageHandler() {
    return this.controlMessageHandler;
  }
  /**
   * The writer will be flushed. It will also be committed and closed if configured to be closed on flush.
   * @throws IOException
   */
  @Override
  public void flush() throws IOException {
    flush(this.closeOnFlush);
  }
  private void flush(boolean close) throws IOException {
    // nothing to flush, so don't call flush on the underlying writer since it may not support flush after close
    if (this.closed) {
      return;
    }
    this.writer.flush();
    // commit data then close the writer
    if (close) {
      commit();
      close();
    }
  }
  /**
   * A {@link ControlMessageHandler} that handles closing on flush
   */
  private class CloseOnFlushWriterMessageHandler implements ControlMessageHandler {
    @Override
    public void handleMessage(ControlMessage message) {
      // nothing to do if already closed, so don't call then underlying handler since it may not work on closed objects
      if (CloseOnFlushWriterWrapper.this.closed) {
        return;
      }
      ControlMessageHandler underlyingHandler = CloseOnFlushWriterWrapper.this.writer.getMessageHandler();
      // let underlying writer handle the control messages first
      underlyingHandler.handleMessage(message);
      // Handle close after flush logic. The file is closed if requested by the flush or the configuration.
      if ((message instanceof FlushControlMessage &&
          (CloseOnFlushWriterWrapper.this.closeOnFlush ||
              ((FlushControlMessage) message).getFlushType() == FlushControlMessage.FlushType.FLUSH_AND_CLOSE)) ||
          (message instanceof MetadataUpdateControlMessage && CloseOnFlushWriterWrapper.this.closeOnMetadataUpdate)) {
        try {
          // avoid flushing again: a FlushControlMessageHandler has already flushed in handleMessage above
          if (underlyingHandler instanceof FlushControlMessageHandler) {
            commit();
            close();
          } else {
            flush(true);
          }
        } catch (IOException e) {
          throw new RuntimeException("Could not flush when handling FlushControlMessage", e);
        }
      }
    }
  }
}
| 3,067 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer/AbstractAsyncDataWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import java.io.IOException;
import java.util.concurrent.Future;
import javax.annotation.Nullable;
import javax.annotation.concurrent.ThreadSafe;
import org.apache.gobblin.async.AsyncDataDispatcher;
import org.apache.gobblin.async.BufferedRecord;
/**
 * Base class to write data asynchronously. It is an {@link AsyncDataDispatcher} on {@link BufferedRecord}, which
 * wraps a record and its callback.
 *
 * @param <D> type of record
 */
@ThreadSafe
public abstract class AbstractAsyncDataWriter<D> extends AsyncDataDispatcher<BufferedRecord<D>> implements AsyncDataWriter<D> {
  public static final int DEFAULT_BUFFER_CAPACITY = 10000;

  public AbstractAsyncDataWriter(int capacity) {
    super(capacity);
  }

  /**
   * Enqueue the record together with its callback and hand back a future that completes
   * when the write has been dispatched.
   */
  @Override
  public final Future<WriteResponse> write(D record, @Nullable WriteCallback callback) {
    FutureWrappedWriteCallback future = new FutureWrappedWriteCallback(callback);
    put(new BufferedRecord<>(record, future));
    return future;
  }

  /** Drain the buffer, then terminate the dispatcher even if the flush fails. */
  @Override
  public void close()
      throws IOException {
    try {
      flush();
    } finally {
      terminate();
    }
  }

  /**
   * Wait for a buffer empty occurrence
   */
  @Override
  public void flush()
      throws IOException {
    waitForBufferEmpty();
  }
}
| 3,068 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer/MetadataWriterWrapper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import java.io.IOException;
import java.util.Set;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.metadata.GlobalMetadataCollector;
import org.apache.gobblin.metadata.types.GlobalMetadata;
import org.apache.gobblin.type.RecordWithMetadata;
import org.apache.gobblin.util.ForkOperatorUtils;
/**
 * Wraps an existing {@link DataWriter} and makes it metadata aware. This class is responsible for doing the following:
 *
 * 1. If the underlying writer is {@link MetadataAwareWriter}, query it to get default metadata to add to any
 *    incoming records.
 * 2. Process each incoming record:
 *    2a. If it is a {@link RecordWithMetadata}, process the metadata and pass either the underlying record or the
 *        {@link RecordWithMetadata}, depending on the writer class type, to the wrapped writer.
 *    2b. If it is a standard record type attach any default metadata and pass the record to the wrapped writer.
 * 3. On {@link #commit()}, publish the combined metadata output to JobState property so it can be published.
 */
public class MetadataWriterWrapper<D> implements DataWriter<Object> {
  private final DataWriter wrappedWriter;
  // The record type the wrapped writer accepts; decides whether it gets the RecordWithMetadata or the bare record
  private final Class<? extends D> writerDataClass;
  private final int numBranches;
  private final int branchId;
  private final State properties;
  private final GlobalMetadataCollector metadataCollector;

  /**
   * Initialize a new metadata wrapper.
   * @param wrappedWriter Writer to wrap
   * @param writerDataClass Class of data the writer accepts
   * @param numBranches # of branches in state
   * @param branchId Branch this writer is wrapping
   * @param writerProperties Configuration properties
   */
  public MetadataWriterWrapper(DataWriter<D> wrappedWriter, Class<? extends D> writerDataClass,
      int numBranches, int branchId, State writerProperties) {
    this.wrappedWriter = wrappedWriter;
    this.writerDataClass = writerDataClass;
    this.numBranches = numBranches;
    this.branchId = branchId;
    this.properties = writerProperties;

    // Seed the collector with the writer's default metadata, if it advertises any.
    GlobalMetadata defaultMetadata = null;
    if (wrappedWriter instanceof MetadataAwareWriter) {
      defaultMetadata = ((MetadataAwareWriter) wrappedWriter).getDefaultMetadata();
    }
    this.metadataCollector = new GlobalMetadataCollector(defaultMetadata, GlobalMetadataCollector.UNLIMITED_SIZE);
  }

  /**
   * Record the metadata attached to the record (if any) and forward the record to the wrapped writer,
   * unwrapping it when the wrapped writer does not accept {@link RecordWithMetadata}.
   */
  @Override
  @SuppressWarnings("unchecked")
  public void write(Object untypedRecord)
      throws IOException {
    if (untypedRecord instanceof RecordWithMetadata) {
      RecordWithMetadata record = (RecordWithMetadata) untypedRecord;
      GlobalMetadata globalMetadata = record.getMetadata().getGlobalMetadata();
      metadataCollector.processMetadata(globalMetadata);
      if (RecordWithMetadata.class.isAssignableFrom(writerDataClass)) {
        wrappedWriter.write(record);
      } else {
        wrappedWriter.write(record.getRecord());
      }
    } else {
      // Still notify the collector so default metadata is emitted even for plain records.
      metadataCollector.processMetadata(null);
      wrappedWriter.write(untypedRecord);
    }
  }

  /**
   * Write combined metadata to the {@link ConfigurationKeys#WRITER_METADATA_KEY} parameter.
   * A single metadata record is serialized as a JSON object; multiple records as a JSON array.
   */
  protected void writeMetadata() throws IOException {
    Set<GlobalMetadata> collectedMetadata = metadataCollector.getMetadataRecords();
    if (collectedMetadata.isEmpty()) {
      return;
    }
    String propName =
        ForkOperatorUtils.getPropertyNameForBranch(ConfigurationKeys.WRITER_METADATA_KEY, numBranches, branchId);
    String metadataStr;
    if (collectedMetadata.size() == 1) {
      metadataStr = collectedMetadata.iterator().next().toJson();
    } else {
      StringBuilder sb = new StringBuilder();
      sb.append('[');
      boolean first = true;
      for (GlobalMetadata md : collectedMetadata) {
        if (!first) {
          sb.append(',');
        }
        sb.append(md.toJson());
        first = false;
      }
      sb.append(']');
      metadataStr = sb.toString();
    }
    this.properties.setProp(propName, metadataStr);
  }

  @Override
  public void commit()
      throws IOException {
    // Publish collected metadata before committing the underlying writer's output.
    writeMetadata();
    wrappedWriter.commit();
  }

  @Override
  public void cleanup()
      throws IOException {
    wrappedWriter.cleanup();
  }

  @Override
  public long recordsWritten() {
    return wrappedWriter.recordsWritten();
  }

  @Override
  public long bytesWritten()
      throws IOException {
    // Fixed: previously delegated to recordsWritten(), which reported record counts as bytes.
    return wrappedWriter.bytesWritten();
  }

  @Override
  public void close()
      throws IOException {
    wrappedWriter.close();
  }

  @Override
  public void flush() throws IOException {
    wrappedWriter.flush();
  }
}
| 3,069 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer/Retriable.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import com.github.rholder.retry.RetryerBuilder;
/**
* Interface that provides RetryerBuilder so that specific Retry logic can be injected.
*/
public interface Retriable {
public RetryerBuilder<Void> getRetryerBuilder();
} | 3,070 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer/RetryWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import java.io.IOException;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.codahale.metrics.Meter;
import com.github.rholder.retry.Attempt;
import com.github.rholder.retry.RetryException;
import com.github.rholder.retry.RetryListener;
import com.github.rholder.retry.Retryer;
import com.github.rholder.retry.RetryerBuilder;
import com.github.rholder.retry.StopStrategies;
import com.github.rholder.retry.WaitStrategies;
import com.google.common.base.Optional;
import com.google.common.base.Predicate;
import org.apache.gobblin.commit.SpeculativeAttemptAwareConstruct;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.exception.NonTransientException;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.metrics.GobblinMetrics;
import org.apache.gobblin.records.ControlMessageHandler;
import org.apache.gobblin.stream.RecordEnvelope;
import org.apache.gobblin.util.FinalState;
/**
 * Retry writer follows decorator pattern that retries on inner writer's failure.
 *
 * Only {@link #writeEnvelope(RecordEnvelope)} and {@link #commit()} are executed through the
 * {@link Retryer}; all other calls are passed straight through to the wrapped writer.
 * @param <D>
 */
public class RetryWriter<D> extends WatermarkAwareWriterWrapper<D> implements DataWriter<D>, FinalState, SpeculativeAttemptAwareConstruct {
  private static final Logger LOG = LoggerFactory.getLogger(RetryWriter.class);
  public static final String RETRY_CONF_PREFIX = "gobblin.writer.retry.";
  public static final String RETRY_WRITER_ENABLED = RETRY_CONF_PREFIX + "enabled";
  public static final String FAILED_RETRY_WRITES_METER = RETRY_CONF_PREFIX + "failed_writes";
  public static final String RETRY_MULTIPLIER = RETRY_CONF_PREFIX + "multiplier";
  public static final String RETRY_MAX_WAIT_MS_PER_INTERVAL = RETRY_CONF_PREFIX + "max_wait_ms_per_interval";
  public static final String RETRY_MAX_ATTEMPTS = RETRY_CONF_PREFIX + "max_attempts";
  public static final String FAILED_WRITES_KEY = "FailedWrites";
  private final DataWriter<D> writer;
  private final Retryer<Void> retryer;
  // Count of failed attempts observed by the retry listener; surfaced through getFinalState().
  private long failedWrites;
  public RetryWriter(DataWriter<D> writer, State state) {
    this.writer = writer;
    this.retryer = buildRetryer(state);
    if (this.writer instanceof WatermarkAwareWriter) {
      setWatermarkAwareWriter((WatermarkAwareWriter) this.writer);
    }
  }
  /**
   * Build Retryer.
   * - If Writer implements Retriable, it will use the RetryerBuilder from the writer.
   * - Otherwise, it will use DEFAULT writer builder.
   *
   * - If Gobblin metrics is enabled, it will emit all failure count in to metrics.
   *
   * @param state
   * @return
   */
  private Retryer<Void> buildRetryer(State state) {
    RetryerBuilder<Void> builder = null;
    if (writer instanceof Retriable) {
      builder = ((Retriable) writer).getRetryerBuilder();
    } else {
      builder = createRetryBuilder(state);
    }
    if (GobblinMetrics.isEnabled(state)) {
      final Optional<Meter> retryMeter = Optional.of(Instrumented.getMetricContext(state, getClass()).meter(FAILED_RETRY_WRITES_METER));
      builder.withRetryListener(new RetryListener() {
        @Override
        public <V> void onRetry(Attempt<V> attempt) {
          // Every attempt that ended in an exception counts as a failed write, including the final one.
          if (attempt.hasException()) {
            Instrumented.markMeter(retryMeter);
            failedWrites++;
          }
        }
      });
    }
    return builder.build();
  }
  @Override
  public void close() throws IOException {
    writer.close();
  }
  @Override
  public void writeEnvelope(RecordEnvelope<D> recordEnvelope) throws IOException {
    //Need a Callable interface to be wrapped by Retryer.
    Callable<Void> writeCall = new Callable<Void>() {
      @Override
      public Void call() throws Exception {
        writer.writeEnvelope(recordEnvelope);
        return null;
      }
    };
    callWithRetry(writeCall);
  }
  @Override
  public void commit() throws IOException {
    Callable<Void> commitCall = new Callable<Void>() {
      @Override
      public Void call() throws Exception {
        writer.commit();
        return null;
      }
    };
    callWithRetry(commitCall);
  }
  /**
   * Runs the callable through the retryer, translating retry-framework exceptions into IOException.
   * On exhausted retries, the cause of the last failed attempt is propagated.
   */
  private void callWithRetry(Callable<Void> callable) throws IOException {
    try {
      this.retryer.wrap(callable).call();
    } catch (RetryException e) {
      throw new IOException(e.getLastFailedAttempt().getExceptionCause());
    } catch (ExecutionException e) {
      throw new IOException(e);
    }
  }
  @Override
  public void cleanup() throws IOException {
    writer.cleanup();
  }
  @Override
  public long recordsWritten() {
    return writer.recordsWritten();
  }
  @Override
  public long bytesWritten() throws IOException {
    return writer.bytesWritten();
  }
  /**
   * @return RetryerBuilder that retries on all exceptions except NonTransientException with exponential back off
   */
  public static RetryerBuilder<Void> createRetryBuilder(State state) {
    Predicate<Throwable> transients = new Predicate<Throwable>() {
      @Override
      public boolean apply(Throwable t) {
        // NonTransientException signals an unrecoverable failure; retrying it would never succeed.
        return !(t instanceof NonTransientException);
      }
    };
    long multiplier = state.getPropAsLong(RETRY_MULTIPLIER, 500L);
    long maxWaitMsPerInterval = state.getPropAsLong(RETRY_MAX_WAIT_MS_PER_INTERVAL, 10000);
    // Setting retry attempts to 1 because Retrying is not possible for every kind of source and target record types
    // e.g. 1) if the source and destination are InputStream and OutputStream respectively as in the case of
    // FileAwareInputStreamDataWriter, we may need to reset the InputStream to the beginning, which, depending upon the
    // implementation of InputStream is not always possible, 2) we need to reopen the InputStream which is closed in
    // the finally block of writeImpl after the first attempt.
    int maxAttempts = state.getPropAsInt(RETRY_MAX_ATTEMPTS, 1);
    return RetryerBuilder.<Void> newBuilder()
        .retryIfException(transients)
        .withWaitStrategy(WaitStrategies.exponentialWait(multiplier, maxWaitMsPerInterval, TimeUnit.MILLISECONDS)) // exponential backoff scaled by 'multiplier' ms, capped at maxWaitMsPerInterval
        .withStopStrategy(StopStrategies.stopAfterAttempt(maxAttempts)) // give up after maxAttempts attempts (default 1; see note above)
        .withRetryListener(new RetryListener() {
          @Override
          public <V> void onRetry(Attempt<V> attempt) {
            // We can get different exceptions on each attempt. The first one can be meaningful, and follow up
            // exceptions can come from incorrect state of the system, and hide the real problem. Logging all of them
            // to simplify troubleshooting
            if (attempt.hasException() && attempt.getAttemptNumber() < maxAttempts) {
              LOG.warn("Caught exception. Operation will be retried. Attempt #" + attempt.getAttemptNumber(),
                  attempt.getExceptionCause());
            }
          }
        });
  }
  @Override
  public boolean isSpeculativeAttemptSafe() {
    if (this.writer instanceof SpeculativeAttemptAwareConstruct) {
      return ((SpeculativeAttemptAwareConstruct)this.writer).isSpeculativeAttemptSafe();
    }
    return false;
  }
  @Override
  public State getFinalState() {
    State state = new State();
    if (this.writer instanceof FinalState) {
      state.addAll(((FinalState)this.writer).getFinalState());
    } else {
      LOG.warn("Wrapped writer does not implement FinalState: " + this.writer.getClass());
    }
    state.setProp(FAILED_WRITES_KEY, this.failedWrites);
    return state;
  }
  @Override
  public ControlMessageHandler getMessageHandler() {
    return this.writer.getMessageHandler();
  }
  @Override
  public void flush() throws IOException {
    this.writer.flush();
  }
}
| 3,071 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer/ThrottleWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import java.io.IOException;
import java.util.concurrent.TimeUnit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.codahale.metrics.Timer;
import com.github.rholder.retry.RetryerBuilder;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import org.apache.gobblin.stream.RecordEnvelope;
import org.apache.gobblin.util.Decorator;
import org.apache.gobblin.util.limiter.Limiter;
import org.apache.gobblin.util.limiter.RateBasedLimiter;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.metrics.GobblinMetrics;
import org.apache.gobblin.util.FinalState;
/**
 * Throttle writer follows decorator pattern that throttles inner writer by either QPS or by bytes.
 * @param <D> record type handled by the wrapped writer
 */
public class ThrottleWriter<D> extends WriterWrapper<D> implements Decorator, FinalState, Retriable {
  private static final Logger LOG = LoggerFactory.getLogger(ThrottleWriter.class);
  public static final String WRITER_THROTTLE_TYPE_KEY = "gobblin.writer.throttle_type";
  public static final String WRITER_LIMIT_RATE_LIMIT_KEY = "gobblin.writer.throttle_rate";
  public static final String WRITES_THROTTLED_TIMER = "gobblin.writer.throttled_time";
  public static final String THROTTLED_TIME_KEY = "ThrottledTime";
  private static final String LOCAL_JOB_LAUNCHER_TYPE = "LOCAL";

  /** Dimension the throttle applies to: queries (writes) per second, or bytes per second. */
  public enum ThrottleType {
    QPS,
    Bytes
  }

  private final State state;
  private final DataWriter<D> writer;
  private final Limiter limiter;
  private final ThrottleType type;
  private final Optional<Timer> throttledTimer;
  // Total milliseconds spent blocked acquiring permits; reported via getFinalState().
  private long throttledTime;

  public ThrottleWriter(DataWriter<D> writer, State state) {
    Preconditions.checkNotNull(writer, "DataWriter is required.");
    Preconditions.checkNotNull(state, "State is required.");
    this.state = state;
    this.writer = writer;
    this.type = ThrottleType.valueOf(state.getProp(WRITER_THROTTLE_TYPE_KEY));

    int rateLimit = computeRateLimit(state);
    LOG.info("Rate limit for each writer: " + rateLimit + " " + type);
    // Reuse the already computed limit instead of recomputing it.
    this.limiter = new RateBasedLimiter(rateLimit);

    if (GobblinMetrics.isEnabled(state)) {
      throttledTimer = Optional.<Timer>of(Instrumented.getMetricContext(state, getClass()).timer(WRITES_THROTTLED_TIMER));
    } else {
      throttledTimer = Optional.absent();
    }
  }

  @Override
  public Object getDecoratedObject() {
    return this.writer;
  }

  /**
   * Compute rate limit per executor.
   * Rate limit = Total rate limit / # of parallelism
   *
   * # of parallelism:
   * - if LOCAL job type Min(# of source partition, thread pool size)
   * - else Min(# of source partition, # of max mappers)
   *
   * @param state job state carrying throttle and parallelism configuration
   * @return per-executor rate limit, always at least 1
   */
  private int computeRateLimit(State state) {
    String jobLauncherType = state.getProp(ConfigurationKeys.JOB_LAUNCHER_TYPE_KEY, LOCAL_JOB_LAUNCHER_TYPE);
    int parallelism = 1;
    if (LOCAL_JOB_LAUNCHER_TYPE.equals(jobLauncherType)) {
      parallelism = state.getPropAsInt(ConfigurationKeys.TASK_EXECUTOR_THREADPOOL_SIZE_KEY,
          ConfigurationKeys.DEFAULT_TASK_EXECUTOR_THREADPOOL_SIZE);
    } else {
      parallelism = state.getPropAsInt(ConfigurationKeys.MR_JOB_MAX_MAPPERS_KEY,
          ConfigurationKeys.DEFAULT_MR_JOB_MAX_MAPPERS);
    }
    parallelism = Math.min(parallelism, state.getPropAsInt(ConfigurationKeys.SOURCE_MAX_NUMBER_OF_PARTITIONS,
        ConfigurationKeys.DEFAULT_MAX_NUMBER_OF_PARTITIONS));
    parallelism = Math.max(parallelism, 1);

    int rateLimit = state.getPropAsInt(WRITER_LIMIT_RATE_LIMIT_KEY) / parallelism;
    rateLimit = Math.max(rateLimit, 1);
    return rateLimit;
  }

  /**
   * Calls inner writer with self throttling.
   * If the throttle type is byte, it applies throttle after write happens.
   * This is because it can figure out written bytes after it's written. It's not ideal but throttling after write should be sufficient for most cases.
   * {@inheritDoc}
   * @see org.apache.gobblin.writer.DataWriter#write(java.lang.Object)
   */
  @Override
  public void writeEnvelope(RecordEnvelope<D> record) throws IOException {
    try {
      if (ThrottleType.QPS.equals(type)) {
        acquirePermits(1L);
      }
      long beforeWrittenBytes = writer.bytesWritten();
      writer.writeEnvelope(record);
      if (ThrottleType.Bytes.equals(type)) {
        long delta = writer.bytesWritten() - beforeWrittenBytes;
        if (delta < 0) {
          // A negative delta means the wrapped writer does not track bytes written.
          throw new UnsupportedOperationException("Cannot throttle on bytes because "
              + writer.getClass().getSimpleName() + " does not supports bytesWritten");
        }
        if (delta > 0) {
          acquirePermits(delta);
        }
      }
    } catch (InterruptedException e) {
      throw new IOException("Failed while acquiring permits.",e);
    }
  }

  /**
   * Acquire permit along with emitting metrics if enabled.
   * @param permits number of permits to acquire from the limiter
   * @throws InterruptedException if the calling thread is interrupted while blocked
   */
  private void acquirePermits(long permits) throws InterruptedException {
    long startMs = System.currentTimeMillis(); //Measure in milliseconds. (Nanoseconds are more precise but expensive and not worth for this case)
    limiter.acquirePermits(permits);
    long permitAcquisitionTime = System.currentTimeMillis() - startMs;
    if (throttledTimer.isPresent()) { // Metrics enabled
      Instrumented.updateTimer(throttledTimer, permitAcquisitionTime, TimeUnit.MILLISECONDS);
    }
    this.throttledTime += permitAcquisitionTime;
  }

  @Override
  public void close() throws IOException {
    writer.close();
  }

  @Override
  public void commit() throws IOException {
    writer.commit();
  }

  @Override
  public void cleanup() throws IOException {
    writer.cleanup();
  }

  @Override
  public long recordsWritten() {
    return writer.recordsWritten();
  }

  @Override
  public long bytesWritten() throws IOException {
    return writer.bytesWritten();
  }

  @Override
  public RetryerBuilder<Void> getRetryerBuilder() {
    // Delegate retry policy to the wrapped writer when it defines one; otherwise use the default.
    if (writer instanceof Retriable) {
      return ((Retriable) writer).getRetryerBuilder();
    }
    return RetryWriter.createRetryBuilder(state);
  }

  @Override
  public State getFinalState() {
    State state = new State();
    if (this.writer instanceof FinalState) {
      state.addAll(((FinalState)this.writer).getFinalState());
    } else {
      LOG.warn("Wrapped writer does not implement FinalState: " + this.writer.getClass());
    }
    state.setProp(THROTTLED_TIME_KEY, this.throttledTime);
    return state;
  }

  @Override
  public void flush() throws IOException {
    this.writer.flush();
  }
}
| 3,072 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer/HiveWritableHdfsDataWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter;
import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapred.JobConf;
import com.google.common.base.Preconditions;
import org.apache.gobblin.configuration.State;
/**
 * An extension to {@link FsDataWriter} that writes {@link Writable} records using an
 * {@link org.apache.hadoop.mapred.OutputFormat} that implements {@link HiveOutputFormat}.
 *
 * The records are written using a {@link RecordWriter} created by
 * {@link HiveOutputFormat#getHiveRecordWriter(JobConf, org.apache.hadoop.fs.Path, Class, boolean,
 * java.util.Properties, org.apache.hadoop.util.Progressable)}.
 *
 * @author Ziyang Liu
 */
public class HiveWritableHdfsDataWriter extends FsDataWriter<Writable> {
  protected RecordWriter writer;
  protected final AtomicLong count = new AtomicLong(0);
  // the close method may be invoked multiple times, but the underlying writer only supports close being called once
  private boolean closed = false;

  public HiveWritableHdfsDataWriter(HiveWritableHdfsDataWriterBuilder<?> builder, State properties) throws IOException {
    super(builder, properties);
    Preconditions.checkArgument(this.properties.contains(HiveWritableHdfsDataWriterBuilder.WRITER_OUTPUT_FORMAT_CLASS));
    this.writer = getWriter();
  }

  /**
   * Reflectively instantiates the configured {@link HiveOutputFormat} and asks it for a
   * {@link RecordWriter} targeting the staging file.
   *
   * @throws IOException wrapping any reflection or Hive failure during writer creation
   */
  private RecordWriter getWriter() throws IOException {
    try {
      HiveOutputFormat<?, ?> outputFormat = HiveOutputFormat.class
          .cast(Class.forName(this.properties.getProp(HiveWritableHdfsDataWriterBuilder.WRITER_OUTPUT_FORMAT_CLASS))
              .getDeclaredConstructor().newInstance());

      @SuppressWarnings("unchecked")
      Class<? extends Writable> writableClass = (Class<? extends Writable>) Class
          .forName(this.properties.getProp(HiveWritableHdfsDataWriterBuilder.WRITER_WRITABLE_CLASS));

      // Merging Job Properties into JobConf for easy tuning
      JobConf loadedJobConf = new JobConf();
      for (Object key : this.properties.getProperties().keySet()) {
        loadedJobConf.set((String)key, this.properties.getProp((String)key));
      }

      return outputFormat.getHiveRecordWriter(loadedJobConf, this.stagingFile, writableClass, true,
          this.properties.getProperties(), null);
    } catch (Throwable t) {
      // Catch Throwable: reflection can surface Errors (e.g. NoClassDefFoundError) as well as Exceptions.
      throw new IOException("Failed to create writer", t);
    }
  }

  @Override
  public void write(Writable record) throws IOException {
    Preconditions.checkNotNull(record);

    this.writer.write(record);
    this.count.incrementAndGet();
  }

  @Override
  public long recordsWritten() {
    return this.count.get();
  }

  @Override
  public long bytesWritten() throws IOException {
    // The byte count is only available once the output file exists (after commit).
    if (!this.fs.exists(this.outputFile)) {
      return 0;
    }

    return this.fs.getFileStatus(this.outputFile).getLen();
  }

  @Override
  public void close() throws IOException {
    closeInternal();
    super.close();
  }

  @Override
  public void commit() throws IOException {
    closeInternal();
    super.commit();
  }

  private void closeInternal() throws IOException {
    // close the underlying writer if not already closed. The close can only be called once for the underlying writer,
    // so remember the state
    if (!this.closed) {
      this.writer.close(false);
      // release reference to allow GC since this writer can hold onto large buffers for some formats like ORC.
      this.writer = null;
      this.closed = true;
    }
  }

  @Override
  public boolean isSpeculativeAttemptSafe() {
    return this.writerAttemptIdOptional.isPresent() && this.getClass() == HiveWritableHdfsDataWriter.class;
  }
}
| 3,073 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer/FsDataWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Lists;
import com.google.common.io.Closer;
import java.io.IOException;
import java.io.OutputStream;
import java.util.List;
import org.apache.gobblin.codec.StreamCodec;
import org.apache.gobblin.commit.SpeculativeAttemptAwareConstruct;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.dataset.DatasetDescriptor;
import org.apache.gobblin.dataset.Descriptor;
import org.apache.gobblin.dataset.PartitionDescriptor;
import org.apache.gobblin.metadata.types.GlobalMetadata;
import org.apache.gobblin.util.FinalState;
import org.apache.gobblin.util.ForkOperatorUtils;
import org.apache.gobblin.util.HadoopUtils;
import org.apache.gobblin.util.JobConfigurationUtils;
import org.apache.gobblin.util.WriterUtils;
import org.apache.gobblin.util.recordcount.IngestionRecordCountProvider;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* An implementation of {@link DataWriter} does the work of setting the output/staging dir
* and creating the FileSystem instance.
*
* @author akshay@nerdwallet.com
*/
public abstract class FsDataWriter<D> implements DataWriter<D>, FinalState, MetadataAwareWriter, SpeculativeAttemptAwareConstruct {
private static final Logger LOG = LoggerFactory.getLogger(FsDataWriter.class);
public static final String WRITER_INCLUDE_RECORD_COUNT_IN_FILE_NAMES =
ConfigurationKeys.WRITER_PREFIX + ".include.record.count.in.file.names";
public static final String FS_WRITER_METRICS_KEY = "fs_writer_metrics";
protected final State properties;
protected final Configuration conf;
protected final String id;
protected final int numBranches;
protected final int branchId;
protected final String fileName;
protected final FileSystem fs;
protected final FileContext fileContext;
protected Path stagingFile;
protected final String partitionKey;
private final GlobalMetadata defaultMetadata;
protected Path outputFile;
protected final String allOutputFilesPropName;
protected final boolean shouldIncludeRecordCountInFileName;
protected final int bufferSize;
protected final short replicationFactor;
protected final long blockSize;
protected final FsPermission filePermission;
protected final FsPermission dirPermission;
protected final Optional<String> group;
protected final Closer closer = Closer.create();
protected final Optional<String> writerAttemptIdOptional;
protected Optional<Long> bytesWritten;
private final List<StreamCodec> encoders;
/**
 * Builds a file-system writer: resolves staging/output paths, permissions, and encoders from
 * the builder and branch-scoped configuration, and prepares the staging area.
 *
 * @param builder source of writer id, branch info, file name, attempt id and encoders
 * @param properties job/task state holding branch-scoped writer configuration
 * @throws IOException if the file system or staging/output directories cannot be set up
 */
public FsDataWriter(FsDataWriterBuilder<?, ?> builder, State properties) throws IOException {
  this.properties = properties;
  this.id = builder.getWriterId();
  this.numBranches = builder.getBranches();
  this.branchId = builder.getBranch();
  this.fileName = builder.getFileName(properties);
  this.writerAttemptIdOptional = Optional.fromNullable(builder.getWriterAttemptId());
  this.encoders = builder.getEncoders();

  this.conf = new Configuration();
  // Add all job configuration properties so they are picked up by Hadoop
  JobConfigurationUtils.putStateIntoConfiguration(properties, conf);
  this.fs = WriterUtils.getWriterFS(properties, this.numBranches, this.branchId);
  this.fileContext = FileContext.getFileContext(
      WriterUtils.getWriterFsUri(properties, this.numBranches, this.branchId),
      conf);

  // Initialize staging/output directory
  Path writerStagingDir = this.writerAttemptIdOptional.isPresent() ? WriterUtils
      .getWriterStagingDir(properties, this.numBranches, this.branchId, this.writerAttemptIdOptional.get())
      : WriterUtils.getWriterStagingDir(properties, this.numBranches, this.branchId);
  this.stagingFile = new Path(writerStagingDir, this.fileName);

  this.outputFile =
      new Path(WriterUtils.getWriterOutputDir(properties, this.numBranches, this.branchId), this.fileName);
  this.allOutputFilesPropName = ForkOperatorUtils
      .getPropertyNameForBranch(ConfigurationKeys.WRITER_FINAL_OUTPUT_FILE_PATHS, this.numBranches, this.branchId);

  // Deleting the staging file if it already exists, which can happen if the
  // task failed and the staging file didn't get cleaned up for some reason.
  // Deleting the staging file prevents the task retry from being blocked.
  if (this.fs.exists(this.stagingFile)) {
    LOG.warn(String.format("Task staging file %s already exists, deleting it", this.stagingFile));
    HadoopUtils.deletePath(this.fs, this.stagingFile, false);
  }

  this.shouldIncludeRecordCountInFileName = properties.getPropAsBoolean(ForkOperatorUtils
      .getPropertyNameForBranch(WRITER_INCLUDE_RECORD_COUNT_IN_FILE_NAMES, this.numBranches, this.branchId), false);

  this.bufferSize = properties.getPropAsInt(ForkOperatorUtils
      .getPropertyNameForBranch(ConfigurationKeys.WRITER_BUFFER_SIZE, this.numBranches, this.branchId),
      ConfigurationKeys.DEFAULT_BUFFER_SIZE);

  this.replicationFactor = properties.getPropAsShort(ForkOperatorUtils
      .getPropertyNameForBranch(ConfigurationKeys.WRITER_FILE_REPLICATION_FACTOR, this.numBranches, this.branchId),
      this.fs.getDefaultReplication(this.outputFile));

  this.blockSize = properties.getPropAsLong(ForkOperatorUtils
      .getPropertyNameForBranch(ConfigurationKeys.WRITER_FILE_BLOCK_SIZE, this.numBranches, this.branchId),
      this.fs.getDefaultBlockSize(this.outputFile));

  this.filePermission = HadoopUtils.deserializeWriterFilePermissions(properties, this.numBranches, this.branchId);

  this.dirPermission = HadoopUtils.deserializeWriterDirPermissions(properties, this.numBranches, this.branchId);

  this.group = Optional.fromNullable(properties.getProp(ForkOperatorUtils
      .getPropertyNameForBranch(ConfigurationKeys.WRITER_GROUP_NAME, this.numBranches, this.branchId)));

  // Create the parent directory of the output file if it does not exist
  WriterUtils.mkdirsWithRecursivePermission(this.fs, this.outputFile.getParent(), this.dirPermission);

  this.bytesWritten = Optional.absent();

  this.defaultMetadata = new GlobalMetadata();
  for (StreamCodec c : getEncoders()) {
    this.defaultMetadata.addTransferEncoding(c.getTag());
  }

  // Record the partition path (if any) so downstream publishing can find it;
  // reuse the computed key instead of calling builder.getPartitionPath() twice.
  this.partitionKey = builder.getPartitionPath(properties);
  if (this.partitionKey != null) {
    properties.setProp(ConfigurationKeys.WRITER_PARTITION_PATH_KEY + "_" + builder.getWriterId(), this.partitionKey);
  }
}
@Override
public Descriptor getDataDescriptor() {
  // The writer's dataset comes from the writer output directory (see WriterUtils.getWriterOutputDir);
  // it may differ from the dataset ultimately published.
  String datasetPath = this.outputFile.getParent().toString();
  DatasetDescriptor dataset = new DatasetDescriptor(this.fs.getScheme(), this.fs.getUri(), datasetPath);
  // With no partition key the dataset descriptor itself describes the data.
  return (this.partitionKey == null) ? dataset : new PartitionDescriptor(this.partitionKey, dataset);
}
/**
 * Create the staging output file and an {@link OutputStream} to write to the file.
 *
 * @return an {@link OutputStream} to write to the staging file
 * @throws IOException if it fails to create the file and the {@link OutputStream}
 */
protected OutputStream createStagingFileOutputStream()
    throws IOException {
  OutputStream stream = this.fs.create(this.stagingFile, this.filePermission, true, this.bufferSize,
      this.replicationFactor, this.blockSize, null);

  // Attach encoders in reverse order so the first-listed encoder ends up innermost,
  // i.e. closest to the data being written.
  List<StreamCodec> reversedEncoders = Lists.reverse(getEncoders());
  for (StreamCodec codec : reversedEncoders) {
    stream = codec.encodeOutputStream(stream);
  }

  return this.closer.register(stream);
}
/**
 * Set the group name of the staging output file, if a group was configured.
 *
 * @throws IllegalArgumentException if the staging file does not exist yet
 * @throws IOException if it fails to set the group name
 */
protected void setStagingFileGroup()
    throws IOException {
  // Same check/exception as Preconditions.checkArgument, spelled out explicitly.
  if (!this.fs.exists(this.stagingFile)) {
    throw new IllegalArgumentException(
        String.format("Staging output file %s does not exist", this.stagingFile));
  }
  if (this.group.isPresent()) {
    HadoopUtils.setGroup(this.fs, this.stagingFile, this.group.get());
  }
}
/** @return the stream encoders supplied by the builder, in configured order */
protected List<StreamCodec> getEncoders() {
  return encoders;
}
/** @return metadata pre-populated with the transfer encodings of this writer's encoders */
public GlobalMetadata getDefaultMetadata() {
  return defaultMetadata;
}
@Override
public long bytesWritten()
    throws IOException {
  // The byte count is captured from the staging file's length during commit();
  // before that it is absent and we report 0. (Also fixes the lowercase '0l' literal.)
  return this.bytesWritten.or(0L);
}
/**
 * {@inheritDoc}.
 *
 * <p>
 * This default implementation simply renames the staging file to the output file. If the output file
 * already exists, it will delete it first before doing the renaming.
 * </p>
 *
 * <p>Order matters here: the closer is closed first so buffered/encoded bytes are flushed,
 * then the staging file's group/permissions are finalized, its length recorded, and only then
 * is it (optionally renamed with a record count and) moved to the output location.</p>
 *
 * @throws IOException if any file operation fails
 */
@Override
public void commit()
    throws IOException {
  // Flush and close the underlying stream(s) before touching the staging file.
  this.closer.close();

  setStagingFileGroup();
  if (!this.fs.exists(this.stagingFile)) {
    throw new IOException(String.format("File %s does not exist", this.stagingFile));
  }

  FileStatus stagingFileStatus = this.fs.getFileStatus(this.stagingFile);

  // Double check permission of staging file
  if (!stagingFileStatus.getPermission().equals(this.filePermission)) {
    this.fs.setPermission(this.stagingFile, this.filePermission);
  }

  // Capture the final byte count while the staging file still exists.
  this.bytesWritten = Optional.of(Long.valueOf(stagingFileStatus.getLen()));

  // Rename staging file to add record count before copying to output file
  if (this.shouldIncludeRecordCountInFileName) {
    String filePathWithRecordCount = addRecordCountToStagingFile();
    this.stagingFile = new Path(filePathWithRecordCount);
    this.outputFile = new Path(this.outputFile.getParent().toString(), new Path(filePathWithRecordCount).getName());
  }

  LOG.info(String.format("Moving data from %s to %s", this.stagingFile, this.outputFile));
  // For the same reason as deleting the staging file if it already exists, overwrite
  // the output file if it already exists to prevent task retry from being blocked.
  HadoopUtils.renamePath(this.fs, this.stagingFile, this.outputFile, true, this.conf);

  this.properties.appendToSetProp(this.allOutputFilesPropName, this.outputFile.toString());

  // Publish per-writer metrics (file name + record count) for downstream consumers.
  FsWriterMetrics metrics = new FsWriterMetrics(
      this.id,
      new PartitionIdentifier(this.partitionKey, this.branchId),
      ImmutableSet.of(new FsWriterMetrics.FileInfo(this.outputFile.getName(), recordsWritten()))
  );
  this.properties.setProp(FS_WRITER_METRICS_KEY, metrics.toJson());
}
/**
 * {@inheritDoc}.
 *
 * <p>
 * This default implementation simply deletes the staging file if it exists.
 * </p>
 *
 * @throws IOException if deletion of the staging file fails
 */
@Override
public void cleanup()
    throws IOException {
  // Nothing to clean up when no staging file was left behind.
  if (!this.fs.exists(this.stagingFile)) {
    return;
  }
  HadoopUtils.deletePath(this.fs, this.stagingFile, false);
}
@Override
public void close()
    throws IOException {
  // Closes every stream registered with the closer (staging file stream plus encoder wrappers).
  this.closer.close();
}
/**
 * Renames the staging file so its name embeds the number of records written, and returns the
 * resulting path as a string. No-op when the name already carries a record count.
 */
private synchronized String addRecordCountToStagingFile()
    throws IOException {
  String currentPath = this.stagingFile.toString();
  if (IngestionRecordCountProvider.containsRecordCount(currentPath)) {
    LOG.info(String.format("Path %s already has record count", currentPath));
    return currentPath;
  }
  String renamedPath = IngestionRecordCountProvider.constructFilePath(currentPath, recordsWritten());
  LOG.info("Renaming " + currentPath + " to " + renamedPath);
  HadoopUtils.renamePath(this.fs, new Path(currentPath), new Path(renamedPath), true);
  return renamedPath;
}
@Override
public State getFinalState() {
  State finalState = new State();
  // A concrete writer may leave recordsWritten()/bytesWritten() unimplemented or broken;
  // in that case the corresponding property is omitted instead of failing the task.
  try {
    finalState.setProp("RecordsWritten", recordsWritten());
  } catch (Exception e) {
    LOG.warn("Failed to get final state recordsWritten", e);
  }
  try {
    finalState.setProp("BytesWritten", bytesWritten());
  } catch (Exception e) {
    LOG.warn("Failed to get final state bytesWritten", e);
  }
  return finalState;
}
/**
 * Get the path of the (possibly not-yet-committed) output file as a plain string.
 *
 * @return the output file path
 */
public String getOutputFilePath() {
  return this.outputFile.toString();
}
/**
 * Get the output file path, fully qualified with the file system's scheme and authority.
 *
 * @return the fully-qualified output file path
 */
public String getFullyQualifiedOutputFilePath() {
  Path qualifiedOutputFile = this.fs.makeQualified(this.outputFile);
  return qualifiedOutputFile.toString();
}
/**
 * Classes that extend this one need to determine whether {@code writerAttemptIdOptional} is
 * present and, to avoid problems with overriding, add another check on the concrete class type.
 */
@Override
public boolean isSpeculativeAttemptSafe() {
  // Only the base FsDataWriter itself claims to be safe; subclasses must opt in explicitly
  // by overriding this method (the getClass() check makes subclasses default to unsafe).
  return this.writerAttemptIdOptional.isPresent() && this.getClass() == FsDataWriter.class;
}
}
| 3,074 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer/initializer/MultiWriterInitializer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer.initializer;
import org.apache.gobblin.initializer.Initializer;
import org.apache.gobblin.initializer.MultiInitializer;
import java.util.List;
import lombok.ToString;
@ToString
public class MultiWriterInitializer implements WriterInitializer {

  /** Delegate that fans {@link #initialize()}/{@link #close()} out to every branch's initializer. */
  // Renamed from the misspelled "intializer"; the field is private, so no callers are affected.
  private final Initializer initializer;

  public MultiWriterInitializer(List<WriterInitializer> writerInitializers) {
    this.initializer = new MultiInitializer(writerInitializers);
  }

  @Override
  public void initialize() {
    this.initializer.initialize();
  }

  @Override
  public void close() {
    this.initializer.close();
  }
}
| 3,075 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer/initializer/WriterInitializerFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer.initializer;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.source.workunit.WorkUnitStream;
import org.apache.gobblin.util.ForkOperatorUtils;
import org.apache.gobblin.writer.DataWriterBuilder;
import java.util.List;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
/**
 * Factory method pattern class provides WriterInitializer based on writer and state.
 */
public class WriterInitializerFactory {

  /**
   * Provides a {@link WriterInitializer} based on the writer. Mostly the writer is decided by the
   * writer builder (and destination) that the user passes. If there's more than one branch, it
   * instantiates one WriterInitializer per branch and combines them into a
   * {@link MultiWriterInitializer}.
   *
   * @param state job state holding the writer builder configuration
   * @param workUnits the work units the initializer will operate on
   * @return a WriterInitializer covering all fork branches
   */
  public static WriterInitializer newInstance(State state, WorkUnitStream workUnits) {
    int branches = state.getPropAsInt(ConfigurationKeys.FORK_BRANCHES_KEY, 1);
    if (branches == 1) {
      return newSingleInstance(state, workUnits, branches, 0);
    }
    List<WriterInitializer> wis = Lists.newArrayList();
    for (int branchId = 0; branchId < branches; branchId++) {
      wis.add(newSingleInstance(state, workUnits, branches, branchId));
    }
    return new MultiWriterInitializer(wis);
  }

  /**
   * @deprecated the method name is misspelled; use {@link #newInstance(State, WorkUnitStream)}.
   * Kept so existing callers continue to compile.
   */
  @Deprecated
  public static WriterInitializer newInstace(State state, WorkUnitStream workUnits) {
    return newInstance(state, workUnits);
  }

  /** Instantiates the configured {@link DataWriterBuilder} and asks it for this branch's initializer. */
  private static WriterInitializer newSingleInstance(State state, WorkUnitStream workUnits, int branches, int branchId) {
    Preconditions.checkNotNull(state);
    String writerBuilderKey = ForkOperatorUtils.getPropertyNameForBranch(ConfigurationKeys.WRITER_BUILDER_CLASS, branches, branchId);
    String writerBuilderClass = state.getProp(writerBuilderKey, ConfigurationKeys.DEFAULT_WRITER_BUILDER_CLASS);
    DataWriterBuilder dataWriterBuilder;
    try {
      // Class.newInstance() is deprecated (it rethrows checked constructor exceptions unchecked);
      // invoke the no-arg constructor explicitly instead. Any failure is still wrapped below.
      dataWriterBuilder = (DataWriterBuilder) Class.forName(writerBuilderClass).getDeclaredConstructor().newInstance();
    } catch (Exception e) {
      throw new RuntimeException(e);
    }
    return dataWriterBuilder.getInitializer(state, workUnits, branches, branchId);
  }
}
| 3,076 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer/test/TestingEventBusAsserter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer.test;
import java.io.Closeable;
import java.io.IOException;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.BlockingDeque;
import java.util.concurrent.LinkedBlockingDeque;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import javax.annotation.Nonnull;
import com.google.common.base.Function;
import com.google.common.base.Predicate;
import com.google.common.base.Predicates;
import com.google.common.eventbus.EventBus;
import com.google.common.eventbus.Subscribe;
import org.apache.gobblin.writer.test.TestingEventBuses.Event;
import lombok.AllArgsConstructor;
/**
 * A wrapper around an EventBus created with {@link TestingEventBuses} that implements various
 * asserts on the incoming messages.
 *
 * <p><b>Important:</b> This class must be instantiated before any messages are sent on the bus or
 * it won't detect them.
 */
public class TestingEventBusAsserter implements Closeable {
  // Events received so far, in arrival order; consumed from the front by the assert* methods.
  private final BlockingDeque<TestingEventBuses.Event> _events = new LinkedBlockingDeque<>();
  private final EventBus _eventBus;
  // Timeout applied by every blocking assert; configurable via withTimeout().
  private long _defaultTimeoutValue = 1;
  private TimeUnit _defaultTimeoutUnit = TimeUnit.SECONDS;

  /** A message generator that always returns the same fixed message. */
  @AllArgsConstructor
  public static class StaticMessage implements Function<TestingEventBuses.Event, String> {
    private final String message;

    @Override public String apply(Event input) {
      return this.message;
    }
  }

  public TestingEventBusAsserter(String eventBusId) {
    _eventBus = TestingEventBuses.getEventBus(eventBusId);
    _eventBus.register(this);
  }

  /** EventBus callback: queues every incoming event for later inspection. */
  @Subscribe public void processEvent(TestingEventBuses.Event e) {
    _events.offer(e);
  }

  @Override public void close() throws IOException {
    _eventBus.unregister(this);
  }

  /** @return the live event queue; removals affect subsequent asserts */
  public BlockingDeque<TestingEventBuses.Event> getEvents() {
    return _events;
  }

  /** Discards all events received so far. */
  public void clear() {
    _events.clear();
  }

  /** Sets timeout for all subsequent blocking asserts. */
  public TestingEventBusAsserter withTimeout(long timeout, TimeUnit unit) {
    _defaultTimeoutValue = timeout;
    _defaultTimeoutUnit = unit;
    return this;
  }

  /** Gets the next event from the queue and validates that it satisfies a given predicate. Blocking
   * assert. The event is removed from the internal queue regardless if the predicate has been
   * satisfied.
   * @param predicate the predicate to apply on the next event
   * @param messageGen generator of the assertion error message
   * @return the event if the predicate is satisfied
   * @throws AssertionError if the predicate is not satisfied
   * @throws TimeoutException if no event arrives within the configured timeout
   */
  public TestingEventBuses.Event assertNext(final Predicate<TestingEventBuses.Event> predicate,
      Function<TestingEventBuses.Event, String> messageGen
      ) throws InterruptedException, TimeoutException {
    TestingEventBuses.Event nextEvent = _events.pollFirst(_defaultTimeoutValue, _defaultTimeoutUnit);
    if (null == nextEvent) {
      throw new TimeoutException();
    }
    if (!predicate.apply(nextEvent)) {
      throw new AssertionError(messageGen.apply(nextEvent));
    }
    return nextEvent;
  }

  /**
   * Variation on {@link #assertNext(Predicate, Function)} with a constant message.
   */
  public TestingEventBuses.Event assertNext(final Predicate<TestingEventBuses.Event> predicate,
      final String message) throws InterruptedException, TimeoutException {
    return assertNext(predicate, new StaticMessage(message));
  }

  /** Similar to {@link #assertNext(Predicate, Function)} but predicate is on the value directly. */
  public <T> TestingEventBuses.Event assertNextValue(final Predicate<T> predicate ,
      Function<TestingEventBuses.Event, String> messageGen)
      throws InterruptedException, TimeoutException {
    return assertNext(new Predicate<TestingEventBuses.Event>() {
      @Override public boolean apply(@Nonnull Event input) {
        return predicate.apply(input.<T>getTypedValue());
      }
    }, messageGen);
  }

  /** Similar to {@link #assertNext(Predicate, String)} but predicate is on the value directly. */
  public <T> TestingEventBuses.Event assertNextValue(final Predicate<T> predicate, String message)
      throws InterruptedException, TimeoutException {
    return assertNext(new Predicate<TestingEventBuses.Event>() {
      @Override public boolean apply(@Nonnull Event input) {
        return predicate.apply(input.<T>getTypedValue());
      }
    }, message);
  }

  /** Asserts that the next event's value equals {@code expected}. Blocking assert. */
  public <T> TestingEventBuses.Event assertNextValueEq(final T expected)
      throws InterruptedException, TimeoutException {
    return assertNextValue(Predicates.equalTo(expected),
        new Function<TestingEventBuses.Event, String>() {
          @Override public String apply(@Nonnull Event input) {
            return "Event value mismatch: " + input.getValue() + " != " + expected;
          }
        });
  }

  /**
   * Verify that all next several values are a permutation of the expected collection of event
   * values. This method allows for testing that certain values are produced in some random order.
   * Blocking assert. */
  public <T> void assertNextValuesEq(final Collection<T> expected)
      throws InterruptedException, TimeoutException {
    final Set<T> remainingExpectedValues = new HashSet<>(expected);
    // Predicate with a deliberate side effect: each matched value is removed, so duplicate
    // expected values must each be observed.
    final Predicate<T> checkInRemainingAndRemove = new Predicate<T>() {
      @Override public boolean apply(@Nonnull T input) {
        if (! remainingExpectedValues.contains(input)) {
          return false;
        }
        remainingExpectedValues.remove(input);
        return true;
      }
    };
    while (remainingExpectedValues.size() > 0) {
      assertNextValue(checkInRemainingAndRemove,
          new Function<TestingEventBuses.Event, String>() {
            @Override public String apply(@Nonnull Event input) {
              return "Event value " + input.getValue() + " not in set " + remainingExpectedValues;
            }
          });
    }
  }
}
| 3,077 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer/test/TestingEventBuses.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer.test;
import java.util.concurrent.ExecutionException;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.google.common.eventbus.EventBus;
import lombok.Getter;
import lombok.ToString;
/**
 * Maintains a static set of EventBus instances for testing purposes by
 * {@link GobblinTestEventBusWriter}. Obviously, this class should be used only in test VMs with
 * limited life span.
 */
public class TestingEventBuses {

  /** Lazily-created EventBus instances, cached by id so writer and test share the same bus. */
  private static final LoadingCache<String, EventBus> _instances =
      CacheBuilder.newBuilder().build(new CacheLoader<String, EventBus>() {
        @Override public EventBus load(String key) throws Exception {
          return new EventBus(key);
        }
      });

  /** Returns the EventBus for the given id, creating it on first use. */
  public static EventBus getEventBus(String eventBusId) {
    try {
      return _instances.get(eventBusId);
    } catch (ExecutionException e) {
      throw new RuntimeException("Unable to create an EventBus with id " + eventBusId + ": " + e, e);
    }
  }

  /** A payload wrapper that records the value together with its creation time. */
  @Getter
  @ToString
  public static class Event {
    private final Object value;
    private final long timestampNanos;

    public Event(Object value) {
      this.value = value;
      this.timestampNanos = System.nanoTime();
    }

    @SuppressWarnings("unchecked")
    public <T> T getTypedValue() {
      return (T) this.value;
    }

    /** Null-safe equality check between this event's value and {@code otherValue}. */
    public boolean valueEquals(Object otherValue) {
      return (null == this.value) ? (null == otherValue) : this.value.equals(otherValue);
    }
  }
}
| 3,078 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer/test/GobblinTestEventBusWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer.test;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicLong;
import com.google.common.base.Optional;
import com.google.common.eventbus.EventBus;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.util.ForkOperatorUtils;
import org.apache.gobblin.util.WriterUtils;
import org.apache.gobblin.writer.DataWriter;
import org.apache.gobblin.writer.DataWriterBuilder;
import lombok.Data;
/**
* This class is meant for automated testing of Gobblin job executions. It will write any object it
* receives to a Guava EventBus . Tests can subscribe to the event bus and monitor what records are
* being produced.
*
* <p>By default, the class will use TestingEventBuses to create an EventBus with name
* {@link ConfigurationKeys#WRITER_OUTPUT_DIR}.
*
* <p>Note that the EventBus instances are static (to simplify the sharing between writer and tests).
* It is responsibility of the test to make sure that names of those are unique to avoid cross-
* pollution between tests.
*/
public class GobblinTestEventBusWriter implements DataWriter<Object> {
  private final EventBus _eventBus;
  private final AtomicLong _recordCount = new AtomicLong();
  private final Mode _mode;
  // Wall-clock timestamps of the first and last write() call; their difference is reported
  // in the COUNTING-mode RunSummary. NOTE(review): updated without synchronization — assumes
  // a single writer thread; confirm if used concurrently.
  private long _firstRecordTimestamp;
  private long _lastRecordTimestamp;

  public enum Mode {
    /** Will post every record to eventbus. */
    POST_RECORDS,
    /** Will count records and post a summary to eventbus at commit time. */
    COUNTING
  }

  /** The topic to use for writing */
  public static final String EVENTBUSID_KEY = "GobblinTestEventBusWriter.eventBusId";
  public static final String MODE_KEY = "GobblinTestEventBusWriter.mode";
  public static final String FULL_EVENTBUSID_KEY =
      ConfigurationKeys.WRITER_PREFIX + "." + EVENTBUSID_KEY;
  public static final String FULL_MODE_KEY = ConfigurationKeys.WRITER_PREFIX + "." + MODE_KEY;

  public GobblinTestEventBusWriter(EventBus eventBus, Mode mode) {
    _eventBus = eventBus;
    _mode = mode;
  }

  public GobblinTestEventBusWriter(String eventBusId, Mode mode) {
    this(TestingEventBuses.getEventBus(eventBusId), mode);
  }

  @Override
  public void close() throws IOException {
    // Nothing to do
  }

  @Override
  public void write(Object record) throws IOException {
    // First write() establishes the start timestamp for the elapsed-time summary.
    if (_firstRecordTimestamp == 0) {
      _firstRecordTimestamp = System.currentTimeMillis();
    }
    if (this._mode == Mode.POST_RECORDS) {
      _eventBus.post(new TestingEventBuses.Event(record));
    }
    _lastRecordTimestamp = System.currentTimeMillis();
    _recordCount.incrementAndGet();
  }

  @Override
  public void commit() throws IOException {
    // In COUNTING mode a single summary event is posted instead of per-record events.
    if (this._mode == Mode.COUNTING) {
      _eventBus.post(new TestingEventBuses.Event(new RunSummary(_recordCount.get(), _lastRecordTimestamp - _firstRecordTimestamp)));
    }
  }

  @Override
  public void cleanup() throws IOException {
    // Nothing to do
  }

  @Override
  public long recordsWritten() {
    return _recordCount.get();
  }

  @Override
  public long bytesWritten() throws IOException {
    // Not meaningful
    return _recordCount.get();
  }

  public static Builder builder() {
    return new Builder();
  }

  public static class Builder extends DataWriterBuilder<Object, Object> {
    private Optional<String> _eventBusId = Optional.absent();

    // Falls back to the writer output dir as the bus id when none is configured explicitly.
    public String getDefaultEventBusId() {
      State destinationCfg = getDestination().getProperties();
      String eventBusIdKey =
          ForkOperatorUtils.getPathForBranch(destinationCfg, FULL_EVENTBUSID_KEY, getBranches(),
              getBranch());
      if (destinationCfg.contains(eventBusIdKey)) {
        return destinationCfg.getProp(eventBusIdKey);
      }
      else {
        return WriterUtils.getWriterOutputDir(destinationCfg,
            getBranches(),
            getBranch())
            .toString();
      }
    }

    // Lazily resolves and caches the event bus id.
    public String getEventBusId() {
      if (! _eventBusId.isPresent()) {
        _eventBusId = Optional.of(getDefaultEventBusId());
      }
      return _eventBusId.get();
    }

    public Builder withEventBusId(String eventBusId) {
      _eventBusId = Optional.of(eventBusId);
      return this;
    }

    // Best-effort mode lookup: any failure (missing destination, bad value) falls back to
    // POST_RECORDS rather than failing test setup.
    public Mode getDefaultMode() {
      try {
        State destinationCfg = getDestination().getProperties();
        String modeKey = ForkOperatorUtils.getPathForBranch(destinationCfg, FULL_MODE_KEY, getBranches(), getBranch());
        return Mode.valueOf(destinationCfg.getProp(modeKey, Mode.POST_RECORDS.name()).toUpperCase());
      } catch (Throwable t) {
        return Mode.POST_RECORDS;
      }
    }

    @Override public GobblinTestEventBusWriter build() throws IOException {
      return new GobblinTestEventBusWriter(getEventBusId(), getDefaultMode());
    }
  }

  /** Summary posted in COUNTING mode: total records and elapsed time between first and last write. */
  @Data
  public static class RunSummary {
    private final long recordsWritten;
    private final long timeElapsedMillis;
  }
}
| 3,079 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer/partitioner/SchemaBasedWriterPartitioner.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer.partitioner;
import org.apache.avro.Schema;
import org.apache.avro.SchemaBuilder;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;
import org.apache.gobblin.configuration.State;
/**
 * A {@link WriterPartitioner} that partitions a record based on its schema. Partition record is returned with
 * field {@link #SCHEMA_STRING} containing the record's schema as a string.
 */
public class SchemaBasedWriterPartitioner implements WriterPartitioner<GenericRecord> {

  public static final String SCHEMA_STRING = "schemaString";

  // Single-field partition schema: { schemaString: string }.
  private static final Schema SCHEMA = SchemaBuilder.record("Schema").namespace("gobblin.writer.partitioner")
      .fields().name(SCHEMA_STRING).type(Schema.create(Schema.Type.STRING)).noDefault().endRecord();

  public SchemaBasedWriterPartitioner(State state, int numBranches, int branchId) {}

  @Override
  public Schema partitionSchema() {
    return SCHEMA;
  }

  @Override
  public GenericRecord partitionForRecord(GenericRecord record) {
    String recordSchemaJson = record.getSchema().toString();
    GenericRecord partition = new GenericData.Record(SCHEMA);
    partition.put(SCHEMA_STRING, recordSchemaJson);
    return partition;
  }
}
| 3,080 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer/partitioner/WorkUnitStateWriterPartitioner.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer.partitioner;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
/**
 * A {@link TimeBasedWriterPartitioner} that partitions an incoming set of records based purely
 * on the work-unit state: every record in the work unit gets the same timestamp.
 */
public class WorkUnitStateWriterPartitioner extends TimeBasedWriterPartitioner<Object> {

  private final long timestamp;

  public WorkUnitStateWriterPartitioner(State state, int numBranches, int branches) {
    super(state, numBranches, branches);
    this.timestamp = calculateTimestamp(state);
  }

  @Override
  public long getRecordTimestamp(Object record) {
    // The record itself is ignored; the timestamp is fixed per work unit.
    return timestamp;
  }

  private long calculateTimestamp(State state) {
    // Local renamed so it no longer shadows the "timestamp" field.
    long partitionTimestamp = state.getPropAsLong(ConfigurationKeys.WORK_UNIT_DATE_PARTITION_KEY, -1L);
    if (partitionTimestamp == -1L) {
      throw new IllegalArgumentException(
          "WORK_UNIT_DATE_PARTITION_KEY not present in WorkUnitState; is source an instance of DatePartitionedAvroFileSource?");
    }
    return partitionTimestamp;
  }
}
| 3,081 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer/partitioner/TimeBasedWriterPartitioner.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer.partitioner;
import java.util.Collections;
import java.util.concurrent.TimeUnit;
import org.apache.avro.Schema;
import org.apache.avro.SchemaBuilder;
import org.apache.avro.SchemaBuilder.FieldAssembler;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;
import org.apache.commons.lang3.StringUtils;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import com.google.common.base.Enums;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
import lombok.Getter;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.util.DatePartitionType;
import org.apache.gobblin.util.ForkOperatorUtils;
/**
* A {@link WriterPartitioner} that partitions a record based on a timestamp.
*
* There are two ways to partition a timestamp: (1) specify a {@link DateTimeFormat} using
* {@link #WRITER_PARTITION_PATTERN}, e.g., 'yyyy/MM/dd/HH'; (2) specify a
* {@link DatePartitionType} using {@link #WRITER_PARTITION_GRANULARITY}.
*
* A prefix and a suffix can be added to the partition, e.g., the partition path can be
* 'prefix/2015/11/05/suffix'.
*
* @author Ziyang Liu
*/
public abstract class TimeBasedWriterPartitioner<D> implements WriterPartitioner<D> {
// Branch-aware configuration keys that control how the partition path is built.
public static final String WRITER_PARTITION_PREFIX = ConfigurationKeys.WRITER_PREFIX + ".partition.prefix";
public static final String WRITER_PARTITION_SUFFIX = ConfigurationKeys.WRITER_PREFIX + ".partition.suffix";
public static final String WRITER_PARTITION_PATTERN = ConfigurationKeys.WRITER_PREFIX + ".partition.pattern";
public static final String WRITER_PARTITION_TIMEZONE = ConfigurationKeys.WRITER_PREFIX + ".partition.timezone";
public static final String DEFAULT_WRITER_PARTITION_TIMEZONE = ConfigurationKeys.PST_TIMEZONE_NAME;
// Time unit of the values returned by getRecordTimestamp(); defaults to milliseconds.
public static final String WRITER_PARTITION_TIMEUNIT = ConfigurationKeys.WRITER_PREFIX + ".partition.timeUnit";
public static final String DEFAULT_WRITER_PARTITION_TIMEUNIT = TimeUnit.MILLISECONDS.name();
public static final String WRITER_PARTITION_GRANULARITY = ConfigurationKeys.WRITER_PREFIX + ".partition.granularity";
public static final DatePartitionType DEFAULT_WRITER_PARTITION_GRANULARITY = DatePartitionType.HOUR;
// Field names used in the partition GenericRecord schema.
public static final String PARTITIONED_PATH = "partitionedPath";
public static final String PREFIX = "prefix";
public static final String SUFFIX = "suffix";
private final String writerPartitionPrefix;
private final String writerPartitionSuffix;
private final DatePartitionType granularity;
private final DateTimeZone timeZone;
@Getter
protected final TimeUnit timeUnit;
// Present only when an explicit writer.partition.pattern is configured.
private final Optional<DateTimeFormatter> timestampToPathFormatter;
private final Schema schema;
/**
 * Resolves all branch-specific partition settings once, so per-record partitioning is cheap.
 */
public TimeBasedWriterPartitioner(State state, int numBranches, int branchId) {
  this.writerPartitionPrefix = getWriterPartitionPrefix(state, numBranches, branchId);
  this.writerPartitionSuffix = getWriterPartitionSuffix(state, numBranches, branchId);
  this.granularity = getGranularity(state, numBranches, branchId);
  this.timeZone = getTimeZone(state, numBranches, branchId);
  this.timeUnit = getTimeUnit(state, numBranches, branchId);
  // Must run after this.timeZone is assigned: the formatter is bound to that zone.
  this.timestampToPathFormatter = getTimestampToPathFormatter(state, numBranches, branchId);
  this.schema = getSchema();
}
/** Reads the branch-specific partition path prefix; empty string when not configured. */
private static String getWriterPartitionPrefix(State state, int numBranches, int branchId) {
  String key = ForkOperatorUtils.getPropertyNameForBranch(WRITER_PARTITION_PREFIX, numBranches, branchId);
  return state.getProp(key, StringUtils.EMPTY);
}
/** Reads the branch-specific partition path suffix; empty string when not configured. */
private static String getWriterPartitionSuffix(State state, int numBranches, int branchId) {
  String key = ForkOperatorUtils.getPropertyNameForBranch(WRITER_PARTITION_SUFFIX, numBranches, branchId);
  return state.getProp(key, StringUtils.EMPTY);
}
/**
 * Resolves the configured partition granularity, falling back to the default.
 *
 * @throws IllegalStateException if the configured value is not a valid {@link DatePartitionType}
 */
private static DatePartitionType getGranularity(State state, int numBranches, int branchId) {
  String key = ForkOperatorUtils.getPropertyNameForBranch(WRITER_PARTITION_GRANULARITY, numBranches, branchId);
  String configured = state.getProp(key, DEFAULT_WRITER_PARTITION_GRANULARITY.toString());
  Optional<DatePartitionType> parsed = Enums.getIfPresent(DatePartitionType.class, configured.toUpperCase());
  Preconditions.checkState(parsed.isPresent(), configured + " is not a valid writer partition granularity");
  return parsed.get();
}
/**
 * Builds a timestamp-to-path formatter if an explicit partition pattern is configured.
 * The formatter is bound to {@code this.timeZone}, so the time zone field must already be set.
 */
private Optional<DateTimeFormatter> getTimestampToPathFormatter(State state, int numBranches, int branchId) {
  String key = ForkOperatorUtils.getPropertyNameForBranch(WRITER_PARTITION_PATTERN, numBranches, branchId);
  if (!state.contains(key)) {
    return Optional.absent();
  }
  return Optional.of(DateTimeFormat.forPattern(state.getProp(key)).withZone(this.timeZone));
}
/**
 * Resolves the configured partition time zone, defaulting to {@link #DEFAULT_WRITER_PARTITION_TIMEZONE}.
 */
private static DateTimeZone getTimeZone(State state, int numBranches, int branchId) {
  return DateTimeZone.forID(
      state.getProp(
          ForkOperatorUtils.getPropertyNameForBranch(WRITER_PARTITION_TIMEZONE, numBranches, branchId),
          DEFAULT_WRITER_PARTITION_TIMEZONE));
}
/**
 * Resolves the configured record-timestamp time unit, defaulting to MILLISECONDS.
 */
private static TimeUnit getTimeUnit(State state, int numBranches, int branchId) {
  String configured = state.getProp(
      ForkOperatorUtils.getPropertyNameForBranch(WRITER_PARTITION_TIMEUNIT, numBranches, branchId),
      DEFAULT_WRITER_PARTITION_TIMEUNIT);
  return TimeUnit.valueOf(configured.toUpperCase());
}
/**
 * Chooses the partition schema: pattern-based when a formatter is configured, otherwise
 * granularity-based.
 */
private Schema getSchema() {
  return this.timestampToPathFormatter.isPresent()
      ? getDateTimeFormatBasedSchema()
      : getGranularityBasedSchema();
}
/**
 * Returns the Avro schema of the partition records produced by this partitioner.
 * Computed once in the constructor and never changes afterwards.
 */
@Override
public Schema partitionSchema() {
return this.schema;
}
/**
 * Builds the partition record for {@code record}: an optional prefix field, then either a
 * formatter-derived "partitionedPath" field or a single field named after the configured
 * granularity, then an optional suffix field.
 *
 * @param record the record being partitioned; its timestamp comes from {@link #getRecordTimestamp}
 * @return a {@link GenericRecord} conforming to {@link #partitionSchema()}
 */
// Removed dead @SuppressWarnings("fallthrough"): this method contains no switch statement,
// so the suppression could never apply.
@Override
public GenericRecord partitionForRecord(D record) {
  // Record timestamps arrive in this.timeUnit; normalize to epoch millis for Joda-Time.
  long timestamp = timeUnit.toMillis(getRecordTimestamp(record));
  GenericRecord partition = new GenericData.Record(this.schema);
  if (!Strings.isNullOrEmpty(this.writerPartitionPrefix)) {
    partition.put(PREFIX, this.writerPartitionPrefix);
  }
  if (!Strings.isNullOrEmpty(this.writerPartitionSuffix)) {
    partition.put(SUFFIX, this.writerPartitionSuffix);
  }
  if (this.timestampToPathFormatter.isPresent()) {
    partition.put(PARTITIONED_PATH, getPartitionedPath(timestamp));
  } else {
    DateTime dateTime = new DateTime(timestamp, this.timeZone);
    partition.put(this.granularity.toString(), this.granularity.getField(dateTime));
  }
  return partition;
}
/**
 * Builds the partition schema used when an explicit timestamp pattern is configured:
 * [prefix,] partitionedPath [, suffix] — all string fields, optional ones present only
 * when the corresponding prefix/suffix is non-empty.
 */
private Schema getDateTimeFormatBasedSchema() {
  FieldAssembler<Schema> fields =
      SchemaBuilder.record("GenericRecordTimePartition").namespace("gobblin.writer.partitioner").fields();
  if (!Strings.isNullOrEmpty(this.writerPartitionPrefix)) {
    fields = fields.name(PREFIX).type(Schema.create(Schema.Type.STRING)).noDefault();
  }
  fields = fields.name(PARTITIONED_PATH).type(Schema.create(Schema.Type.STRING)).noDefault();
  if (!Strings.isNullOrEmpty(this.writerPartitionSuffix)) {
    fields = fields.name(SUFFIX).type(Schema.create(Schema.Type.STRING)).noDefault();
  }
  return fields.endRecord();
}
/**
 * Builds the partition schema used when no explicit pattern is configured:
 * [prefix,] <granularity-name> [, suffix] — all string fields.
 */
// Removed dead @SuppressWarnings("fallthrough"): this method contains no switch statement,
// so the suppression could never apply.
private Schema getGranularityBasedSchema() {
  FieldAssembler<Schema> assembler =
      SchemaBuilder.record("GenericRecordTimePartition").namespace("gobblin.writer.partitioner").fields();
  // Fields are assembled in reverse order (suffix, granularity, prefix) and flipped below so the
  // final order is prefix, granularity, suffix.
  if (!Strings.isNullOrEmpty(this.writerPartitionSuffix)) {
    assembler = assembler.name(SUFFIX).type(Schema.create(Schema.Type.STRING)).noDefault();
  }
  assembler = assembler.name(this.granularity.toString()).type(Schema.create(Schema.Type.STRING)).noDefault();
  if (!Strings.isNullOrEmpty(this.writerPartitionPrefix)) {
    assembler = assembler.name(PREFIX).type(Schema.create(Schema.Type.STRING)).noDefault();
  }
  Schema schema = assembler.endRecord();
  // NOTE(review): mutates the list returned by Schema.getFields(); this works with the Avro
  // version in use here, but newer Avro returns an unmodifiable list — confirm before upgrading.
  Collections.reverse(schema.getFields());
  return schema;
}
/**
 * Formats an epoch-millisecond timestamp into a partition path using the configured formatter.
 * Only called when {@code timestampToPathFormatter} is present.
 */
private String getPartitionedPath(long timestamp) {
  DateTimeFormatter formatter = this.timestampToPathFormatter.get();
  return formatter.print(timestamp);
}
public abstract long getRecordTimestamp(D record);
} | 3,082 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer/partitioner/TimeBasedAvroWriterPartitioner.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer.partitioner;
import java.util.List;
import java.util.concurrent.TimeUnit;
import org.apache.avro.generic.GenericRecord;
import com.google.common.base.Optional;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.util.AvroUtils;
import org.apache.gobblin.util.ForkOperatorUtils;
/**
* A {@link TimeBasedWriterPartitioner} for {@link GenericRecord}s.
*
* The {@link org.apache.avro.Schema.Field} that contains the timestamp can be specified using
* {@link WRITER_PARTITION_COLUMNS}, and multiple values can be specified, e.g., "header.timestamp,device.timestamp".
*
* If multiple values are specified, they will be tried in order. In the above example, if a record contains a valid
* "header.timestamp" field, its value will be used, otherwise "device.timestamp" will be used.
*
* If a record contains none of the specified fields, or if no field is specified, the current timestamp will be used.
*/
@Slf4j
public class TimeBasedAvroWriterPartitioner extends TimeBasedWriterPartitioner<GenericRecord> {

  /** Config key listing the record fields (in priority order) that may hold the timestamp. */
  public static final String WRITER_PARTITION_COLUMNS = ConfigurationKeys.WRITER_PREFIX + ".partition.columns";
  /** Config key enabling parsing of non-Long column values via {@code Long.parseLong}. */
  public static final String WRITER_PARTITION_ENABLE_PARSE_AS_STRING =
      ConfigurationKeys.WRITER_PREFIX + ".partition.enableParseAsString";

  private final Optional<List<String>> partitionColumns;
  private final boolean enableParseAsString;

  public TimeBasedAvroWriterPartitioner(State state) {
    this(state, 1, 0);
  }

  public TimeBasedAvroWriterPartitioner(State state, int numBranches, int branchId) {
    super(state, numBranches, branchId);
    this.partitionColumns = getWriterPartitionColumns(state, numBranches, branchId);
    this.enableParseAsString = getEnableParseAsString(state, numBranches, branchId);
    log.info("Enable parse as string: {}", this.enableParseAsString);
  }

  /** Reads the branch-scoped partition-column list, if configured. */
  private static Optional<List<String>> getWriterPartitionColumns(State state, int numBranches, int branchId) {
    String key = ForkOperatorUtils.getPropertyNameForBranch(WRITER_PARTITION_COLUMNS, numBranches, branchId);
    log.info("Partition columns for dataset {} are: {}", state.getProp(ConfigurationKeys.DATASET_URN_KEY),
        state.getProp(key));
    if (state.contains(key)) {
      return Optional.of(state.getPropAsList(key));
    }
    return Optional.<List<String>> absent();
  }

  private static boolean getEnableParseAsString(State state, int numBranches, int branchId) {
    return state.getPropAsBoolean(
        ForkOperatorUtils.getPropertyNameForBranch(WRITER_PARTITION_ENABLE_PARSE_AS_STRING, numBranches, branchId),
        false);
  }

  @Override
  public long getRecordTimestamp(GenericRecord record) {
    return getRecordTimestamp(getWriterPartitionColumnValue(record));
  }

  /**
   * Resolves the timestamp from the extracted column value: a {@code Long} is used directly; any
   * other non-null value is parsed from its string form when parsing is enabled; otherwise the
   * current system time is used.
   */
  protected long getRecordTimestamp(Optional<Object> writerPartitionColumnValue) {
    Object value = writerPartitionColumnValue.orNull();
    if (value instanceof Long) {
      return (Long) value;
    }
    if (value != null && this.enableParseAsString) {
      return Long.parseLong(value.toString());
    }
    // Fall back to "now", expressed in the partitioner's configured time unit.
    return timeUnit.convert(System.currentTimeMillis(), TimeUnit.MILLISECONDS);
  }

  /**
   * Returns the first present value among the configured partition columns, or absent when no
   * column is configured or none of them resolves to a value in {@code record}.
   */
  protected Optional<Object> getWriterPartitionColumnValue(GenericRecord record) {
    if (!this.partitionColumns.isPresent()) {
      return Optional.absent();
    }
    for (String column : this.partitionColumns.get()) {
      Optional<Object> value = AvroUtils.getFieldValue(record, column);
      if (value.isPresent()) {
        return value;
      }
    }
    return Optional.absent();
  }
}
| 3,083 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer/objectstore/ObjectStoreWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer.objectstore;
import java.io.IOException;
import lombok.Getter;
import com.codahale.metrics.Counter;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.instrumented.writer.InstrumentedDataWriter;
/**
* A writer to execute operations on a object in any object store. The record type of this writer is an {@link ObjectStoreOperation}.
* The {@link ObjectStoreOperation} encapsulates operation specific metadata and actions.
*/
@Alpha
@SuppressWarnings("rawtypes")
public class ObjectStoreWriter extends InstrumentedDataWriter<ObjectStoreOperation> {
// Metric name for the count of operations this writer has executed.
private static final String OPERATIONS_EXECUTED_COUNTER = "gobblin.objectStoreWriter.operationsExecuted";
private final Counter operationsExecuted;
@Getter
private final ObjectStoreClient objectStoreClient;
/**
 * Creates a writer that executes {@link ObjectStoreOperation}s against {@code client}.
 */
public ObjectStoreWriter(ObjectStoreClient client, State state) {
super(state);
this.objectStoreClient = client;
this.operationsExecuted = this.getMetricContext().counter(OPERATIONS_EXECUTED_COUNTER);
}
// NOTE(review): does not call super.close(); confirm InstrumentedDataWriter needs no extra
// teardown here. Also note cleanup() closes the same client — assumes
// ObjectStoreClient.close() is safe to call more than once; TODO confirm.
@Override
public void close() throws IOException {
this.objectStoreClient.close();
}
/**
 * Calls {@link ObjectStoreOperation#execute(ObjectStoreClient)} on the <code>operation</code> passed
 * and increments the operations-executed counter.
 *
 * {@inheritDoc}
 * @see org.apache.gobblin.writer.DataWriter#write(java.lang.Object)
 */
@Override
public void writeImpl(ObjectStoreOperation operation) throws IOException {
operation.execute(this.getObjectStoreClient());
this.operationsExecuted.inc();
}
// Object-store operations take effect immediately in writeImpl; there is nothing to commit.
@Override
public void commit() throws IOException {
}
// Closes the client, same as close(); see the idempotence note above.
@Override
public void cleanup() throws IOException {
this.getObjectStoreClient().close();
}
// "Records written" is defined as the number of operations executed.
@Override
public long recordsWritten() {
return this.operationsExecuted.getCount();
}
@Override
public long bytesWritten() throws IOException {
// TODO Will be added when ObjectStorePutOperation is implemented. Currently we only support ObjectStoreDeleteOperation
return 0;
}
}
| 3,084 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer/objectstore/ObjectStoreClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer.objectstore;
import java.io.IOException;
import java.io.InputStream;
import javax.annotation.Nonnull;
import com.typesafe.config.Config;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.writer.objectstore.response.GetObjectResponse;
/**
* A client interface to interact with an object store. Supports basic operations like put,delete and get.
*/
@Alpha
public interface ObjectStoreClient {

  /**
   * Stores the object read from {@code objectStream}, letting the store assign its id.
   *
   * @param objectStream data to store
   * @param putConfig additional put options, if any (user metadata, put options etc.)
   * @return the id of the newly created object
   * @throws IOException if the put fails
   */
  byte[] put(InputStream objectStream, Config putConfig) throws IOException;

  /**
   * Stores the object read from {@code objectStream} under the given {@code objectId}.
   *
   * @param objectStream data to store
   * @param objectId id under which the object is stored
   * @param putConfig additional put options, if any (user metadata, put options etc.)
   * @return the id of the newly created object
   * @throws IOException if the put fails
   */
  byte[] put(InputStream objectStream, @Nonnull byte[] objectId, Config putConfig) throws IOException;

  /**
   * Deletes the object with {@code objectId}; a no-op if the object does not exist.
   *
   * @param objectId id of the object to delete
   * @param deleteConfig additional delete options, if any
   * @throws IOException if the delete fails
   */
  void delete(@Nonnull byte[] objectId, Config deleteConfig) throws IOException;

  /**
   * Retrieves the metadata associated with an object.
   *
   * @param objectId id of the object whose metadata is retrieved
   * @return the object's metadata
   * @throws IOException if reading the metadata fails
   */
  Config getObjectProps(@Nonnull byte[] objectId) throws IOException;

  /**
   * Sets the metadata associated with an object.
   *
   * @param objectId id of the object whose metadata is set
   * @param objectProps metadata to associate with the object
   * @throws IOException if setting the metadata fails
   */
  void setObjectProps(@Nonnull byte[] objectId, Config objectProps) throws IOException;

  /**
   * Fetches the object stored under {@code objectId}.
   *
   * @param objectId id of the object to retrieve
   * @return a {@link GetObjectResponse} holding an {@link InputStream} to the object plus its metadata
   * @throws IOException if the object does not exist or reading it fails
   */
  GetObjectResponse getObject(@Nonnull byte[] objectId) throws IOException;

  /**
   * Closes the client and releases any resources it holds.
   *
   * @throws IOException if closing fails
   */
  void close() throws IOException;
}
| 3,085 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer/objectstore/ObjectStoreOperation.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer.objectstore;
import java.io.IOException;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.converter.objectstore.ObjectStoreConverter;
/**
* An {@link ObjectStoreOperation} is the record type used by {@link ObjectStoreWriter}s and {@link ObjectStoreConverter}.
* This class represents an operation performed for an object in an object store. The store can be accessed using {@link ObjectStoreClient}.
* Some of the operations are DELETE, PUT, GET etc.
* Subclasses are specific operations, they need to implement the {@link #execute(ObjectStoreClient)} method to perform their
* operation on an object in the store.
*
* @param <T> Response type of the operation
*/
@Alpha
public abstract class ObjectStoreOperation<T> {
/**
 * Performs this operation against the object store. {@link ObjectStoreWriter} calls this method
 * once for every {@link ObjectStoreOperation} it writes; implementations use the supplied
 * <code>objectStoreClient</code> to make the necessary store calls.
 *
 * @param objectStoreClient a client to the object store
 * @return the response of this operation
 * @throws IOException when the operation fails
 */
public abstract T execute(ObjectStoreClient objectStoreClient) throws IOException;
}
| 3,086 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer/objectstore/ObjectStoreDeleteOperation.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer.objectstore;
import java.io.IOException;
import lombok.AccessLevel;
import lombok.AllArgsConstructor;
import lombok.Getter;
import org.apache.commons.httpclient.HttpStatus;
import com.google.common.base.Preconditions;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.writer.objectstore.response.DeleteResponse;
/**
* An {@link ObjectStoreOperation} that deletes an object with <code>objectId</code> in the object store.
*/
@Alpha
@AllArgsConstructor(access=AccessLevel.PRIVATE)
@Getter
public class ObjectStoreDeleteOperation extends ObjectStoreOperation<DeleteResponse> {
/**
 * Id of the object to be deleted
 */
private final byte[] objectId;
/**
 * Additional delete configurations if any
 */
private final Config deleteConfig;
/**
 * Calls {@link ObjectStoreClient#delete(byte[], Config)} for the object to be deleted.
 *
 * {@inheritDoc}
 * @see org.apache.gobblin.writer.objectstore.ObjectStoreOperation#execute(org.apache.gobblin.writer.objectstore.ObjectStoreClient)
 */
@Override
public DeleteResponse execute(ObjectStoreClient objectStoreClient) throws IOException {
objectStoreClient.delete(this.objectId, this.deleteConfig);
return new DeleteResponse(HttpStatus.SC_ACCEPTED);
}
/**
 * A builder to build new {@link ObjectStoreDeleteOperation}s. An object id is required;
 * the delete config defaults to an empty {@link Config} when not supplied.
 */
public static class Builder {
private byte[] objectId;
private Config deleteConfig;
public Builder withObjectId(byte[] objectId) {
this.objectId = objectId;
return this;
}
public Builder withDeleteConfig(Config deleteConfig) {
this.deleteConfig = deleteConfig;
return this;
}
public ObjectStoreDeleteOperation build() {
Preconditions.checkArgument(this.objectId != null, "Object Id needs to be set");
if (this.deleteConfig == null) {
this.deleteConfig = ConfigFactory.empty();
}
return new ObjectStoreDeleteOperation(this.objectId, this.deleteConfig);
}
}
}
| 3,087 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer/objectstore/ObjectStoreOperationBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer.objectstore;
import org.apache.gobblin.annotation.Alpha;
/**
* Builder to build all types of {@link ObjectStoreOperation}s
*/
@Alpha
public class ObjectStoreOperationBuilder {

  // Prevent instantiation: this is a static factory class (Effective Java Item 4).
  private ObjectStoreOperationBuilder() {
  }

  /**
   * Creates a builder for an {@link ObjectStoreDeleteOperation}.
   *
   * @return a fresh delete-operation builder
   */
  public static ObjectStoreDeleteOperation.Builder deleteBuilder() {
    return new ObjectStoreDeleteOperation.Builder();
  }
}
| 3,088 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer/objectstore | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer/objectstore/response/GetObjectResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer.objectstore.response;
import java.io.InputStream;
import lombok.AllArgsConstructor;
import lombok.Getter;
import com.typesafe.config.Config;
import org.apache.gobblin.writer.objectstore.ObjectStoreClient;
/**
 * The response of {@link ObjectStoreClient#getObject(byte[])} that holds an {@link InputStream} to the object
 * and the object's metadata.
 */
@AllArgsConstructor
@Getter
public class GetObjectResponse {
// Stream of the object's bytes; presumably the caller is responsible for closing it — TODO confirm.
private final InputStream objectData;
// Metadata/properties associated with the object.
private final Config objectConfig;
}
| 3,089 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer/objectstore | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer/objectstore/response/DeleteResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer.objectstore.response;
import lombok.AllArgsConstructor;
import lombok.Getter;
/**
 * Response of a delete operation against an object store.
 */
@AllArgsConstructor
@Getter
public class DeleteResponse {
// HTTP-style status code of the delete, e.g. HttpStatus.SC_ACCEPTED as returned by ObjectStoreDeleteOperation.
private final int statusCode;
}
| 3,090 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer/http/RestWriterBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer.http;
import java.io.IOException;
import org.apache.gobblin.converter.http.RestEntry;
import org.apache.gobblin.writer.DataWriter;
/**
 * Builder for {@link RestWriter}s, which write {@link RestEntry} records with {@code String} payloads.
 */
public class RestWriterBuilder extends AbstractHttpWriterBuilder<Void, RestEntry<String>, RestWriterBuilder> {
@Override
public DataWriter<RestEntry<String>> build() throws IOException {
// Fail fast on incomplete configuration before constructing the writer.
validate();
return new RestWriter(this);
}
}
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer/http/HttpWriterDecoration.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer.http;
import java.io.IOException;
import java.net.URI;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpUriRequest;
import com.google.common.base.Optional;
import com.google.common.util.concurrent.ListenableFuture;
/**
 * Defines the main extension points for the {@link AbstractHttpWriter}.
 * @param <D> the type of the data records
 */
public interface HttpWriterDecoration<D> {
/** An extension point to select the HTTP server to connect to. */
URI chooseServerHost();
/**
 * A callback triggered before attempting to connect to a new host. Subclasses can override this
 * method to customize the connect logic.
 * For example, they can implement OAuth authentication.
 */
void onConnect(URI serverHost) throws IOException;
/**
 * A callback that allows the subclasses to customize the construction of an HTTP request based on
 * incoming records. Customization may include setting the URL, headers, buffering, etc.
 *
 * @param record the new record to be written
 * @param request the current request object; if absent the implementation is responsible for
 *        allocating a new object
 * @return the current request object; if absent no further processing will happen
 *
 */
Optional<HttpUriRequest> onNewRecord(D record);
/**
 * An extension point to send the actual request to the remote server.
 * @param request the request to be sent
 * @return a future that allows access to the response. Response may be retrieved synchronously or
 *         asynchronously.
 */
ListenableFuture<CloseableHttpResponse> sendRequest(HttpUriRequest request) throws IOException ;
/**
 * Customize the waiting for an HTTP response. Implementations can add timeout logic here.
 * @param responseFuture the future object of the last sent request
 */
CloseableHttpResponse waitForResponse(ListenableFuture<CloseableHttpResponse> responseFuture);
/**
 * Processes the response.
 * @param response the response to process
 * @throws IOException if there was a problem reading the response
 * @throws UnexpectedResponseException if the response was unexpected
 */
void processResponse(CloseableHttpResponse response) throws IOException, UnexpectedResponseException;
}
| 3,092 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer/http/AbstractHttpWriterBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer.http;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.concurrent.TimeUnit;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.conn.HttpClientConnectionManager;
import org.apache.http.impl.client.HttpClientBuilder;
import org.apache.http.impl.conn.BasicHttpClientConnectionManager;
import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableMap;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.config.ConfigBuilder;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.http.HttpClientConfiguratorLoader;
import org.apache.gobblin.writer.Destination;
import org.apache.gobblin.writer.FluentDataWriterBuilder;
import lombok.Getter;
/**
 * Base builder for HTTP-based {@code DataWriter}s.
 *
 * <p>Reads writer settings from job properties under {@link #CONF_PREFIX} (or directly from a
 * typesafe {@link Config}) and prepares an Apache {@code HttpClientBuilder} plus a connection
 * manager for concrete writer builders to consume. Lombok {@code @Getter} generates accessors
 * for all fields, which subclasses and {@code AbstractHttpWriter} rely on.
 */
@Getter
public abstract class AbstractHttpWriterBuilder<S, D, B extends AbstractHttpWriterBuilder<S, D, B>>
    extends FluentDataWriterBuilder<S, D, B> {
  private static final Logger LOG = LoggerFactory.getLogger(AbstractHttpWriterBuilder.class);

  /** Prefix under which all HTTP writer settings are looked up in job properties. */
  public static final String CONF_PREFIX = "gobblin.writer.http.";
  // Config keys (relative to CONF_PREFIX when loaded from State properties).
  public static final String HTTP_CONN_MANAGER = "conn_mgr_type";
  public static final String POOLING_CONN_MANAGER_MAX_TOTAL_CONN = "conn_mgr.pooling.max_conn_total";
  public static final String POOLING_CONN_MANAGER_MAX_PER_CONN = "conn_mgr.pooling.max_per_conn";
  public static final String REQUEST_TIME_OUT_MS_KEY = "req_time_out";
  public static final String CONNECTION_TIME_OUT_MS_KEY = "conn_time_out";
  public static final String STATIC_SVC_ENDPOINT = "static_svc_endpoint";

  /** Supported HttpClient connection manager flavors. */
  public static enum ConnManager {
    POOLING,
    BASIC;
  }

  // Defaults merged in via Config.withFallback in fromConfig: 5s timeouts, BASIC manager,
  // and pooling limits of 20 total / 2 per route when POOLING is selected.
  private static final Config FALLBACK =
      ConfigFactory.parseMap(ImmutableMap.<String, Object>builder()
          .put(REQUEST_TIME_OUT_MS_KEY, TimeUnit.SECONDS.toMillis(5L))
          .put(CONNECTION_TIME_OUT_MS_KEY, TimeUnit.SECONDS.toMillis(5L))
          .put(HTTP_CONN_MANAGER, ConnManager.BASIC.name())
          .put(POOLING_CONN_MANAGER_MAX_TOTAL_CONN, 20)
          .put(POOLING_CONN_MANAGER_MAX_PER_CONN, 2)
          .build());

  private State state = new State();
  // Lazily created via getHttpClientBuilder() unless injected via withHttpClientBuilder().
  private Optional<HttpClientBuilder> httpClientBuilder = Optional.absent();
  private HttpClientConnectionManager httpConnManager;
  // NOTE(review): never assigned anywhere in this class — presumably kept for
  // subclass/back-compat reasons; confirm before removing.
  private long reqTimeOut;
  private Optional<Logger> logger = Optional.absent();
  // Populated from STATIC_SVC_ENDPOINT when present in the config.
  private Optional<URI> svcEndpoint = Optional.absent();

  /**
   * For backward compatibility on how Fork creates writer, invoke fromState when it's called writeTo method.
   * @param destination
   * @return
   */
  @Override
  public B writeTo(Destination destination) {
    super.writeTo(destination);
    fromState(destination.getProperties());
    return typedSelf();
  }

  /**
   * Initializes this builder from job {@link State}: extracts properties under
   * {@link #CONF_PREFIX} into a {@link Config} and delegates to {@link #fromConfig(Config)}.
   */
  public B fromState(State state) {
    this.state = state;
    Config config = ConfigBuilder.create().loadProps(state.getProperties(), CONF_PREFIX).build();
    fromConfig(config);
    return typedSelf();
  }

  /**
   * Initializes this builder from a typesafe {@link Config} (merged over {@link #FALLBACK}):
   * sets request/connection timeouts on the HTTP client builder, resolves the optional static
   * service endpoint, and instantiates the configured connection manager.
   *
   * @throws RuntimeException if the static service endpoint is not a valid URI
   * @throws IllegalArgumentException if the connection manager type is unknown
   */
  public B fromConfig(Config config) {
    config = config.withFallback(FALLBACK);
    RequestConfig requestConfig = RequestConfig.copy(RequestConfig.DEFAULT)
        .setSocketTimeout(config.getInt(REQUEST_TIME_OUT_MS_KEY))
        .setConnectTimeout(config.getInt(CONNECTION_TIME_OUT_MS_KEY))
        .setConnectionRequestTimeout(config.getInt(CONNECTION_TIME_OUT_MS_KEY))
        .build();
    getHttpClientBuilder().setDefaultRequestConfig(requestConfig);
    if (config.hasPath(STATIC_SVC_ENDPOINT)) {
      try {
        svcEndpoint = Optional.of(new URI(config.getString(AbstractHttpWriterBuilder.STATIC_SVC_ENDPOINT)));
      } catch (URISyntaxException e) {
        throw new RuntimeException(e);
      }
    }
    String connMgrStr = config.getString(HTTP_CONN_MANAGER);
    switch (ConnManager.valueOf(connMgrStr.toUpperCase())) {
      case BASIC:
        httpConnManager = new BasicHttpClientConnectionManager();
        break;
      case POOLING:
        PoolingHttpClientConnectionManager poolingConnMgr = new PoolingHttpClientConnectionManager();
        poolingConnMgr.setMaxTotal(config.getInt(POOLING_CONN_MANAGER_MAX_TOTAL_CONN));
        poolingConnMgr.setDefaultMaxPerRoute(config.getInt(POOLING_CONN_MANAGER_MAX_PER_CONN));
        httpConnManager = poolingConnMgr;
        break;
      default:
        // Unreachable in practice: ConnManager.valueOf already throws for unknown names.
        throw new IllegalArgumentException(connMgrStr + " is not supported");
    }
    LOG.info("Using " + httpConnManager.getClass().getSimpleName());
    return typedSelf();
  }

  /**
   * Builds the default {@link HttpClientBuilder} via the pluggable
   * {@code HttpClientConfiguratorLoader}, with cookie management disabled and system
   * properties honored.
   */
  public HttpClientBuilder getDefaultHttpClientBuilder() {
    HttpClientConfiguratorLoader clientConfiguratorLoader =
        new HttpClientConfiguratorLoader(getState());
    clientConfiguratorLoader.getConfigurator().setStatePropertiesPrefix(AbstractHttpWriterBuilder.CONF_PREFIX);
    return clientConfiguratorLoader.getConfigurator().configure(getState())
        .getBuilder().disableCookieManagement().useSystemProperties();
  }

  /** Returns the client builder, lazily creating the default one on first access. */
  public HttpClientBuilder getHttpClientBuilder() {
    if (!this.httpClientBuilder.isPresent()) {
      this.httpClientBuilder = Optional.of(getDefaultHttpClientBuilder());
    }
    return this.httpClientBuilder.get();
  }

  /** Injects a pre-configured {@link HttpClientBuilder} (mainly for tests/customization). */
  public B withHttpClientBuilder(HttpClientBuilder builder) {
    this.httpClientBuilder = Optional.of(builder);
    return typedSelf();
  }

  /** Injects a pre-configured connection manager, overriding the config-driven choice. */
  public B withHttpClientConnectionManager(HttpClientConnectionManager connManager) {
    this.httpConnManager = connManager;
    return typedSelf();
  }

  /** Sets the logger the writer should use; a null argument leaves it absent. */
  public B withLogger(Logger logger) {
    this.logger = Optional.fromNullable(logger);
    return typedSelf();
  }

  /** Verifies that all components required to build a writer are present. */
  void validate() {
    Preconditions.checkNotNull(getState(), "State is required for " + this.getClass().getSimpleName());
    Preconditions.checkNotNull(getHttpClientBuilder(), "HttpClientBuilder is required for " + this.getClass().getSimpleName());
    Preconditions.checkNotNull(getHttpConnManager(), "HttpConnManager is required for " + this.getClass().getSimpleName());
  }
}
| 3,093 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer/http/UnexpectedResponseException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer.http;
import java.io.IOException;
/** Denotes that the HTTP Writer observed an unexpected response.*/
public class UnexpectedResponseException extends IOException {

  private static final long serialVersionUID = 1L;

  /** Creates an exception with no detail message and no cause. */
  public UnexpectedResponseException() {
    super();
  }

  /** Creates an exception with the given detail message. */
  public UnexpectedResponseException(String message) {
    super(message);
  }

  /** Creates an exception with the given detail message and underlying cause. */
  public UnexpectedResponseException(String message, Throwable cause) {
    super(message, cause);
  }

  /** Creates an exception wrapping the given cause. */
  public UnexpectedResponseException(Throwable cause) {
    super(cause);
  }
}
| 3,094 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer/http/AbstractHttpWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer.http;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;
import org.apache.commons.lang3.builder.ToStringBuilder;
import org.apache.commons.lang3.builder.ToStringStyle;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpUriRequest;
import org.apache.http.conn.ConnectionRequest;
import org.apache.http.conn.HttpClientConnectionManager;
import org.apache.http.conn.routing.HttpRoute;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClientBuilder;
import org.apache.http.util.EntityUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Optional;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import org.apache.gobblin.instrumented.writer.InstrumentedDataWriter;
import org.apache.gobblin.util.ExecutorsUtils;
/**
 * Base class for HTTP writers. Defines the main extension points for different implementations.
 *
 * <p>Subclasses customize behavior via the {@link HttpWriterDecoration} hooks:
 * {@link #onNewRecord(Object)}, {@link #sendRequest(HttpUriRequest)},
 * {@link #waitForResponse(ListenableFuture)} and {@link #processResponse(CloseableHttpResponse)}.
 */
public abstract class AbstractHttpWriter<D> extends InstrumentedDataWriter<D> implements HttpWriterDecoration<D> {

  // Immutable state
  protected final Logger log;
  protected final boolean debugLogEnabled;
  protected final CloseableHttpClient client;
  // Requests are executed on a dedicated single thread so they can be cancelled/timed out.
  private final ListeningExecutorService singleThreadPool;

  // Mutable state
  private URI curHttpHost = null;
  private long numRecordsWritten = 0L;
  private long numBytesWritten = 0L; //AbstractHttpWriter won't update as it could be expensive.
  // Holds the in-flight request; remains present across a failure so a retry is detectable.
  Optional<HttpUriRequest> curRequest = Optional.absent();

  /**
   * Connection manager wrapper that fires the {@code onConnect} callback with the target host
   * URI each time a connection is requested, before delegating to the wrapped manager.
   */
  class HttpClientConnectionManagerWithConnTracking extends DelegatingHttpClientConnectionManager {

    public HttpClientConnectionManagerWithConnTracking(HttpClientConnectionManager fallback) {
      super(fallback);
    }

    @Override
    public ConnectionRequest requestConnection(HttpRoute route, Object state) {
      try {
        onConnect(new URI(route.getTargetHost().toURI()));
      } catch (IOException | URISyntaxException e) {
        throw new RuntimeException("onConnect() callback failure: " + e, e);
      }
      return super.requestConnection(route, state);
    }
  }

  /**
   * Builds the writer from the given builder: resolves the logger, wraps the builder's
   * connection manager with connection tracking, constructs the HTTP client, and records the
   * static service endpoint if one was configured.
   */
  @SuppressWarnings("rawtypes")
  public AbstractHttpWriter(AbstractHttpWriterBuilder builder) {
    super(builder.getState());
    // BUG FIX: builder.getLogger() returns Optional<Logger>; the previous code cast the
    // Optional itself to Logger, which threw ClassCastException whenever a custom logger
    // had been set via withLogger(). Unwrap with get() instead.
    this.log = builder.getLogger().isPresent()
        ? (Logger) builder.getLogger().get()
        : LoggerFactory.getLogger(this.getClass());
    this.debugLogEnabled = this.log.isDebugEnabled();

    HttpClientBuilder httpClientBuilder = builder.getHttpClientBuilder();
    httpClientBuilder.setConnectionManager(new HttpClientConnectionManagerWithConnTracking(builder.getHttpConnManager()));
    this.client = httpClientBuilder.build();
    this.singleThreadPool = MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor());

    if (builder.getSvcEndpoint().isPresent()) {
      setCurServerHost((URI) builder.getSvcEndpoint().get());
    }
  }

  /**
   * {@inheritDoc}
   */
  @Override
  public void cleanup() throws IOException {
    this.client.close();
    ExecutorsUtils.shutdownExecutorService(this.singleThreadPool, Optional.of(log));
  }

  /**
   * {@inheritDoc}
   */
  @Override
  public void close() throws IOException {
    cleanup();
    super.close();
  }

  /**
   * {@inheritDoc}
   */
  @Override
  public long recordsWritten() {
    return this.numRecordsWritten;
  }

  /**
   * {@inheritDoc}
   */
  @Override
  public long bytesWritten() throws IOException {
    return this.numBytesWritten;
  }

  /**
   * Send and process the request. If it's a retry request, skip onNewRecord method call and go straight sending request.
   * {@inheritDoc}
   */
  @Override
  public void writeImpl(D record) throws IOException {
    if (!isRetry()) {
      //If currentRequest is still here, it means this is retry request.
      //In this case, don't invoke onNewRecord again as onNewRecord is not guaranteed to be idempotent.
      //(e.g: If you do batch processing duplicate record can go in, etc.)
      curRequest = onNewRecord(record);
    }

    if (curRequest.isPresent()) {
      ListenableFuture<CloseableHttpResponse> responseFuture = sendRequest(curRequest.get());
      try (CloseableHttpResponse response = waitForResponse(responseFuture)) {
        processResponse(response);
      }
      curRequest = Optional.absent(); //Clear request if successful
    }
    numRecordsWritten++;
  }

  /**
   * Prior to commit, it will invoke flush method to flush any remaining item if writer uses batch
   * {@inheritDoc}
   * @see org.apache.gobblin.instrumented.writer.InstrumentedDataWriterBase#commit()
   */
  @Override
  public void commit() throws IOException {
    flush();
    super.commit();
  }

  /**
   * If writer supports batch, override this method.
   * (Be aware of failure and retry as flush can be called multiple times in case of failure @see SalesforceRestWriter )
   */
  public void flush() { }

  /**
   * Sends request using single thread pool so that it can be easily terminated(use case: time out)
   * {@inheritDoc}
   * @see org.apache.gobblin.writer.http.HttpWriterDecoration#sendRequest(org.apache.http.client.methods.HttpUriRequest)
   */
  @Override
  public ListenableFuture<CloseableHttpResponse> sendRequest(final HttpUriRequest request) throws IOException {
    return singleThreadPool.submit(new Callable<CloseableHttpResponse>() {
      @Override
      public CloseableHttpResponse call() throws Exception {
        return client.execute(request);
      }
    });
  }

  /**
   * Checks if it's retry request.
   * All successful request should make currentRequest absent. If currentRequest still exists, it means there was a failure.
   * There's couple of methods need this indicator such as onNewRecord, since it is not a new record.
   * @return true if current request it holds is retry.
   */
  public boolean isRetry() {
    return curRequest.isPresent();
  }

  /**
   * Default implementation is to use HttpClients socket timeout which is waiting based on elapsed time between
   * last packet sent from client till receive it from server.
   *
   * {@inheritDoc}
   * @see org.apache.gobblin.writer.http.HttpWriterDecoration#waitForResponse(com.google.common.util.concurrent.ListenableFuture)
   */
  @Override
  public CloseableHttpResponse waitForResponse(ListenableFuture<CloseableHttpResponse> responseFuture) {
    try {
      return responseFuture.get();
    } catch (InterruptedException e) {
      // Restore the interrupt flag so callers up the stack can observe the interruption.
      Thread.currentThread().interrupt();
      throw new RuntimeException(e);
    } catch (ExecutionException e) {
      throw new RuntimeException(e);
    }
  }

  /**
   * Default implementation where any status code equal to or greater than 400 is regarded as a failure.
   * {@inheritDoc}
   * @see org.apache.gobblin.writer.http.HttpWriterDecoration#processResponse(org.apache.http.HttpResponse)
   */
  @Override
  public void processResponse(CloseableHttpResponse response) throws IOException, UnexpectedResponseException {
    if (response.getStatusLine().getStatusCode() >= 400) {
      if (response.getEntity() != null) {
        throw new RuntimeException("Failed. " + EntityUtils.toString(response.getEntity())
            + " , response: " + ToStringBuilder.reflectionToString(response, ToStringStyle.SHORT_PREFIX_STYLE));
      }
      throw new RuntimeException("Failed. Response: " + ToStringBuilder.reflectionToString(response, ToStringStyle.SHORT_PREFIX_STYLE));
    }
  }

  /** Returns the logger this writer uses. */
  public Logger getLog() {
    return this.log;
  }

  /**
   * Returns the current server host, selecting one via {@link #chooseServerHost()} if none
   * is set.
   *
   * @throws RuntimeException if no host could be selected
   */
  public URI getCurServerHost() {
    if (null == this.curHttpHost) {
      setCurServerHost(chooseServerHost());
    }
    if (null == this.curHttpHost) {
      throw new RuntimeException("No server host selected!");
    }
    return this.curHttpHost;
  }

  /** Clears the current http host so that next request will trigger a new selection using
   *  {@link #chooseServerHost()} */
  void clearCurServerHost() {
    this.curHttpHost = null;
  }

  void setCurServerHost(URI curHttpHost) {
    this.log.info("Setting current HTTP server host to: " + curHttpHost);
    this.curHttpHost = curHttpHost;
  }
}
| 3,095 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer/http/RestJsonWriterBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer.http;
import java.io.IOException;
import com.google.gson.JsonObject;
import org.apache.gobblin.converter.http.RestEntry;
import org.apache.gobblin.writer.DataWriter;
/**
 * Builder that builds RestJsonWriter
 */
public class RestJsonWriterBuilder extends AbstractHttpWriterBuilder<Void, RestEntry<JsonObject>, RestJsonWriterBuilder> {

  /**
   * Validates this builder's state and constructs a {@link RestJsonWriter}.
   *
   * @throws IOException if the writer cannot be created
   */
  @Override
  public DataWriter<RestEntry<JsonObject>> build() throws IOException {
    validate();
    RestJsonWriter writer = new RestJsonWriter(this);
    return writer;
  }
}
| 3,096 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer/http/RestJsonWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer.http;
import org.apache.http.HttpHeaders;
import org.apache.http.client.methods.HttpUriRequest;
import org.apache.http.client.methods.RequestBuilder;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import com.google.common.base.Optional;
import com.google.gson.JsonObject;
import org.apache.gobblin.converter.http.RestEntry;
/**
 * Writes via Restful API that accepts JSON as a body
 */
public class RestJsonWriter extends HttpWriter<RestEntry<JsonObject>> {

  public RestJsonWriter(AbstractHttpWriterBuilder builder) {
    super(builder);
  }

  /**
   * Turns each record into an HTTP POST against the record's resource path on the current
   * server host, with the record's JSON value serialized as the request body.
   */
  @Override
  public Optional<HttpUriRequest> onNewRecord(RestEntry<JsonObject> record) {
    String jsonBody = record.getRestEntryVal().toString();
    RequestBuilder post = RequestBuilder.post()
        .addHeader(HttpHeaders.CONTENT_TYPE, ContentType.APPLICATION_JSON.getMimeType())
        .setUri(combineUrl(getCurServerHost(), record.getResourcePath()))
        .setEntity(new StringEntity(jsonBody, ContentType.APPLICATION_JSON));
    return Optional.of(post.build());
  }
}
| 3,097 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer/http/SalesForceRestWriterBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer.http;
import java.io.IOException;
import java.util.Properties;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableMap;
import com.google.gson.JsonObject;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.converter.http.RestEntry;
import org.apache.gobblin.password.PasswordManager;
import org.apache.gobblin.writer.DataWriter;
import org.apache.gobblin.writer.http.SalesforceRestWriter.Operation;
import lombok.AccessLevel;
import lombok.Getter;
/**
 * Builder class that builds SalesForceRestWriter where it takes connection related parameter and type of operation along with the parameters
 * derived from AbstractHttpWriterBuilder
 */
@Getter
public class SalesForceRestWriterBuilder extends AbstractHttpWriterBuilder<Void, RestEntry<JsonObject>, SalesForceRestWriterBuilder> {
  static final String SFDC_PREFIX = "salesforce.";
  // OAuth2 / credential config keys.
  static final String CLIENT_ID = SFDC_PREFIX + "client_id";
  static final String CLIENT_SECRET = SFDC_PREFIX + "client_secret";
  static final String USER_ID = SFDC_PREFIX + "user_id";
  static final String PASSWORD = SFDC_PREFIX + "password";
  static final String SFDC_ENCRYPT_KEY_LOC = SFDC_PREFIX + ConfigurationKeys.ENCRYPT_KEY_LOC;
  static final String USE_STRONG_ENCRYPTION = SFDC_PREFIX + "strong_encryption";
  static final String SECURITY_TOKEN = SFDC_PREFIX + "security_token";
  // Operation / batching config keys.
  static final String OPERATION = SFDC_PREFIX + "operation";
  static final String BATCH_SIZE = SFDC_PREFIX + "batch_size";
  static final String BATCH_RESOURCE_PATH = SFDC_PREFIX + "batch_resource_path";

  // Defaults: production SFDC OAuth2 token endpoint, empty security token, batch size 1.
  private static final Config FALLBACK = ConfigFactory.parseMap(
      ImmutableMap.<String, String>builder()
          .put(AbstractHttpWriterBuilder.STATIC_SVC_ENDPOINT, "https://login.salesforce.com/services/oauth2/token")
          .put(SECURITY_TOKEN, "")
          .put(BATCH_SIZE, "1")
          .build()
  );

  private String clientId;
  private String clientSecret;
  private String userId;
  private String password;
  private String securityToken;
  private Operation operation;
  private int batchSize;
  // Required only when batchSize > 1.
  private Optional<String> batchResourcePath = Optional.absent();
  // Guards against building a writer whose required SFDC settings were never loaded.
  @Getter(AccessLevel.NONE) private boolean initializedFromConfig = false;

  /**
   * Loads SFDC-specific settings (credentials, operation, batching) on top of the generic
   * HTTP writer settings. If an encryption key location is configured, the password is
   * decrypted via {@link PasswordManager}.
   *
   * @throws IllegalArgumentException if batch size is not positive
   */
  @Override
  public SalesForceRestWriterBuilder fromConfig(Config config) {
    super.fromConfig(config);
    initializedFromConfig = true;
    config = config.withFallback(FALLBACK);

    clientId = config.getString(CLIENT_ID);
    clientSecret = config.getString(CLIENT_SECRET);
    userId = config.getString(USER_ID);
    password = config.getString(PASSWORD);
    securityToken = config.getString(SECURITY_TOKEN);
    operation = Operation.valueOf(config.getString(OPERATION).toUpperCase());
    batchSize = config.getInt(BATCH_SIZE);
    // BUG FIX: the check rejects zero as well as negatives, but the old message said
    // "cannot be negative", which was misleading for batchSize == 0.
    Preconditions.checkArgument(batchSize > 0, BATCH_SIZE + " must be positive: " + batchSize);
    if (batchSize > 1) {
      batchResourcePath = Optional.of(config.getString(BATCH_RESOURCE_PATH));
    }

    if (config.hasPath(SFDC_ENCRYPT_KEY_LOC)) {
      Properties props = new Properties();
      if (config.hasPath(USE_STRONG_ENCRYPTION)) {
        props.put(ConfigurationKeys.ENCRYPT_USE_STRONG_ENCRYPTOR, config.getString(USE_STRONG_ENCRYPTION));
      }
      props.put(ConfigurationKeys.ENCRYPT_KEY_LOC, config.getString(SFDC_ENCRYPT_KEY_LOC));
      password = PasswordManager.getInstance(props).readPassword(password);
    }
    return typedSelf();
  }

  /**
   * Validates this builder and constructs a {@link SalesforceRestWriter}.
   *
   * @throws IllegalArgumentException if {@link #fromConfig(Config)} was never called or the
   *         OAuth2 service endpoint is missing
   */
  @Override
  public DataWriter<RestEntry<JsonObject>> build() throws IOException {
    validate();
    //From config is the only path to set the config and also validates required properties.
    Preconditions.checkArgument(initializedFromConfig, this.getClass().getSimpleName() + " must be build via fromConfig method.");
    Preconditions.checkArgument(getSvcEndpoint().isPresent(), "Service end point is required for Oauth2 end point of Salesforce.com");
    return new SalesforceRestWriter(this);
  }
}
| 3,098 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer/http/RestWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer.http;
import org.apache.http.HttpHeaders;
import org.apache.http.client.methods.HttpUriRequest;
import org.apache.http.client.methods.RequestBuilder;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import com.google.common.base.Optional;
import org.apache.gobblin.converter.http.RestEntry;
/**
 * Writes via RESTful API that accepts plain text as a body and resource path from RestEntry
 */
public class RestWriter extends HttpWriter<RestEntry<String>> {

  public RestWriter(RestWriterBuilder builder) {
    super(builder);
  }

  /**
   * Turns each record into an HTTP POST against the record's resource path on the current
   * server host, with the record's string value sent as a plain-text body.
   */
  @Override
  public Optional<HttpUriRequest> onNewRecord(RestEntry<String> record) {
    RequestBuilder post = RequestBuilder.post()
        .addHeader(HttpHeaders.CONTENT_TYPE, ContentType.TEXT_PLAIN.getMimeType())
        .setUri(combineUrl(getCurServerHost(), record.getResourcePath()))
        .setEntity(new StringEntity(record.getRestEntryVal(), ContentType.TEXT_PLAIN));
    return Optional.of(post.build());
  }
}
| 3,099 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.