index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/dataset/Dataset.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.dataset;
import java.util.Collection;
import java.util.Collections;
import java.util.Set;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.fs.Path;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import lombok.Getter;
import lombok.Setter;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.compaction.mapreduce.MRCompactor;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.dataset.FileSystemDataset;
/**
 * A class that represents a dataset whose data should be compacted.
 *
 * <p>A {@link Dataset} tracks the input/output locations of one compaction unit, a mutable
 * compaction priority, a data-completeness {@link DatasetState}, and any {@link Throwable}s
 * recorded while processing it. Instances are created via {@link Builder}.
 *
 * <p>NOTE(review): {@link #equals(Object)} and {@link #hashCode()} are based on the mutable
 * {@link #inputPaths} set, which {@link #overwriteInputPath(Path)}, {@link #overwriteInputPaths(Set)}
 * and {@link #modifyDatasetForRecompact(State)} can change after construction. Avoid mutating a
 * dataset while it is stored in a hash-based collection.
 *
 * @author Ziyang Liu
 */
@Slf4j
public class Dataset implements Comparable<Dataset>, FileSystemDataset {

  // Priority assigned by Builder when none is specified.
  public static final double DEFAULT_PRIORITY = 1.0;
  // Default fraction removed from the priority by reducePriority().
  public static final double DEFAULT_PRIORITY_REDUCTION_FACTOR = 1.0 / 3.0;

  /**
   * Lifecycle states of a {@link Dataset} with respect to data-completeness verification
   * and compaction.
   */
  public enum DatasetState {
    // The data completeness of this dataset has not been verified.
    UNVERIFIED,
    // The data completeness of this dataset has been verified.
    VERIFIED,
    // The data completeness of this dataset has timed out. In this case it is configurable whether the
    // compactor should or should not compact this dataset.
    GIVEN_UP,
    // Compaction of this data set has been completed (which may have either succeeded or failed).
    COMPACTION_COMPLETE
  }

  /** Builder for {@link Dataset} instances. All paths default to empty sets. */
  public static class Builder {
    private Set<Path> inputPaths;
    private Set<Path> inputLatePaths;
    private Set<Path> renamePaths;
    private Path outputPath;
    private Path outputLatePath;
    private Path outputTmpPath;
    private String datasetName;
    private double priority = DEFAULT_PRIORITY;
    private State jobProps;

    public Builder() {
      this.inputPaths = Sets.newHashSet();
      this.inputLatePaths = Sets.newHashSet();
      this.renamePaths = Sets.newHashSet();
      this.jobProps = new State();
    }

    @Deprecated
    private double lateDataThresholdForRecompact;

    /** @deprecated use {@link #addInputPath(Path)} instead. */
    @Deprecated
    public Builder withInputPath(Path inputPath) {
      return addInputPath(inputPath);
    }

    /** @deprecated use {@link #addInputLatePath(Path)} instead. */
    @Deprecated
    public Builder withInputLatePath(Path inputLatePath) {
      return addInputLatePath(inputLatePath);
    }

    /** Adds a path containing input data to be compacted. */
    public Builder addInputPath(Path inputPath) {
      this.inputPaths.add(inputPath);
      return this;
    }

    /** Adds a path containing late input data. */
    public Builder addInputLatePath(Path inputLatePath) {
      this.inputLatePaths.add(inputLatePath);
      return this;
    }

    /** Sets the path where compacted data is published. */
    public Builder withOutputPath(Path outputPath) {
      this.outputPath = outputPath;
      return this;
    }

    /** Sets the path where late data is placed when the output is already compacted. */
    public Builder withOutputLatePath(Path outputLatePath) {
      this.outputLatePath = outputLatePath;
      return this;
    }

    /** Sets the temporary path the MR job writes to before publishing to the output path. */
    public Builder withOutputTmpPath(Path outputTmpPath) {
      this.outputTmpPath = outputTmpPath;
      return this;
    }

    /** Sets the dataset name (used e.g. for blacklist/whitelist and priority lookups). */
    public Builder withDatasetName(String name) {
      this.datasetName = name;
      return this;
    }

    /** Sets the initial compaction priority; defaults to {@link #DEFAULT_PRIORITY}. */
    public Builder withPriority(double priority) {
      this.priority = priority;
      return this;
    }

    /** Sets a single job property carried along with the dataset. */
    public Builder withJobProp(String key, Object value) {
      this.jobProps.setProp(key, value);
      return this;
    }

    /** @deprecated used only by the deprecated {@link Dataset#checkIfNeedToRecompact(long, long)}. */
    @Deprecated
    public Builder withLateDataThresholdForRecompact(double lateDataThresholdForRecompact) {
      this.lateDataThresholdForRecompact = lateDataThresholdForRecompact;
      return this;
    }

    public Dataset build() {
      return new Dataset(this);
    }
  }

  private final Path outputPath;
  private final Path outputLatePath;
  private final Path outputTmpPath;
  // Extra input paths discovered after construction (see addAdditionalInputPath).
  private final Set<Path> additionalInputPaths;
  // Thread-safe collection of errors recorded while processing this dataset.
  private final Collection<Throwable> throwables;
  private Set<Path> inputPaths;
  private Set<Path> inputLatePaths;
  private State jobProps;
  private double priority;
  private boolean needToRecompact;
  private final String datasetName;
  // Verification/compaction state; AtomicReference enables compareAndSetState across threads.
  private AtomicReference<DatasetState> state;
  @Getter@Setter
  private Set<Path> renamePaths;
  @Deprecated
  private double lateDataThresholdForRecompact;

  private Dataset(Builder builder) {
    this.inputPaths = builder.inputPaths;
    this.inputLatePaths = builder.inputLatePaths;
    this.outputPath = builder.outputPath;
    this.outputLatePath = builder.outputLatePath;
    this.outputTmpPath = builder.outputTmpPath;
    this.additionalInputPaths = Sets.newHashSet();
    this.throwables = Collections.synchronizedCollection(Lists.<Throwable> newArrayList());
    this.priority = builder.priority;
    this.lateDataThresholdForRecompact = builder.lateDataThresholdForRecompact;
    // Every dataset starts unverified.
    this.state = new AtomicReference<>(DatasetState.UNVERIFIED);
    this.datasetName = builder.datasetName;
    this.jobProps = builder.jobProps;
    this.renamePaths = builder.renamePaths;
  }

  /**
   * An immutable copy of input paths that contains the data of this {@link Dataset} to be compacted.
   */
  public Set<Path> inputPaths() {
    return ImmutableSet.copyOf(this.inputPaths);
  }

  /**
   * An immutable copy of paths that contains the late data of this {@link Dataset} to be compacted.
   * Late input data may be generated if the input data is obtained from another compaction,
   * e.g., if we run hourly compaction and daily compaction on a topic where the compacted hourly
   * data is the input to the daily compaction.
   *
   * If this path contains any data and this {@link Dataset} is not already compacted, deduplication
   * will be applied to this {@link Dataset}.
   */
  public Set<Path> inputLatePaths() {
    return ImmutableSet.copyOf(this.inputLatePaths);
  }

  /**
   * Output path for the compacted data.
   */
  public Path outputPath() {
    return this.outputPath;
  }

  /**
   * If {@link #outputPath()} is already compacted and new input data is found, those data can be copied
   * to this path.
   */
  public Path outputLatePath() {
    return this.outputLatePath;
  }

  /**
   * The path where the MR job writes output to. Data will be published to {@link #outputPath()} if the compaction
   * is successful.
   */
  public Path outputTmpPath() {
    return this.outputTmpPath;
  }

  /** Name of this dataset, as supplied to {@link Builder#withDatasetName(String)}. */
  public String getDatasetName() {
    return this.datasetName;
  }

  /** Whether a recompaction has been requested (see {@link #checkIfNeedToRecompact(DatasetHelper)}). */
  public boolean needToRecompact() {
    return this.needToRecompact;
  }

  /**
   * Additional paths of this {@link Dataset} besides {@link #inputPaths()} that contain data to be compacted.
   *
   * <p>NOTE(review): unlike {@link #inputPaths()}, this returns the live internal set, not a copy.
   */
  public Set<Path> additionalInputPaths() {
    return this.additionalInputPaths;
  }

  /**
   * Add an additional input path for this {@link Dataset}.
   */
  public void addAdditionalInputPath(Path path) {
    this.additionalInputPaths.add(path);
  }

  /**
   * Add additional input paths for this {@link Dataset}.
   */
  public void addAdditionalInputPaths(Collection<Path> paths) {
    this.additionalInputPaths.addAll(paths);
  }

  /** Current compaction priority; higher values compact first (see {@link #compareTo(Dataset)}). */
  public double priority() {
    return this.priority;
  }

  /** Current {@link DatasetState}. */
  public DatasetState state() {
    return this.state.get();
  }

  /**
   * Reduce the priority of the dataset by {@link #DEFAULT_PRIORITY_REDUCTION_FACTOR}.
   * @return the reduced priority
   */
  public double reducePriority() {
    return reducePriority(DEFAULT_PRIORITY_REDUCTION_FACTOR);
  }

  /**
   * Reduce the priority of the dataset.
   * @param reductionFactor the reduction factor. The priority will be reduced by reductionFactor,
   *        i.e. the new priority is {@code priority * (1 - reductionFactor)}.
   * @return the reduced priority
   */
  public double reducePriority(double reductionFactor) {
    this.priority *= 1.0 - reductionFactor;
    return this.priority;
  }

  /** Marks this dataset for recompaction if the helper's condition says one is needed. */
  public void checkIfNeedToRecompact(DatasetHelper datasetHelper) {
    if (datasetHelper.getCondition().isRecompactionNeeded(datasetHelper)) {
      this.needToRecompact = true;
    }
  }

  /**
   * Marks this dataset for recompaction when the late-data fraction exceeds the configured threshold.
   *
   * <p>NOTE(review): if both counts are zero, the percentage is NaN and the comparison is false,
   * so no recompaction is triggered.
   *
   * @deprecated use {@link #checkIfNeedToRecompact(DatasetHelper)} instead.
   */
  @Deprecated
  public void checkIfNeedToRecompact(long lateDataCount, long nonLateDataCount) {
    double lateDataPercent = lateDataCount * 1.0 / (lateDataCount + nonLateDataCount);
    log.info("Late data percentage is " + lateDataPercent + " and threshold is " + this.lateDataThresholdForRecompact);
    if (lateDataPercent > this.lateDataThresholdForRecompact) {
      this.needToRecompact = true;
    }
  }

  /** Unconditionally sets the {@link DatasetState}. */
  public void setState(DatasetState state) {
    this.state.set(state);
  }

  /**
   * Sets the {@link DatasetState} of the {@link Dataset} to the given updated value if the
   * current value == the expected value.
   */
  public void compareAndSetState(DatasetState expect, DatasetState update) {
    this.state.compareAndSet(expect, update);
  }

  /** Job properties associated with this dataset. */
  public State jobProps() {
    return this.jobProps;
  }

  /** Merges (does not replace) the given properties into this dataset's job properties. */
  public void setJobProps(State jobProps) {
    this.jobProps.addAll(jobProps);
  }

  /** Sets a single job property. */
  public void setJobProp(String key, Object value) {
    this.jobProps.setProp(key, value);
  }

  /**
   * Overwrite current inputPaths with newInputPath
   */
  public void overwriteInputPath(Path newInputPath) {
    this.inputPaths = Sets.newHashSet(newInputPath);
  }

  /** Replace the input path set entirely (no defensive copy is made). */
  public void overwriteInputPaths(Set<Path> newInputPaths) {
    this.inputPaths = newInputPaths;
  }

  /**
   * Overwrite current inputLatePaths with newInputLatePath
   */
  public void overwriteInputLatePath(Path newInputLatePath) {
    this.inputLatePaths = Sets.newHashSet(newInputLatePath);
  }

  /** Clears the recompaction flag, e.g. after a recompaction has been scheduled. */
  public void resetNeedToRecompact() {
    this.needToRecompact = false;
  }

  private void cleanAdditionalInputPath () {
    this.additionalInputPaths.clear();
  }

  /**
   * Modify an existing dataset to recompact from its output path.
   *
   * <p>If {@code COMPACTION_RECOMPACT_ALL_DATA} is false, only the late output directory is used
   * as input; otherwise the whole output (plus its late directory) is recompacted.
   */
  public void modifyDatasetForRecompact(State recompactState) {
    if (!this.jobProps().getPropAsBoolean(MRCompactor.COMPACTION_RECOMPACT_ALL_DATA, MRCompactor.DEFAULT_COMPACTION_RECOMPACT_ALL_DATA)) {
      this.overwriteInputPath(this.outputLatePath);
      this.cleanAdditionalInputPath();
    } else {
      this.overwriteInputPath(this.outputPath);
      this.overwriteInputLatePath(this.outputLatePath);
      this.addAdditionalInputPath(this.outputLatePath);
    }
    this.setJobProps(recompactState);
    this.resetNeedToRecompact();
  }

  /**
   * Get dataset URN, which equals {@link #outputPath} by removing {@link MRCompactor#COMPACTION_JOB_DEST_PARTITION}
   * and {@link MRCompactor#COMPACTION_DEST_SUBDIR}, if any.
   */
  public String getUrn() {
    return this.simplifyOutputPath().toString();
  }

  /**
   * Get dataset name, which equals {@link Path#getName()} of {@link #outputPath} after removing
   * {@link MRCompactor#COMPACTION_JOB_DEST_PARTITION} and {@link MRCompactor#COMPACTION_DEST_SUBDIR}, if any.
   */
  public String getName() {
    return this.simplifyOutputPath().getName();
  }

  // Strips the destination-partition and destination-subdir suffixes (if present) off outputPath.
  private Path simplifyOutputPath() {
    Path simplifiedPath = new Path(StringUtils.removeEnd(this.outputPath.toString(),
        this.jobProps().getProp(MRCompactor.COMPACTION_JOB_DEST_PARTITION, StringUtils.EMPTY)));
    simplifiedPath = new Path(StringUtils.removeEnd(simplifiedPath.toString(),
        this.jobProps().getProp(MRCompactor.COMPACTION_DEST_SUBDIR, MRCompactor.DEFAULT_COMPACTION_DEST_SUBDIR)));
    return simplifiedPath;
  }

  /** Errors recorded so far; the backing collection is synchronized. */
  public Collection<Throwable> throwables() {
    return this.throwables;
  }

  /**
   * Record a {@link Throwable} in a {@link Dataset}.
   */
  public void addThrowable(Throwable t) {
    this.throwables.add(t);
  }

  /**
   * Skip the {@link Dataset} by setting its {@link DatasetState} to {@link DatasetState#COMPACTION_COMPLETE},
   * and record the given {@link Throwable} in the {@link Dataset}.
   */
  public void skip(Throwable t) {
    this.setState(DatasetState.COMPACTION_COMPLETE);
    this.throwables.add(t);
  }

  // Orders datasets by descending priority. NOTE(review): inconsistent with equals(),
  // which compares inputPaths rather than priority.
  @Override
  public int compareTo(Dataset o) {
    return Double.compare(o.priority, this.priority);
  }

  // Identity is based solely on inputPaths (mutable — see class-level note).
  @Override
  public int hashCode() {
    final int prime = 31;
    int result = 1;
    result = prime * result + ((this.inputPaths == null) ? 0 : this.inputPaths.hashCode());
    return result;
  }

  @Override
  public boolean equals(Object obj) {
    if (this == obj) {
      return true;
    }
    if (!(obj instanceof Dataset)) {
      return false;
    }
    Dataset other = (Dataset) obj;
    if (this.inputPaths == null) {
      if (other.inputPaths != null) {
        return false;
      }
    } else if (!this.inputPaths.equals(other.inputPaths)) {
      return false;
    }
    return true;
  }

  /**
   * @return the {@link Path} of the {@link Dataset}.
   */
  @Override
  public String toString() {
    return this.inputPaths.toString();
  }

  @Override
  public Path datasetRoot() {
    return this.outputPath;
  }

  @Override
  public String datasetURN() {
    return this.datasetRoot().toString();
  }
}
| 1,800 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/dataset/TimeBasedSubDirDatasetsFinder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.dataset;
import java.io.IOException;
import java.util.Set;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.Period;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import org.joda.time.format.PeriodFormatter;
import org.joda.time.format.PeriodFormatterBuilder;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.Sets;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.compaction.mapreduce.MRCompactor;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.util.DatasetFilterUtils;
/**
 * An implementation of {@link DatasetsFinder} based on time-based subdirs of the inputDir.
 *
 * {@link #inputDir} may contain multiple datasets. The path must follow some subdir and time-based pattern,
 * which can be configured by compaction.*.subdir and compaction.timebased.folder.pattern.
 * For example, the subdir name is 'daily' and time-based pattern is 'YYYY/MM/dd'.
 * A dataset will be created for each qualified folder that matches '[inputDir]/datasetName/daily/YYYY/MM/dd'.
 *
 * Dataset name is used for blacklist/whitelist, and finding high/normal priorities, and recompaction threshold.
 *
 * To control which folders to process, use properties compaction.timebased.min.time.ago and
 * compaction.timebased.max.time.ago. The format is ?m?d?h, e.g., 3m or 2d10h.
 */
@Slf4j
public class TimeBasedSubDirDatasetsFinder extends DatasetsFinder {
  private static final String COMPACTION_TIMEBASED_PREFIX = "compaction.timebased.";

  /**
   * Configuration properties related to time based compaction jobs.
   */
  public static final String COMPACTION_TIMEBASED_FOLDER_PATTERN = COMPACTION_TIMEBASED_PREFIX + "folder.pattern";
  public static final String DEFAULT_COMPACTION_TIMEBASED_FOLDER_PATTERN = "YYYY/MM/dd";
  // Glob used to select dataset directories directly under the input dir.
  public static final String COMPACTION_TIMEBASED_SUBDIR_PATTERN = COMPACTION_TIMEBASED_PREFIX + "subdir.pattern";
  public static final String DEFAULT_COMPACTION_TIMEBASED_SUBDIR_PATTERN = "*";
  // The earliest dataset timestamp to be processed. Format = ?m?d?h.
  public static final String COMPACTION_TIMEBASED_MAX_TIME_AGO = COMPACTION_TIMEBASED_PREFIX + "max.time.ago";
  public static final String DEFAULT_COMPACTION_TIMEBASED_MAX_TIME_AGO = "3d";
  // The latest dataset timestamp to be processed. Format = ?m?d?h.
  public static final String COMPACTION_TIMEBASED_MIN_TIME_AGO = COMPACTION_TIMEBASED_PREFIX + "min.time.ago";
  public static final String DEFAULT_COMPACTION_TIMEBASED_MIN_TIME_AGO = "1d";
  // The latest compaction run time to be processed. Format = ?m?d?h.
  public static final String MIN_RECOMPACTION_DURATION =
      COMPACTION_TIMEBASED_PREFIX + "min.recompaction.duration";
  // By default we don't apply this limitation
  public static final String DEFAULT_MIN_RECOMPACTION_DURATION = "0h";

  // Time pattern of the dataset folders, e.g. "YYYY/MM/dd".
  protected final String folderTimePattern;
  // Glob pattern selecting dataset directories under inputDir.
  protected final String subDirPattern;
  protected final DateTimeZone timeZone;
  // Parses and prints folder times using folderTimePattern in the configured time zone.
  protected final DateTimeFormatter timeFormatter;
  protected final String inputSubDir;
  protected final String inputLateSubDir;
  protected final String destSubDir;
  protected final String destLateSubDir;

  /**
   * Constructor taking an explicit {@link FileSystem}, primarily for tests.
   *
   * <p>NOTE(review): initialization is duplicated in both constructors because the fields are
   * final and the two super constructors differ; keep the two bodies in sync.
   */
  @VisibleForTesting
  public TimeBasedSubDirDatasetsFinder(State state, FileSystem fs) throws Exception {
    super(state, fs);
    this.inputSubDir = getInputSubDir();
    this.inputLateSubDir = getInputLateSubDir();
    this.destSubDir = getDestSubDir();
    this.destLateSubDir = getDestLateSubDir();
    this.folderTimePattern = getFolderPattern();
    this.subDirPattern = getSubDirPattern();
    this.timeZone = DateTimeZone
        .forID(this.state.getProp(MRCompactor.COMPACTION_TIMEZONE, MRCompactor.DEFAULT_COMPACTION_TIMEZONE));
    this.timeFormatter = DateTimeFormat.forPattern(this.folderTimePattern).withZone(this.timeZone);
  }

  public TimeBasedSubDirDatasetsFinder(State state) throws Exception {
    super(state);
    this.inputSubDir = getInputSubDir();
    this.inputLateSubDir = getInputLateSubDir();
    this.destSubDir = getDestSubDir();
    this.destLateSubDir = getDestLateSubDir();
    this.folderTimePattern = getFolderPattern();
    this.subDirPattern = getSubDirPattern();
    this.timeZone = DateTimeZone
        .forID(this.state.getProp(MRCompactor.COMPACTION_TIMEZONE, MRCompactor.DEFAULT_COMPACTION_TIMEZONE));
    this.timeFormatter = DateTimeFormat.forPattern(this.folderTimePattern).withZone(this.timeZone);
  }

  // Strips the base path prefix (and a leading "/") to obtain the dataset name.
  protected String getDatasetName(String path, String basePath) {
    int startPos = path.indexOf(basePath) + basePath.length();
    return StringUtils.removeStart(path.substring(startPos), "/");
  }

  /**
   * Each subdir in {@link DatasetsFinder#inputDir} is considered a dataset, if it satisfies blacklist and whitelist.
   */
  @Override
  public Set<Dataset> findDistinctDatasets() throws IOException {
    Set<Dataset> datasets = Sets.newHashSet();
    // One directory per candidate dataset, selected by the configured subdir glob.
    for (FileStatus datasetsFileStatus : this.fs.globStatus(new Path(inputDir, subDirPattern))) {
      log.info("Scanning directory : " + datasetsFileStatus.getPath().toString());
      if (datasetsFileStatus.isDirectory()) {
        String datasetName = getDatasetName(datasetsFileStatus.getPath().toString(), inputDir);
        if (DatasetFilterUtils.survived(datasetName, this.blacklist, this.whitelist)) {
          log.info("Found dataset: " + datasetName);
          Path inputPath = new Path(this.inputDir, new Path(datasetName, this.inputSubDir));
          Path inputLatePath = new Path(this.inputDir, new Path(datasetName, this.inputLateSubDir));
          Path outputPath = new Path(this.destDir, new Path(datasetName, this.destSubDir));
          Path outputLatePath = new Path(this.destDir, new Path(datasetName, this.destLateSubDir));
          Path outputTmpPath = new Path(this.tmpOutputDir, new Path(datasetName, this.destSubDir));
          double priority = this.getDatasetPriority(datasetName);
          String folderStructure = getFolderStructure();
          // One Dataset per time-partition folder inside the dataset directory.
          for (FileStatus status : this.fs.globStatus(new Path(inputPath, folderStructure))) {
            Path jobInputPath = status.getPath();
            DateTime folderTime = null;
            try {
              folderTime = getFolderTime(jobInputPath, inputPath);
            } catch (RuntimeException e) {
              // Folder name does not parse as a time partition; skip instead of failing the scan.
              log.warn("{} is not a valid folder. Will be skipped due to exception.", jobInputPath, e);
              continue;
            }
            if (folderWithinAllowedPeriod(jobInputPath, folderTime)) {
              Path jobInputLatePath = appendFolderTime(inputLatePath, folderTime);
              Path jobOutputPath = appendFolderTime(outputPath, folderTime);
              Path jobOutputLatePath = appendFolderTime(outputLatePath, folderTime);
              Path jobOutputTmpPath = appendFolderTime(outputTmpPath, folderTime);
              // When recompacting, read from the destination paths instead of the raw input.
              Dataset timeBasedDataset = new Dataset.Builder().withPriority(priority)
                  .withDatasetName(datasetName)
                  .addInputPath(this.recompactDatasets ? jobOutputPath : jobInputPath)
                  .addInputLatePath(this.recompactDatasets ? jobOutputLatePath : jobInputLatePath)
                  .withOutputPath(jobOutputPath).withOutputLatePath(jobOutputLatePath)
                  .withOutputTmpPath(jobOutputTmpPath).build();
              // Stores the extra information for timeBasedDataset
              timeBasedDataset.setJobProp(MRCompactor.COMPACTION_JOB_DEST_PARTITION,
                  folderTime.toString(this.timeFormatter));
              timeBasedDataset.setJobProp(MRCompactor.COMPACTION_INPUT_PATH_TIME, folderTime.getMillis());
              datasets.add(timeBasedDataset);
            }
          }
        }
      }
    }
    return datasets;
  }

  private String getInputSubDir() {
    return this.state.getProp(MRCompactor.COMPACTION_INPUT_SUBDIR, MRCompactor.DEFAULT_COMPACTION_INPUT_SUBDIR);
  }

  // Input subdir with the late-data suffix appended.
  private String getInputLateSubDir() {
    return this.state.getProp(MRCompactor.COMPACTION_INPUT_SUBDIR, MRCompactor.DEFAULT_COMPACTION_INPUT_SUBDIR)
        + MRCompactor.COMPACTION_LATE_DIR_SUFFIX;
  }

  // Destination subdir with the late-data suffix appended.
  private String getDestLateSubDir() {
    return this.state.getProp(MRCompactor.COMPACTION_DEST_SUBDIR, MRCompactor.DEFAULT_COMPACTION_DEST_SUBDIR)
        + MRCompactor.COMPACTION_LATE_DIR_SUFFIX;
  }

  private String getDestSubDir() {
    return this.state.getProp(MRCompactor.COMPACTION_DEST_SUBDIR, MRCompactor.DEFAULT_COMPACTION_DEST_SUBDIR);
  }

  // Converts the folder time pattern (e.g. "YYYY/MM/dd") into a filesystem glob (e.g. "*/*/*")
  // by replacing each run of pattern characters with a wildcard, keeping "/" separators.
  protected String getFolderStructure() {
    return this.folderTimePattern.replaceAll("[a-zA-Z0-9='-]+", "*");
  }

  private String getFolderPattern() {
    String folderPattern =
        this.state.getProp(COMPACTION_TIMEBASED_FOLDER_PATTERN, DEFAULT_COMPACTION_TIMEBASED_FOLDER_PATTERN);
    log.info("Compaction folder pattern: " + folderPattern);
    return folderPattern;
  }

  private String getSubDirPattern() {
    String subdirPattern =
        this.state.getProp(COMPACTION_TIMEBASED_SUBDIR_PATTERN, DEFAULT_COMPACTION_TIMEBASED_SUBDIR_PATTERN);
    log.info("Compaction subdir pattern: " + subdirPattern);
    return subdirPattern;
  }

  // Parses the time-partition suffix of 'path' (relative to 'basePath') with the folder time
  // formatter. Throws an unchecked Joda parse exception for non-conforming folder names.
  protected DateTime getFolderTime(Path path, Path basePath) {
    int startPos = path.toString().indexOf(basePath.toString()) + basePath.toString().length();
    return this.timeFormatter.parseDateTime(StringUtils.removeStart(path.toString().substring(startPos), "/"));
  }

  /**
   * Return true iff input folder time is between compaction.timebased.min.time.ago and
   * compaction.timebased.max.time.ago.
   */
  protected boolean folderWithinAllowedPeriod(Path inputFolder, DateTime folderTime) {
    DateTime currentTime = new DateTime(this.timeZone);
    PeriodFormatter periodFormatter = getPeriodFormatter();
    DateTime earliestAllowedFolderTime = getEarliestAllowedFolderTime(currentTime, periodFormatter);
    DateTime latestAllowedFolderTime = getLatestAllowedFolderTime(currentTime, periodFormatter);
    if (folderTime.isBefore(earliestAllowedFolderTime)) {
      log.info(String.format("Folder time for %s is %s, earlier than the earliest allowed folder time, %s. Skipping",
          inputFolder, folderTime, earliestAllowedFolderTime));
      return false;
    } else if (folderTime.isAfter(latestAllowedFolderTime)) {
      log.info(String.format("Folder time for %s is %s, later than the latest allowed folder time, %s. Skipping",
          inputFolder, folderTime, latestAllowedFolderTime));
      return false;
    } else {
      return true;
    }
  }

  /** Formatter for period strings like "3m", "2d10h" or "30min" (months/days/hours/minutes). */
  public static PeriodFormatter getPeriodFormatter() {
    return new PeriodFormatterBuilder().appendMonths().appendSuffix("m").appendDays().appendSuffix("d").appendHours()
        .appendSuffix("h").appendMinutes().appendSuffix("min").toFormatter();
  }

  // currentTime minus compaction.timebased.max.time.ago.
  private DateTime getEarliestAllowedFolderTime(DateTime currentTime, PeriodFormatter periodFormatter) {
    String maxTimeAgoStr =
        this.state.getProp(COMPACTION_TIMEBASED_MAX_TIME_AGO, DEFAULT_COMPACTION_TIMEBASED_MAX_TIME_AGO);
    Period maxTimeAgo = periodFormatter.parsePeriod(maxTimeAgoStr);
    return currentTime.minus(maxTimeAgo);
  }

  // currentTime minus compaction.timebased.min.time.ago.
  private DateTime getLatestAllowedFolderTime(DateTime currentTime, PeriodFormatter periodFormatter) {
    String minTimeAgoStr =
        this.state.getProp(COMPACTION_TIMEBASED_MIN_TIME_AGO, DEFAULT_COMPACTION_TIMEBASED_MIN_TIME_AGO);
    Period minTimeAgo = periodFormatter.parsePeriod(minTimeAgoStr);
    return currentTime.minus(minTimeAgo);
  }

  // Appends the formatted folder time (e.g. "2024/01/31") to the given base path.
  protected Path appendFolderTime(Path path, DateTime folderTime) {
    return new Path(path, folderTime.toString(this.timeFormatter));
  }
}
| 1,801 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/dataset/SimpleDatasetsFinder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.dataset;
import java.io.IOException;
import java.util.Set;
import org.apache.hadoop.fs.Path;
import com.google.common.collect.Sets;
import org.apache.gobblin.compaction.mapreduce.MRCompactor;
import org.apache.gobblin.configuration.State;
/**
 * Implementation of {@link DatasetsFinder} that describes exactly one dataset: it takes
 * {@link MRCompactor#COMPACTION_INPUT_DIR} as the input location and
 * {@link MRCompactor#COMPACTION_DEST_DIR} as the output location.
 */
public class SimpleDatasetsFinder extends DatasetsFinder {
  public SimpleDatasetsFinder(State state) {
    super(state);
  }

  /**
   * Builds the single dataset described by {@link #inputDir} and {@link #destDir}.
   *
   * <p>When {@link #recompactDatasets} is true, the dataset's input (and late input) paths point
   * at the destination directories, so previously compacted output is compacted again.
   */
  @Override
  public Set<Dataset> findDistinctDatasets() throws IOException {
    Path input = new Path(this.inputDir);
    Path inputLate = new Path(input, MRCompactor.COMPACTION_LATE_DIR_SUFFIX);
    Path output = new Path(this.destDir);
    Path outputLate = new Path(output, MRCompactor.COMPACTION_LATE_DIR_SUFFIX);

    // Recompaction reads from the destination instead of the original input.
    Path effectiveInput;
    Path effectiveInputLate;
    if (this.recompactDatasets) {
      effectiveInput = output;
      effectiveInputLate = outputLate;
    } else {
      effectiveInput = input;
      effectiveInputLate = inputLate;
    }

    Dataset dataset = new Dataset.Builder()
        .withPriority(this.getDatasetPriority(input.getName()))
        .addInputPath(effectiveInput)
        .addInputLatePath(effectiveInputLate)
        .withOutputPath(output)
        .withOutputLatePath(outputLate)
        .withOutputTmpPath(new Path(this.tmpOutputDir))
        .build();

    Set<Dataset> datasets = Sets.newHashSet();
    datasets.add(dataset);
    return datasets;
  }
}
| 1,802 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/dataset/DatasetsFinder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.dataset;
import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.regex.Pattern;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import org.apache.gobblin.compaction.mapreduce.MRCompactor;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.source.extractor.extract.kafka.ConfigStoreUtils;
import org.apache.gobblin.util.DatasetFilterUtils;
import org.apache.gobblin.util.HadoopUtils;
/**
* {@link Dataset}s finder to identify datasets, using given properties.
*/
public abstract class DatasetsFinder implements org.apache.gobblin.dataset.DatasetsFinder<Dataset> {
public static final double HIGH_PRIORITY = 3.0;
public static final double NORMAL_PRIORITY = 2.0;
public static final double LOW_PRIORITY = 1.0;
public static final String TMP_OUTPUT_SUBDIR = "output";
protected final State state;
protected final Configuration conf;
protected final FileSystem fs;
protected final String inputDir;
protected final String destDir;
protected final String tmpOutputDir;
protected final List<Pattern> blacklist;
protected final List<Pattern> whitelist;
protected final List<Pattern> highPriority;
protected final List<Pattern> normalPriority;
protected final boolean recompactDatasets;
public DatasetsFinder(State state) {
this(state, getFileSystem(state));
}
@VisibleForTesting
DatasetsFinder(State state, FileSystem fs) {
this.state = state;
this.conf = HadoopUtils.getConfFromState(state);
this.fs = fs;
this.inputDir = getInputDir();
this.destDir = getDestDir();
this.tmpOutputDir = getTmpOutputDir();
this.blacklist = DatasetFilterUtils.getPatternList(state, MRCompactor.COMPACTION_BLACKLIST);
this.whitelist = DatasetFilterUtils.getPatternList(state, MRCompactor.COMPACTION_WHITELIST);
setTopicsFromConfigStore(state);
this.highPriority = getHighPriorityPatterns();
this.normalPriority = getNormalPriorityPatterns();
this.recompactDatasets = getRecompactDatasets();
}
private void setTopicsFromConfigStore(State state) {
Set<String> blacklistTopicsFromConfigStore = new HashSet<>();
Set<String> whitelistTopicsFromConfigStore = new HashSet<>();
ConfigStoreUtils.setTopicsFromConfigStore(state.getProperties(), blacklistTopicsFromConfigStore,
whitelistTopicsFromConfigStore, MRCompactor.COMPACTION_BLACKLIST, MRCompactor.COMPACTION_WHITELIST);
this.blacklist.addAll(DatasetFilterUtils.getPatternsFromStrings(new ArrayList<>(blacklistTopicsFromConfigStore)));
this.whitelist.addAll(DatasetFilterUtils.getPatternsFromStrings(new ArrayList<>(whitelistTopicsFromConfigStore)));
}
/**
* Create a {@link Dataset}, which is comparable, using {@link #inputDir} and {@link #destDir}.
*/
public abstract Set<Dataset> findDistinctDatasets() throws IOException;
@Override
public List<Dataset> findDatasets() throws IOException {
return Lists.newArrayList(this.findDistinctDatasets());
}
/**
* @return {@link #destDir} shared by all {@link Dataset}s root paths.
*/
@Override
public Path commonDatasetRoot() {
return new Path(this.destDir);
}
private String getInputDir() {
Preconditions.checkArgument(this.state.contains(MRCompactor.COMPACTION_INPUT_DIR),
"Missing required property " + MRCompactor.COMPACTION_INPUT_DIR);
return this.state.getProp(MRCompactor.COMPACTION_INPUT_DIR);
}
private String getDestDir() {
Preconditions.checkArgument(this.state.contains(MRCompactor.COMPACTION_DEST_DIR),
"Missing required property " + MRCompactor.COMPACTION_DEST_DIR);
return this.state.getProp(MRCompactor.COMPACTION_DEST_DIR);
}
private String getTmpOutputDir() {
return new Path(this.state.getProp(MRCompactor.COMPACTION_TMP_DEST_DIR,
MRCompactor.DEFAULT_COMPACTION_TMP_DEST_DIR), TMP_OUTPUT_SUBDIR).toString();
}
  /**
   * Resolves the {@link FileSystem} used by this datasets finder. If
   * {@link MRCompactor#COMPACTION_FILE_SYSTEM_URI} is set, the file system is resolved from
   * that URI; otherwise the default file system from the Hadoop configuration is used.
   *
   * @throws RuntimeException wrapping any {@link IOException} raised during resolution
   */
  private static FileSystem getFileSystem(State state) {
    try {
      if (state.contains(MRCompactor.COMPACTION_FILE_SYSTEM_URI)) {
        URI uri = URI.create(state.getProp(MRCompactor.COMPACTION_FILE_SYSTEM_URI));
        return FileSystem.get(uri, HadoopUtils.getConfFromState(state));
      }
      // No explicit URI configured: fall back to the default file system.
      return FileSystem.get(HadoopUtils.getConfFromState(state));
    } catch (IOException e) {
      throw new RuntimeException("Failed to get filesystem for datasetsFinder.", e);
    }
  }
private List<Pattern> getHighPriorityPatterns() {
List<String> list = this.state.getPropAsList(MRCompactor.COMPACTION_HIGH_PRIORITY_TOPICS, StringUtils.EMPTY);
return DatasetFilterUtils.getPatternsFromStrings(list);
}
private List<Pattern> getNormalPriorityPatterns() {
List<String> list = this.state.getPropAsList(MRCompactor.COMPACTION_NORMAL_PRIORITY_TOPICS, StringUtils.EMPTY);
return DatasetFilterUtils.getPatternsFromStrings(list);
}
  /**
   * @return whether datasets should be recompacted from destination paths, per
   *         {@link MRCompactor#COMPACTION_RECOMPACT_FROM_DEST_PATHS} (falls back to the default)
   */
  private boolean getRecompactDatasets() {
    return this.state.getPropAsBoolean(MRCompactor.COMPACTION_RECOMPACT_FROM_DEST_PATHS,
        MRCompactor.DEFAULT_COMPACTION_RECOMPACT_FROM_DEST_PATHS);
  }
protected double getDatasetPriority(String datasetName) {
double priority = LOW_PRIORITY;
if (DatasetFilterUtils.stringInPatterns(datasetName, this.highPriority)) {
priority = HIGH_PRIORITY;
} else if (DatasetFilterUtils.stringInPatterns(datasetName, this.normalPriority)) {
priority = NORMAL_PRIORITY;
}
return priority;
}
}
| 1,803 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/dataset/DatasetHelper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.dataset;
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.util.Collection;
import java.util.List;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import org.apache.gobblin.compaction.conditions.RecompactionCondition;
import org.apache.gobblin.compaction.conditions.RecompactionConditionFactory;
import org.apache.gobblin.compaction.mapreduce.MRCompactor;
import org.apache.gobblin.util.ClassAliasResolver;
import org.apache.gobblin.util.FileListUtils;
import org.apache.gobblin.util.RecordCountProvider;
import org.apache.gobblin.util.recordcount.LateFileRecordCountProvider;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
/**
* A class {@link DatasetHelper} which provides runtime metrics and other helper functions for a given dataset.
*
* The class also contains different recompaction conditions {@link RecompactionCondition}, which indicates if a
* recompaction is needed. These conditions will be examined by {@link org.apache.gobblin.compaction.mapreduce.MRCompactorJobRunner}
* after late data was found and copied from inputDir to outputLateDir.
*/
public class DatasetHelper {
private final FileSystem fs;
private final Dataset dataset;
private final RecordCountProvider inputRecordCountProvider;
private final RecordCountProvider outputRecordCountProvider;
private final LateFileRecordCountProvider lateInputRecordCountProvider;
private final LateFileRecordCountProvider lateOutputRecordCountProvider;
private final RecompactionCondition condition;
private final Collection<String> extensions;
private static final Logger logger = LoggerFactory.getLogger(DatasetHelper.class);
public DatasetHelper(Dataset dataset, FileSystem fs, Collection<String> extensions) {
this.extensions = extensions;
this.fs = fs;
this.dataset = dataset;
this.condition = createRecompactionCondition();
try {
this.inputRecordCountProvider = (RecordCountProvider) Class
.forName(this.dataset.jobProps().getProp(MRCompactor.COMPACTION_INPUT_RECORD_COUNT_PROVIDER,
MRCompactor.DEFAULT_COMPACTION_INPUT_RECORD_COUNT_PROVIDER))
.newInstance();
this.outputRecordCountProvider = (RecordCountProvider) Class
.forName(this.dataset.jobProps().getProp(MRCompactor.COMPACTION_OUTPUT_RECORD_COUNT_PROVIDER,
MRCompactor.DEFAULT_COMPACTION_OUTPUT_RECORD_COUNT_PROVIDER))
.newInstance();
this.lateInputRecordCountProvider = new LateFileRecordCountProvider(this.inputRecordCountProvider);
this.lateOutputRecordCountProvider = new LateFileRecordCountProvider(this.outputRecordCountProvider);
} catch (Exception e) {
throw new RuntimeException("Failed to instantiate RecordCountProvider", e);
}
}
public Dataset getDataset () {
return dataset;
}
private RecompactionCondition createRecompactionCondition () {
ClassAliasResolver<RecompactionConditionFactory> conditionClassAliasResolver = new ClassAliasResolver<>(RecompactionConditionFactory.class);
String factoryName = this.dataset.jobProps().getProp(MRCompactor.COMPACTION_RECOMPACT_CONDITION,
MRCompactor.DEFAULT_COMPACTION_RECOMPACT_CONDITION);
try {
RecompactionConditionFactory factory = GobblinConstructorUtils.invokeFirstConstructor(
conditionClassAliasResolver.resolveClass(factoryName), ImmutableList.of());
return factory.createRecompactionCondition(dataset);
} catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException | InstantiationException
| ClassNotFoundException e) {
throw new IllegalArgumentException(e);
}
}
public static List<Path> getApplicableFilePaths (FileSystem fs, Path dataDir, final Collection<String> extensions) throws IOException {
if (!fs.exists(dataDir)) {
return Lists.newArrayList();
}
List<Path> paths = Lists.newArrayList();
for (FileStatus fileStatus : FileListUtils.listFilesRecursively(fs, dataDir, new PathFilter() {
@Override
public boolean accept(Path path) {
for (String validExtention : extensions) {
if (path.getName().endsWith(validExtention)) {
return true;
}
}
return false;
}
})) {
paths.add(fileStatus.getPath());
}
return paths;
}
public List<Path> getApplicableFilePaths (Path dataDir) throws IOException {
return getApplicableFilePaths(fs, dataDir, Lists.newArrayList("avro"));
}
public Optional<DateTime> getEarliestLateFileModificationTime() {
DateTimeZone timeZone = DateTimeZone
.forID(this.dataset.jobProps().getProp(MRCompactor.COMPACTION_TIMEZONE, MRCompactor.DEFAULT_COMPACTION_TIMEZONE));
try {
long maxTimestamp = Long.MIN_VALUE;
for (FileStatus status : FileListUtils.listFilesRecursively(this.fs, this.dataset.outputLatePath())) {
maxTimestamp = Math.max(maxTimestamp, status.getModificationTime());
}
return maxTimestamp == Long.MIN_VALUE ? Optional.<DateTime>absent():Optional.of(new DateTime(maxTimestamp, timeZone));
} catch (Exception e) {
logger.error("Failed to get earliest late file modification time");
return Optional.absent();
}
}
public DateTime getCurrentTime() {
DateTimeZone timeZone = DateTimeZone
.forID(this.dataset.jobProps().getProp(MRCompactor.COMPACTION_TIMEZONE, MRCompactor.DEFAULT_COMPACTION_TIMEZONE));
DateTime currentTime = new DateTime(timeZone);
return currentTime;
}
public long getLateOutputRecordCount() {
long lateOutputRecordCount = 0l;
try {
Path outputLatePath = dataset.outputLatePath();
if (this.fs.exists(outputLatePath)) {
lateOutputRecordCount = this.lateOutputRecordCountProvider
.getRecordCount(this.getApplicableFilePaths(dataset.outputLatePath()));
}
} catch (Exception e) {
logger.error("Failed to get late record count:" + e, e);
}
return lateOutputRecordCount;
}
public long getOutputRecordCount() {
long outputRecordCount = 01;
try {
outputRecordCount = this.outputRecordCountProvider.
getRecordCount(this.getApplicableFilePaths(dataset.outputPath()));
return outputRecordCount;
} catch (Exception e) {
logger.error("Failed to submit late event count:" + e, e);
}
return outputRecordCount;
}
protected RecompactionCondition getCondition() {
return condition;
}
public long getLateOutputFileCount() {
long lateOutputFileCount = 0l;
try {
Path outputLatePath = dataset.outputLatePath();
if (this.fs.exists(outputLatePath)) {
lateOutputFileCount = getApplicableFilePaths(dataset.outputLatePath()).size();
logger.info("LateOutput File Count is : " + lateOutputFileCount + " at " + outputLatePath.toString());
}
} catch (Exception e) {
logger.error("Failed to get late file count from :" + e, e);
}
return lateOutputFileCount;
}
}
| 1,804 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/source/CompactionFailedTask.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.source;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.compaction.suite.CompactionSuite;
import org.apache.gobblin.compaction.suite.CompactionSuiteUtils;
import org.apache.gobblin.compaction.verify.CompactionVerifier;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.dataset.Dataset;
import org.apache.gobblin.runtime.TaskContext;
import org.apache.gobblin.runtime.task.FailedTask;
import org.apache.gobblin.runtime.task.TaskIFace;
/**
* A task which throws an exception when executed
* The exception contains dataset information
*/
@Slf4j
public class CompactionFailedTask extends FailedTask {
protected final CompactionSuite suite;
protected final Dataset dataset;
protected final String failedReason;
public CompactionFailedTask (TaskContext taskContext) {
super(taskContext);
this.suite = CompactionSuiteUtils.getCompactionSuiteFactory (taskContext.getTaskState()).
createSuite(taskContext.getTaskState());
this.dataset = this.suite.load(taskContext.getTaskState());
this.failedReason = taskContext.getTaskState().getProp(CompactionVerifier.COMPACTION_VERIFICATION_FAIL_REASON);
}
@Override
public void run() {
log.error ("Compaction job for " + dataset.datasetURN() + " is failed because of {}", failedReason);
this.workingState = WorkUnitState.WorkingState.FAILED;
}
public static class CompactionFailedTaskFactory extends FailedTaskFactory {
@Override
public TaskIFace createTask(TaskContext taskContext) {
return new CompactionFailedTask (taskContext);
}
}
}
| 1,805 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/source/CompactionSource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.source;
import java.io.IOException;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.UUID;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.LinkedBlockingDeque;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.commons.lang.exception.ExceptionUtils;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.joda.time.DateTimeUtils;
import com.google.common.base.Function;
import com.google.common.base.Optional;
import com.google.common.base.Stopwatch;
import com.google.common.collect.Iterators;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import lombok.AllArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.compaction.mapreduce.MRCompactionTaskFactory;
import org.apache.gobblin.compaction.mapreduce.MRCompactor;
import org.apache.gobblin.compaction.suite.CompactionSuite;
import org.apache.gobblin.compaction.suite.CompactionSuiteUtils;
import org.apache.gobblin.compaction.verify.CompactionVerifier;
import org.apache.gobblin.config.ConfigBuilder;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.data.management.dataset.DatasetUtils;
import org.apache.gobblin.data.management.dataset.DefaultFileSystemGlobFinder;
import org.apache.gobblin.data.management.dataset.SimpleDatasetRequest;
import org.apache.gobblin.data.management.dataset.SimpleDatasetRequestor;
import org.apache.gobblin.dataset.Dataset;
import org.apache.gobblin.dataset.DatasetsFinder;
import org.apache.gobblin.runtime.JobState;
import org.apache.gobblin.runtime.task.FailedTask;
import org.apache.gobblin.runtime.task.TaskUtils;
import org.apache.gobblin.source.Source;
import org.apache.gobblin.source.WorkUnitStreamSource;
import org.apache.gobblin.source.extractor.Extractor;
import org.apache.gobblin.source.workunit.BasicWorkUnitStream;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.source.workunit.WorkUnitStream;
import org.apache.gobblin.util.ClassAliasResolver;
import org.apache.gobblin.util.Either;
import org.apache.gobblin.util.ExecutorsUtils;
import org.apache.gobblin.util.HadoopUtils;
import org.apache.gobblin.util.executors.IteratorExecutor;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
import org.apache.gobblin.util.request_allocation.GreedyAllocator;
import org.apache.gobblin.util.request_allocation.HierarchicalAllocator;
import org.apache.gobblin.util.request_allocation.HierarchicalPrioritizer;
import org.apache.gobblin.util.request_allocation.RequestAllocator;
import org.apache.gobblin.util.request_allocation.RequestAllocatorConfig;
import org.apache.gobblin.util.request_allocation.RequestAllocatorUtils;
import org.apache.gobblin.util.request_allocation.ResourceEstimator;
import org.apache.gobblin.util.request_allocation.ResourcePool;
import static org.apache.gobblin.util.HadoopUtils.getSourceFileSystem;
/**
* A compaction source derived from {@link Source} which uses {@link DefaultFileSystemGlobFinder} to find all
* {@link Dataset}s. Use {@link CompactionSuite#getDatasetsFinderVerifiers()} to guarantee a given dataset has passed
* all verification. Each found dataset will be serialized to {@link WorkUnit} by {@link CompactionSuite#save(Dataset, State)}
*/
@Slf4j
public class CompactionSource implements WorkUnitStreamSource<String, String> {
  public static final String COMPACTION_INIT_TIME = "compaction.init.time";
  private CompactionSuite suite;
  private Path tmpJobDir;
  private FileSystem fs;
  private RequestAllocator<SimpleDatasetRequest> allocator;

  @Override
  public List<WorkUnit> getWorkunits(SourceState state) {
    throw new UnsupportedOperationException("Please use getWorkunitStream");
  }

  /**
   * Finds all datasets, prioritizes them, and streams one {@link WorkUnit} per verified
   * dataset. Verification runs asynchronously in a single generator thread; the returned
   * stream blocks until work units arrive or generation completes.
   */
  @Override
  public WorkUnitStream getWorkunitStream(SourceState state) {
    try {
      fs = getSourceFileSystem(state);
      DatasetsFinder<Dataset> finder = DatasetUtils.instantiateDatasetFinder(state.getProperties(),
          fs, DefaultFileSystemGlobFinder.class.getName());
      List<Dataset> datasets = finder.findDatasets();
      CompactionWorkUnitIterator workUnitIterator = new CompactionWorkUnitIterator();
      if (datasets.size() == 0) {
        workUnitIterator.done();
        return new BasicWorkUnitStream.Builder(workUnitIterator).build();
      }
      // initialize iff datasets are found
      initCompactionSource(state);
      // Spawn a single thread to create work units
      new Thread(new SingleWorkUnitGeneratorService(state, prioritize(datasets, state), workUnitIterator), "SingleWorkUnitGeneratorService").start();
      return new BasicWorkUnitStream.Builder(workUnitIterator).build();
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }

  /**
   * A work unit generator service will do the following:
   * 1) Convert dataset iterator to verification callable iterator, each callable element is a verification procedure
   * 2) Use {@link IteratorExecutor} to execute callable iterator
   * 3) Collect all failed datasets at step 2), retry them until timeout. Once timeout create failed workunits on purpose.
   */
  private class SingleWorkUnitGeneratorService implements Runnable {
    private SourceState state;
    private List<Dataset> datasets;
    private CompactionWorkUnitIterator workUnitIterator;
    private IteratorExecutor executor;

    public SingleWorkUnitGeneratorService (SourceState state, List<Dataset> datasets, CompactionWorkUnitIterator workUnitIterator) {
      this.state = state;
      this.datasets = datasets;
      this.workUnitIterator = workUnitIterator;
    }

    public void run() {
      try {
        Stopwatch stopwatch = Stopwatch.createStarted();
        int threads = this.state.getPropAsInt(CompactionVerifier.COMPACTION_VERIFICATION_THREADS, 5);
        long timeOutInMinute = this.state.getPropAsLong(CompactionVerifier.COMPACTION_VERIFICATION_TIMEOUT_MINUTES, 30);
        long iterationCountLimit = this.state.getPropAsLong(CompactionVerifier.COMPACTION_VERIFICATION_ITERATION_COUNT_LIMIT, 100);
        long iteration = 0;
        Map<String, String> failedReasonMap = null;
        // Retry loop: verify all remaining datasets each round; datasets whose
        // verification failed retriably are carried into the next round.
        while (datasets.size() > 0 && iteration++ < iterationCountLimit) {
          Iterator<Callable<VerifiedDataset>> verifierIterator =
              Iterators.transform (datasets.iterator(), new Function<Dataset, Callable<VerifiedDataset>>() {
                @Override
                public Callable<VerifiedDataset> apply(Dataset dataset) {
                  return new DatasetVerifier (dataset, workUnitIterator, suite.getDatasetsFinderVerifiers(), state);
                }
              });
          executor = new IteratorExecutor<>(verifierIterator, threads,
              ExecutorsUtils.newThreadFactory(Optional.of(log), Optional.of("Verifier-compaction-dataset-pool-%d")));
          List<Dataset> failedDatasets = Lists.newArrayList();
          failedReasonMap = Maps.newHashMap();
          List<Either<VerifiedDataset, ExecutionException>> futures = executor.executeAndGetResults();
          for (Either<VerifiedDataset, ExecutionException> either: futures) {
            if (either instanceof Either.Right) {
              // Verification threw: record and retry this dataset.
              ExecutionException exc = ((Either.Right<VerifiedDataset, ExecutionException>) either).getRight();
              DatasetVerificationException dve = (DatasetVerificationException) exc.getCause();
              failedDatasets.add(dve.dataset);
              failedReasonMap.put(dve.dataset.getUrn(), ExceptionUtils.getFullStackTrace(dve.cause));
            } else {
              VerifiedDataset vd = ((Either.Left<VerifiedDataset, ExecutionException>) either).getLeft();
              if (!vd.verifiedResult.allVerificationPassed) {
                if (vd.verifiedResult.shouldRetry) {
                  log.debug ("Dataset {} verification has failure but should retry", vd.dataset.datasetURN());
                  failedDatasets.add(vd.dataset);
                  failedReasonMap.put(vd.dataset.getUrn(), vd.verifiedResult.failedReason);
                } else {
                  log.debug ("Dataset {} verification has failure but no need to retry", vd.dataset.datasetURN());
                }
              }
            }
          }
          this.datasets = prioritize(failedDatasets, state);
          if (stopwatch.elapsed(TimeUnit.MINUTES) > timeOutInMinute) {
            break;
          }
        }
        if (this.datasets.size() > 0) {
          for (Dataset dataset: datasets) {
            log.info ("{} is timed out and give up the verification, adding a failed task", dataset.datasetURN());
            // create failed task for these failed datasets
            this.workUnitIterator.addWorkUnit(createWorkUnitForFailure(dataset, failedReasonMap.get(dataset.getUrn())));
          }
        }
        this.workUnitIterator.done();
      } catch (RuntimeException e) {
        throw e;
      } catch (Exception e) {
        throw new RuntimeException(e);
      }
    }
  }

  /**
   * An non-extensible init method for {@link CompactionSource}, while it leaves
   * extensible {@link #optionalInit(SourceState)} to derived class to adding customized initialization.
   *
   * Comparing to make this method protected directly, this approach is less error-prone since all initialization
   * happening inside {@link #initCompactionSource(SourceState)} is compulsory.
   */
  private void initCompactionSource(SourceState state) throws IOException {
    state.setProp(COMPACTION_INIT_TIME, DateTimeUtils.currentTimeMillis());
    suite = CompactionSuiteUtils.getCompactionSuiteFactory(state).createSuite(state);
    initRequestAllocator(state);
    initJobDir(state);
    copyJarDependencies(state);
    optionalInit(state);
  }

  /** Hook for subclasses to add custom initialization; no-op by default. */
  protected void optionalInit(SourceState state) {
    // do nothing.
  }

  /**
   * Builds the {@link RequestAllocator} used to prioritize datasets. Falls back to a
   * {@link GreedyAllocator} when no prioritizer alias is configured.
   */
  private void initRequestAllocator (State state) {
    try {
      ResourceEstimator estimator = GobblinConstructorUtils.<ResourceEstimator>invokeLongestConstructor(
          new ClassAliasResolver(ResourceEstimator.class).resolveClass(state.getProp(ConfigurationKeys.COMPACTION_ESTIMATOR,
              SimpleDatasetRequest.SimpleDatasetCountEstimator.class.getName())));
      RequestAllocatorConfig.Builder<SimpleDatasetRequest> configBuilder =
          RequestAllocatorConfig.builder(estimator).allowParallelization(1).withLimitedScopeConfig(ConfigBuilder.create()
              .loadProps(state.getProperties(), ConfigurationKeys.COMPACTION_PRIORITIZATION_PREFIX).build());
      if (!state.contains(ConfigurationKeys.COMPACTION_PRIORITIZER_ALIAS)) {
        allocator = new GreedyAllocator<>(configBuilder.build());
        return;
      }
      Comparator<SimpleDatasetRequest> prioritizer = GobblinConstructorUtils.<Comparator>invokeLongestConstructor(
          new ClassAliasResolver(Comparator.class).resolveClass(state.getProp(ConfigurationKeys.COMPACTION_PRIORITIZER_ALIAS)), state);
      configBuilder.withPrioritizer(prioritizer);
      if (prioritizer instanceof HierarchicalPrioritizer) {
        allocator = new HierarchicalAllocator.Factory().createRequestAllocator(configBuilder.build());
      } else {
        allocator = RequestAllocatorUtils.inferFromConfig(configBuilder.build());
      }
    } catch (RuntimeException e) {
      throw e;
    } catch (Exception e) {
      throw new RuntimeException("Cannot initialize allocator", e);
    }
  }

  /**
   * Orders datasets by priority and caps the result at the configured maximum dataset count.
   */
  private List<Dataset> prioritize (List<Dataset> datasets, State state) {
    double maxPool = state.getPropAsDouble(MRCompactor.COMPACTION_DATASETS_MAX_COUNT, MRCompactor.DEFUALT_COMPACTION_DATASETS_MAX_COUNT);
    ResourcePool pool = ResourcePool.builder().maxResource(SimpleDatasetRequest.SIMPLE_DATASET_COUNT_DIMENSION, maxPool).build();
    Iterator<Dataset> newList = Iterators.transform(
        this.allocator.allocateRequests(datasets.stream().map(SimpleDatasetRequestor::new).iterator(), pool), (input) -> input.getDataset());
    return Lists.newArrayList(newList);
  }

  /** Wraps a verification failure together with the dataset that caused it. */
  private static class DatasetVerificationException extends Exception {
    private Dataset dataset;
    private Throwable cause;

    public DatasetVerificationException (Dataset dataset, Throwable cause) {
      super ("Dataset:" + dataset.datasetURN() + " Exception:" + cause);
      this.dataset = dataset;
      this.cause = cause;
    }
  }

  @AllArgsConstructor
  private static class VerifiedDataset {
    private Dataset dataset;
    private VerifiedResult verifiedResult;
  }

  @AllArgsConstructor
  private static class VerifiedResult {
    private boolean allVerificationPassed;
    private boolean shouldRetry;
    private String failedReason;
  }

  @AllArgsConstructor
  private class DatasetVerifier implements Callable {
    private Dataset dataset;
    private CompactionWorkUnitIterator workUnitIterator;
    private List<CompactionVerifier> verifiers;
    private State state;

    /**
     * {@link VerifiedDataset} wraps original {@link Dataset} because if verification failed, we are able get original
     * datasets and restart the entire process of verification against those failed datasets.
     */
    public VerifiedDataset call() throws DatasetVerificationException {
      try {
        VerifiedResult result = this.verify(dataset);
        if (result.allVerificationPassed) {
          this.workUnitIterator.addWorkUnit(createWorkUnit(dataset, state));
        }
        return new VerifiedDataset(dataset, result);
      } catch (Exception e) {
        throw new DatasetVerificationException(dataset, e);
      }
    }

    public VerifiedResult verify (Dataset dataset) throws Exception {
      boolean verificationPassed = true;
      boolean shouldRetry = true;
      String failedReason = "";
      if (verifiers != null) {
        for (CompactionVerifier verifier : verifiers) {
          CompactionVerifier.Result rst = verifier.verify (dataset);
          if (!rst.isSuccessful()) {
            verificationPassed = false;
            failedReason = rst.getFailureReason();
            // Not all verification should be retried. Below are verifications which
            // doesn't need retry. If any of then failed, we simply skip this dataset.
            if (!verifier.isRetriable()) {
              shouldRetry = false;
              break;
            }
          }
        }
      }
      return new VerifiedResult(verificationPassed, shouldRetry, failedReason);
    }
  }

  /**
   * Iterator that provides {@link WorkUnit}s for all verified {@link Dataset}s
   */
  private static class CompactionWorkUnitIterator implements Iterator<WorkUnit> {
    private LinkedBlockingDeque<WorkUnit> workUnits;
    private WorkUnit last;
    private AtomicBoolean isDone;

    /**
     * Constructor
     */
    public CompactionWorkUnitIterator () {
      this.workUnits = new LinkedBlockingDeque<>();
      this.isDone = new AtomicBoolean(false);
      this.last = null;
    }

    /**
     * Check if any {@link WorkUnit} is available. The producer is {@link SingleWorkUnitGeneratorService}
     * @return true when a new {@link WorkUnit} is available
     *         false when {@link CompactionWorkUnitIterator#isDone} is invoked
     */
    public boolean hasNext () {
      try {
        while (true) {
          if (last != null) return true;
          if (this.isDone.get() && this.workUnits.isEmpty()) return false;
          this.last = this.workUnits.poll(1, TimeUnit.SECONDS);
        }
      } catch (InterruptedException e) {
        // Bug fix: restore the interrupt status so callers up the stack can observe the
        // interruption; also log the exception itself instead of only its toString().
        Thread.currentThread().interrupt();
        log.error("CompactionWorkUnitIterator interrupted while waiting for work units", e);
        return false;
      }
    }

    /**
     * Stops the iteration so that {@link CompactionWorkUnitIterator#hasNext()} returns false
     */
    public void done () {
      this.isDone.set(true);
    }

    /**
     * Obtain next available {@link WorkUnit}.
     * The method will first query if any work unit is available by calling {@link CompactionWorkUnitIterator#hasNext()}
     * Because {@link CompactionWorkUnitIterator#hasNext()} is a blocking call, this method can also be blocked.
     */
    public WorkUnit next () {
      if (hasNext()) {
        if (last != null) {
          WorkUnit tmp = last;
          last = null;
          return tmp;
        } else {
          throw new IllegalStateException("last variable cannot be empty");
        }
      }
      throw new NoSuchElementException("work units queue has been exhausted");
    }

    public void remove() {
      throw new UnsupportedOperationException("No remove supported on " + this.getClass().getName());
    }

    protected void addWorkUnit (WorkUnit wu) {
      this.workUnits.add(wu);
    }
  }

  /**
   * Serializes a verified dataset into a {@link WorkUnit} driven by the MR compaction task
   * factory, propagating the job name/id when present.
   */
  protected WorkUnit createWorkUnit(Dataset dataset, State state) throws IOException {
    WorkUnit workUnit = WorkUnit.createEmpty();
    TaskUtils.setTaskFactoryClass(workUnit, MRCompactionTaskFactory.class);
    suite.save(dataset, workUnit);
    workUnit.setProp(ConfigurationKeys.DATASET_URN_KEY, dataset.getUrn());
    if (state.contains(ConfigurationKeys.JOB_NAME_KEY)) {
      workUnit.setProp(ConfigurationKeys.JOB_NAME_KEY, state.getProp(ConfigurationKeys.JOB_NAME_KEY));
    }
    if (state.contains(ConfigurationKeys.JOB_ID_KEY)) {
      workUnit.setProp(ConfigurationKeys.JOB_ID_KEY, state.getProp(ConfigurationKeys.JOB_ID_KEY));
    }
    return workUnit;
  }

  /** Creates a failure work unit for a dataset with no recorded failure reason. */
  protected WorkUnit createWorkUnitForFailure (Dataset dataset) throws IOException {
    WorkUnit workUnit = new FailedTask.FailedWorkUnit();
    TaskUtils.setTaskFactoryClass(workUnit, CompactionFailedTask.CompactionFailedTaskFactory.class);
    workUnit.setProp(ConfigurationKeys.DATASET_URN_KEY, dataset.getUrn());
    suite.save(dataset, workUnit);
    return workUnit;
  }

  /** Creates a failure work unit for a dataset, recording the verification failure reason. */
  protected WorkUnit createWorkUnitForFailure (Dataset dataset, String reason) throws IOException {
    WorkUnit workUnit = new FailedTask.FailedWorkUnit();
    workUnit.setProp(CompactionVerifier.COMPACTION_VERIFICATION_FAIL_REASON, reason);
    TaskUtils.setTaskFactoryClass(workUnit, CompactionFailedTask.CompactionFailedTaskFactory.class);
    workUnit.setProp(ConfigurationKeys.DATASET_URN_KEY, dataset.getUrn());
    suite.save(dataset, workUnit);
    return workUnit;
  }

  @Override
  public Extractor getExtractor (WorkUnitState state) throws IOException {
    throw new UnsupportedOperationException();
  }

  @Override
  public void shutdown (SourceState state) {
    try {
      boolean f = fs.delete(this.tmpJobDir, true);
      log.info("Job dir is removed from {} with status {}", this.tmpJobDir, f);
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }

  /**
   * Create a temporary job directory based on job id or (if not available) UUID
   */
  private void initJobDir (SourceState state) throws IOException {
    String tmpBase = state.getProp(MRCompactor.COMPACTION_TMP_DEST_DIR, MRCompactor.DEFAULT_COMPACTION_TMP_DEST_DIR);
    String jobId;
    if (state instanceof JobState) {
      jobId = ((JobState) state).getJobId();
    } else {
      jobId = UUID.randomUUID().toString();
    }
    this.tmpJobDir = new Path (tmpBase, jobId);
    this.fs.mkdirs(this.tmpJobDir);
    state.setProp (MRCompactor.COMPACTION_JOB_DIR, tmpJobDir.toString());
    log.info ("Job dir is created under {}", this.tmpJobDir);
  }

  /**
   * Copy dependent jars to a temporary job directory on HDFS
   */
  private void copyJarDependencies (State state) throws IOException {
    if (this.tmpJobDir == null) {
      throw new RuntimeException("Job directory is not created");
    }
    if (!state.contains(ConfigurationKeys.JOB_JAR_FILES_KEY)) {
      return;
    }
    // create sub-dir to save jar files
    LocalFileSystem lfs = FileSystem.getLocal(HadoopUtils.getConfFromState(state));
    Path tmpJarFileDir = new Path(this.tmpJobDir, MRCompactor.COMPACTION_JAR_SUBDIR);
    this.fs.mkdirs(tmpJarFileDir);
    state.setProp(MRCompactor.COMPACTION_JARS, tmpJarFileDir.toString());
    // copy jar files to hdfs
    for (String jarFile : state.getPropAsList(ConfigurationKeys.JOB_JAR_FILES_KEY)) {
      for (FileStatus status : lfs.globStatus(new Path(jarFile))) {
        Path tmpJarFile = new Path(this.fs.makeQualified(tmpJarFileDir), status.getPath().getName());
        this.fs.copyFromLocalFile(status.getPath(), tmpJarFile);
        log.info(String.format("%s will be added to classpath", tmpJarFile));
      }
    }
  }
}
| 1,806 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/parser/CompactionPathParser.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.parser;
import org.apache.commons.lang.StringUtils;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import lombok.AllArgsConstructor;
import lombok.Getter;
import lombok.Setter;
import org.apache.gobblin.compaction.mapreduce.MRCompactor;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.dataset.FileSystemDataset;
/**
 * A parser which converts a {@link FileSystemDataset} into a {@link CompactionParserResult},
 * splitting the dataset path into source/destination base and sub directories, the dataset
 * name, and the time partition embedded in the path.
 */
@AllArgsConstructor
public class CompactionPathParser {
  State state;

  /**
   * A parsed result returned by {@link CompactionPathParser#parse(FileSystemDataset)}
   */
  public static class CompactionParserResult {
    @Getter @Setter
    private String srcBaseDir;
    @Getter @Setter
    private String dstBaseDir;
    @Getter @Setter
    private String srcSubDir;
    @Getter @Setter
    private String dstSubDir;
    @Getter
    private DateTime time;
    @Getter
    private String timeString;
    @Getter
    private String datasetName;
    @Getter
    private String dstAbsoluteDir;
  }

  /**
   * Parse a {@link FileSystemDataset} to some detailed parts like source base directory,
   * source sub directory, destination based directory, destination sub directory, and time
   * information.
   */
  public CompactionParserResult parse (FileSystemDataset dataset) {
    CompactionParserResult result = new CompactionParserResult();
    result.srcBaseDir = getSrcBaseDir (state);
    result.srcSubDir = getSrcSubDir (state);
    result.dstBaseDir = getDstBaseDir (state);
    result.dstSubDir = getDstSubDir (state);
    parseTimeAndDatasetName(dataset, result);
    // Destination layout: <dstBaseDir>/<datasetName>/<dstSubDir>/<timeString>
    result.dstAbsoluteDir = Joiner.on("/").join (result.dstBaseDir,
        result.datasetName,
        result.dstSubDir,
        result.timeString);
    return result;
  }

  /**
   * Extracts the dataset name and the time partition from the dataset URN, which is expected
   * to look like {@code <srcBaseDir>/<datasetName>/<srcSubDir>/<timeString>}.
   *
   * @throws StringIndexOutOfBoundsException if the source sub-dir is not present in the URN
   */
  private void parseTimeAndDatasetName (FileSystemDataset dataset, CompactionParserResult rst) {
    String commonBase = rst.getSrcBaseDir();
    String fullPath = dataset.datasetURN();
    int startPos = fullPath.indexOf(commonBase) + commonBase.length();
    String relative = StringUtils.removeStart(fullPath.substring(startPos), "/");
    int delimiterStart = StringUtils.indexOf(relative, rst.getSrcSubDir());
    if (delimiterStart == -1) {
      // Keep the original exception type but explain what failed instead of throwing blind.
      throw new StringIndexOutOfBoundsException(String.format(
          "Source sub-dir '%s' not found in dataset path '%s'", rst.getSrcSubDir(), fullPath));
    }
    int delimiterEnd = relative.indexOf("/", delimiterStart);
    String datasetName = StringUtils.removeEnd(relative.substring(0, delimiterStart), "/");
    String timeString = StringUtils.removeEnd(relative.substring(delimiterEnd + 1), "/");
    rst.datasetName = datasetName;
    rst.timeString = timeString;
    rst.time = getTime (timeString);
  }

  /**
   * Parses the time partition string. Supports hourly ({@code YYYY/MM/dd/HH}) and daily
   * ({@code YYYY/MM/dd}) layouts, determined by the number of '/' separators.
   *
   * @throws IllegalArgumentException if the string matches neither supported layout
   */
  private DateTime getTime (String timeString) {
    DateTimeZone timeZone = DateTimeZone.forID(MRCompactor.DEFAULT_COMPACTION_TIMEZONE);
    int splits = StringUtils.countMatches(timeString, "/");
    String timePattern;
    if (splits == 3) {
      timePattern = "YYYY/MM/dd/HH";
    } else if (splits == 2) {
      timePattern = "YYYY/MM/dd";
    } else {
      // Previously this fell through with an empty pattern, producing a cryptic joda-time
      // IllegalArgumentException; fail fast with the same exception type but a clear message.
      throw new IllegalArgumentException("Unsupported time partition format: " + timeString);
    }
    DateTimeFormatter timeFormatter = DateTimeFormat.forPattern(timePattern).withZone(timeZone);
    return timeFormatter.parseDateTime (timeString);
  }

  /** Returns the required source base dir from {@link MRCompactor#COMPACTION_INPUT_DIR}. */
  private String getSrcBaseDir(State state) {
    Preconditions.checkArgument(state.contains(MRCompactor.COMPACTION_INPUT_DIR),
        "Missing required property " + MRCompactor.COMPACTION_INPUT_DIR);
    return state.getProp(MRCompactor.COMPACTION_INPUT_DIR);
  }

  /** Returns the required source sub dir from {@link MRCompactor#COMPACTION_INPUT_SUBDIR}. */
  private String getSrcSubDir(State state) {
    Preconditions.checkArgument(state.contains(MRCompactor.COMPACTION_INPUT_SUBDIR),
        "Missing required property " + MRCompactor.COMPACTION_INPUT_SUBDIR);
    return state.getProp(MRCompactor.COMPACTION_INPUT_SUBDIR);
  }

  /** Returns the required destination base dir from {@link MRCompactor#COMPACTION_DEST_DIR}. */
  private String getDstBaseDir(State state) {
    Preconditions.checkArgument(state.contains(MRCompactor.COMPACTION_DEST_DIR),
        "Missing required property " + MRCompactor.COMPACTION_DEST_DIR);
    return state.getProp(MRCompactor.COMPACTION_DEST_DIR);
  }

  /** Returns the required destination sub dir from {@link MRCompactor#COMPACTION_DEST_SUBDIR}. */
  private String getDstSubDir(State state) {
    Preconditions.checkArgument(state.contains(MRCompactor.COMPACTION_DEST_SUBDIR),
        "Missing required property " + MRCompactor.COMPACTION_DEST_SUBDIR);
    return state.getProp(MRCompactor.COMPACTION_DEST_SUBDIR);
  }
}
| 1,807 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/action/CompactionCompleteFileOperationAction.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.action;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import java.io.IOException;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import lombok.AllArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.compaction.event.CompactionSlaEventHelper;
import org.apache.gobblin.compaction.mapreduce.CompactionJobConfigurator;
import org.apache.gobblin.compaction.mapreduce.MRCompactor;
import org.apache.gobblin.compaction.mapreduce.MRCompactorJobRunner;
import org.apache.gobblin.compaction.mapreduce.RecordKeyDedupReducerBase;
import org.apache.gobblin.compaction.mapreduce.RecordKeyMapperBase;
import org.apache.gobblin.compaction.parser.CompactionPathParser;
import org.apache.gobblin.compaction.source.CompactionSource;
import org.apache.gobblin.compaction.verify.InputRecordCountHelper;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.dataset.FileSystemDataset;
import org.apache.gobblin.metrics.event.EventSubmitter;
import org.apache.gobblin.util.HadoopUtils;
import org.apache.gobblin.util.PathUtils;
import org.apache.gobblin.util.WriterUtils;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.Job;
import static org.apache.gobblin.compaction.event.CompactionSlaEventHelper.*;
/**
 * A type of post action {@link CompactionCompleteAction} which focus on the file operations:
 * it moves the map-reduce output into the destination directory (either appending to it or
 * replacing it) and records per-execution record counts in a compaction state file.
 */
@Slf4j
@AllArgsConstructor
public class CompactionCompleteFileOperationAction implements CompactionCompleteAction<FileSystemDataset> {

  // Sub-directory name pattern used when each re-compaction execution writes to a new folder.
  public final static String COMPACTION_DIRECTORY_FORMAT = "/compaction_%s";
  protected WorkUnitState state;
  private CompactionJobConfigurator configurator;
  private InputRecordCountHelper helper;
  private EventSubmitter eventSubmitter;
  private FileSystem fs;

  /**
   * @param state must be a {@link WorkUnitState}; rejected otherwise
   * @param configurator the configurator of the completed MR job; also supplies the file system
   * @param helper used to read/write record and execution counts at the destination
   */
  public CompactionCompleteFileOperationAction(State state, CompactionJobConfigurator configurator, InputRecordCountHelper helper) {
    if (!(state instanceof WorkUnitState)) {
      throw new UnsupportedOperationException(this.getClass().getName() + " only supports workunit state");
    }
    this.state = (WorkUnitState) state;
    this.helper = helper;
    this.configurator = configurator;
    this.fs = configurator.getFs();
  }

  /** Convenience constructor that builds a fresh {@link InputRecordCountHelper} from the state. */
  public CompactionCompleteFileOperationAction(State state, CompactionJobConfigurator configurator) {
    this(state, configurator, new InputRecordCountHelper(state));
  }

  /**
   * Replace or append the destination folder with new files from map-reduce job
   * Create a record count file containing the number of records that have been processed .
   */
  public void onCompactionJobComplete(FileSystemDataset dataset) throws IOException {
    if (dataset.isVirtual()) {
      // Virtual datasets have no physical output to move.
      return;
    }
    // Only act when the configurator actually created and ran an MR job for this dataset.
    if (configurator != null && configurator.isJobCreated()) {
      CompactionPathParser.CompactionParserResult result = new CompactionPathParser(state).parse(dataset);
      Path tmpPath = configurator.getMrOutputPath();
      Path dstPath = new Path(result.getDstAbsoluteDir());

      // this is append delta mode due to the compaction rename source dir mode being enabled
      boolean appendDeltaOutput = this.state.getPropAsBoolean(MRCompactor.COMPACTION_RENAME_SOURCE_DIR_ENABLED,
          MRCompactor.DEFAULT_COMPACTION_RENAME_SOURCE_DIR_ENABLED);

      Job job = this.configurator.getConfiguredJob();

      long newTotalRecords = 0;
      long oldTotalRecords = helper.readRecordCount(new Path(result.getDstAbsoluteDir()));
      long executionCount = helper.readExecutionCount(new Path(result.getDstAbsoluteDir()));

      // "Good" files are the MR output files with the expected extension (skips e.g. _SUCCESS).
      List<Path> goodPaths = CompactionJobConfigurator.getGoodFiles(job, tmpPath, this.fs,
          ImmutableList.of(configurator.getFileExtension()));

      HashSet<Path> outputFiles = new HashSet<>();
      if (appendDeltaOutput) {
        FsPermission permission =
            HadoopUtils.deserializeFsPermission(this.state, MRCompactorJobRunner.COMPACTION_JOB_OUTPUT_DIR_PERMISSION,
                FsPermission.getDefault());
        WriterUtils.mkdirsWithRecursivePermission(this.fs, dstPath, permission);
        // append files under mr output to destination
        for (Path filePath : goodPaths) {
          String fileName = filePath.getName();
          log.info(String.format("Adding %s to %s", filePath.toString(), dstPath));
          Path outPath = new Path(dstPath, fileName);

          if (!this.fs.rename(filePath, outPath)) {
            throw new IOException(String.format("Unable to move %s to %s", filePath.toString(), outPath.toString()));
          }
        }

        // Obtain record count from input file names.
        // We don't get record count from map-reduce counter because in the next run, the threshold (delta record)
        // calculation is based on the input file names. By pre-defining which input folders are involved in the
        // MR execution, it is easy to track how many files are involved in MR so far, thus calculating the number of total records
        // (all previous run + current run) is possible.
        newTotalRecords = this.configurator.getFileNameRecordCount();
      } else {
        if (state.getPropAsBoolean(ConfigurationKeys.RECOMPACTION_WRITE_TO_NEW_FOLDER, false)) {
          // Each execution gets its own numbered sub-folder; remember the previous one as "old".
          Path oldFilePath =
              PathUtils.mergePaths(dstPath, new Path(String.format(COMPACTION_DIRECTORY_FORMAT, executionCount)));
          dstPath =
              PathUtils.mergePaths(dstPath, new Path(String.format(COMPACTION_DIRECTORY_FORMAT, executionCount + 1)));
          this.configurator.getOldFiles().add(this.fs.makeQualified(oldFilePath).toString());
          //Write to a new path, no need to delete the old path
        } else {
          // Replace mode: the existing destination is removed before the MR output is moved in.
          this.configurator.getOldFiles().add(this.fs.makeQualified(dstPath).toString());
          this.fs.delete(dstPath, true);
        }

        FsPermission permission =
            HadoopUtils.deserializeFsPermission(this.state, MRCompactorJobRunner.COMPACTION_JOB_OUTPUT_DIR_PERMISSION,
                FsPermission.getDefault());

        WriterUtils.mkdirsWithRecursivePermission(this.fs, dstPath.getParent(), permission);
        if (!this.fs.rename(tmpPath, dstPath)) {
          throw new IOException(String.format("Unable to move %s to %s", tmpPath, dstPath));
        }

        // Obtain record count from map reduce job counter
        // We don't get record count from file name because tracking which files are actually involved in the MR execution can
        // be hard. This is due to new minutely data is rolled up to hourly folder but from daily compaction perspective we are not
        // able to tell which file are newly added (because we simply pass all hourly folders to MR job instead of individual files).
        Counter counter = job.getCounters().findCounter(RecordKeyMapperBase.EVENT_COUNTER.RECORD_COUNT);
        newTotalRecords = counter.getValue();
      }

      // Record the final locations of the output files for downstream actions.
      final Path finalDstPath = dstPath;
      goodPaths.stream().forEach(p -> {
        String fileName = p.getName();
        outputFiles.add(new Path(finalDstPath, fileName));
      });
      this.configurator.setDstNewFiles(outputFiles);

      // Persist per-execution bookkeeping (record counts, execution count, dedup count, job id)
      // into the compaction state stored at the destination.
      State compactionState = helper.loadState(new Path(result.getDstAbsoluteDir()));
      if (executionCount != 0) {
        // Snapshot the previous execution's totals under keys suffixed with its execution number.
        compactionState.setProp(CompactionSlaEventHelper.RECORD_COUNT_TOTAL + Long.toString(executionCount),
            Long.toString(helper.readRecordCount(new Path(result.getDstAbsoluteDir()))));
        compactionState.setProp(CompactionSlaEventHelper.EXEC_COUNT_TOTAL + Long.toString(executionCount),
            Long.toString(executionCount));
        compactionState.setProp(DUPLICATE_COUNT_TOTAL + Long.toString(executionCount),
            compactionState.getProp(DUPLICATE_COUNT_TOTAL, "null"));
      }
      if (state.getPropAsBoolean(ConfigurationKeys.GOBBLIN_METADATA_CHANGE_EVENT_ENABLED, false)) {
        //GMCE enabled, set the key to be false to indicate that GMCE has not been sent yet
        compactionState.setProp(CompactionGMCEPublishingAction.GMCE_EMITTED_KEY, false);
      }
      compactionState.setProp(CompactionSlaEventHelper.RECORD_COUNT_TOTAL, Long.toString(newTotalRecords));
      compactionState.setProp(CompactionSlaEventHelper.EXEC_COUNT_TOTAL, Long.toString(executionCount + 1));
      compactionState.setProp(CompactionSlaEventHelper.MR_JOB_ID,
          this.configurator.getConfiguredJob().getJobID().toString());
      compactionState.setProp(DUPLICATE_COUNT_TOTAL,
          job.getCounters().findCounter(RecordKeyDedupReducerBase.EVENT_COUNTER.DEDUPED).getValue());
      compactionState.setProp(CompactionSlaEventHelper.LAST_RUN_START_TIME,
          this.state.getProp(CompactionSource.COMPACTION_INIT_TIME));
      helper.saveState(new Path(result.getDstAbsoluteDir()), compactionState);

      log.info("duplicated records count for " + dstPath + " : " + compactionState.getProp(DUPLICATE_COUNT_TOTAL));
      log.info("Updating record count from {} to {} in {} [{}]", oldTotalRecords, newTotalRecords, dstPath,
          executionCount + 1);

      // submit events for record count
      if (eventSubmitter != null) {
        Map<String, String> eventMetadataMap =
            ImmutableMap.of(CompactionSlaEventHelper.DATASET_URN, dataset.datasetURN(),
                CompactionSlaEventHelper.RECORD_COUNT_TOTAL, Long.toString(newTotalRecords),
                CompactionSlaEventHelper.PREV_RECORD_COUNT_TOTAL, Long.toString(oldTotalRecords),
                CompactionSlaEventHelper.EXEC_COUNT_TOTAL, Long.toString(executionCount + 1),
                CompactionSlaEventHelper.MR_JOB_ID, this.configurator.getConfiguredJob().getJobID().toString());
        this.eventSubmitter.submit(CompactionSlaEventHelper.COMPACTION_RECORD_COUNT_EVENT, eventMetadataMap);
      }
    }
  }

  /** Registers the {@link EventSubmitter} used to emit the record-count tracking event. */
  public void addEventSubmitter(EventSubmitter eventSubmitter) {
    this.eventSubmitter = eventSubmitter;
  }

  /** Returns the fully-qualified class name, used to identify this action. */
  public String getName() {
    return CompactionCompleteFileOperationAction.class.getName();
  }
}
| 1,808 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/action/CompactionMarkDirectoryAction.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.action;
import java.io.IOException;
import java.util.Collection;
import java.util.Map;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableMap;
import lombok.AllArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.compaction.event.CompactionSlaEventHelper;
import org.apache.gobblin.compaction.mapreduce.CompactionJobConfigurator;
import org.apache.gobblin.compaction.mapreduce.MRCompactor;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.dataset.FileSystemDataset;
import org.apache.gobblin.metrics.event.EventSubmitter;
/**
 * A {@link CompactionCompleteAction} which marks compacted source directories by renaming them
 * with {@link MRCompactor#COMPACTION_RENAME_SOURCE_DIR_SUFFIX} after a compaction job finishes,
 * so that later runs can tell which input directories were already compacted.
 */
@Slf4j
@AllArgsConstructor
public class CompactionMarkDirectoryAction implements CompactionCompleteAction<FileSystemDataset> {
  protected State state;
  private CompactionJobConfigurator configurator;
  private FileSystem fs;
  private EventSubmitter eventSubmitter;

  public CompactionMarkDirectoryAction(State state, CompactionJobConfigurator configurator) {
    if (!(state instanceof WorkUnitState)) {
      throw new UnsupportedOperationException(this.getClass().getName() + " only supports workunit state");
    }
    this.state = state;
    this.configurator = configurator;
    this.fs = configurator.getFs();
  }

  /**
   * Renames every map-reduce input path with the configured suffix when
   * {@link MRCompactor#COMPACTION_RENAME_SOURCE_DIR_ENABLED} is set, then emits a tracking
   * event listing the renamed paths.
   *
   * @throws IOException if any rename fails
   */
  public void onCompactionJobComplete (FileSystemDataset dataset) throws IOException {
    if (dataset.isVirtual()) {
      // Virtual datasets have no physical directories to mark.
      return;
    }
    boolean renamingRequired = this.state.getPropAsBoolean(MRCompactor.COMPACTION_RENAME_SOURCE_DIR_ENABLED,
        MRCompactor.DEFAULT_COMPACTION_RENAME_SOURCE_DIR_ENABLED);
    if (renamingRequired) {
      Collection<Path> paths = configurator.getMapReduceInputPaths();
      for (Path path: paths) {
        Path newPath = new Path (path.getParent(), path.getName() + MRCompactor.COMPACTION_RENAME_SOURCE_DIR_SUFFIX);
        log.info("[{}] Renaming {} to {}", dataset.datasetURN(), path, newPath);
        // Fail loudly on a failed rename instead of silently leaving the source dir unmarked,
        // which would cause the same data to be picked up and compacted again next run.
        // (Consistent with the rename handling in CompactionCompleteFileOperationAction.)
        if (!fs.rename(path, newPath)) {
          throw new IOException(String.format("Unable to rename %s to %s", path, newPath));
        }
      }
      // submit events if directory is renamed
      if (eventSubmitter != null) {
        Map<String, String> eventMetadataMap = ImmutableMap.of(CompactionSlaEventHelper.DATASET_URN, dataset.datasetURN(),
            CompactionSlaEventHelper.RENAME_DIR_PATHS, Joiner.on(',').join(paths));
        this.eventSubmitter.submit(CompactionSlaEventHelper.COMPACTION_MARK_DIR_EVENT, eventMetadataMap);
      }
    }
  }

  /** Registers the {@link EventSubmitter} used to emit the mark-directory tracking event. */
  public void addEventSubmitter(EventSubmitter submitter) {
    this.eventSubmitter = submitter;
  }
}
| 1,809 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/action/CompactionWatermarkAction.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.action;
import java.io.IOException;
import java.time.Instant;
import java.time.ZoneId;
import java.time.ZonedDateTime;
import java.util.Locale;

import org.apache.commons.lang.StringUtils;

import com.google.common.base.Optional;

import lombok.extern.slf4j.Slf4j;

import org.apache.gobblin.compaction.mapreduce.MRCompactor;
import org.apache.gobblin.compaction.parser.CompactionPathParser;
import org.apache.gobblin.compaction.verify.CompactionWatermarkChecker;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.data.management.copy.hive.HiveDatasetFinder;
import org.apache.gobblin.dataset.FileSystemDataset;
import org.apache.gobblin.hive.HiveRegister;
import org.apache.gobblin.hive.HiveTable;
import org.apache.gobblin.metrics.event.EventSubmitter;
import org.apache.gobblin.time.TimeIterator;
/**
 * The class publishes compaction watermarks, reported by {@link CompactionWatermarkChecker}, as hive table parameters.
 * It guarantees compaction watermark is updated continuously and errors out if there is a gap, which indicates a
 * compaction hole. At the time of writing, one should manually fill the compaction hole and update the existing
 * watermarks in hive table parameters to recover automatic watermark publish
 */
@Slf4j
public class CompactionWatermarkAction implements CompactionCompleteAction<FileSystemDataset> {

  public static final String CONF_PREFIX = "compactionWatermarkAction";
  public static final String GRANULARITY = CONF_PREFIX + ".granularity";
  public static final String DEFAULT_HIVE_DB = CONF_PREFIX + ".defaultHiveDb";

  private EventSubmitter submitter;
  private State state;
  // Hive db used when the dataset name does not carry a db of its own.
  private final String defaultHiveDb;
  // Granularity of one watermark step (e.g. HOUR, DAY); consecutive watermarks must differ by
  // exactly one unit of this granularity.
  private final TimeIterator.Granularity granularity;
  private final ZoneId zone;

  public CompactionWatermarkAction(State state) {
    this.state = state;
    defaultHiveDb = state.getProp(DEFAULT_HIVE_DB);
    // Locale.ROOT keeps the enum lookup locale-independent (e.g. avoids the Turkish dotless-i
    // problem that plain toUpperCase() has under some default locales).
    granularity = TimeIterator.Granularity.valueOf(state.getProp(GRANULARITY).toUpperCase(Locale.ROOT));
    zone = ZoneId.of(state.getProp(MRCompactor.COMPACTION_TIMEZONE, MRCompactor.DEFAULT_COMPACTION_TIMEZONE));
  }

  /**
   * Publishes the compaction and completion watermarks (if any were reported into the state by
   * {@link CompactionWatermarkChecker}) as parameters on the dataset's hive table. Skips
   * silently when no watermark was reported or the table does not exist.
   *
   * @throws IOException if closing the {@link HiveRegister} fails
   * @throws RuntimeException if a new watermark would leave a gap behind the published one
   */
  @Override
  public void onCompactionJobComplete(FileSystemDataset dataset)
      throws IOException {
    String compactionWatermark = state.getProp(CompactionWatermarkChecker.COMPACTION_WATERMARK);
    String completeCompactionWatermark = state.getProp(CompactionWatermarkChecker.COMPLETION_COMPACTION_WATERMARK);
    if (StringUtils.isEmpty(compactionWatermark) && StringUtils.isEmpty(completeCompactionWatermark)) {
      // Nothing reported for this run; nothing to publish.
      return;
    }

    CompactionPathParser.CompactionParserResult result = new CompactionPathParser(state).parse(dataset);
    HiveDatasetFinder.DbAndTable dbAndTable = extractDbTable(result.getDatasetName());
    String hiveDb = dbAndTable.getDb();
    String hiveTable = dbAndTable.getTable();

    try (HiveRegister hiveRegister = HiveRegister.get(state)) {
      Optional<HiveTable> tableOptional = hiveRegister.getTable(hiveDb, hiveTable);
      if (!tableOptional.isPresent()) {
        log.info("Table {}.{} not found. Skip publishing compaction watermarks", hiveDb, hiveTable);
        return;
      }

      HiveTable table = tableOptional.get();
      State tableProps = table.getProps();
      boolean shouldUpdate = mayUpdateWatermark(dataset, tableProps, CompactionWatermarkChecker.COMPACTION_WATERMARK, compactionWatermark);
      if (mayUpdateWatermark(dataset, tableProps, CompactionWatermarkChecker.COMPLETION_COMPACTION_WATERMARK,
          completeCompactionWatermark)) {
        shouldUpdate = true;
      }

      // Only issue an ALTER TABLE when at least one watermark actually advanced.
      if (shouldUpdate) {
        log.info("Alter table {}.{} to publish watermarks {}", hiveDb, hiveTable, tableProps);
        hiveRegister.alterTable(table);
      }
    }
  }

  /**
   * Update watermark if the new one is continuously higher than the existing one
   *
   * @return true iff {@code props} was modified
   * @throws RuntimeException if the new watermark skips ahead of the expected next step,
   *         which indicates a compaction hole that must be filled manually
   */
  private boolean mayUpdateWatermark(FileSystemDataset dataset, State props, String key, String newValue) {
    if (StringUtils.isEmpty(newValue)) {
      return false;
    }

    long existing = props.getPropAsLong(key, 0);
    if (existing == 0) {
      // First publish for this key: accept the watermark as-is.
      props.setProp(key, newValue);
      return true;
    }

    long actualNextWatermark = Long.parseLong(newValue);
    if (actualNextWatermark <= existing) {
      // Not an advance; leave the published watermark untouched.
      return false;
    }

    long expectedWatermark = getExpectedNextWatermark(existing);
    if (actualNextWatermark != expectedWatermark) {
      String errMsg = String.format(
          "Fail to advance %s of dataset %s: expect %s but got %s, please manually fill the gap and rerun.",
          key, dataset.datasetRoot(), expectedWatermark, actualNextWatermark);
      log.error(errMsg);
      throw new RuntimeException(errMsg);
    }

    props.setProp(key, newValue);
    return true;
  }

  /**
   * To guarantee watermark continuity, the expected next watermark should be: {@code previousWatermark} + 1
   * unit of {@link #granularity}
   */
  private long getExpectedNextWatermark(Long previousWatermark) {
    ZonedDateTime previousWatermarkTime = ZonedDateTime.ofInstant(Instant.ofEpochMilli(previousWatermark), zone);
    // Since version 1.8, java time supports DST change in PST(America/Los_Angeles) time zone
    ZonedDateTime nextWatermarkTime = TimeIterator.inc(previousWatermarkTime, granularity, 1);
    return nextWatermarkTime.toInstant().toEpochMilli();
  }

  @Override
  public void addEventSubmitter(EventSubmitter submitter) {
    this.submitter = submitter;
  }

  /**
   * Splits a dataset name of the form {@code table} or {@code db/table} into its hive db and
   * table, falling back to {@link #defaultHiveDb} when no db part is present.
   */
  private HiveDatasetFinder.DbAndTable extractDbTable(String datasetName) {
    String[] parts = datasetName.split("/");
    if (parts.length == 0 || parts.length > 2) {
      throw new RuntimeException(String.format("Unsupported dataset %s", datasetName));
    }

    String hiveDb = defaultHiveDb;
    String hiveTable = parts[0];
    // Use the db from the datasetName if it has
    if (parts.length == 2) {
      hiveDb = parts[0];
      hiveTable = parts[1];
    }
    return new HiveDatasetFinder.DbAndTable(hiveDb, hiveTable);
  }
}
| 1,810 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/action/CompactionHiveRegistrationAction.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.action;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.apache.gobblin.compaction.verify.InputRecordCountHelper;
import org.apache.gobblin.source.extractor.extract.kafka.KafkaSource;
import org.apache.gobblin.util.PathUtils;
import org.apache.hadoop.fs.Path;
import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableMap;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.compaction.event.CompactionSlaEventHelper;
import org.apache.gobblin.compaction.mapreduce.MRCompactionTask;
import org.apache.gobblin.compaction.parser.CompactionPathParser;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.dataset.FileSystemDataset;
import org.apache.gobblin.hive.HiveRegister;
import org.apache.gobblin.hive.policy.HiveRegistrationPolicy;
import org.apache.gobblin.hive.policy.HiveRegistrationPolicyBase;
import org.apache.gobblin.hive.spec.HiveSpec;
import org.apache.gobblin.metrics.event.CountEventBuilder;
import org.apache.gobblin.metrics.event.EventSubmitter;
/**
 * Class responsible for hive registration after compaction is complete
 */
@Slf4j
public class CompactionHiveRegistrationAction implements CompactionCompleteAction<FileSystemDataset> {

  public static final String NUM_OUTPUT_FILES = "numOutputFiles";
  public static final String RECORD_COUNT = "recordCount";
  public static final String BYTE_COUNT = "byteCount";
  public static final String DATASET_URN = "datasetUrn";

  private final State state;
  private EventSubmitter eventSubmitter;
  private InputRecordCountHelper helper;

  public CompactionHiveRegistrationAction (State state) {
    if (!(state instanceof WorkUnitState)) {
      throw new UnsupportedOperationException(this.getClass().getName() + " only supports workunit state");
    }
    this.state = state;
  }

  /**
   * Emits a file-count tracking event for the compaction output, then registers the output
   * directory with Hive when a registration policy is configured.
   */
  public void onCompactionJobComplete(FileSystemDataset dataset) throws IOException {
    if (dataset.isVirtual()) {
      return;
    }

    CompactionPathParser.CompactionParserResult parseResult = new CompactionPathParser(state).parse(dataset);
    emitFileCountEvent(parseResult);

    if (!state.contains(ConfigurationKeys.HIVE_REGISTRATION_POLICY)) {
      log.info("Will skip hive registration as {} is not configured.", ConfigurationKeys.HIVE_REGISTRATION_POLICY);
      return;
    }
    registerWithHive(dataset, parseResult);
  }

  /** Builds a {@link CountEventBuilder} describing the compaction output and submits it. */
  private void emitFileCountEvent(CompactionPathParser.CompactionParserResult parseResult) {
    long outputFileCount = state.getPropAsLong(MRCompactionTask.FILE_COUNT, -1);
    CountEventBuilder countEvent = new CountEventBuilder(NUM_OUTPUT_FILES, outputFileCount);
    countEvent.addMetadata(DATASET_URN, parseResult.getDstAbsoluteDir());
    countEvent.addMetadata(RECORD_COUNT, state.getProp(MRCompactionTask.RECORD_COUNT, "-1"));
    countEvent.addMetadata(BYTE_COUNT, state.getProp(MRCompactionTask.BYTE_COUNT, "-1"));
    if (this.eventSubmitter == null) {
      log.warn("Will not emit events in {} as EventSubmitter is null", getClass().getName());
    } else {
      this.eventSubmitter.submit(countEvent);
    }
  }

  /** Registers the compaction output path with Hive and emits a registration tracking event. */
  private void registerWithHive(FileSystemDataset dataset, CompactionPathParser.CompactionParserResult parseResult)
      throws IOException {
    try (HiveRegister hiveRegister = HiveRegister.get(state)) {
      state.setProp(KafkaSource.TOPIC_NAME, parseResult.getDatasetName());
      HiveRegistrationPolicy registrationPolicy = HiveRegistrationPolicyBase.getPolicy(state);

      Path registrationPath = new Path(parseResult.getDstAbsoluteDir());
      if (state.getPropAsBoolean(ConfigurationKeys.RECOMPACTION_WRITE_TO_NEW_FOLDER, false)) {
        //Lazily initialize helper
        this.helper = new InputRecordCountHelper(state);
        long execCount = helper.readExecutionCount(new Path(parseResult.getDstAbsoluteDir()));
        // Use new output path to do registration
        registrationPath = PathUtils.mergePaths(registrationPath,
            new Path(String.format(CompactionCompleteFileOperationAction.COMPACTION_DIRECTORY_FORMAT, execCount)));
      }

      List<String> registeredPaths = new ArrayList<>();
      for (HiveSpec spec : registrationPolicy.getHiveSpecs(registrationPath)) {
        hiveRegister.register(spec);
        registeredPaths.add(spec.getPath().toUri().toASCIIString());
        log.info("Hive registration is done for {}", registrationPath.toString());
      }

      // submit events for hive registration
      if (eventSubmitter != null) {
        Map<String, String> eventMetadataMap = ImmutableMap.of(
            CompactionSlaEventHelper.DATASET_URN, dataset.datasetURN(),
            CompactionSlaEventHelper.HIVE_REGISTRATION_PATHS, Joiner.on(',').join(registeredPaths));
        this.eventSubmitter.submit(CompactionSlaEventHelper.COMPACTION_HIVE_REGISTRATION_EVENT, eventMetadataMap);
      }
    }
  }

  public void addEventSubmitter(EventSubmitter submitter) {
    this.eventSubmitter = submitter;
  }
}
| 1,811 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/action/CompactionCompleteAction.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.action;
import java.io.IOException;
import org.apache.gobblin.dataset.Dataset;
import org.apache.gobblin.metrics.event.EventSubmitter;
/**
 * An interface which represents an action that is invoked after a compaction job is finished.
 *
 * @param <D> the concrete {@link Dataset} type the action operates on
 */
public interface CompactionCompleteAction<D extends Dataset> {

  /**
   * Callback invoked once the compaction job for {@code dataset} has completed.
   *
   * @param dataset the dataset whose compaction just finished
   * @throws IOException if the post-compaction work fails
   */
  void onCompactionJobComplete(D dataset) throws IOException;

  /**
   * Registers an {@link EventSubmitter} used to emit tracking events. Implementations that
   * emit events must override this; the default implementation rejects the call.
   */
  default void addEventSubmitter(EventSubmitter submitter) {
    throw new UnsupportedOperationException("Please add an EventSubmitter");
  }
}
| 1,812 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/action/CompactionGMCEPublishingAction.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.action;
import com.google.common.base.Joiner;
import com.google.common.collect.Lists;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.compaction.mapreduce.CompactionJobConfigurator;
import org.apache.gobblin.compaction.parser.CompactionPathParser;
import org.apache.gobblin.compaction.verify.InputRecordCountHelper;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.dataset.FileSystemDataset;
import org.apache.gobblin.iceberg.GobblinMCEProducer;
import org.apache.gobblin.iceberg.Utils.IcebergUtils;
import org.apache.gobblin.iceberg.publisher.GobblinMCEPublisher;
import org.apache.gobblin.metadata.OperationType;
import org.apache.gobblin.metadata.SchemaSource;
import org.apache.gobblin.metrics.event.EventSubmitter;
import org.apache.gobblin.util.HadoopUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.iceberg.FileFormat;
import org.apache.iceberg.Metrics;
import org.apache.iceberg.Schema;
import org.apache.iceberg.avro.AvroSchemaUtil;
import org.apache.iceberg.mapping.MappingUtil;
import org.apache.iceberg.mapping.NameMapping;
import org.apache.iceberg.orc.ORCSchemaUtil;
import org.apache.iceberg.shaded.org.apache.orc.TypeDescription;
import org.apache.orc.OrcConf;
/**
* Class responsible for emitting GMCE after compaction is complete
*/
@Slf4j
public class CompactionGMCEPublishingAction implements CompactionCompleteAction<FileSystemDataset> {
public static final String ICEBERG_ID_ATTRIBUTE = "iceberg.id";
public static final String ICEBERG_REQUIRED_ATTRIBUTE = "iceberg.required";
public final static String GMCE_EMITTED_KEY = "GMCE.emitted";
private final State state;
private final CompactionJobConfigurator configurator;
private final Configuration conf;
private InputRecordCountHelper helper;
private EventSubmitter eventSubmitter;
public CompactionGMCEPublishingAction(State state, CompactionJobConfigurator configurator, InputRecordCountHelper helper) {
if (!(state instanceof WorkUnitState)) {
throw new UnsupportedOperationException(this.getClass().getName() + " only supports workunit state");
}
this.state = state;
this.configurator = configurator;
this.conf = HadoopUtils.getConfFromState(state);
this.helper = helper;
}
public CompactionGMCEPublishingAction(State state, CompactionJobConfigurator configurator) {
this(state, configurator, new InputRecordCountHelper(state));
}
public void onCompactionJobComplete(FileSystemDataset dataset) throws IOException {
if (dataset.isVirtual()) {
return;
}
CompactionPathParser.CompactionParserResult result = new CompactionPathParser(state).parse(dataset);
String datasetDir = Joiner.on("/").join(result.getDstBaseDir(), result.getDatasetName());
state.setProp(ConfigurationKeys.DATA_PUBLISHER_DATASET_DIR, datasetDir);
try (GobblinMCEProducer producer = GobblinMCEProducer.getGobblinMCEProducer(state)) {
producer.sendGMCE(getNewFileMetrics(result), null, Lists.newArrayList(this.configurator.getOldFiles()), null,
OperationType.rewrite_files, SchemaSource.NONE);
}
State compactionState = helper.loadState(new Path(result.getDstAbsoluteDir()));
//Set the prop to be true to indicate that gmce has been emitted
compactionState.setProp(GMCE_EMITTED_KEY, true);
helper.saveState(new Path(result.getDstAbsoluteDir()), compactionState);
//clear old files to release memory
this.configurator.getOldFiles().clear();
}
private Map<Path, Metrics> getNewFileMetrics(CompactionPathParser.CompactionParserResult result) {
NameMapping mapping = null;
try {
if (IcebergUtils.getIcebergFormat(state) == FileFormat.ORC) {
String s =
this.configurator.getConfiguredJob().getConfiguration().get(OrcConf.MAPRED_OUTPUT_SCHEMA.getAttribute());
TypeDescription orcSchema = TypeDescription.fromString(s);
for (int i = 0; i <= orcSchema.getMaximumId(); i++) {
orcSchema.findSubtype(i).setAttribute(ICEBERG_ID_ATTRIBUTE, Integer.toString(i));
orcSchema.findSubtype(i).setAttribute(ICEBERG_REQUIRED_ATTRIBUTE, Boolean.toString(true));
}
Schema icebergSchema = ORCSchemaUtil.convert(orcSchema);
state.setProp(GobblinMCEPublisher.AVRO_SCHEMA_WITH_ICEBERG_ID,
AvroSchemaUtil.convert(icebergSchema.asStruct()).toString());
mapping = MappingUtil.create(icebergSchema);
}
} catch (Exception e) {
log.warn(
"Table {} contains complex union type which is not compatible with iceberg, will not calculate the metrics for it",
result.getDatasetName());
}
Map<Path, Metrics> newFiles = new HashMap<>();
for (Path filePath : this.configurator.getDstNewFiles()) {
newFiles.put(filePath, GobblinMCEPublisher.getMetrics(state, filePath, conf, mapping));
}
return newFiles;
}
public void addEventSubmitter(EventSubmitter submitter) {
this.eventSubmitter = submitter;
}
}
| 1,813 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/audit/AuditCountClientFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.audit;
import org.apache.gobblin.configuration.State;
/**
 * A factory class responsible for creating an {@link AuditCountClient}.
 *
 * @deprecated use {@link org.apache.gobblin.completeness.audit.AuditCountClientFactory} instead
 */
@Deprecated
public interface AuditCountClientFactory {
  // Configuration key naming the factory implementation to instantiate.
  String AUDIT_COUNT_CLIENT_FACTORY = "audit.count.client.factory";
  /** Creates an {@link AuditCountClient} configured from the given {@code state}. */
  AuditCountClient createAuditCountClient (State state);
}
| 1,814 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/audit/KafkaAuditCountHttpClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.audit;
import java.io.IOException;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import org.apache.commons.lang3.StringUtils;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpUriRequest;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;
import org.apache.http.util.EntityUtils;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.Maps;
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
import com.google.gson.JsonParser;
import javax.annotation.concurrent.ThreadSafe;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.completeness.audit.AuditCountHttpClient;
import org.apache.gobblin.configuration.State;
/**
* A {@link AuditCountClient} which uses {@link org.apache.http.client.HttpClient}
* to perform audit count query.
* @Deprecated {@link AuditCountHttpClient}
*/
@Slf4j
@ThreadSafe
@Deprecated
public class KafkaAuditCountHttpClient implements AuditCountClient {
// Keys
public static final String KAFKA_AUDIT_HTTP = "kafka.audit.http";
public static final String CONNECTION_MAX_TOTAL = KAFKA_AUDIT_HTTP + "max.total";
public static final int DEFAULT_CONNECTION_MAX_TOTAL = 10;
public static final String MAX_PER_ROUTE = KAFKA_AUDIT_HTTP + "max.per.route";
public static final int DEFAULT_MAX_PER_ROUTE = 10;
public static final String KAFKA_AUDIT_REST_BASE_URL = "kafka.audit.rest.base.url";
public static final String KAFKA_AUDIT_REST_MAX_TRIES = "kafka.audit.rest.max.tries";
public static final String KAFKA_AUDIT_REST_START_QUERYSTRING_KEY = "kafka.audit.rest.querystring.start";
public static final String KAFKA_AUDIT_REST_END_QUERYSTRING_KEY = "kafka.audit.rest.querystring.end";
public static final String KAFKA_AUDIT_REST_START_QUERYSTRING_DEFAULT = "begin";
public static final String KAFKA_AUDIT_REST_END_QUERYSTRING_DEFAULT = "end";
// Http Client
private PoolingHttpClientConnectionManager cm;
private CloseableHttpClient httpClient;
private static final JsonParser PARSER = new JsonParser();
private final String baseUrl;
private final String startQueryString;
private final String endQueryString;
private final int maxNumTries;
/**
* Constructor
*/
public KafkaAuditCountHttpClient (State state) {
int maxTotal = state.getPropAsInt(CONNECTION_MAX_TOTAL, DEFAULT_CONNECTION_MAX_TOTAL);
int maxPerRoute = state.getPropAsInt(MAX_PER_ROUTE, DEFAULT_MAX_PER_ROUTE);
cm = new PoolingHttpClientConnectionManager();
cm.setMaxTotal(maxTotal);
cm.setDefaultMaxPerRoute(maxPerRoute);
httpClient = HttpClients.custom()
.setConnectionManager(cm)
.build();
this.baseUrl = state.getProp(KAFKA_AUDIT_REST_BASE_URL);
this.maxNumTries = state.getPropAsInt(KAFKA_AUDIT_REST_MAX_TRIES, 5);
this.startQueryString = state.getProp(KAFKA_AUDIT_REST_START_QUERYSTRING_KEY, KAFKA_AUDIT_REST_START_QUERYSTRING_DEFAULT);
this.endQueryString = state.getProp(KAFKA_AUDIT_REST_END_QUERYSTRING_KEY, KAFKA_AUDIT_REST_END_QUERYSTRING_DEFAULT);
}
public Map<String, Long> fetch (String datasetName, long start, long end) throws IOException {
String fullUrl =
(this.baseUrl.endsWith("/") ? this.baseUrl : this.baseUrl + "/") + StringUtils.replaceChars(datasetName, '/', '.')
+ "?" + this.startQueryString + "=" + start + "&" + this.endQueryString + "=" + end;
log.info("Full URL is " + fullUrl);
String response = getHttpResponse(fullUrl);
return parseResponse (fullUrl, response, datasetName);
}
/**
* Expects <code>response</code> being parsed to be as below.
*
* <pre>
* {
* "result": {
* "hadoop-tracking-lva1tarock-08": 79341895,
* "hadoop-tracking-uno-08": 79341892,
* "kafka-08-tracking-local": 79341968,
* "kafka-corp-lca1-tracking-agg": 79341968,
* "kafka-corp-ltx1-tracking-agg": 79341968,
* "producer": 69483513
* }
* }
* </pre>
*/
@VisibleForTesting
public static Map<String, Long> parseResponse(String fullUrl, String response, String topic) throws IOException {
Map<String, Long> result = Maps.newHashMap();
JsonObject countsPerTier = null;
try {
JsonObject jsonObj = PARSER.parse(response).getAsJsonObject();
countsPerTier = jsonObj.getAsJsonObject("result");
} catch (Exception e) {
throw new IOException(String.format("Unable to parse JSON response: %s for request url: %s ", response,
fullUrl), e);
}
Set<Map.Entry<String, JsonElement>> entrySet = countsPerTier.entrySet();
for(Map.Entry<String, JsonElement> entry : entrySet) {
String tier = entry.getKey();
long count = Long.parseLong(entry.getValue().getAsString());
result.put(tier, count);
}
return result;
}
private String getHttpResponse(String fullUrl) throws IOException {
HttpUriRequest req = new HttpGet(fullUrl);
for (int numTries = 0;; numTries++) {
try (CloseableHttpResponse response = this.httpClient.execute(req)) {
int statusCode = response.getStatusLine().getStatusCode();
if (statusCode < 200 || statusCode >= 300) {
throw new IOException(
String.format("status code: %d, reason: %s", statusCode, response.getStatusLine().getReasonPhrase()));
}
return EntityUtils.toString(response.getEntity());
} catch (IOException e) {
String errMsg = "Unable to get or parse HTTP response for " + fullUrl;
if (numTries >= this.maxNumTries) {
throw new IOException (errMsg, e);
}
long backOffSec = (numTries + 1) * 2;
log.error(errMsg + ", will retry in " + backOffSec + " sec", e);
try {
Thread.sleep(TimeUnit.SECONDS.toMillis(backOffSec));
} catch (InterruptedException e1) {
Thread.currentThread().interrupt();
}
}
}
}
}
| 1,815 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/audit/KafkaAuditCountHttpClientFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.audit;
import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.completeness.audit.AuditCountHttpClientFactory;
import org.apache.gobblin.configuration.State;
/**
* Factory to create an instance of type {@link KafkaAuditCountHttpClient}
* @Deprecated {@link AuditCountHttpClientFactory}
*/
@Alias("KafkaAuditCountHttpClientFactory")
@Deprecated
public class KafkaAuditCountHttpClientFactory implements AuditCountClientFactory {
public KafkaAuditCountHttpClient createAuditCountClient (State state) {
return new KafkaAuditCountHttpClient(state);
}
}
| 1,816 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/audit/AuditCountClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.audit;
import java.io.IOException;
import java.util.Map;
/**
 * A type of client used to query the audit counts from Pinot backend
 *
 * @deprecated use {@link org.apache.gobblin.completeness.audit.AuditCountClient} instead
 */
@Deprecated
public interface AuditCountClient {
  /**
   * Fetches per-tier audit counts for {@code topic} within the time window
   * {@code [start, end)} (milliseconds, per implementations' documentation).
   *
   * @return a mapping from tier name to record count
   * @throws IOException if the underlying query fails
   */
  Map<String, Long> fetch (String topic, long start, long end) throws IOException;
}
| 1,817 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/audit/PinotAuditCountHttpClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.audit;
import java.io.IOException;
import java.net.URLEncoder;
import java.util.HashMap;
import java.util.Map;
import org.apache.http.HttpEntity;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.protocol.HttpClientContext;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;
import org.apache.http.util.EntityUtils;
import com.google.api.client.util.Charsets;
import com.google.gson.JsonArray;
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
import com.google.gson.JsonParser;
import javax.annotation.concurrent.ThreadSafe;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.State;
/**
* A {@link AuditCountClient} which uses {@link org.apache.http.client.HttpClient}
* to perform audit count query.
*/
@Slf4j
@ThreadSafe
public class PinotAuditCountHttpClient implements AuditCountClient {
// Keys
public static final String PINOT_AUDIT_HTTP = "pinot.audit.http";
public static final String CONNECTION_MAX_TOTAL = PINOT_AUDIT_HTTP + "max.total";
public static final int DEFAULT_CONNECTION_MAX_TOTAL = 10;
public static final String MAX_PER_ROUTE = PINOT_AUDIT_HTTP + "max.per.route";
public static final int DEFAULT_MAX_PER_ROUTE = 10;
public static final String TARGET_HOST = PINOT_AUDIT_HTTP + "target.host";
public static final String TARGET_PORT = PINOT_AUDIT_HTTP + "target.port";
// Http Client
private PoolingHttpClientConnectionManager cm;
private CloseableHttpClient httpClient;
private static final JsonParser PARSER = new JsonParser();
private String targetUrl;
/**
* Constructor
*/
public PinotAuditCountHttpClient(State state) {
int maxTotal = state.getPropAsInt(CONNECTION_MAX_TOTAL, DEFAULT_CONNECTION_MAX_TOTAL);
int maxPerRoute = state.getPropAsInt(MAX_PER_ROUTE, DEFAULT_MAX_PER_ROUTE);
cm = new PoolingHttpClientConnectionManager();
cm.setMaxTotal(maxTotal);
cm.setDefaultMaxPerRoute(maxPerRoute);
httpClient = HttpClients.custom()
.setConnectionManager(cm)
.build();
String host = state.getProp(TARGET_HOST);
int port = state.getPropAsInt(TARGET_PORT);
targetUrl = host + ":" + port + "/pql?pql=";
}
/**
* A thread-safe method which fetches a tier-to-count mapping.
* The returned json object from Pinot contains below information
* {
* "aggregationResults":[
* {
* "groupByResult":[
* {
* "value":"172765137.00000",
* "group":[
* "kafka-08-tracking-local"
* ]
* }
* ]
* }
* ],
* "exceptions":[
* ]
* .....
* }
* @param datasetName name of dataset
* @param start time start point in milliseconds
* @param end time end point in milliseconds
* @return A tier to record count mapping when succeeded. Otherwise a null value is returned
*/
public Map<String, Long> fetch (String datasetName, long start, long end) throws IOException {
Map<String, Long> map = new HashMap<>();
String query = "select tier, sum(count) from kafkaAudit where " +
"eventType=\"" + datasetName + "\" and " +
"beginTimestamp >= \"" + start + "\" and " +
"beginTimestamp < \"" + end + "\" group by tier";
String fullURL = targetUrl + URLEncoder.encode (query, Charsets.UTF_8.toString());
HttpGet req = new HttpGet(fullURL);
String rst = null;
HttpEntity entity = null;
log.info ("Full url for {} is {}", datasetName, fullURL);
try {
CloseableHttpResponse response = httpClient.execute(req, HttpClientContext.create());
int statusCode = response.getStatusLine().getStatusCode();
if (statusCode < 200 || statusCode >= 300) {
throw new IOException(
String.format("status code: %d, reason: %s", statusCode, response.getStatusLine().getReasonPhrase()));
}
entity = response.getEntity();
rst = EntityUtils.toString(entity);
} finally {
if (entity != null) {
EntityUtils.consume(entity);
}
}
JsonObject all = PARSER.parse(rst).getAsJsonObject();
JsonArray aggregationResults = all.getAsJsonArray("aggregationResults");
if (aggregationResults == null || aggregationResults.size() == 0) {
log.error (all.toString());
throw new IOException("No aggregation results " + all.toString());
}
JsonObject aggregation = (JsonObject) aggregationResults.get(0);
JsonArray groupByResult = aggregation.getAsJsonArray("groupByResult");
if (groupByResult == null || groupByResult.size() == 0) {
log.error (aggregation.toString());
throw new IOException("No aggregation results " + aggregation.toString());
}
log.info ("Audit count for {} is {}", datasetName, groupByResult);
for (JsonElement ele : groupByResult){
JsonObject record = (JsonObject) ele;
map.put(record.getAsJsonArray("group").get(0).getAsString(), (long) Double.parseDouble(record.get("value").getAsString()));
}
return map;
}
}
| 1,818 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/audit/PinotAuditCountHttpClientFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.audit;
import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.configuration.State;
/**
* Factory to create an instance of type {@link PinotAuditCountHttpClient}
*/
@Alias("PinotAuditCountHttpClientFactory")
public class PinotAuditCountHttpClientFactory implements AuditCountClientFactory {
public PinotAuditCountHttpClient createAuditCountClient (State state) {
return new PinotAuditCountHttpClient(state);
}
} | 1,819 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/conditions/RecompactionConditionBasedOnRatio.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.conditions;
import java.util.List;
import java.util.Map;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Splitter;
import com.google.common.collect.Maps;
import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.compaction.dataset.Dataset;
import org.apache.gobblin.compaction.dataset.DatasetHelper;
import org.apache.gobblin.compaction.mapreduce.MRCompactor;
import org.apache.gobblin.util.DatasetFilterUtils;
/**
* An implementation {@link RecompactionCondition} which examines the late record percentage.
* If the percent exceeds the limit, a recompaction is triggered.
*/
@Alias("RecompactionConditionBasedOnRatio")
public class RecompactionConditionBasedOnRatio implements RecompactionCondition {
public static final char DATASETS_WITH_DIFFERENT_RECOMPACT_THRESHOLDS_SEPARATOR = ';';
public static final char DATASETS_WITH_SAME_RECOMPACT_THRESHOLDS_SEPARATOR = ',';
public static final char DATASETS_AND_RECOMPACT_THRESHOLD_SEPARATOR = ':';
private static final Logger logger = LoggerFactory.getLogger (RecompactionConditionBasedOnRatio.class);
private final double ratio;
private RecompactionConditionBasedOnRatio (Dataset dataset) {
Map<String, Double> datasetRegexAndRecompactThreshold = getDatasetRegexAndRecompactThreshold(
dataset.jobProps().getProp(
MRCompactor.COMPACTION_LATEDATA_THRESHOLD_FOR_RECOMPACT_PER_DATASET, StringUtils.EMPTY));
this.ratio = getOwnRatioThreshold (dataset, datasetRegexAndRecompactThreshold);
}
@Alias("RecompactBasedOnRatio")
public static class Factory implements RecompactionConditionFactory {
@Override public RecompactionCondition createRecompactionCondition (Dataset dataset) {
return new RecompactionConditionBasedOnRatio (dataset);
}
}
public static Map<String, Double> getDatasetRegexAndRecompactThreshold (String datasetsAndRecompactThresholds) {
Map<String, Double> topicRegexAndRecompactThreshold = Maps.newHashMap();
for (String entry : Splitter.on(DATASETS_WITH_DIFFERENT_RECOMPACT_THRESHOLDS_SEPARATOR).trimResults()
.omitEmptyStrings().splitToList(datasetsAndRecompactThresholds)) {
List<String> topicsAndRecompactThreshold =
Splitter.on(DATASETS_AND_RECOMPACT_THRESHOLD_SEPARATOR).trimResults().omitEmptyStrings().splitToList(entry);
if (topicsAndRecompactThreshold.size() != 2) {
logger.error("Invalid form (DATASET_NAME:THRESHOLD) in "
+ MRCompactor.COMPACTION_LATEDATA_THRESHOLD_FOR_RECOMPACT_PER_DATASET + ".");
} else {
topicRegexAndRecompactThreshold.put(topicsAndRecompactThreshold.get(0),
Double.parseDouble(topicsAndRecompactThreshold.get(1)));
}
}
return topicRegexAndRecompactThreshold;
}
private double getOwnRatioThreshold (Dataset dataset, Map<String, Double> datasetRegexAndRecompactThreshold) {
return getRatioThresholdByDatasetName (dataset.getDatasetName(), datasetRegexAndRecompactThreshold);
}
public static double getRatioThresholdByDatasetName (String datasetName, Map<String, Double> datasetRegexAndRecompactThreshold) {
for (Map.Entry<String, Double> topicRegexEntry : datasetRegexAndRecompactThreshold.entrySet()) {
if (DatasetFilterUtils.stringInPatterns(datasetName,
DatasetFilterUtils.getPatternsFromStrings(Splitter.on(DATASETS_WITH_SAME_RECOMPACT_THRESHOLDS_SEPARATOR)
.trimResults().omitEmptyStrings().splitToList(topicRegexEntry.getKey())))) {
return topicRegexEntry.getValue();
}
}
return MRCompactor.DEFAULT_COMPACTION_LATEDATA_THRESHOLD_FOR_RECOMPACT_PER_DATASET;
}
public boolean isRecompactionNeeded (DatasetHelper datasetHelper) {
long lateDataCount = datasetHelper.getLateOutputRecordCount();
long nonLateDataCount = datasetHelper.getOutputRecordCount();
double lateDataPercent = lateDataCount * 1.0 / (lateDataCount + nonLateDataCount);
logger.info ("Late data ratio is " + lateDataPercent + " and threshold is " + this.ratio);
if (lateDataPercent > ratio) {
return true;
}
return false;
}
} | 1,820 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/conditions/RecompactionCombineCondition.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.conditions;
import java.lang.reflect.InvocationTargetException;
import java.util.List;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.ImmutableList;
import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.compaction.dataset.Dataset;
import org.apache.gobblin.compaction.dataset.DatasetHelper;
import org.apache.gobblin.compaction.mapreduce.MRCompactor;
import org.apache.gobblin.util.ClassAliasResolver;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
/**
 * An implementation {@link RecompactionCondition} which contains multiple recompact conditions.
 * An operation (AND or OR) is to combine these operations.
 */
@Alias("RecompactionCombineCondition")
public class RecompactionCombineCondition implements RecompactionCondition {
  /** How the member conditions' results are combined. */
  public enum CombineOperation {
    OR,
    AND
  }

  private final List<RecompactionCondition> recompactionConditions;
  private final CombineOperation operation;
  private static final Logger logger = LoggerFactory.getLogger (RecompactionCombineCondition.class);

  private RecompactionCombineCondition (Dataset dataset) {
    this.recompactionConditions = getConditionsFromProperties (dataset);
    this.operation = getConditionOperation(dataset);
    if (this.recompactionConditions.isEmpty()) {
      throw new IllegalArgumentException( "No combine conditions specified");
    }
  }

  /** Factory registered under the {@code RecompactBasedOnCombination} alias. */
  @Alias("RecompactBasedOnCombination")
  public static class Factory implements RecompactionConditionFactory {
    @Override public RecompactionCondition createRecompactionCondition (Dataset dataset) {
      return new RecompactionCombineCondition(dataset);
    }
  }

  public RecompactionCombineCondition (List<RecompactionCondition> conditions, CombineOperation opr) {
    this.recompactionConditions = conditions;
    this.operation = opr;
  }

  /**
   * Reads the combine operation from job properties; any unrecognized value silently
   * falls back to {@link CombineOperation#OR} (the historical behavior).
   */
  private CombineOperation getConditionOperation (Dataset dataset) {
    String oprName = dataset.jobProps().getProp (MRCompactor.COMPACTION_RECOMPACT_COMBINE_CONDITIONS_OPERATION,
        MRCompactor.DEFAULT_COMPACTION_RECOMPACT_COMBINE_CONDITIONS_OPERATION);
    try {
      return CombineOperation.valueOf (oprName.toUpperCase());
    } catch (Exception e) {
      return CombineOperation.OR;
    }
  }

  /**
   * Instantiates each configured {@link RecompactionConditionFactory} (resolved by alias)
   * and collects the conditions it creates.
   *
   * @throws IllegalArgumentException if any factory cannot be resolved or constructed
   */
  private ImmutableList<RecompactionCondition> getConditionsFromProperties (Dataset dataset) {
    ClassAliasResolver<RecompactionConditionFactory> conditionClassAliasResolver = new ClassAliasResolver<>(RecompactionConditionFactory.class);
    List<String> factoryNames = dataset.jobProps().getPropAsList(MRCompactor.COMPACTION_RECOMPACT_COMBINE_CONDITIONS,
        MRCompactor.DEFAULT_COMPACTION_RECOMPACT_CONDITION);
    ImmutableList.Builder<RecompactionCondition> builder = ImmutableList.builder();
    for (String factoryName : factoryNames) {
      try {
        RecompactionConditionFactory factory = GobblinConstructorUtils.invokeFirstConstructor(
            conditionClassAliasResolver.resolveClass(factoryName), ImmutableList.of());
        RecompactionCondition condition = factory.createRecompactionCondition(dataset);
        builder.add(condition);
      } catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException | InstantiationException
          | ClassNotFoundException e) {
        throw new IllegalArgumentException(e);
      }
    }
    return builder.build();
  }

  /**
   * For OR combination, return true iff one of conditions return true
   * For AND combination, return true iff all of conditions return true
   * Other cases, return false
   */
  @Override
  public boolean isRecompactionNeeded (DatasetHelper helper) {
    if (recompactionConditions.isEmpty()) {
      return false;
    }
    if (operation == CombineOperation.OR) {
      for (RecompactionCondition c : recompactionConditions) {
        if (c.isRecompactionNeeded(helper)) {
          return true;
        }
      }
      return false;
    } else {
      for (RecompactionCondition c : recompactionConditions) {
        if (!c.isRecompactionNeeded(helper)) {
          return false;
        }
      }
      return true;
    }
  }
}
| 1,821 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/conditions/RecompactionConditionBasedOnDuration.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.conditions;
import org.joda.time.DateTime;
import org.joda.time.Period;
import org.joda.time.format.PeriodFormatter;
import org.joda.time.format.PeriodFormatterBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Optional;
import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.compaction.dataset.Dataset;
import org.apache.gobblin.compaction.dataset.DatasetHelper;
import org.apache.gobblin.compaction.mapreduce.MRCompactor;
/**
* An implementation {@link RecompactionCondition} which checks the earliest file modification timestamp from
* the late output directory. If the earliest file has passed a specified duration and was never cleaned up, a
* recmpaction will be triggered.
*/
@Alias("RecompactionConditionBasedOnDuration")
public class RecompactionConditionBasedOnDuration implements RecompactionCondition {

  private final Period duration;

  private static final Logger logger = LoggerFactory.getLogger(RecompactionConditionBasedOnDuration.class);

  private RecompactionConditionBasedOnDuration(Dataset dataset) {
    this.duration = getOwnDurationThreshold(dataset);
  }

  @Alias("RecompactBasedOnDuration")
  public static class Factory implements RecompactionConditionFactory {
    @Override
    public RecompactionCondition createRecompactionCondition(Dataset dataset) {
      return new RecompactionConditionBasedOnDuration(dataset);
    }
  }

  /** Reads the late-data duration threshold from the dataset's job properties. */
  private Period getOwnDurationThreshold(Dataset dataset) {
    String retention = dataset.jobProps().getProp(MRCompactor.COMPACTION_LATEDATA_THRESHOLD_DURATION,
        MRCompactor.DEFAULT_COMPACTION_LATEDATA_THRESHOLD_DURATION);
    return getPeriodFormatter().parsePeriod(retention);
  }

  /** Formatter for duration strings of the form "1m2d3h4min" (months, days, hours, minutes). */
  private static PeriodFormatter getPeriodFormatter() {
    return new PeriodFormatterBuilder().appendMonths().appendSuffix("m").appendDays().appendSuffix("d").appendHours()
        .appendSuffix("h").appendMinutes().appendSuffix("min").toFormatter();
  }

  /**
   * Returns true iff the earliest file in the late output directory was modified before
   * (current time - configured duration), i.e. late data has waited longer than the threshold.
   */
  @Override
  public boolean isRecompactionNeeded(DatasetHelper datasetHelper) {
    Optional<DateTime> earliestFileModificationTime = datasetHelper.getEarliestLateFileModificationTime();
    DateTime currentTime = datasetHelper.getCurrentTime();
    if (earliestFileModificationTime.isPresent()) {
      DateTime checkpoint = currentTime.minus(duration);
      // Parameterized logging avoids building message strings when INFO is disabled.
      logger.info("Current time is {} checkpoint is {}", currentTime, checkpoint);
      logger.info("Earliest late file has timestamp {} inside {}", earliestFileModificationTime.get(),
          datasetHelper.getDataset().outputLatePath());
      if (earliestFileModificationTime.get().isBefore(checkpoint)) {
        return true;
      }
    }
    return false;
  }
}
| 1,822 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/conditions/RecompactionCondition.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.conditions;
import org.apache.gobblin.compaction.dataset.DatasetHelper;
/**
* There are different recompaction conditions and their combinations in Gobblin recompaction flow . For example,
* depending on the number of late records, number of late files, or the late files duration, user may choose
* different criteria or different combination strategies to decide if a recompaction is mandatory.
*
* The interface {@link RecompactionCondition} provides a generic API. This is used when
* {@link org.apache.gobblin.compaction.mapreduce.MRCompactorJobRunner} attempts to check if a recompaction is necessary. Real
* examination is delegated to {@link org.apache.gobblin.compaction.dataset.Dataset#checkIfNeedToRecompact(DatasetHelper)},
* which finally invokes this API.
*/
public interface RecompactionCondition {

  /**
   * Examines the dataset (through the supplied {@link DatasetHelper}) and decides whether a
   * recompaction should be triggered for it.
   *
   * @param helper accessor for the dataset's state used by concrete conditions
   * @return true if the dataset should be recompacted
   */
  boolean isRecompactionNeeded (DatasetHelper helper);
}
| 1,823 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/conditions/RecompactionConditionFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.conditions;
import org.apache.gobblin.compaction.dataset.Dataset;
/**
 * Factory for {@link RecompactionCondition} instances configured from a concrete {@link Dataset}.
 */
public interface RecompactionConditionFactory {

  /** Creates a {@link RecompactionCondition} configured from the given dataset's job properties. */
  RecompactionCondition createRecompactionCondition (Dataset dataset);
}
| 1,824 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/conditions/RecompactionConditionBasedOnFileCount.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.conditions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.compaction.dataset.Dataset;
import org.apache.gobblin.compaction.dataset.DatasetHelper;
import org.apache.gobblin.compaction.mapreduce.MRCompactor;
/**
* An implementation {@link RecompactionCondition} which examines the number of files in the late outputDir
* If the file count exceeds the file count limit, a recompaction flow is triggered.
*/
@Alias("RecompactionConditionBasedOnFileCount")
public class RecompactionConditionBasedOnFileCount implements RecompactionCondition {

  private final int fileCountLimit;

  private static final Logger logger = LoggerFactory.getLogger(RecompactionConditionBasedOnFileCount.class);

  private RecompactionConditionBasedOnFileCount(Dataset dataset) {
    this.fileCountLimit = getOwnFileCountThreshold(dataset);
  }

  @Alias("RecompactBasedOnFileCount")
  public static class Factory implements RecompactionConditionFactory {
    @Override
    public RecompactionCondition createRecompactionCondition(Dataset dataset) {
      return new RecompactionConditionBasedOnFileCount(dataset);
    }
  }

  /** Reads the late-file count threshold from the dataset's job properties. */
  private int getOwnFileCountThreshold(Dataset dataset) {
    return dataset.jobProps().getPropAsInt(MRCompactor.COMPACTION_LATEDATA_THRESHOLD_FILE_NUM,
        MRCompactor.DEFAULT_COMPACTION_LATEDATA_THRESHOLD_FILE_NUM);
  }

  /** Returns true iff the number of files in the late output directory reaches the threshold. */
  @Override
  public boolean isRecompactionNeeded(DatasetHelper datasetHelper) {
    long fileNum = datasetHelper.getLateOutputFileCount();
    // Parameterized logging avoids building the message string when INFO is disabled.
    logger.info("File count is {} and threshold is {}", fileNum, this.fileCountLimit);
    return fileNum >= fileCountLimit;
  }
}
| 1,825 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/hivebasedconstructs/CompactionLauncherWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.hivebasedconstructs;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import org.apache.hadoop.fs.Path;
import com.google.common.base.Joiner;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import org.apache.gobblin.compaction.listeners.CompactorListener;
import org.apache.gobblin.compaction.mapreduce.MRCompactor;
import org.apache.gobblin.compaction.mapreduce.avro.ConfBasedDeltaFieldProvider;
import org.apache.gobblin.metrics.Tag;
import org.apache.gobblin.writer.DataWriter;
/**
* {@link DataWriter} that launches an {@link MRCompactor} job given an {@link MRCompactionEntity} specifying config
* for compaction
*
* Delta field is passed using {@link ConfBasedDeltaFieldProvider}
*/
public class CompactionLauncherWriter implements DataWriter<MRCompactionEntity> {

  /**
   * Builds an {@link MRCompactor} configuration from the given entity and runs the compaction
   * synchronously within this write call.
   */
  @Override
  public void write(MRCompactionEntity compactionEntity) throws IOException {
    Preconditions.checkNotNull(compactionEntity);

    Properties compactionProps = new Properties();
    compactionProps.putAll(compactionEntity.getProps());

    // Pass the delta fields to the conf-based provider as a comma-separated list.
    compactionProps.setProperty(ConfBasedDeltaFieldProvider.DELTA_FIELDS_KEY,
        Joiner.on(',').join(compactionEntity.getDeltaList()));
    compactionProps.setProperty(MRCompactor.COMPACTION_INPUT_DIR,
        compactionEntity.getDataFilesPath().toString());

    // Scope tmp/dest directories by table name (and a launch timestamp for dest).
    String tableDirName = compactionEntity.getDataFilesPath().getName();
    String launchTimestamp = String.valueOf(System.currentTimeMillis());
    compactionProps.setProperty(MRCompactor.COMPACTION_TMP_DEST_DIR,
        new Path(compactionProps.getProperty(MRCompactor.COMPACTION_TMP_DEST_DIR), tableDirName).toString());
    compactionProps.setProperty(MRCompactor.COMPACTION_DEST_DIR,
        new Path(new Path(compactionProps.getProperty(MRCompactor.COMPACTION_DEST_DIR), tableDirName),
            launchTimestamp).toString());

    List<? extends Tag<?>> tags = new ArrayList<>();
    new MRCompactor(compactionProps, tags, Optional.<CompactorListener>absent()).compact();
  }

  /** No-op: the launched compaction job manages its own output. */
  @Override
  public void commit() throws IOException {}

  /** No-op. */
  @Override
  public void cleanup() throws IOException {}

  @Override
  public long recordsWritten() {
    return 0;
  }

  @Override
  public long bytesWritten() throws IOException {
    return 0;
  }

  /** No-op: no resources are held between writes. */
  @Override
  public void close() throws IOException {}
}
| 1,826 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/hivebasedconstructs/MRCompactionEntity.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.hivebasedconstructs;
import java.util.List;
import java.util.Properties;
import org.apache.hadoop.fs.Path;
import lombok.Getter;
/**
* Entity that stores information required for launching an {@link org.apache.gobblin.compaction.mapreduce.MRCompactor} job
*
* {@link #primaryKeyList}: Comma delimited list of fields to use as primary key
* {@link #deltaList}: Comma delimited list of fields to use as deltaList
* {@link #dataFilesPath}: Location of files associated with table
* {@link #props}: Other properties to be passed to {@link org.apache.gobblin.compaction.mapreduce.MRCompactor}
*/
@Getter
public class MRCompactionEntity {

  private final List<String> primaryKeyList;
  private final List<String> deltaList;
  private final Path dataFilesPath;
  private final Properties props;

  /**
   * @param primaryKeyList fields used as the primary key during compaction
   * @param deltaList fields used to resolve duplicates during compaction
   * @param dataFilesPath location of the data files associated with the table
   * @param props additional properties forwarded to the MR compactor
   */
  public MRCompactionEntity(List<String> primaryKeyList, List<String> deltaList, Path dataFilesPath,
      Properties props) {
    this.props = props;
    this.dataFilesPath = dataFilesPath;
    this.deltaList = deltaList;
    this.primaryKeyList = primaryKeyList;
  }
}
| 1,827 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/hivebasedconstructs/HiveMetadataForCompactionExtractorFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.hivebasedconstructs;
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.thrift.TException;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.data.management.conversion.hive.extractor.HiveBaseExtractor;
import org.apache.gobblin.data.management.conversion.hive.extractor.HiveBaseExtractorFactory;
/**
* Factory for {@link HiveMetadataForCompactionExtractor}
*/
public class HiveMetadataForCompactionExtractorFactory implements HiveBaseExtractorFactory {
public HiveBaseExtractor createExtractor(WorkUnitState state, FileSystem sourceFs)
throws IOException, TException {
return new HiveMetadataForCompactionExtractor(state, sourceFs);
}
} | 1,828 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/hivebasedconstructs/CompactionLauncherWriterBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.hivebasedconstructs;
import java.io.IOException;
import org.apache.avro.Schema;
import org.apache.gobblin.writer.DataWriter;
import org.apache.gobblin.writer.DataWriterBuilder;
/**
* {@link DataWriterBuilder} for {@link CompactionLauncherWriter}
*/
public class CompactionLauncherWriterBuilder extends DataWriterBuilder<Schema, MRCompactionEntity> {
  /**
   * Returns a new {@link CompactionLauncherWriter}. No builder state is consulted: the writer is
   * fully configured by the {@link MRCompactionEntity} records it receives.
   */
  @Override
  public DataWriter<MRCompactionEntity> build() throws IOException {
    return new CompactionLauncherWriter();
  }
}
| 1,829 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/hivebasedconstructs/HiveMetadataForCompactionExtractor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.hivebasedconstructs;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.thrift.TException;
import com.google.common.base.Splitter;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.data.management.conversion.hive.extractor.HiveBaseExtractor;
import org.apache.gobblin.data.management.conversion.hive.watermarker.PartitionLevelWatermarker;
import org.apache.gobblin.source.extractor.Extractor;
import org.apache.gobblin.util.AutoReturnableObject;
/**
* {@link Extractor} that extracts primary key field name, delta field name, and location from hive metastore and
* creates an {@link MRCompactionEntity}
*/
@Slf4j
public class HiveMetadataForCompactionExtractor extends HiveBaseExtractor<Void, MRCompactionEntity> {
  /** Job-property key whose value names the table parameter holding the primary-key field list. */
  public static final String COMPACTION_PRIMARY_KEY = "hive.metastore.primaryKey";
  /** Job-property key whose value names the table parameter holding the delta field list. */
  public static final String COMPACTION_DELTA = "hive.metastore.delta";

  private MRCompactionEntity compactionEntity;
  private boolean extracted = false;

  /**
   * Reads primary-key fields, delta fields, and the data location from the Hive metastore for the
   * work unit's table, building the single {@link MRCompactionEntity} this extractor emits.
   * Watermark work units are skipped (no entity is built).
   */
  public HiveMetadataForCompactionExtractor(WorkUnitState state, FileSystem fs) throws IOException, TException {
    super(state);
    // getPropAsBoolean already returns a primitive boolean; the former Boolean.valueOf wrapper
    // was redundant.
    if (state.getPropAsBoolean(PartitionLevelWatermarker.IS_WATERMARK_WORKUNIT_KEY)) {
      log.info("Ignoring Watermark workunit for {}", state.getProp(ConfigurationKeys.DATASET_URN_KEY));
      return;
    }
    try (AutoReturnableObject<IMetaStoreClient> client = this.pool.getClient()) {
      Table table = client.get().getTable(this.dbName, this.tableName);
      // NOTE(review): getParameters().get(...) returns null when the table parameter is absent,
      // which makes splitToList throw NPE — confirm upstream guarantees these params exist.
      String primaryKeyString = table.getParameters().get(state.getProp(COMPACTION_PRIMARY_KEY));
      List<String> primaryKeyList = Splitter.on(',').omitEmptyStrings().trimResults().splitToList(primaryKeyString);
      String deltaString = table.getParameters().get(state.getProp(COMPACTION_DELTA));
      List<String> deltaList = Splitter.on(',').omitEmptyStrings().trimResults().splitToList(deltaString);
      Path dataFilesPath = new Path(table.getSd().getLocation());
      compactionEntity = new MRCompactionEntity(primaryKeyList, deltaList, dataFilesPath, state.getProperties());
    }
  }

  /** Emits the entity on the first call and null on every subsequent call. */
  @Override
  public MRCompactionEntity readRecord(MRCompactionEntity reuse) {
    if (extracted) {
      return null;
    }
    extracted = true;
    return compactionEntity;
  }

  /** This extractor carries no schema. */
  @Override
  public Void getSchema() throws IOException {
    return null;
  }
}
| 1,830 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/hive/HdfsWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.hive;
import java.io.IOException;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.gobblin.util.HadoopUtils;
/**
* A class for write operations on HDFS.
*/
public class HdfsWriter extends HdfsIO {

  public HdfsWriter(String filePathInHdfs) throws IOException {
    super(filePathInHdfs);
  }

  /**
   * Writes {@code text} to the target file, creating parent directories first and overwriting
   * any existing file.
   * NOTE(review): writeChars emits UTF-16 code units (2 bytes per char), not UTF-8 — confirm
   * downstream readers expect this encoding.
   */
  public void write(String text) throws IOException {
    String dirInHdfs = getDirInHdfs();
    this.fileSystem.mkdirs(new Path(dirInHdfs));
    try (FSDataOutputStream fout = this.fileSystem.create(new Path(this.filePathInHdfs))) {
      fout.writeChars(text);
    }
  }

  // Parent directory of the file this writer targets.
  private String getDirInHdfs() {
    return new Path(this.filePathInHdfs).getParent().toString();
  }

  /** Recursively deletes the target path; returns true if a deletion occurred. */
  public boolean delete() throws IllegalArgumentException, IOException {
    return this.fileSystem.delete(new Path(this.filePathInHdfs), true);
  }

  /**
   * Copies every non-directory file directly under {@code source} whose name ends with
   * {@code extension} (case-insensitive) into {@code destination}.
   * NOTE(review): the destination is deleted inside the loop before each copy, so when several
   * files match, copies from earlier iterations are removed — this looks unintended; confirm
   * whether the delete should happen once before the loop.
   */
  public static void moveSelectFiles(String extension, String source, String destination) throws IOException {
    FileSystem fs = getFileSystem();
    fs.mkdirs(new Path(destination));
    FileStatus[] fileStatuses = fs.listStatus(new Path(source));
    for (FileStatus fileStatus : fileStatuses) {
      Path path = fileStatus.getPath();
      if (!fileStatus.isDirectory() && path.toString().toLowerCase().endsWith(extension.toLowerCase())) {
        HadoopUtils.deleteIfExists(fs, new Path(destination), true);
        HadoopUtils.copyPath(fs, path, fs, new Path(destination), getConfiguration());
      }
    }
  }
}
| 1,831 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/hive/HdfsReader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.hive;
import java.io.IOException;
import java.io.InputStream;
import org.apache.avro.mapred.FsInput;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A class for read operations on HDFS.
*/
public class HdfsReader extends HdfsIO {

  private static final Logger LOG = LoggerFactory.getLogger(HdfsReader.class);

  public HdfsReader(String filePathInHdfs) throws IOException {
    super(filePathInHdfs);
  }

  /** Opens the target file and returns a stream over its contents. */
  public InputStream getInputStream() throws IOException {
    return this.fileSystem.open(new Path(this.filePathInHdfs));
  }

  /** Wraps the target file as an Avro {@link FsInput}. */
  public FsInput getFsInput() throws IOException {
    return new FsInput(new Path(this.filePathInHdfs), getConfiguration());
  }

  /**
   * Returns the path of the first non-directory entry in {@code dirInHdfs} whose name does not
   * start with an underscore.
   *
   * @throws RuntimeException if the directory contains no such file
   */
  public static String getFirstDataFilePathInDir(String dirInHdfs) throws IOException {
    for (FileStatus status : getFileSystem().listStatus(new Path(dirInHdfs))) {
      if (status.isDirectory()) {
        continue;
      }
      Path candidate = status.getPath();
      if (!candidate.getName().startsWith("_")) {
        return candidate.toString();
      }
    }
    String message = dirInHdfs + " does not contain a valid data file.";
    LOG.error(message);
    throw new RuntimeException(message);
  }
}
| 1,832 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/hive/HiveTable.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.hive;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.UUID;
import org.apache.commons.lang.StringUtils;
import com.google.common.base.Splitter;
import org.apache.gobblin.hive.metastore.HiveMetaStoreUtils;
import org.apache.gobblin.util.HiveJdbcConnector;
/**
* A class for managing general Hive tables.
*/
public abstract class HiveTable {

  // DROP statement template; %1$s is the job-suffixed table name.
  protected static final String DROP_TABLE_STMT = "DROP TABLE IF EXISTS %1$s";

  protected final String name;
  protected List<String> primaryKeys;
  protected List<HiveAttribute> attributes;

  /**
   * Fluent builder shared by concrete {@link HiveTable} subclasses. The recursive type parameter
   * lets subclass builders chain these with-methods while keeping their own static type; the
   * unchecked casts to {@code T} are safe under that convention.
   */
  public static class Builder<T extends Builder<?>> {
    // Defaults to a random, metastore-safe table name when none is supplied.
    protected String name = HiveMetaStoreUtils.getHiveTableName(UUID.randomUUID().toString());
    protected List<String> primaryKeys = new ArrayList<>();
    protected List<HiveAttribute> attributes = new ArrayList<>();

    /** Sets the table name; blank or null input keeps the generated default. */
    @SuppressWarnings("unchecked")
    public T withName(String name) {
      if (StringUtils.isNotBlank(name)) {
        this.name = name;
      }
      return (T) this;
    }

    /** Replaces the primary-key list wholesale. */
    @SuppressWarnings("unchecked")
    public T withPrimaryKeys(List<String> primaryKeys) {
      this.primaryKeys = primaryKeys;
      return (T) this;
    }

    /** Accepts keys as a comma-separated string and appends them to any keys already set. */
    @SuppressWarnings("unchecked")
    public T withPrimaryKeys(String keyAttrs) {
      List<String> keyAttrsList = Splitter.on(",").trimResults().omitEmptyStrings().splitToList(keyAttrs);
      this.primaryKeys.addAll(keyAttrsList);
      return (T) this;
    }

    @SuppressWarnings("unchecked")
    public T withAttributes(List<HiveAttribute> attributes) {
      this.attributes = attributes;
      return (T) this;
    }
  }

  protected HiveTable(HiveTable.Builder<?> builder) {
    this.name = builder.name;
    // Unmodifiable *views*, not copies: later mutation of the builder's lists would show through.
    this.primaryKeys = Collections.unmodifiableList(builder.primaryKeys);
    this.attributes = Collections.unmodifiableList(builder.attributes);
  }

  public String getName() {
    return this.name;
  }

  public List<String> getPrimaryKeys() {
    return this.primaryKeys;
  }

  public List<HiveAttribute> getAttributes() {
    return this.attributes;
  }

  /** Drops this table's job-scoped variant ({@code name_jobId}) if it exists. */
  public void dropTable(HiveJdbcConnector conn, String jobId) throws SQLException {
    String dropTableStmt = String.format(DROP_TABLE_STMT, getNameWithJobId(jobId));
    conn.executeStatements(dropTableStmt);
  }

  // Job-scoped table name: "<name>_<suffix>".
  protected String getNameWithJobId(String randomSuffix) {
    return this.name + "_" + randomSuffix;
  }

  /**
   * Returns true iff every attribute of {@code table} already exists in this table.
   * O(n*m) via List.contains — acceptable for typical column counts.
   */
  protected boolean hasNoNewColumn(HiveTable table) {
    for (HiveAttribute attribute : table.attributes) {
      if (!this.attributes.contains(attribute)) {
        return false;
      }
    }
    return true;
  }

  // Creates the table via the JDBC connection, using the suffix to scope the physical name
  // (contract inferred from dropTable/getNameWithJobId — confirm against subclasses).
  public abstract void createTable(HiveJdbcConnector conn, String randomTableSuffix) throws SQLException;

  // Evolves this table's schema with columns present in {@code table} but missing here
  // (contract inferred from hasNoNewColumn — confirm against subclasses).
  public abstract HiveTable addNewColumnsInSchema(HiveJdbcConnector conn, HiveTable table, String randomSuffix)
      throws SQLException;
}
| 1,833 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/hive/SerialCompactor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.hive;
import java.io.IOException;
import java.sql.SQLException;
import java.util.List;
import java.util.UUID;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Preconditions;
import com.google.common.io.Closer;
import org.apache.gobblin.compaction.Compactor;
import org.apache.gobblin.util.HiveJdbcConnector;
/**
* An implementation of compactor. This class assumes that the snapshot table
* and the delta tables are taken one after another (hence the name
* "SerialCompactor").
*/
public class SerialCompactor implements Compactor {
  private static final Logger LOG = LoggerFactory.getLogger(SerialCompactor.class);
  // Job-property keys (read from CompactionRunner.jobProperties) tuning the Hive session.
  private static final String HIVE_DB_NAME = "hive.db.name";
  private static final String HIVE_QUEUE_NAME = "hive.queue.name";
  private static final String HIVE_USE_MAPJOIN = "hive.use.mapjoin";
  private static final String HIVE_MAPJOIN_SMALLTABLE_FILESIZE = "hive.mapjoin.smalltable.filesize";
  private static final String HIVE_AUTO_CONVERT_JOIN = "hive.auto.convert.join";
  private static final String HIVE_INPUT_SPLIT_SIZE = "hive.input.split.size";
  private static final String MAPRED_MIN_SPLIT_SIZE = "mapred.min.split.size";
  private static final String MAPREDUCE_JOB_REDUCES = "mapreduce.job.reduces";
  private static final String MAPREDUCE_JOB_NUM_REDUCERS = "mapreduce.job.num.reducers";
  private static final String MAPREDUCE_JOB_QUEUENAME = "mapreduce.job.queuename";
  // Base snapshot plus the ordered deltas to apply on top of it.
  private final AvroExternalTable snapshot;
  private final List<AvroExternalTable> deltas;
  private final String outputTableName;
  private final String outputDataLocationInHdfs;
  // The last delta; its schema is treated as the newest and drives schema widening.
  private final AvroExternalTable latestTable;
  // Random suffix appended to every table name so concurrent compaction jobs don't collide.
  private final String jobId;
  private HiveJdbcConnector conn;
  /** Fluent builder for {@link SerialCompactor}. */
  public static class Builder {
    private AvroExternalTable snapshot;
    private List<AvroExternalTable> deltas;
    private String outputTableName;
    private String outputDataLocationInHdfs;
    public Builder withSnapshot(AvroExternalTable snapshot) {
      this.snapshot = snapshot;
      return this;
    }
    public Builder withDeltas(List<AvroExternalTable> deltas) {
      Preconditions.checkArgument(deltas.size() >= 1, "Number of delta tables should be at least 1");
      this.deltas = deltas;
      return this;
    }
    public Builder withOutputTableName(String outputTableName) {
      this.outputTableName = outputTableName;
      return this;
    }
    public Builder withOutputDataLocationInHdfs(String outputDataLocationInHdfs) {
      this.outputDataLocationInHdfs = outputDataLocationInHdfs;
      return this;
    }
    public SerialCompactor build() {
      return new SerialCompactor(this);
    }
  }
  private SerialCompactor(SerialCompactor.Builder builder) {
    this.snapshot = builder.snapshot;
    this.deltas = builder.deltas;
    this.outputTableName = builder.outputTableName;
    this.outputDataLocationInHdfs = builder.outputDataLocationInHdfs;
    this.latestTable = this.deltas.get(this.deltas.size() - 1);
    // Hyphens are illegal in Hive identifiers, so replace them with underscores.
    this.jobId = UUID.randomUUID().toString().replaceAll("-", "_");
  }
  /**
   * Runs the full compaction pipeline: verify schemas, open a Hive JDBC session,
   * apply session settings, register the snapshot/delta tables, merge the deltas,
   * extract snapshot rows that were not updated, and union them with the merged
   * delta into the output table. Temporary HDFS files are removed in all cases.
   */
  @Override
  public void compact() throws IOException {
    checkSchemaCompatibility();
    Closer closer = Closer.create();
    try {
      this.conn = closer.register(HiveJdbcConnector.newConnectorWithProps(CompactionRunner.properties));
      setHiveParameters();
      createTables();
      HiveTable mergedDelta = mergeDeltas();
      HiveManagedTable notUpdated = getNotUpdatedRecords(this.snapshot, mergedDelta);
      unionNotUpdatedRecordsAndDeltas(notUpdated, mergedDelta);
    } catch (SQLException e) {
      LOG.error("SQLException during compaction: " + e.getMessage());
      throw new RuntimeException(e);
    } catch (IOException e) {
      // NOTE(review): IOException is wrapped in RuntimeException even though this
      // method declares `throws IOException` — confirm the wrapping is deliberate.
      LOG.error("IOException during compaction: " + e.getMessage());
      throw new RuntimeException(e);
    } catch (RuntimeException e) {
      LOG.error("Runtime Exception during compaction: " + e.getMessage());
      throw e;
    } finally {
      try {
        deleteTmpFiles();
      } finally {
        closer.close();
      }
    }
  }
  /** Fails fast if any delta's primary key differs from the snapshot's. */
  private void checkSchemaCompatibility() {
    for (int i = 0; i < this.deltas.size(); i++) {
      if (!this.snapshot.hasSamePrimaryKey(this.deltas.get(i))) {
        String message = "Schema incompatible: the snapshot table and delta table #" + (i + 1)
            + " do not have the same primary key.";
        LOG.error(message);
        throw new RuntimeException(message);
      }
    }
  }
  /** Applies all optional Hive/MapReduce session settings from the job properties. */
  private void setHiveParameters() throws SQLException {
    setHiveQueueName();
    setHiveDbName();
    setHiveMapjoin();
    setHiveInputSplitSize();
    setNumberOfReducers();
  }
  private void setHiveQueueName() throws SQLException {
    this.conn.executeStatements("set " + MAPREDUCE_JOB_QUEUENAME + "="
        + CompactionRunner.jobProperties.getProperty(HIVE_QUEUE_NAME, "default"));
  }
  private void setHiveDbName() throws SQLException {
    this.conn.executeStatements("use " + CompactionRunner.jobProperties.getProperty(HIVE_DB_NAME, "default"));
  }
  // Map-side join is only enabled when both the flag and a small-table size are configured.
  private void setHiveMapjoin() throws SQLException {
    boolean useMapjoin = Boolean.parseBoolean(CompactionRunner.jobProperties.getProperty(HIVE_USE_MAPJOIN, "false"));
    boolean smallTableSizeSpecified = CompactionRunner.jobProperties.containsKey(HIVE_MAPJOIN_SMALLTABLE_FILESIZE);
    if (useMapjoin && smallTableSizeSpecified) {
      this.conn.executeStatements("set " + HIVE_AUTO_CONVERT_JOIN + "=true");
      this.conn.executeStatements("set " + HIVE_MAPJOIN_SMALLTABLE_FILESIZE + "="
          + CompactionRunner.jobProperties.getProperty(HIVE_MAPJOIN_SMALLTABLE_FILESIZE));
    }
  }
  private void setHiveInputSplitSize() throws SQLException {
    boolean splitSizeSpecified = CompactionRunner.jobProperties.containsKey(HIVE_INPUT_SPLIT_SIZE);
    if (splitSizeSpecified) {
      this.conn.executeStatements(
          "set " + MAPRED_MIN_SPLIT_SIZE + "=" + CompactionRunner.jobProperties.getProperty(HIVE_INPUT_SPLIT_SIZE));
    }
  }
  private void setNumberOfReducers() throws SQLException {
    boolean numOfReducersSpecified = CompactionRunner.jobProperties.containsKey(MAPREDUCE_JOB_NUM_REDUCERS);
    if (numOfReducersSpecified) {
      this.conn.executeStatements("set " + MAPREDUCE_JOB_REDUCES + "="
          + CompactionRunner.jobProperties.getProperty(MAPREDUCE_JOB_NUM_REDUCERS));
    }
  }
  /** Registers the snapshot and every delta as job-scoped Hive tables. */
  private void createTables() throws SQLException {
    this.snapshot.createTable(this.conn, this.jobId);
    for (AvroExternalTable delta : this.deltas) {
      delta.createTable(this.conn, this.jobId);
    }
  }
  /**
   * Folds all deltas into a single "merged_delta" table, left to right, so later
   * deltas override earlier ones. Each source delta is dropped once consumed.
   * With a single delta no merge table is created at all.
   */
  private HiveTable mergeDeltas() throws SQLException {
    if (this.deltas.size() == 1) {
      LOG.info("Only one delta table: no need to merge delta");
      return this.deltas.get(0);
    }
    HiveManagedTable mergedDelta =
        new HiveManagedTable.Builder().withName("merged_delta").withAttributes(this.deltas.get(0).getAttributes())
            .withPrimaryKeys(this.deltas.get(0).getPrimaryKeys()).build();
    mergedDelta.createTable(this.conn, this.jobId);
    insertFirstDeltaIntoMergedDelta(mergedDelta);
    this.deltas.get(0).dropTable(this.conn, this.jobId);
    for (int i = 1; i < this.deltas.size(); i++) {
      mergedDelta = mergeTwoDeltas(mergedDelta, this.deltas.get(i));
      LOG.info("Merged the first " + (i + 1) + " delta tables");
      this.deltas.get(i).dropTable(this.conn, this.jobId);
    }
    return mergedDelta;
  }
  private void insertFirstDeltaIntoMergedDelta(HiveManagedTable mergedDelta) throws SQLException {
    String insertStmt = "INSERT OVERWRITE TABLE " + mergedDelta.getNameWithJobId(this.jobId) + " SELECT * FROM "
        + this.deltas.get(0).getNameWithJobId(this.jobId);
    this.conn.executeStatements(insertStmt);
  }
  /**
   * Merges the next delta into the running merged delta: keeps merged-delta rows
   * whose keys do not appear in the next delta, widens both sides to the latest
   * schema, then unions them into a re-created merged-delta table.
   */
  private HiveManagedTable mergeTwoDeltas(HiveManagedTable mergedDelta, AvroExternalTable nextDelta)
      throws SQLException {
    HiveManagedTable notUpdated = getNotUpdatedRecords(mergedDelta, nextDelta);
    HiveTable notUpdatedWithNewSchema = notUpdated.addNewColumnsInSchema(this.conn, this.latestTable, this.jobId);
    HiveTable nextDeltaWithNewSchema = nextDelta.addNewColumnsInSchema(this.conn, this.latestTable, this.jobId);
    mergedDelta = new HiveManagedTable.Builder().withName(mergedDelta.getName())
        .withAttributes(this.latestTable.getAttributes()).withPrimaryKeys(this.latestTable.getPrimaryKeys()).build();
    mergedDelta.createTable(this.conn, this.jobId);
    String unionStmt = "INSERT OVERWRITE TABLE " + mergedDelta.getNameWithJobId(this.jobId) + " SELECT "
        + getAttributesInNewSchema() + " FROM " + notUpdatedWithNewSchema.getNameWithJobId(this.jobId) + " UNION ALL "
        + "SELECT " + getAttributesInNewSchema() + " FROM " + nextDeltaWithNewSchema.getNameWithJobId(this.jobId);
    this.conn.executeStatements(unionStmt);
    nextDelta.dropTable(this.conn, this.jobId);
    return mergedDelta;
  }
  /** Builds a comma-separated column list in the latest table's schema order. */
  private String getAttributesInNewSchema() {
    StringBuilder sb = new StringBuilder();
    for (int i = 0; i < this.latestTable.getAttributes().size(); i++) {
      sb.append(this.latestTable.getAttributes().get(i).name());
      if (i < this.latestTable.getAttributes().size() - 1) {
        sb.append(", ");
      }
    }
    return sb.toString();
  }
  /**
   * Materializes the rows of {@code oldTable} whose primary key does not appear in
   * {@code newTable}, via LEFT OUTER JOIN + IS NULL. Drops {@code oldTable} afterwards.
   */
  private HiveManagedTable getNotUpdatedRecords(HiveTable oldTable, HiveTable newTable) throws SQLException {
    LOG.info("Getting records in table " + oldTable.getNameWithJobId(this.jobId) + " but not in table "
        + newTable.getNameWithJobId(this.jobId));
    HiveManagedTable notUpdated = new HiveManagedTable.Builder().withName("not_updated")
        .withPrimaryKeys(oldTable.getPrimaryKeys()).withAttributes(oldTable.getAttributes()).build();
    notUpdated.createTable(this.conn, this.jobId);
    String leftOuterJoinStmt = "INSERT OVERWRITE TABLE " + notUpdated.getNameWithJobId(this.jobId) + " SELECT "
        + oldTable.getNameWithJobId(this.jobId) + ".* FROM " + oldTable.getNameWithJobId(this.jobId)
        + " LEFT OUTER JOIN " + newTable.getNameWithJobId(this.jobId) + " ON " + getJoinCondition(oldTable, newTable)
        + " WHERE " + getKeyIsNullPredicate(newTable);
    this.conn.executeStatements(leftOuterJoinStmt);
    oldTable.dropTable(this.conn, this.jobId);
    return notUpdated;
  }
  /** Builds the AND-joined equality predicate over the shared primary-key columns. */
  private String getJoinCondition(HiveTable firstTable, HiveTable secondTable) {
    if (!firstTable.getPrimaryKeys().equals(secondTable.getPrimaryKeys())) {
      throw new RuntimeException("The primary keys of table " + firstTable.getName() + " and table "
          + secondTable.getName() + " are different");
    }
    boolean addAnd = false;
    StringBuilder sb = new StringBuilder();
    for (String keyAttribute : firstTable.getPrimaryKeys()) {
      if (addAnd) {
        sb.append(" AND ");
      }
      sb.append(firstTable.getNameWithJobId(this.jobId) + "." + keyAttribute + " = "
          + secondTable.getNameWithJobId(this.jobId) + "." + keyAttribute);
      addAnd = true;
    }
    return sb.toString();
  }
  /** Builds the "every key column IS NULL" predicate used to detect unmatched join rows. */
  private String getKeyIsNullPredicate(HiveTable table) {
    boolean addAnd = false;
    StringBuilder sb = new StringBuilder();
    for (String keyAttribute : table.getPrimaryKeys()) {
      if (addAnd) {
        sb.append(" AND ");
      }
      sb.append(table.getNameWithJobId(this.jobId) + "." + keyAttribute + " IS NULL");
      addAnd = true;
    }
    return sb.toString();
  }
  /**
   * Produces the final output: widens both inputs to the latest schema, creates the
   * Avro-backed output table at the configured HDFS location, and unions the
   * not-updated snapshot rows with the merged delta into it.
   */
  private AvroExternalTable unionNotUpdatedRecordsAndDeltas(HiveManagedTable notUpdated, HiveTable mergedDelta)
      throws IOException, SQLException {
    LOG.info("Taking union of table " + notUpdated.getNameWithJobId(this.jobId)
        + "(records in snapshot but not in delta) and table " + mergedDelta.getNameWithJobId(this.jobId)
        + "(merged delta)");
    HiveTable notUpdatedWithNewSchema = notUpdated.addNewColumnsInSchema(this.conn, this.latestTable, this.jobId);
    HiveTable mergedDeltaWithNewSchema = mergedDelta.addNewColumnsInSchema(this.conn, this.latestTable, this.jobId);
    AvroExternalTable outputTable = new AvroExternalTable.Builder().withName(this.outputTableName)
        .withPrimaryKeys(this.latestTable.getPrimaryKeys())
        .withSchemaLocation(this.latestTable.getSchemaLocationInHdfs()).withDataLocation(this.outputDataLocationInHdfs)
        .build();
    outputTable.createTable(this.conn, this.jobId);
    String unionStmt = "INSERT OVERWRITE TABLE " + outputTable.getNameWithJobId(this.jobId) + " SELECT "
        + getAttributesInNewSchema() + " FROM " + notUpdatedWithNewSchema.getNameWithJobId(this.jobId) + " UNION ALL "
        + "SELECT " + getAttributesInNewSchema() + " FROM " + mergedDeltaWithNewSchema.getNameWithJobId(this.jobId);
    this.conn.executeStatements(unionStmt);
    notUpdatedWithNewSchema.dropTable(this.conn, this.jobId);
    mergedDeltaWithNewSchema.dropTable(this.conn, this.jobId);
    return outputTable;
  }
  /** Removes any temporary schema/data files the input tables copied into HDFS. */
  private void deleteTmpFiles() throws IllegalArgumentException, IOException {
    this.snapshot.deleteTmpFilesIfNeeded();
    for (AvroExternalTable delta : this.deltas) {
      delta.deleteTmpFilesIfNeeded();
    }
  }
  @Override
  public void cancel() throws IOException {
    // Do nothing
  }
}
| 1,834 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/hive/HiveManagedTable.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.hive;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
import org.apache.gobblin.util.HiveJdbcConnector;
/**
* A class for managing Hive managed tables.
*/
public class HiveManagedTable extends HiveTable {
  /** Builder for {@link HiveManagedTable}; all options come from {@link HiveTable.Builder}. */
  public static class Builder extends HiveTable.Builder<Builder> {
    public HiveManagedTable build() {
      return new HiveManagedTable(this);
    }
  }
  private HiveManagedTable(HiveManagedTable.Builder builder) {
    super(builder);
  }
  /**
   * Drops any existing job-scoped table of the same name, then creates it with
   * this table's attribute list as its columns.
   *
   * @param conn open Hive JDBC connector
   * @param jobId suffix identifying the job-scoped table instance
   * @param tableType either {@code "TABLE"} or {@code "TEMPORARY TABLE"}
   * @throws SQLException if either DDL statement fails
   */
  public void createTable(HiveJdbcConnector conn, String jobId, String tableType) throws SQLException {
    String tableName = getNameWithJobId(jobId);
    String dropTableStmt = String.format(DROP_TABLE_STMT, tableName);
    StringBuilder sb = new StringBuilder().append("CREATE ");
    sb.append(tableType).append(' ');
    sb.append(tableName);
    sb.append('(');
    for (int i = 0; i < this.attributes.size(); i++) {
      sb.append(this.attributes.get(i).name()).append(' ').append(this.attributes.get(i).type());
      if (i != this.attributes.size() - 1) {
        sb.append(", ");
      }
    }
    sb.append(')');
    String createTableStmt = sb.toString();
    conn.executeStatements(dropTableStmt, createTableStmt);
  }
  @Override
  public void createTable(HiveJdbcConnector conn, String randomSuffix) throws SQLException {
    createTable(conn, randomSuffix, "TABLE");
  }
  /** Same as {@link #createTable(HiveJdbcConnector, String)} but session-scoped (TEMPORARY). */
  public void createTemporaryTable(HiveJdbcConnector conn, String randomSuffix) throws SQLException {
    createTable(conn, randomSuffix, "TEMPORARY TABLE");
  }
  /**
   * Adds every column of {@code table} that this table lacks via
   * {@code ALTER TABLE ... ADD COLUMNS}, and updates the in-memory attribute list
   * to match. Returns {@code this}.
   *
   * @throws SQLException if the ALTER statement fails
   */
  @Override
  public HiveTable addNewColumnsInSchema(HiveJdbcConnector conn, HiveTable table, String randomSuffix)
      throws SQLException {
    if (hasNoNewColumn(table)) {
      return this;
    }
    // Bug fix: this.attributes is wrapped with Collections.unmodifiableList() by the
    // HiveTable constructor, so calling add() on it directly throws
    // UnsupportedOperationException. Mutate a copy and reassign the (non-final) field.
    List<HiveAttribute> updatedAttributes = new ArrayList<>(this.attributes);
    StringBuilder sb =
        new StringBuilder().append("ALTER TABLE " + this.getNameWithJobId(randomSuffix) + " ADD COLUMNS (");
    boolean addComma = false;
    for (HiveAttribute attribute : table.attributes) {
      if (!updatedAttributes.contains(attribute)) {
        if (addComma) {
          sb.append(", ");
        }
        sb.append(attribute.name() + " " + attribute.type());
        addComma = true;
        updatedAttributes.add(attribute);
      }
    }
    sb.append(')');
    this.attributes = updatedAttributes;
    String alterTableStmt = sb.toString();
    conn.executeStatements(alterTableStmt);
    return this;
  }
}
| 1,835 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/hive/HiveAttribute.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.hive;
/**
* An immutable class for managing Hive attributes.
*/
public final class HiveAttribute {
private final String name;
private final Type type;
public enum Type {
TINYINT,
SMALLINT,
INT,
BIGINT,
FLOAT,
DOUBLE,
DECIMAL,
TIMESTAMP,
DATE,
STRING,
VARCHAR,
CHAR,
BOOLEAN,
BINARY
}
private enum AvroType {
BOOLEAN(Type.BOOLEAN),
INT(Type.INT),
LONG(Type.BIGINT),
FLOAT(Type.FLOAT),
DOUBLE(Type.DOUBLE),
BYTES(Type.BINARY),
STRING(Type.STRING),
ENUM(Type.STRING);
private final Type hiveType;
private AvroType(Type hiveType) {
this.hiveType = hiveType;
}
}
public static Type fromAvroType(String avroTypeString) {
try {
AvroType.valueOf(avroTypeString.toUpperCase());
return AvroType.valueOf(avroTypeString).hiveType;
} catch (java.lang.RuntimeException e) {
return null;
}
}
public HiveAttribute(String name, Type type) {
this.name = name;
this.type = type;
}
public HiveAttribute(HiveAttribute attr) {
this.name = attr.name;
this.type = attr.type;
}
public String name() {
return this.name;
}
public Type type() {
return this.type;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((this.name == null) ? 0 : this.name.hashCode());
result = prime * result + ((this.type == null) ? 0 : this.type.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (!(obj instanceof HiveAttribute)) {
return false;
}
HiveAttribute other = (HiveAttribute) obj;
if (this.name == null) {
if (other.name != null) {
return false;
}
} else if (!this.name.equals(other.name)) {
return false;
}
if (this.type != other.type) {
return false;
}
return true;
}
}
| 1,836 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/hive/CompactionRunner.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.hive;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.PrintWriter;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.TimeUnit;
import org.apache.commons.configuration.Configuration;
import org.apache.commons.configuration.ConfigurationConverter;
import org.apache.commons.configuration.ConfigurationException;
import org.apache.commons.configuration.PropertiesConfiguration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.gobblin.compaction.CliOptions;
import org.apache.gobblin.compaction.mapreduce.MRCompactionRunner;
/**
* Run Hive compaction based on config files.
*/
public class CompactionRunner {
  private static final Logger LOG = LoggerFactory.getLogger(CompactionRunner.class);
  // Keys read from the launcher properties / per-job config files.
  private static final String COMPACTION_CONFIG_DIR = "compaction.config.dir";
  private static final String TIMING_FILE = "timing.file";
  private static final String TIMING_FILE_DEFAULT = "time.txt";
  private static final String SNAPSHOT = "snapshot";
  private static final String DELTA = "delta";
  private static final String NAME = ".name";
  private static final String PKEY = ".pkey";
  private static final String DATALOCATION = ".datalocation";
  private static final String SCHEMALOCATION = ".schemalocation";
  private static final String COPYDATA = ".copydata";
  private static final String COPYDATA_DEFAULT = "false";
  private static final String DATAFORMAT_EXTENSION_NAME = ".dataformat.extension.name";
  private static final String OUTPUT = "output";
  // Global mutable state: `properties` holds launcher-level settings (read by HdfsIO and
  // SerialCompactor); `jobProperties` is reassigned once per config file in main().
  static Properties properties = new Properties();
  static Properties jobProperties = new Properties();
  /**
   * Entry point: reads every non-hidden file in the compaction config directory,
   * runs one compaction per file, and writes per-job wall-clock timings to the
   * timing file.
   */
  public static void main(String[] args) throws IOException, ConfigurationException {
    // NOTE(review): passes MRCompactionRunner.class (not CompactionRunner.class) to the
    // CLI parser — presumably only affects usage/help text; confirm intentional.
    properties = CliOptions.parseArgs(MRCompactionRunner.class, args);
    File compactionConfigDir = new File(properties.getProperty(COMPACTION_CONFIG_DIR));
    File[] listOfFiles = compactionConfigDir.listFiles();
    if (listOfFiles == null || listOfFiles.length == 0) {
      System.err.println("No compaction configuration files found under " + compactionConfigDir);
      System.exit(1);
    }
    int numOfJobs = 0;
    for (File file : listOfFiles) {
      // Hidden/dot files are not job configs.
      if (file.isFile() && !file.getName().startsWith(".")) {
        numOfJobs++;
      }
    }
    LOG.info("Found " + numOfJobs + " compaction tasks.");
    try (PrintWriter pw = new PrintWriter(new OutputStreamWriter(
        new FileOutputStream(properties.getProperty(TIMING_FILE, TIMING_FILE_DEFAULT)), Charset.forName("UTF-8")))) {
      for (File file : listOfFiles) {
        if (file.isFile() && !file.getName().startsWith(".")) {
          Configuration jobConfig = new PropertiesConfiguration(file.getAbsolutePath());
          jobProperties = ConfigurationConverter.getProperties(jobConfig);
          long startTime = System.nanoTime();
          compact();
          long endTime = System.nanoTime();
          long elapsedTime = endTime - startTime;
          // NOTE(review): toSeconds() returns a truncated long, so sub-second precision
          // is lost despite the double/%f formatting — confirm whole seconds suffice.
          double seconds = TimeUnit.NANOSECONDS.toSeconds(elapsedTime);
          pw.printf("%s: %f%n", file.getAbsolutePath(), seconds);
        }
      }
    }
  }
  /** Builds and runs one SerialCompactor from the current jobProperties. */
  private static void compact() throws IOException {
    SerialCompactor sc = new SerialCompactor.Builder().withSnapshot(buildSnapshotTable()).withDeltas(buildDeltaTables())
        .withOutputTableName(jobProperties.getProperty(OUTPUT + NAME))
        .withOutputDataLocationInHdfs(jobProperties.getProperty(OUTPUT + DATALOCATION)).build();
    sc.compact();
  }
  private static AvroExternalTable buildSnapshotTable() throws IOException {
    return buildAvroExternalTable(SNAPSHOT);
  }
  /** Collects delta.1, delta.2, ... until a numbered delta has no data location. */
  private static List<AvroExternalTable> buildDeltaTables() throws IOException {
    List<AvroExternalTable> deltas = new ArrayList<>();
    for (int i = 1;; i++) {
      String deltai = DELTA + "." + i;
      if (jobProperties.getProperty(deltai + DATALOCATION) == null) {
        break;
      }
      deltas.add(buildAvroExternalTable(deltai));
    }
    return deltas;
  }
  /**
   * Builds one table description from the "<tableType>.*" job properties; when
   * copydata is set the input data is first staged into a temporary HDFS dir.
   */
  private static AvroExternalTable buildAvroExternalTable(String tableType) throws IOException {
    AvroExternalTable.Builder builder =
        new AvroExternalTable.Builder().withName(jobProperties.getProperty(tableType + NAME, ""))
            .withPrimaryKeys(jobProperties.getProperty(tableType + PKEY))
            .withSchemaLocation(jobProperties.getProperty(tableType + SCHEMALOCATION, ""))
            .withDataLocation(jobProperties.getProperty(tableType + DATALOCATION));
    if (Boolean.parseBoolean(jobProperties.getProperty(tableType + COPYDATA, COPYDATA_DEFAULT))) {
      builder = builder.withMoveDataToTmpHdfsDir(jobProperties.getProperty(tableType + DATAFORMAT_EXTENSION_NAME, ""));
    }
    return builder.build();
  }
}
| 1,837 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/hive/HdfsIO.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.hive;
import java.io.IOException;
import java.util.Set;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Strings;
/**
* Management for HDFS reads and writes.
*/
public abstract class HdfsIO {
  private static final Logger LOG = LoggerFactory.getLogger(HdfsIO.class);
  private static final String HDFS_URI = "hdfs.uri";
  private static final String HDFS_URI_DEFAULT = "hdfs://localhost:9000";
  // Prefix for properties whose values are extra Hadoop config files to load.
  private static final String HADOOP_CONFIGFILE_ = "hadoop.configfile.";
  private static final String HDFS_URI_HADOOP = "fs.defaultFS";
  @Deprecated // Gobblin only supports Hadoop 2.x.x
  private static final String HADOOP_VERSION = "hadoop.version";
  protected final String filePathInHdfs;
  protected final FileSystem fileSystem;
  /**
   * Binds this reader/writer to an HDFS path and opens the default FileSystem,
   * configured from CompactionRunner.properties.
   */
  public HdfsIO(String filePathInHdfs) throws IOException {
    this.filePathInHdfs = filePathInHdfs;
    this.fileSystem = getFileSystem();
  }
  protected static FileSystem getFileSystem() throws IOException {
    Configuration conf = getConfiguration();
    return FileSystem.get(conf);
  }
  /** Builds a Hadoop Configuration seeded from the launcher's global properties. */
  protected static Configuration getConfiguration() {
    Configuration conf = new Configuration();
    addResourceToConf(conf);
    return conf;
  }
  // Resolves the effective fs.defaultFS: explicit hdfs.uri property wins, then any value
  // from the loaded Hadoop config files, then the localhost default. Also writes the
  // result BACK into the global properties (side effect read later by getHdfsUri()).
  private static void addResourceToConf(Configuration conf) {
    addHadoopConfigPropertiesToConf(conf);
    if (CompactionRunner.properties.containsKey(HDFS_URI)) {
      conf.set(HDFS_URI_HADOOP, CompactionRunner.properties.getProperty(HDFS_URI));
    }
    if (Strings.isNullOrEmpty(conf.get(HDFS_URI_HADOOP))) {
      conf.set(HDFS_URI_HADOOP, HDFS_URI_DEFAULT);
    }
    CompactionRunner.properties.setProperty(HDFS_URI, conf.get(HDFS_URI_HADOOP));
  }
  /** Adds every "hadoop.configfile.*" property value as a Hadoop config resource. */
  private static void addHadoopConfigPropertiesToConf(Configuration conf) {
    Set<String> propertyNames = CompactionRunner.properties.stringPropertyNames();
    for (String propertyName : propertyNames) {
      if (propertyName.startsWith(HADOOP_CONFIGFILE_)) {
        String hadoopConfigFile = CompactionRunner.properties.getProperty(propertyName);
        conf.addResource(new Path(hadoopConfigFile));
        LOG.info("Added Hadoop Config File: " + hadoopConfigFile);
      }
    }
  }
  /** Returns the resolved HDFS URI (set by addResourceToConf, or the default). */
  public static String getHdfsUri() {
    return CompactionRunner.properties.getProperty(HDFS_URI, HDFS_URI_DEFAULT);
  }
}
| 1,838 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/hive/AvroExternalTable.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.hive;
import java.io.IOException;
import java.io.InputStream;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
import java.util.UUID;
import org.apache.avro.Schema;
import org.apache.avro.file.DataFileReader;
import org.apache.avro.file.SeekableInput;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.gobblin.util.HiveJdbcConnector;
/**
* A class for managing Hive external tables created on Avro files.
*/
public class AvroExternalTable extends HiveTable {
  private static final Logger LOG = LoggerFactory.getLogger(AvroExternalTable.class);
  // Optional override dirs for temporary schema/data staging.
  private static final String HIVE_TMPSCHEMA_DIR = "hive.tmpschema.dir";
  // NOTE(review): HIVE_TMPDATA_DIR / HIVE_TMPDATA_DIR_DEFAULT are not referenced in the
  // methods visible here — presumably used by moveDataFileToSeparateHdfsDir (defined
  // further down this class); confirm before removing.
  private static final String HIVE_TMPDATA_DIR = "hive.tmpdata.dir";
  private static final String HIVE_TMPDATA_DIR_DEFAULT = "/";
  // DDL template for an Avro-backed external table: %1$s = table name,
  // %2$s = HDFS data location, %3$s = HDFS schema (.avsc) URL.
  private static final String CREATE_TABLE_STMT =
      "CREATE EXTERNAL TABLE %1$s " + " ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe'" + " STORED AS"
          + " INPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat'"
          + " OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat'" + " LOCATION '%2$s'"
          + " TBLPROPERTIES ('avro.schema.url'='%3$s')";
  private final String dataLocationInHdfs;
  private final String schemaLocationInHdfs;
  // Whether the schema/data files were staged by this class and should be cleaned up.
  private final boolean deleteSchemaAfterDone;
  private final boolean deleteDataAfterDone;
  /**
   * Builder for {@link AvroExternalTable}. Data location is required; if no schema
   * location is given, the schema is extracted from the first Avro data file.
   */
  public static class Builder extends HiveTable.Builder<AvroExternalTable.Builder> {
    private String dataLocationInHdfs = "";
    private String schemaLocationInHdfs = "";
    // When set, data files with the given extension are first copied into a
    // separate temporary HDFS dir (and cleaned up after compaction).
    private boolean moveDataToTmpHdfsDir = false;
    private String extensionToBeMoved;
    public Builder withDataLocation(String dataLocationInHdfs) {
      this.dataLocationInHdfs = dataLocationInHdfs;
      return this;
    }
    public Builder withSchemaLocation(String schemaLocationInHdfs) {
      this.schemaLocationInHdfs = schemaLocationInHdfs;
      return this;
    }
    public Builder withMoveDataToTmpHdfsDir(String extensionToBeMoved) {
      this.moveDataToTmpHdfsDir = true;
      this.extensionToBeMoved = extensionToBeMoved;
      return this;
    }
    public AvroExternalTable build() throws IOException {
      return new AvroExternalTable(this);
    }
  }
  /**
   * Resolves data and schema locations. Data is optionally staged into a separate
   * temporary HDFS dir; the schema is either read from the given .avsc location or
   * extracted from the first Avro data file and written to a temp file in HDFS.
   * Anything staged here is flagged for deletion after compaction.
   */
  private AvroExternalTable(AvroExternalTable.Builder builder) throws IOException {
    super(builder);
    if (builder.moveDataToTmpHdfsDir) {
      this.dataLocationInHdfs = moveDataFileToSeparateHdfsDir(builder.dataLocationInHdfs, builder.extensionToBeMoved);
      this.deleteDataAfterDone = true;
    } else {
      this.dataLocationInHdfs = builder.dataLocationInHdfs;
      this.deleteDataAfterDone = false;
    }
    if (StringUtils.isNotBlank(builder.schemaLocationInHdfs)) {
      this.schemaLocationInHdfs = builder.schemaLocationInHdfs;
      // Reassigns the attributes field inherited from HiveTable (which the super
      // constructor initialized from the builder) with the real Avro-derived schema.
      this.attributes = getAttributesFromAvroSchemaFile();
      this.deleteSchemaAfterDone = false;
    } else {
      Schema schema = getSchemaFromAvroDataFile();
      this.attributes = parseSchema(schema);
      this.schemaLocationInHdfs = writeSchemaToHdfs(schema);
      this.deleteSchemaAfterDone = true;
    }
  }
  /** Parses the .avsc file at the configured schema location into Hive attributes. */
  private List<HiveAttribute> getAttributesFromAvroSchemaFile() throws IOException {
    try (InputStream schemaInputStream = new HdfsReader(this.schemaLocationInHdfs).getInputStream()) {
      Schema schema = new Schema.Parser().parse(schemaInputStream);
      return parseSchema(schema);
    }
  }
private Schema getSchemaFromAvroDataFile() throws IOException {
String firstDataFilePath = HdfsReader.getFirstDataFilePathInDir(this.dataLocationInHdfs);
LOG.info("Extracting schema for table " + this.name + " from avro data file " + firstDataFilePath);
SeekableInput sin = new HdfsReader(firstDataFilePath).getFsInput();
try (DataFileReader<Void> dfr = new DataFileReader<>(sin, new GenericDatumReader<Void>())) {
Schema schema = dfr.getSchema();
return schema;
}
}
  /**
   * Writes {@code schema} (pretty-printed) to a randomly-named .avsc file under the configured
   * tmp schema directory — defaulting to the parent directory of the data location — and
   * returns the file's path.
   */
  private String writeSchemaToHdfs(Schema schema) throws IOException {
    String defaultTmpSchemaDir = getParentDir(this.dataLocationInHdfs);
    String tmpSchemaDir = CompactionRunner.jobProperties.getProperty(HIVE_TMPSCHEMA_DIR, defaultTmpSchemaDir);
    tmpSchemaDir = addSlash(tmpSchemaDir);
    // Random name avoids collisions between concurrent jobs writing to the same tmp dir.
    String tmpSchemaPath = tmpSchemaDir + UUID.randomUUID().toString() + ".avsc";
    // NOTE(review): the HdfsWriter is never visibly closed here; presumably write() closes the
    // underlying stream itself -- confirm against HdfsWriter's implementation.
    HdfsWriter writer = new HdfsWriter(tmpSchemaPath);
    LOG.info("writing schema to HDFS location " + tmpSchemaPath);
    writer.write(schema.toString(true));
    return tmpSchemaPath;
  }
private static String getParentDir(String filePathInHdfs) {
return new Path(filePathInHdfs).getParent().toString();
}
private static List<HiveAttribute> parseSchema(Schema schema) {
List<HiveAttribute> attributes = new ArrayList<>();
List<Schema.Field> fields = schema.getFields();
for (Schema.Field field : fields) {
attributes.add(convertAvroSchemaFieldToHiveAttribute(field));
}
return attributes;
}
  /**
   * Maps a single Avro schema field to a {@link HiveAttribute}. Union types are first reduced
   * to their first non-null member type via {@link #extractAvroTypeFromUnion(Schema.Field)}.
   *
   * @throws RuntimeException if the (possibly union-reduced) Avro type has no Hive mapping
   */
  private static HiveAttribute convertAvroSchemaFieldToHiveAttribute(Schema.Field field) {
    String avroFieldType = field.schema().getType().toString();
    if (avroFieldType.equalsIgnoreCase("UNION")) {
      avroFieldType = extractAvroTypeFromUnion(field);
    }
    // NOTE(review): fromAvroType is resolved twice (here and in the return statement); caching
    // the result in a typed local would avoid the duplicate lookup -- its return type is not
    // visible from this file, so left as-is.
    if (HiveAttribute.fromAvroType(avroFieldType) == null) {
      throw new RuntimeException("Hive does not support attribute type '" + avroFieldType + "'");
    }
    return new HiveAttribute(field.name(), HiveAttribute.fromAvroType(avroFieldType));
  }
private static String extractAvroTypeFromUnion(Schema.Field field) {
if (field.schema().getTypes().size() >= 3) {
LOG.warn("Avro schema field " + field.name() + " has 3 or more types: using the first non-null type");
}
for (Schema schema : field.schema().getTypes()) {
if (!schema.getType().toString().equalsIgnoreCase("NULL")) {
return schema.getType().toString();
}
}
String message =
"Avro schema field " + field.name() + " is a union, but it does not contain a non-null field type.";
LOG.error(message);
throw new RuntimeException(message);
}
  /** Returns the HDFS directory containing this table's Avro data files. */
  public String getDataLocationInHdfs() {
    return this.dataLocationInHdfs;
  }
  /** Returns the HDFS path of this table's .avsc schema file. */
  public String getSchemaLocationInHdfs() {
    return this.schemaLocationInHdfs;
  }
  /**
   * Drops any existing table with the job-scoped name, then (re)creates the external Avro
   * table pointing at this table's data location, with 'avro.schema.url' set to the schema file
   * (both prefixed with the HDFS URI).
   */
  @Override
  public void createTable(HiveJdbcConnector conn, String jobID) throws SQLException {
    String tableName = getNameWithJobId(jobID);
    String dropTableStmt = String.format(DROP_TABLE_STMT, tableName);
    String hdfsUri = HdfsIO.getHdfsUri();
    String createTableStmt = String.format(CREATE_TABLE_STMT, tableName, hdfsUri + this.dataLocationInHdfs,
        hdfsUri + this.schemaLocationInHdfs);
    conn.executeStatements(dropTableStmt, createTableStmt);
  }
  /**
   * If {@code table} carries columns this table lacks, delegates to a {@link HiveManagedTable}
   * built from this table's name/keys/attributes to add them; otherwise returns {@code this}.
   */
  @Override
  public HiveTable addNewColumnsInSchema(HiveJdbcConnector conn, HiveTable table, String jobId) throws SQLException {
    if (hasNoNewColumn(table)) {
      return this;
    }
    HiveManagedTable managedTable = new HiveManagedTable.Builder().withName(this.name).withPrimaryKeys(this.primaryKeys)
        .withAttributes(this.attributes).build();
    // NOTE(review): the 'conn' parameter is ignored and null is passed down; presumably the
    // managed table obtains its own connection -- confirm against HiveManagedTable's impl.
    return managedTable.addNewColumnsInSchema(null, table, jobId);
  }
protected void deleteTmpFilesIfNeeded() throws IllegalArgumentException, IOException {
if (this.deleteSchemaAfterDone) {
new HdfsWriter(this.schemaLocationInHdfs).delete();
}
if (this.deleteDataAfterDone) {
new HdfsWriter(this.dataLocationInHdfs).delete();
}
}
  /**
   * Moves data files ending with {@code extension} from {@code sourceDir} into a fresh,
   * randomly-named directory under the configured tmp data dir, and returns that directory.
   */
  private String moveDataFileToSeparateHdfsDir(String sourceDir, String extension) throws IOException {
    String parentDir = CompactionRunner.jobProperties.getProperty(HIVE_TMPDATA_DIR, HIVE_TMPDATA_DIR_DEFAULT);
    parentDir = addSlash(parentDir);
    // Random directory name avoids collisions between concurrent jobs.
    String destination = parentDir + UUID.randomUUID().toString();
    LOG.info("Moving data file of table " + this.getName() + " to " + destination);
    HdfsWriter.moveSelectFiles(extension, sourceDir, destination);
    LOG.info("Moved data file of table " + this.getName() + " to " + destination);
    return destination;
  }
private static String addSlash(String dir) {
if (!dir.endsWith("/") && !dir.endsWith("\\")) {
return dir + "/";
}
return dir;
}
public boolean hasSamePrimaryKey(AvroExternalTable other) {
return this.primaryKeys.containsAll(other.primaryKeys) && other.primaryKeys.containsAll(this.primaryKeys);
}
}
| 1,839 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/hive | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/hive/registration/HiveRegistrationCompactorListener.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.hive.registration;
import java.util.Properties;
import org.apache.gobblin.compaction.dataset.Dataset;
import org.apache.gobblin.compaction.listeners.CompactorListener;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.hive.HiveRegister;
import org.apache.gobblin.hive.policy.HiveRegistrationPolicy;
import org.apache.gobblin.hive.policy.HiveRegistrationPolicyBase;
import org.apache.gobblin.hive.spec.HiveSpec;
/**
 * A {@link CompactorListener} that registers a compacted dataset's output path with Hive
 * once the dataset's compaction completes.
 */
public class HiveRegistrationCompactorListener implements CompactorListener {

  private final HiveRegister hiveRegister;
  private final HiveRegistrationPolicy hiveRegistrationPolicy;

  public HiveRegistrationCompactorListener(Properties properties) {
    State listenerState = new State(properties);
    this.hiveRegister = HiveRegister.get(listenerState);
    this.hiveRegistrationPolicy = HiveRegistrationPolicyBase.getPolicy(listenerState);
  }

  /** Registers every hive spec the policy derives from the dataset's output path. */
  @Override
  public void onDatasetCompactionCompletion(Dataset dataset) throws Exception {
    for (HiveSpec hiveSpec : this.hiveRegistrationPolicy.getHiveSpecs(dataset.outputPath())) {
      this.hiveRegister.register(hiveSpec);
    }
  }
}
| 1,840 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/suite/CompactionSuiteUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.suite;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.util.ClassAliasResolver;
/**
* A utility class for {@link CompactionSuite}
*/
public class CompactionSuiteUtils {
  /**
   * Return an {@link CompactionSuiteFactory} based on the configuration.
   * The factory class name is read from {@link ConfigurationKeys#COMPACTION_SUITE_FACTORY}
   * (default {@link ConfigurationKeys#DEFAULT_COMPACTION_SUITE_FACTORY}), resolved through a
   * {@link ClassAliasResolver}, and instantiated via its no-arg constructor.
   *
   * @return A concrete suite factory instance. By default {@link CompactionSuiteBaseFactory} is used.
   * @throws IllegalArgumentException if the class cannot be resolved or instantiated
   */
  public static CompactionSuiteFactory getCompactionSuiteFactory(State state) {
    try {
      String factoryName =
          state.getProp(ConfigurationKeys.COMPACTION_SUITE_FACTORY, ConfigurationKeys.DEFAULT_COMPACTION_SUITE_FACTORY);
      ClassAliasResolver<CompactionSuiteFactory> conditionClassAliasResolver =
          new ClassAliasResolver<>(CompactionSuiteFactory.class);
      // getDeclaredConstructor().newInstance() replaces the deprecated Class.newInstance(),
      // which could sneakily propagate checked exceptions thrown by the factory constructor.
      // ReflectiveOperationException covers every reflective failure caught previously plus
      // NoSuchMethodException/InvocationTargetException introduced by the constructor lookup.
      return conditionClassAliasResolver.resolveClass(factoryName).getDeclaredConstructor().newInstance();
    } catch (ReflectiveOperationException e) {
      throw new IllegalArgumentException(e);
    }
  }
}
| 1,841 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/suite/CompactionSuiteFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.suite;
import org.apache.gobblin.configuration.State;
/**
* Build {@link CompactionSuite} for a job execution
*/
public interface CompactionSuiteFactory {
  /**
   * Creates a {@link CompactionSuite} configured from the given job {@link State}.
   */
  CompactionSuite createSuite (State state);
}
| 1,842 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/suite/CompactionSuiteBaseWithConfigurableCompleteAction.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.suite;
import com.google.common.base.Preconditions;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.gobblin.compaction.action.CompactionCompleteAction;
import org.apache.gobblin.compaction.verify.InputRecordCountHelper;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.dataset.FileSystemDataset;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
/**
* Compaction suite with configurable complete actions
*/
public class CompactionSuiteBaseWithConfigurableCompleteAction extends CompactionSuiteBase {

  private final static String COMPACTION_COMPLETE_ACTIONS = "compaction.complete.actions";

  /**
   * Constructor
   */
  public CompactionSuiteBaseWithConfigurableCompleteAction(State state) {
    super(state);
  }

  /**
   * Builds the post-compaction action list from the class names configured under
   * {@code compaction.complete.actions}. Each class is instantiated reflectively through its
   * longest matching constructor, offered the job state, the job configurator, and a fresh
   * {@link InputRecordCountHelper}.
   *
   * @return A list of {@link CompactionCompleteAction}s which needs to be executed after
   *         map-reduce is done.
   * @throws IOException if any configured action class cannot be loaded or constructed
   */
  @Override
  public List<CompactionCompleteAction<FileSystemDataset>> getCompactionCompleteActions() throws IOException {
    Preconditions.checkArgument(state.contains(COMPACTION_COMPLETE_ACTIONS));
    ArrayList<CompactionCompleteAction<FileSystemDataset>> actions = new ArrayList<>();
    try {
      for (String actionClassName : state.getPropAsList(COMPACTION_COMPLETE_ACTIONS)) {
        CompactionCompleteAction<FileSystemDataset> action =
            (CompactionCompleteAction<FileSystemDataset>) GobblinConstructorUtils.invokeLongestConstructor(
                Class.forName(actionClassName), state, getConfigurator(), new InputRecordCountHelper(state));
        actions.add(action);
      }
    } catch (ReflectiveOperationException e) {
      throw new IOException(e);
    }
    return actions;
  }
}
| 1,843 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/suite/CompactionSuiteBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.suite;
import com.google.gson.Gson;
import java.io.IOException;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.compaction.action.CompactionCompleteAction;
import org.apache.gobblin.compaction.action.CompactionCompleteFileOperationAction;
import org.apache.gobblin.compaction.action.CompactionHiveRegistrationAction;
import org.apache.gobblin.compaction.action.CompactionMarkDirectoryAction;
import org.apache.gobblin.compaction.mapreduce.CompactionJobConfigurator;
import org.apache.gobblin.compaction.verify.CompactionAuditCountVerifier;
import org.apache.gobblin.compaction.verify.CompactionThresholdVerifier;
import org.apache.gobblin.compaction.verify.CompactionTimeRangeVerifier;
import org.apache.gobblin.compaction.verify.CompactionVerifier;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.dataset.FileSystemDataset;
import org.apache.gobblin.util.io.GsonInterfaceAdapter;
import org.apache.hadoop.mapreduce.Job;
/**
* A type of {@link CompactionSuite} which implements all components needed for file compaction.
* The format-specific implementation is contained in the impl. of {@link CompactionJobConfigurator}
*/
@Slf4j
@Slf4j
public class CompactionSuiteBase implements CompactionSuite<FileSystemDataset> {

  protected State state;

  /**
   * Require lazy evaluation for now to support feature in
   * {@link org.apache.gobblin.compaction.source.CompactionSource#optionalInit(SourceState)}.
   * Declared volatile so the double-checked locking in {@link #getConfigurator()} publishes
   * the instance safely to threads other than the one that created it.
   */
  private volatile CompactionJobConfigurator configurator;

  private static final Gson GSON = GsonInterfaceAdapter.getGson(FileSystemDataset.class);
  private static final String SERIALIZED_DATASET = "compaction.serializedDataset";

  /**
   * Constructor
   */
  public CompactionSuiteBase(State state) {
    this.state = state;
  }

  /**
   * Implementation of {@link CompactionSuite#getDatasetsFinderVerifiers()}
   * @return A list of {@link CompactionVerifier} instances which will be verified after
   *         {@link FileSystemDataset} is found but before a {@link org.apache.gobblin.source.workunit.WorkUnit}
   *         is created.
   */
  public List<CompactionVerifier<FileSystemDataset>> getDatasetsFinderVerifiers() {
    List<CompactionVerifier<FileSystemDataset>> list = new LinkedList<>();
    list.add(new CompactionTimeRangeVerifier(state));
    list.add(new CompactionThresholdVerifier(state));
    list.add(new CompactionAuditCountVerifier(state));
    return list;
  }

  /**
   * Implementation of {@link CompactionSuite#getMapReduceVerifiers()}
   * @return A list of {@link CompactionVerifier} instances which will be verified before
   *         {@link org.apache.gobblin.compaction.mapreduce.MRCompactionTask} starts the map-reduce job
   *         (empty in this base implementation).
   */
  public List<CompactionVerifier<FileSystemDataset>> getMapReduceVerifiers() {
    return new ArrayList<>();
  }

  /**
   * Serialize a dataset {@link FileSystemDataset} to a {@link State}
   * @param dataset A dataset needs serialization
   * @param state A state that is used to save {@link org.apache.gobblin.dataset.Dataset}
   */
  public void save(FileSystemDataset dataset, State state) {
    state.setProp(SERIALIZED_DATASET, GSON.toJson(dataset));
  }

  /**
   * Deserialize a new {@link FileSystemDataset} from a given {@link State}
   *
   * @param state a type of {@link org.apache.gobblin.runtime.TaskState}
   * @return A new instance of {@link FileSystemDataset}
   */
  public FileSystemDataset load(final State state) {
    return GSON.fromJson(state.getProp(SERIALIZED_DATASET), FileSystemDataset.class);
  }

  /**
   * Some post actions are required after compaction job (map-reduce) is finished.
   *
   * @return A list of {@link CompactionCompleteAction}s which needs to be executed after
   *         map-reduce is done: file operations, hive registration, then directory marking.
   */
  public List<CompactionCompleteAction<FileSystemDataset>> getCompactionCompleteActions() throws IOException {
    ArrayList<CompactionCompleteAction<FileSystemDataset>> compactionCompleteActionsList = new ArrayList<>();
    compactionCompleteActionsList.add(new CompactionCompleteFileOperationAction(state, getConfigurator()));
    compactionCompleteActionsList.add(new CompactionHiveRegistrationAction(state));
    compactionCompleteActionsList.add(new CompactionMarkDirectoryAction(state, getConfigurator()));
    return compactionCompleteActionsList;
  }

  /**
   * Constructs a map-reduce job suitable for compaction. The detailed format-specific configuration
   * work is delegated to {@link CompactionJobConfigurator#createJob(FileSystemDataset)}
   *
   * @param dataset a top level input path which contains all files those need to be compacted
   * @return a map-reduce job which will compact files against {@link org.apache.gobblin.dataset.Dataset}
   */
  public Job createJob(FileSystemDataset dataset) throws IOException {
    return getConfigurator().createJob(dataset);
  }

  /**
   * Lazily creates the job configurator, at most once.
   * Uses proper double-checked locking: the second null check inside the synchronized block
   * prevents re-instantiation when two threads race past the first check (the previous code
   * could instantiate the configurator twice), and the volatile field guarantees safe
   * publication of the constructed instance.
   */
  protected CompactionJobConfigurator getConfigurator() {
    if (configurator == null) {
      synchronized (this) {
        if (configurator == null) {
          configurator = CompactionJobConfigurator.instantiateConfigurator(this.state);
        }
      }
    }
    return configurator;
  }
}
| 1,844 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/suite/CompactionWithWatermarkSuiteFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.suite;
import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.configuration.State;
@Alias("CompactionWithWatermarkSuiteFactory")
public class CompactionWithWatermarkSuiteFactory implements CompactionSuiteFactory {
public CompactionWithWatermarkSuite createSuite (State state) {
return new CompactionWithWatermarkSuite(state);
}
}
| 1,845 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/suite/CompactionWithWatermarkSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.suite;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
import org.apache.gobblin.compaction.action.CompactionCompleteAction;
import org.apache.gobblin.compaction.action.CompactionCompleteFileOperationAction;
import org.apache.gobblin.compaction.action.CompactionHiveRegistrationAction;
import org.apache.gobblin.compaction.action.CompactionMarkDirectoryAction;
import org.apache.gobblin.compaction.action.CompactionWatermarkAction;
import org.apache.gobblin.compaction.verify.CompactionThresholdVerifier;
import org.apache.gobblin.compaction.verify.CompactionTimeRangeVerifier;
import org.apache.gobblin.compaction.verify.CompactionVerifier;
import org.apache.gobblin.compaction.verify.CompactionWatermarkChecker;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.dataset.FileSystemDataset;
/**
* Compaction suite with watermark checking and publishing for file system dataset of
* path pattern, [path prefix]/[dataset name]/[partition prefix]/yyyy/MM/[dd/HH/mm], for
* example:
* <ul>
* <li> home/event1/hourly/2019/12/31 </li>
* <li> home/event2/hourly/2019/12/31/10 </li>
* <li> home/dbName/tableName/hourly/2019/12/31 </li>
* </ul>
*
* The watermarks are published to hive metastore
*/
public class CompactionWithWatermarkSuite extends CompactionSuiteBase {

  /**
   * Constructor
   * @param state job state used to configure verifiers and completion actions
   */
  public CompactionWithWatermarkSuite(State state) {
    super(state);
  }

  /**
   * Unlike the base suite, audit-count verification is omitted; only time-range and
   * threshold checks run during dataset discovery.
   */
  @Override
  public List<CompactionVerifier<FileSystemDataset>> getDatasetsFinderVerifiers() {
    List<CompactionVerifier<FileSystemDataset>> verifiers = new LinkedList<>();
    verifiers.add(new CompactionTimeRangeVerifier(state));
    verifiers.add(new CompactionThresholdVerifier(state));
    return verifiers;
  }

  /** Checks compaction watermarks right before the map-reduce job runs. */
  @Override
  public List<CompactionVerifier<FileSystemDataset>> getMapReduceVerifiers() {
    List<CompactionVerifier<FileSystemDataset>> verifiers = new LinkedList<>();
    verifiers.add(new CompactionWatermarkChecker(state));
    return verifiers;
  }

  /**
   * File operations, hive registration, watermark publishing (immediately after
   * registration), then directory marking — executed in that order.
   */
  @Override
  public List<CompactionCompleteAction<FileSystemDataset>> getCompactionCompleteActions() {
    ArrayList<CompactionCompleteAction<FileSystemDataset>> completeActions = new ArrayList<>();
    completeActions.add(new CompactionCompleteFileOperationAction(state, getConfigurator()));
    completeActions.add(new CompactionHiveRegistrationAction(state));
    // Publish compaction watermarks right after hive registration
    completeActions.add(new CompactionWatermarkAction(state));
    completeActions.add(new CompactionMarkDirectoryAction(state, getConfigurator()));
    return completeActions;
  }
}
| 1,846 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/suite/CompactionSuiteBaseWithConfigurableCompleteActionFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.suite;
import org.apache.gobblin.configuration.State;
public class CompactionSuiteBaseWithConfigurableCompleteActionFactory extends CompactionSuiteBaseFactory {
public CompactionSuiteBaseWithConfigurableCompleteAction createSuite(State state) {
return new CompactionSuiteBaseWithConfigurableCompleteAction(state);
}
}
| 1,847 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/suite/CompactionSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.suite;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.mapreduce.Job;
import org.apache.gobblin.compaction.action.CompactionCompleteAction;
import org.apache.gobblin.compaction.mapreduce.MRCompactionTask;
import org.apache.gobblin.compaction.verify.CompactionVerifier;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.data.management.copy.replication.ConfigBasedDatasetsFinder;
import org.apache.gobblin.dataset.Dataset;
/**
* This interface provides major components required by {@link org.apache.gobblin.compaction.source.CompactionSource}
* and {@link org.apache.gobblin.compaction.mapreduce.MRCompactionTask} flow.
*
* User needs to implement {@link #createJob(Dataset)} method to create a customized map-reduce job.
* Two types of {@link CompactionVerifier}s should be provided. One is to verify datasets returned by
* {@link ConfigBasedDatasetsFinder#findDatasets()}. The other is to verify datasets before we run MR
* job inside {@link org.apache.gobblin.compaction.mapreduce.MRCompactionTask}
*
* The class also handles how to create a map-reduce job and how to serialized and deserialize a {@link Dataset}
* to and from a {@link org.apache.gobblin.source.workunit.WorkUnit} properly.
*
* CompactionSuite should only be aware of verification methods and different definition of datasets,
* but unaware of data format, which is handled by different implementation of
* {@link org.apache.gobblin.compaction.mapreduce.CompactionJobConfigurator}
*/
public interface CompactionSuite<D extends Dataset> {
  /**
   * Deserialize and create a new dataset from existing state
   *
   * @param state a state previously populated by {@link #save(Dataset, State)}
   * @return the reconstructed dataset
   */
  D load (State state);
  /**
   * Serialize an existing dataset to a state
   *
   * @param dataset the dataset to persist
   * @param state the state (e.g. a work unit) to write the serialized form into
   */
  void save (D dataset, State state);
  /**
   * Get a list of verifiers for each dataset validation.
   * Verifiers are executed by {@link org.apache.gobblin.compaction.source.CompactionSource#getWorkunits(SourceState)}
   */
  List<CompactionVerifier<D>> getDatasetsFinderVerifiers();
  /**
   * Get a list of verifiers for each dataset validation.
   * Verifiers are executed by {@link MRCompactionTask#run()}
   */
  List<CompactionVerifier<D>> getMapReduceVerifiers();
  /**
   * Map-reduce job creation
   *
   * @param dataset the dataset whose files the job will compact
   * @return a configured, not-yet-submitted map-reduce {@link Job}
   */
  Job createJob(D dataset) throws IOException;
  /**
   * Get a list of completion actions after compaction is finished. Actions are listed in order
   */
  List<CompactionCompleteAction<D>> getCompactionCompleteActions() throws IOException;
}
| 1,848 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/suite/CompactionSuiteBaseFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.suite;
import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.configuration.State;
/**
* A {@link CompactionSuiteFactory} that handles {@link CompactionSuiteBase} creation logic.
*/
@Alias("CompactionSuiteBaseFactory")
public class CompactionSuiteBaseFactory implements CompactionSuiteFactory {
public CompactionSuiteBase createSuite (State state) {
return new CompactionSuiteBase(state);
}
} | 1,849 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce/MRCompactor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.mapreduce;
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.net.URI;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.PriorityBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.regex.Pattern;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Functions;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Stopwatch;
import com.google.common.base.Throwables;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.google.common.io.Closer;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import org.apache.gobblin.compaction.Compactor;
import org.apache.gobblin.compaction.dataset.Dataset;
import org.apache.gobblin.compaction.dataset.DatasetsFinder;
import org.apache.gobblin.compaction.dataset.TimeBasedSubDirDatasetsFinder;
import org.apache.gobblin.compaction.event.CompactionSlaEventHelper;
import org.apache.gobblin.compaction.listeners.CompactorCompletionListener;
import org.apache.gobblin.compaction.listeners.CompactorCompletionListenerFactory;
import org.apache.gobblin.compaction.listeners.CompactorListener;
import org.apache.gobblin.compaction.verify.DataCompletenessVerifier;
import org.apache.gobblin.compaction.verify.DataCompletenessVerifier.Results;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.metrics.GobblinMetrics;
import org.apache.gobblin.metrics.MetricReporterException;
import org.apache.gobblin.metrics.MultiReporterException;
import org.apache.gobblin.metrics.Tag;
import org.apache.gobblin.metrics.event.EventSubmitter;
import org.apache.gobblin.util.ClassAliasResolver;
import org.apache.gobblin.util.ClusterNameTags;
import org.apache.gobblin.util.DatasetFilterUtils;
import org.apache.gobblin.util.ExecutorsUtils;
import org.apache.gobblin.util.FileListUtils;
import org.apache.gobblin.util.HadoopUtils;
import org.apache.gobblin.util.recordcount.CompactionRecordCountProvider;
import org.apache.gobblin.util.recordcount.IngestionRecordCountProvider;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
import static org.apache.gobblin.compaction.dataset.Dataset.DatasetState.COMPACTION_COMPLETE;
import static org.apache.gobblin.compaction.dataset.Dataset.DatasetState.GIVEN_UP;
import static org.apache.gobblin.compaction.dataset.Dataset.DatasetState.UNVERIFIED;
import static org.apache.gobblin.compaction.dataset.Dataset.DatasetState.VERIFIED;
import static org.apache.gobblin.compaction.mapreduce.MRCompactorJobRunner.Status.ABORTED;
import static org.apache.gobblin.compaction.mapreduce.MRCompactorJobRunner.Status.COMMITTED;
/**
* MapReduce-based {@link org.apache.gobblin.compaction.Compactor}. Compaction will run on each qualified {@link Dataset}
* under {@link #COMPACTION_INPUT_DIR}.
*
* @author Ziyang Liu
* @deprecated Please use {@link org.apache.gobblin.compaction.mapreduce.MRCompactionTask}
* and {@link org.apache.gobblin.compaction.source.CompactionSource} to launch MR instead.
* The new way enjoys simpler logic to trigger the compaction flow and more reliable verification criteria,
* instead of using timestamp only before.
*/
@Deprecated
public class MRCompactor implements Compactor {
private static final Logger LOG = LoggerFactory.getLogger(MRCompactor.class);
// Common prefix shared by every compaction configuration key defined below.
public static final String COMPACTION_PREFIX = "compaction.";
/**
* Basic compaction properties.
*/
public static final String COMPACTION_THREAD_POOL_SIZE = COMPACTION_PREFIX + "thread.pool.size";
public static final int DEFAULT_COMPACTION_THREAD_POOL_SIZE = 30;
public static final String COMPACTION_INPUT_DIR = COMPACTION_PREFIX + "input.dir";
// The subdir name of input dataset paths, e.g., "hourly" in "/data/input/PasswordChangeEvent/hourly/2015/09/06".
public static final String COMPACTION_INPUT_SUBDIR = COMPACTION_PREFIX + "input.subdir";
public static final String DEFAULT_COMPACTION_INPUT_SUBDIR = "hourly";
public static final String COMPACTION_DEST_DIR = COMPACTION_PREFIX + "dest.dir";
// The subdir name of output dataset paths, e.g., "daily" in "/data/input/PasswordChangeEvent/daily/2015/09/06".
public static final String COMPACTION_DEST_SUBDIR = COMPACTION_PREFIX + "dest.subdir";
public static final String DEFAULT_COMPACTION_DEST_SUBDIR = "daily";
// The output dir for compaction MR job, which will be moved to the final output dir for data publishing.
public static final String COMPACTION_TMP_DEST_DIR = COMPACTION_PREFIX + "tmp.dest.dir";
public static final String DEFAULT_COMPACTION_TMP_DEST_DIR = "/tmp/gobblin-compaction";
// Working directory for an individual compaction MR job.
public static final String COMPACTION_JOB_DIR = COMPACTION_PREFIX + "tmp.job.dir";
// Suffix of output directories holding late records.
public static final String COMPACTION_LATE_DIR_SUFFIX = "_late";
// Regex blacklist/whitelist selecting which datasets are compacted.
public static final String COMPACTION_BLACKLIST = COMPACTION_PREFIX + "blacklist";
public static final String COMPACTION_WHITELIST = COMPACTION_PREFIX + "whitelist";
// Topic lists used to assign scheduling priority to datasets.
public static final String COMPACTION_HIGH_PRIORITY_TOPICS = COMPACTION_PREFIX + "high.priority.topics";
public static final String COMPACTION_NORMAL_PRIORITY_TOPICS = COMPACTION_PREFIX + "normal.priority.topics";
// Fully-qualified class name of the MRCompactorJobRunner implementation to run per dataset.
public static final String COMPACTION_JOB_RUNNER_CLASS = COMPACTION_PREFIX + "job.runner.class";
public static final String DEFAULT_COMPACTION_JOB_RUNNER_CLASS =
"org.apache.gobblin.compaction.mapreduce.avro.MRCompactorAvroKeyDedupJobRunner";
// Timezone used for time-based input/output partitioning and timestamps.
public static final String COMPACTION_TIMEZONE = COMPACTION_PREFIX + "timezone";
public static final String DEFAULT_COMPACTION_TIMEZONE = ConfigurationKeys.PST_TIMEZONE_NAME;
// Optional URI of the file system to operate on; when absent the default Hadoop FS is used.
public static final String COMPACTION_FILE_SYSTEM_URI = COMPACTION_PREFIX + "file.system.uri";
// Overall timeout applied in submitCompactionJobsAndWaitForCompletion().
public static final String COMPACTION_MR_JOB_TIMEOUT_MINUTES = COMPACTION_PREFIX + "mr.job.timeout.minutes";
public static final long DEFAULT_COMPACTION_MR_JOB_TIMEOUT_MINUTES = Long.MAX_VALUE;
// Dataset finder to find datasets for compaction.
public static final String COMPACTION_DATASETS_FINDER = COMPACTION_PREFIX + "datasets.finder";
public static final String DEFAULT_COMPACTION_DATASETS_FINDER = TimeBasedSubDirDatasetsFinder.class.getName();
public static final String COMPACTION_DATASETS_MAX_COUNT = COMPACTION_PREFIX + "datasets.max.count";
// NOTE(review): "DEFUALT" is a typo, but this constant is public API; renaming would break external callers.
public static final double DEFUALT_COMPACTION_DATASETS_MAX_COUNT = 1000000000;
// Rename source directories as a compaction complete indication
// Compaction jobs using this completion mode can't share input sources
public static final String COMPACTION_RENAME_SOURCE_DIR_ENABLED = COMPACTION_PREFIX + "rename.source.dir.enabled";
public static final boolean DEFAULT_COMPACTION_RENAME_SOURCE_DIR_ENABLED = false;
public static final String COMPACTION_RENAME_SOURCE_DIR_SUFFIX = "_COMPLETE";
//The provider that provides event counts for the compaction input files.
public static final String COMPACTION_INPUT_RECORD_COUNT_PROVIDER = COMPACTION_PREFIX + "input.record.count.provider";
public static final String DEFAULT_COMPACTION_INPUT_RECORD_COUNT_PROVIDER =
IngestionRecordCountProvider.class.getName();
//The provider that provides event counts for the compaction output files.
public static final String COMPACTION_OUTPUT_RECORD_COUNT_PROVIDER =
COMPACTION_PREFIX + "output.record.count.provider";
public static final String DEFAULT_COMPACTION_OUTPUT_RECORD_COUNT_PROVIDER =
CompactionRecordCountProvider.class.getName();
// If a dataset has already been compacted and new (late) data is found, whether recompact this dataset.
public static final String COMPACTION_RECOMPACT_FROM_INPUT_FOR_LATE_DATA =
COMPACTION_PREFIX + "recompact.from.input.for.late.data";
public static final boolean DEFAULT_COMPACTION_RECOMPACT_FROM_INPUT_FOR_LATE_DATA = false;
// The threshold of new(late) data that will trigger recompaction per dataset.
// It follows the pattern DATASET_NAME_REGEX:THRESHOLD;DATASET_NAME_REGEX:THRESHOLD, e.g., A.*,B.*:0.2; C.*,D.*:0.3.
// Dataset names that match A.* or B.* will have threshold 0.2. Dataset names that match C.* or D.* will have threshold 0.3.
public static final String COMPACTION_LATEDATA_THRESHOLD_FOR_RECOMPACT_PER_DATASET =
COMPACTION_PREFIX + "latedata.threshold.for.recompact.per.topic";
public static final double DEFAULT_COMPACTION_LATEDATA_THRESHOLD_FOR_RECOMPACT_PER_DATASET = 1.0;
// The threshold of new (late) files that will trigger compaction per dataset.
// The trigger is based on the file numbers in the late output directory
public static final String COMPACTION_LATEDATA_THRESHOLD_FILE_NUM =
COMPACTION_PREFIX + "latedata.threshold.file.num";
public static final int DEFAULT_COMPACTION_LATEDATA_THRESHOLD_FILE_NUM = 1000;
// The threshold of new (late) files that will trigger compaction per dataset.
// The trigger is based on how long the file has been in the late output directory.
public static final String COMPACTION_LATEDATA_THRESHOLD_DURATION =
COMPACTION_PREFIX + "latedata.threshold.duration";
public static final String DEFAULT_COMPACTION_LATEDATA_THRESHOLD_DURATION = "24h";
// Alias of the recompact-condition implementation to use.
public static final String COMPACTION_RECOMPACT_CONDITION = COMPACTION_PREFIX + "recompact.condition";
public static final String DEFAULT_COMPACTION_RECOMPACT_CONDITION = "RecompactBasedOnRatio";
// Multiple recompact conditions and the boolean operation ("and"/"or") combining them.
public static final String COMPACTION_RECOMPACT_COMBINE_CONDITIONS = COMPACTION_PREFIX + "recompact.combine.conditions";
public static final String COMPACTION_RECOMPACT_COMBINE_CONDITIONS_OPERATION = COMPACTION_PREFIX + "recompact.combine.conditions.operation";
public static final String DEFAULT_COMPACTION_RECOMPACT_COMBINE_CONDITIONS_OPERATION = "or";
// NOTE(review): "LISTERNER" is a typo, but this constant is public API; renaming would break external callers.
public static final String COMPACTION_COMPLETE_LISTERNER = COMPACTION_PREFIX + "complete.listener";
public static final String DEFAULT_COMPACTION_COMPLETE_LISTERNER = "SimpleCompactorCompletionHook";
// Whether the input data for the compaction is deduplicated.
public static final String COMPACTION_INPUT_DEDUPLICATED = COMPACTION_PREFIX + "input.deduplicated";
public static final boolean DEFAULT_COMPACTION_INPUT_DEDUPLICATED = false;
// Whether the output of the compaction should be deduplicated.
public static final String COMPACTION_OUTPUT_DEDUPLICATED = COMPACTION_PREFIX + "output.deduplicated";
public static final boolean DEFAULT_COMPACTION_OUTPUT_DEDUPLICATED = true;
// Prefix of all data-completeness-verification keys below.
public static final String COMPACTION_COMPLETENESS_VERIFICATION_PREFIX =
COMPACTION_PREFIX + "completeness.verification.";
// Controls for recompacting from previously-published destination paths.
public static final String COMPACTION_RECOMPACT_FROM_DEST_PATHS = COMPACTION_PREFIX + "recompact.from.dest.paths";
public static final String COMPACTION_RECOMPACT_ALL_DATA = COMPACTION_PREFIX + "recompact.all.data";
public static final boolean DEFAULT_COMPACTION_RECOMPACT_FROM_DEST_PATHS = false;
public static final boolean DEFAULT_COMPACTION_RECOMPACT_ALL_DATA = true;
/**
* Configuration properties related to data completeness verification.
*/
public static final String COMPACTION_COMPLETENESS_VERIFICATION_BLACKLIST =
COMPACTION_COMPLETENESS_VERIFICATION_PREFIX + "blacklist";
public static final String COMPACTION_COMPLETENESS_VERIFICATION_WHITELIST =
COMPACTION_COMPLETENESS_VERIFICATION_PREFIX + "whitelist";
public static final String COMPACTION_VERIFICATION_TIMEOUT_MINUTES =
COMPACTION_COMPLETENESS_VERIFICATION_PREFIX + "timeout.minutes";
public static final long DEFAULT_COMPACTION_VERIFICATION_TIMEOUT_MINUTES = 30;
public static final String COMPACTION_COMPLETENESS_VERIFICATION_ENABLED =
COMPACTION_COMPLETENESS_VERIFICATION_PREFIX + "enabled";
public static final boolean DEFAULT_COMPACTION_COMPLETENESS_VERIFICATION_ENABLED = false;
// Number of datasets to be passed to DataCompletenessVerifier together. By passing multiple datasets together,
// some costs in DataCompletenessVerifier (e.g., submitting a SQL query) can be amortized.
public static final String COMPACTION_COMPLETENESS_VERIFICATION_NUM_DATASETS_VERIFIED_TOGETHER =
COMPACTION_COMPLETENESS_VERIFICATION_PREFIX + "num.datasets.verified.together";
public static final int DEFAULT_COMPACTION_COMPLETENESS_VERIFICATION_NUM_DATASETS_VERIFIED_TOGETHER = 10;
// Whether to compact and publish a dataset if its completeness cannot be verified.
public static final String COMPACTION_COMPLETENESS_VERIFICATION_PUBLISH_DATA_IF_CANNOT_VERIFY =
COMPACTION_COMPLETENESS_VERIFICATION_PREFIX + "publish.data.if.cannot.verify";
public static final boolean DEFAULT_COMPACTION_COMPLETENESS_VERIFICATION_PUBLISH_DATA_IF_CANNOT_VERIFY = false;
/**
* Compaction configuration properties used internally.
*/
public static final String COMPACTION_SHOULD_DEDUPLICATE = COMPACTION_PREFIX + "should.deduplicate";
public static final String COMPACTION_JOB_DEST_PARTITION = COMPACTION_PREFIX + "job.dest.partition";
public static final String COMPACTION_ENABLE_SUCCESS_FILE =
COMPACTION_PREFIX + "fileoutputcommitter.marksuccessfuljobs";
public static final String COMPACTION_JOB_LATE_DATA_MOVEMENT_TASK = COMPACTION_PREFIX + "job.late.data.movement.task";
public static final String COMPACTION_JOB_LATE_DATA_FILES = COMPACTION_PREFIX + "job.late.data.files";
// Marker file written to a dataset's output dir when compaction completes; holds the compaction timestamp.
public static final String COMPACTION_COMPLETE_FILE_NAME = "_COMPACTION_COMPLETE";
public static final String COMPACTION_LATE_FILES_DIRECTORY = "late";
// Location on the destination FS where dependency jars are staged (see copyDependencyJarsToHdfs()).
public static final String COMPACTION_JARS = COMPACTION_PREFIX + "jars";
public static final String COMPACTION_JAR_SUBDIR = "_gobblin_compaction_jars";
public static final String COMPACTION_TRACKING_EVENTS_NAMESPACE = COMPACTION_PREFIX + "tracking.events";
public static final String COMPACTION_INPUT_PATH_TIME = COMPACTION_PREFIX + "input.path.time";
public static final String COMPACTION_FILE_EXTENSION =
COMPACTION_PREFIX + "extension";
// Polling interval of the wait loop in submitCompactionJobsAndWaitForCompletion().
private static final long COMPACTION_JOB_WAIT_INTERVAL_SECONDS = 10;
// Registry of currently running MR jobs keyed by dataset; static so it is shared process-wide.
private static final Map<Dataset, Job> RUNNING_MR_JOBS = Maps.newConcurrentMap();
// All configuration passed to the constructor, wrapped in a Gobblin State.
private final State state;
// Metric tags attached to this run's metrics context.
private final List<? extends Tag<?>> tags;
private final Configuration conf;
// Temporary output directory shared by the compaction jobs of this run.
private final String tmpOutputDir;
private final FileSystem fs;
// Priority-ordered executor running MRCompactorJobRunner tasks.
private final JobRunnerExecutor jobExecutor;
// Datasets found by the configured DatasetsFinder; replaced with datasets-with-job-props during processing.
private final Set<Dataset> datasets;
// Maps each dataset to the job runner currently handling it.
private final Map<Dataset, MRCompactorJobRunner> jobRunnables;
private final Closer closer;
// Present only when data completeness verification is enabled.
private final Optional<DataCompletenessVerifier> verifier;
// Started at construction; drives both the verification and compaction timeouts.
private final Stopwatch stopwatch;
private final GobblinMetrics gobblinMetrics;
private final EventSubmitter eventSubmitter;
private final Optional<CompactorListener> compactorListener;
// NOTE(review): name misspells "initialize"; private, so safe to rename in a follow-up change.
private final DateTime initilizeTime;
private final long dataVerifTimeoutMinutes;
private final long compactionTimeoutMinutes;
private final boolean shouldVerifDataCompl;
private final boolean shouldPublishDataIfCannotVerifyCompl;
private final CompactorCompletionListener compactionCompleteListener;
/**
* Constructs an {@code MRCompactor}, discovering the datasets to compact and initializing
* metrics, event submission and (optionally) data completeness verification.
*
* @param props configuration properties for the whole compaction run
* @param tags metric tags to attach to the run's metrics context
* @param compactorListener optional listener notified of per-dataset compaction completion
* @throws IOException if the file system cannot be obtained
*/
public MRCompactor(Properties props, List<? extends Tag<?>> tags, Optional<CompactorListener> compactorListener)
throws IOException {
this.state = new State();
this.state.addAll(props);
this.initilizeTime = getCurrentTime();
this.tags = tags;
// Order matters below: conf is needed by getFileSystem(); state by everything else.
this.conf = HadoopUtils.getConfFromState(this.state);
this.tmpOutputDir = getTmpOutputDir();
this.fs = getFileSystem();
this.datasets = getDatasetsFinder().findDistinctDatasets();
this.jobExecutor = createJobExecutor();
this.jobRunnables = Maps.newConcurrentMap();
this.closer = Closer.create();
this.stopwatch = Stopwatch.createStarted();
this.gobblinMetrics = initializeMetrics();
this.eventSubmitter = new EventSubmitter.Builder(
GobblinMetrics.get(this.state.getProp(ConfigurationKeys.JOB_NAME_KEY)).getMetricContext(),
MRCompactor.COMPACTION_TRACKING_EVENTS_NAMESPACE).build();
this.compactorListener = compactorListener;
this.dataVerifTimeoutMinutes = getDataVerifTimeoutMinutes();
this.compactionTimeoutMinutes = getCompactionTimeoutMinutes();
this.shouldVerifDataCompl = shouldVerifyDataCompleteness();
this.compactionCompleteListener = getCompactionCompleteListener();
// The verifier is registered with the closer so it is closed when compact() finishes.
this.verifier =
this.shouldVerifDataCompl ? Optional.of(this.closer.register(new DataCompletenessVerifier(this.state)))
: Optional.<DataCompletenessVerifier> absent();
this.shouldPublishDataIfCannotVerifyCompl = shouldPublishDataIfCannotVerifyCompl();
}
/** Returns the time this compactor was constructed, in the configured compaction timezone. */
public DateTime getInitializeTime() {
return this.initilizeTime;
}
/** Returns the configured temporary output directory, defaulting to {@value #DEFAULT_COMPACTION_TMP_DEST_DIR}. */
private String getTmpOutputDir() {
return this.state.getProp(COMPACTION_TMP_DEST_DIR, DEFAULT_COMPACTION_TMP_DEST_DIR);
}
/**
* Resolves the {@link FileSystem} to operate on: the one named by
* {@link #COMPACTION_FILE_SYSTEM_URI} when configured, otherwise the default file system.
*
* @throws IOException if the file system cannot be obtained
*/
private FileSystem getFileSystem() throws IOException {
  if (!this.state.contains(COMPACTION_FILE_SYSTEM_URI)) {
    return FileSystem.get(this.conf);
  }
  URI fsUri = URI.create(this.state.getProp(COMPACTION_FILE_SYSTEM_URI));
  return FileSystem.get(fsUri, this.conf);
}
/**
* Instantiates the configured {@link DatasetsFinder} via its {@code (State)} constructor.
* Defaults to {@link TimeBasedSubDirDatasetsFinder}.
*
* @throws RuntimeException wrapping any reflection failure, with the offending class name
*/
private DatasetsFinder getDatasetsFinder() {
  String finderClassName = this.state.getProp(COMPACTION_DATASETS_FINDER, DEFAULT_COMPACTION_DATASETS_FINDER);
  try {
    return (DatasetsFinder) Class.forName(finderClassName).getConstructor(State.class).newInstance(this.state);
  } catch (Exception e) {
    // Fixed typo ("initiailize") and included the class name to make the failure diagnosable.
    throw new RuntimeException("Failed to initialize DatasetsFinder " + finderClassName, e);
  }
}
/** Returns the current time in the configured compaction timezone (default {@value #DEFAULT_COMPACTION_TIMEZONE}). */
private DateTime getCurrentTime() {
  String timeZoneId = this.state.getProp(MRCompactor.COMPACTION_TIMEZONE, MRCompactor.DEFAULT_COMPACTION_TIMEZONE);
  return new DateTime(DateTimeZone.forID(timeZoneId));
}
/**
* Creates the executor that runs compaction job runners. A priority queue is used so that
* higher-priority datasets are picked up first; the fixed pool never times out idle threads.
*/
private JobRunnerExecutor createJobExecutor() {
  int poolSize = getThreadPoolSize();
  BlockingQueue<Runnable> priorityQueue = new PriorityBlockingQueue<>();
  return new JobRunnerExecutor(poolSize, poolSize, Long.MAX_VALUE, TimeUnit.NANOSECONDS, priorityQueue);
}
/** Returns the configured job-runner thread pool size, defaulting to {@value #DEFAULT_COMPACTION_THREAD_POOL_SIZE}. */
private int getThreadPoolSize() {
return this.state.getPropAsInt(COMPACTION_THREAD_POOL_SIZE, DEFAULT_COMPACTION_THREAD_POOL_SIZE);
}
/**
* Builds the {@link GobblinMetrics} instance for this run, tagged with the caller-supplied tags
* plus cluster-name tags, and starts metric reporting. Reporter start-up failures are logged
* per-sink but do not abort the run.
*/
private GobblinMetrics initializeMetrics() {
ImmutableList.Builder<Tag<?>> tags = ImmutableList.builder();
tags.addAll(this.tags);
tags.addAll(Tag.fromMap(ClusterNameTags.getClusterNameTags()));
GobblinMetrics gobblinMetrics =
GobblinMetrics.get(this.state.getProp(ConfigurationKeys.JOB_NAME_KEY), null, tags.build());
try {
gobblinMetrics.startMetricReporting(this.state.getProperties());
} catch (MultiReporterException ex) {
// Best effort: report each failed sink/reporter but continue without it.
for (MetricReporterException e: ex.getExceptions()) {
LOG.error("Failed to start {} {} reporter.", e.getSinkType().name(), e.getReporterType().name(), e);
}
}
return gobblinMetrics;
}
/**
* Entry point: stages dependency jars, compacts every discovered dataset, then fails the run
* if any dataset compaction failed. Executors, the closer, staged jars and metric reporting
* are torn down in the nested finally blocks regardless of the outcome.
*/
@Override
public void compact() throws IOException {
try {
copyDependencyJarsToHdfs();
processDatasets();
throwExceptionsIfAnyDatasetCompactionFailed();
onCompactionCompletion();
} catch (Throwable t) {
// This throwable is logged here before propagated. Otherwise, if another throwable is thrown
// in the finally-block, this throwable may be suppressed.
LOG.error("Caught throwable during compaction", t);
throw Throwables.propagate(t);
} finally {
// Nested finally: jar cleanup and metrics shutdown must run even if shutdown/close throws.
try {
shutdownExecutors();
this.closer.close();
} finally {
deleteDependencyJars();
this.gobblinMetrics.stopMetricsReporting();
}
}
}
/**
* Resolves and instantiates the configured {@link CompactorCompletionListener} via its factory
* alias (default {@value #DEFAULT_COMPACTION_COMPLETE_LISTERNER}).
*
* @throws IllegalArgumentException if the factory class cannot be resolved or instantiated
*/
private CompactorCompletionListener getCompactionCompleteListener() {
  String listenerAlias = this.state.getProp(MRCompactor.COMPACTION_COMPLETE_LISTERNER,
      MRCompactor.DEFAULT_COMPACTION_COMPLETE_LISTERNER);
  ClassAliasResolver<CompactorCompletionListenerFactory> aliasResolver =
      new ClassAliasResolver<>(CompactorCompletionListenerFactory.class);
  try {
    CompactorCompletionListenerFactory factory = GobblinConstructorUtils.invokeFirstConstructor(
        aliasResolver.resolveClass(listenerAlias), ImmutableList.of());
    return factory.createCompactorCompactionListener(this.state);
  } catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException | InstantiationException
      | ClassNotFoundException e) {
    throw new IllegalArgumentException(e);
  }
}
/** Notifies the configured completion listener that the whole compaction run has finished. */
private void onCompactionCompletion() {
this.compactionCompleteListener.onCompactionCompletion(this);
}
/**
* Copy dependency jars from local fs to HDFS so MR jobs can add them to their classpath.
* The staging location ({@link #COMPACTION_JAR_SUBDIR} under the tmp output dir) is recorded
* in {@link #COMPACTION_JARS} and cleaned up by {@link #deleteDependencyJars()}.
*/
private void copyDependencyJarsToHdfs() throws IOException {
  if (!this.state.contains(ConfigurationKeys.JOB_JAR_FILES_KEY)) {
    return;
  }
  LocalFileSystem lfs = FileSystem.getLocal(this.conf);
  // Use the declared constant instead of repeating the "_gobblin_compaction_jars" literal.
  Path tmpJarFileDir = new Path(this.tmpOutputDir, COMPACTION_JAR_SUBDIR);
  this.state.setProp(COMPACTION_JARS, tmpJarFileDir.toString());
  // Start from a clean directory so stale jars from a previous run are not shipped.
  this.fs.delete(tmpJarFileDir, true);
  for (String jarFile : this.state.getPropAsList(ConfigurationKeys.JOB_JAR_FILES_KEY)) {
    for (FileStatus status : lfs.globStatus(new Path(jarFile))) {
      Path tmpJarFile = new Path(this.fs.makeQualified(tmpJarFileDir), status.getPath().getName());
      this.fs.copyFromLocalFile(status.getPath(), tmpJarFile);
      LOG.info(String.format("%s will be added to classpath", tmpJarFile));
    }
  }
}
/**
* Delete the dependency jars staged on HDFS by {@link #copyDependencyJarsToHdfs()} once the run is done.
*/
private void deleteDependencyJars() throws IllegalArgumentException, IOException {
  if (!this.state.contains(COMPACTION_JARS)) {
    return;
  }
  Path stagedJarDir = new Path(this.state.getProp(COMPACTION_JARS));
  this.fs.delete(stagedJarDir, true);
}
/** Creates job properties for every discovered dataset, then runs their compaction jobs. */
private void processDatasets() {
createJobPropsForDatasets();
processCompactionJobs();
}
/**
* Create compaction job properties for all {@link Dataset}s, replacing {@link #datasets}
* with the expanded set (one dataset per compaction job) returned by the prop creator.
*/
private void createJobPropsForDatasets() {
  final Set<Dataset> expandedDatasets = Sets.newHashSet();
  for (Dataset candidate : this.datasets) {
    expandedDatasets.addAll(createJobPropsForDataset(candidate));
  }
  this.datasets.clear();
  this.datasets.addAll(expandedDatasets);
}
/**
* Create compaction job properties for one {@link Dataset}. A single input dataset may expand
* into multiple datasets (one per compaction job). Any failure is recorded on a "failed job
* props" dataset instead of aborting the whole run.
*
* @param dataset the dataset to create job props for
* @return the datasets carrying job props, or a single failed-props dataset on error
*/
private List<Dataset> createJobPropsForDataset(Dataset dataset) {
  LOG.info("Creating compaction jobs for dataset " + dataset + " with priority " + dataset.priority());
  final MRCompactorJobPropCreator jobPropCreator = getJobPropCreator(dataset);
  try {
    return jobPropCreator.createJobProps();
  } catch (Throwable t) {
    // Skip this topic: attach the throwable to the dataset rather than failing the run.
    return ImmutableList.<Dataset> of(jobPropCreator.createFailedJobProps(t));
  }
}
/**
* Builds an {@link MRCompactorJobPropCreator} bound to the given dataset, this compactor's
* file system and state. Package-private for test access.
*/
MRCompactorJobPropCreator getJobPropCreator(Dataset dataset) {
  try {
    MRCompactorJobPropCreator.Builder builder = new MRCompactorJobPropCreator.Builder();
    return builder.withDataset(dataset).withFileSystem(this.fs).withState(this.state).build();
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
/**
* Returns the datasets handled by this compactor.
* NOTE(review): this exposes the internal mutable set; callers can modify compactor state.
* Left as-is since existing callers may rely on that, but consider an unmodifiable view.
*/
public Set<Dataset> getDatasets() {
return this.datasets;
}
/**
* Kicks off completeness verification (or marks every dataset verified when verification is
* disabled), then submits compaction jobs and blocks until all datasets reach a terminal state.
*/
private void processCompactionJobs() {
if (this.shouldVerifDataCompl) {
verifyDataCompleteness();
} else {
setAllDatasetStatesToVerified();
}
this.submitCompactionJobsAndWaitForCompletion();
}
/** Whether data completeness verification is enabled (default {@value #DEFAULT_COMPACTION_COMPLETENESS_VERIFICATION_ENABLED}). */
private boolean shouldVerifyDataCompleteness() {
return this.state.getPropAsBoolean(COMPACTION_COMPLETENESS_VERIFICATION_ENABLED,
DEFAULT_COMPACTION_COMPLETENESS_VERIFICATION_ENABLED);
}
/**
* Submits completeness verification for every UNVERIFIED dataset that matches the verification
* blacklist/whitelist and is not already compacted. Datasets are batched
* ({@link #getNumDatasetsVerifiedTogether()} per batch) so verification cost is amortized;
* results are handled asynchronously in {@link #addCallback}.
*/
private void verifyDataCompleteness() {
List<Pattern> blacklist =
DatasetFilterUtils.getPatternList(this.state, COMPACTION_COMPLETENESS_VERIFICATION_BLACKLIST);
List<Pattern> whitelist =
DatasetFilterUtils.getPatternList(this.state, COMPACTION_COMPLETENESS_VERIFICATION_WHITELIST);
int numDatasetsVerifiedTogether = getNumDatasetsVerifiedTogether();
List<Dataset> datasetsToBeVerified = Lists.newArrayList();
for (Dataset dataset : this.datasets) {
if (dataset.state() != UNVERIFIED) {
continue;
}
if (shouldVerifyCompletenessForDataset(dataset, blacklist, whitelist)) {
datasetsToBeVerified.add(dataset);
// Flush a full batch to the verifier and start a new one.
if (datasetsToBeVerified.size() >= numDatasetsVerifiedTogether) {
ListenableFuture<Results> future = this.verifier.get().verify(datasetsToBeVerified);
addCallback(datasetsToBeVerified, future);
datasetsToBeVerified = Lists.newArrayList();
}
} else {
// Dataset is excluded from verification: treat it as verified so it can be compacted.
dataset.setState(VERIFIED);
}
}
// Flush the final partial batch, if any.
if (!datasetsToBeVerified.isEmpty()) {
ListenableFuture<Results> future = this.verifier.get().verify(datasetsToBeVerified);
addCallback(datasetsToBeVerified, future);
}
}
/**
* A {@link Dataset} should be verified if it is not already compacted and it survives the
* verification blacklist/whitelist.
*/
private boolean shouldVerifyCompletenessForDataset(Dataset dataset, List<Pattern> blacklist,
    List<Pattern> whitelist) {
  boolean renamingRequired = this.state.getPropAsBoolean(COMPACTION_RENAME_SOURCE_DIR_ENABLED,
      DEFAULT_COMPACTION_RENAME_SOURCE_DIR_ENABLED);
  LOG.info("Should verify completeness with renaming source dir : " + renamingRequired);
  if (datasetAlreadyCompacted(this.fs, dataset, renamingRequired)) {
    return false;
  }
  return DatasetFilterUtils.survived(dataset.getName(), blacklist, whitelist);
}
/**
* Get all the renamed directories from the given paths: the deepest-level containing
* directories whose name ends with {@link MRCompactor#COMPACTION_RENAME_SOURCE_DIR_SUFFIX}.
* Because only parents of listed files are collected, empty directories are excluded.
*/
public static Set<Path> getDeepestLevelRenamedDirsWithFileExistence(FileSystem fs, Set<Path> paths)
    throws IOException {
  Set<Path> renamedDirs = Sets.newHashSet();
  for (FileStatus fileStatus : FileListUtils.listFilesRecursively(fs, paths)) {
    Path containingDir = fileStatus.getPath().getParent();
    if (containingDir.toString().endsWith(MRCompactor.COMPACTION_RENAME_SOURCE_DIR_SUFFIX)) {
      renamedDirs.add(containingDir);
    }
  }
  return renamedDirs;
}
/**
* Get all the unrenamed directories from the given paths: the deepest-level containing
* directories whose name does NOT end with {@link MRCompactor#COMPACTION_RENAME_SOURCE_DIR_SUFFIX}.
* Because only parents of listed files are collected, empty directories are excluded.
*/
public static Set<Path> getDeepestLevelUnrenamedDirsWithFileExistence(FileSystem fs, Set<Path> paths)
    throws IOException {
  Set<Path> unrenamedDirs = Sets.newHashSet();
  for (FileStatus fileStatus : FileListUtils.listFilesRecursively(fs, paths)) {
    Path containingDir = fileStatus.getPath().getParent();
    if (!containingDir.toString().endsWith(MRCompactor.COMPACTION_RENAME_SOURCE_DIR_SUFFIX)) {
      unrenamedDirs.add(containingDir);
    }
  }
  return unrenamedDirs;
}
/**
* Rename all the source directories of a dataset by appending
* {@link MRCompactor#COMPACTION_RENAME_SOURCE_DIR_SUFFIX}, marking the compaction complete.
* Best-effort: failures are logged, never thrown.
*/
public static void renameSourceDirAsCompactionComplete(FileSystem fs, Dataset dataset) {
  try {
    for (Path path : dataset.getRenamePaths()) {
      Path newPath = new Path(path.getParent(), path.getName() + MRCompactor.COMPACTION_RENAME_SOURCE_DIR_SUFFIX);
      LOG.info("[{}] Renaming {} to {}", dataset.getDatasetName(), path, newPath);
      // FileSystem#rename signals failure through its boolean return value, not an exception;
      // the original code dropped that result, hiding failed renames.
      if (!fs.rename(path, newPath)) {
        LOG.error("[{}] Failed to rename {} to {}", dataset.getDatasetName(), path, newPath);
      }
    }
  } catch (Exception e) {
    LOG.error ("Rename input path failed", e);
  }
}
/**
* A {@link Dataset} is considered already compacted if either condition is true:
* 1) When the completion-file strategy is used, a file named
* {@link MRCompactor#COMPACTION_COMPLETE_FILE_NAME} exists in its {@link Dataset#outputPath()}.
* 2) When the rename-source-directory strategy is used, at least one directory under
* {@link Dataset#inputPaths()} carries the {@link MRCompactor#COMPACTION_RENAME_SOURCE_DIR_SUFFIX}.
*/
public static boolean datasetAlreadyCompacted(FileSystem fs, Dataset dataset, boolean renameSourceEnable) {
  return renameSourceEnable
      ? checkAlreadyCompactedBasedOnSourceDirName(fs, dataset)
      : checkAlreadyCompactedBasedOnCompletionFile(fs, dataset);
}
/**
* Rename-source strategy: the dataset counts as compacted when at least one of its input
* directories has already been renamed with
* {@link MRCompactor#COMPACTION_RENAME_SOURCE_DIR_SUFFIX}. Listing errors are treated as
* "not compacted" after being logged.
*/
private static boolean checkAlreadyCompactedBasedOnSourceDirName(FileSystem fs, Dataset dataset) {
  try {
    return !getDeepestLevelRenamedDirsWithFileExistence(fs, dataset.inputPaths()).isEmpty();
  } catch (IOException e) {
    LOG.error("Failed to get deepest directories from source", e);
    return false;
  }
}
/**
* Completion-file strategy: the dataset counts as compacted when a file named
* {@link MRCompactor#COMPACTION_COMPLETE_FILE_NAME} exists in its {@link Dataset#outputPath()}.
* Existence-check errors are treated as "not compacted" after being logged.
*/
private static boolean checkAlreadyCompactedBasedOnCompletionFile(FileSystem fs, Dataset dataset) {
  Path completionFile = new Path(dataset.outputPath(), MRCompactor.COMPACTION_COMPLETE_FILE_NAME);
  try {
    return fs.exists(completionFile);
  } catch (IOException e) {
    LOG.error("Failed to verify the existence of file " + completionFile, e);
    return false;
  }
}
/**
* Reads the compaction timestamp stored as a single long in the
* {@link #COMPACTION_COMPLETE_FILE_NAME} marker under the given output path.
*
* @throws IOException if the marker file is missing or unreadable
*/
public static long readCompactionTimestamp(FileSystem fs, Path compactionOutputPath) throws IOException {
  Path markerFile = new Path(compactionOutputPath, COMPACTION_COMPLETE_FILE_NAME);
  try (FSDataInputStream in = fs.open(markerFile)) {
    return in.readLong();
  }
}
/**
* Attaches a callback to a batch verification future. Passed datasets are marked VERIFIED and
* their job runners unblocked; failed datasets are re-verified until
* {@link #shouldGiveUpVerification()} times out, at which point they are marked GIVEN_UP.
*/
private void addCallback(final List<Dataset> datasetsToBeVerified, ListenableFuture<Results> future) {
  Futures.addCallback(future, new FutureCallback<Results>() {
    /**
     * On success, resubmit verification for the {@link Dataset}s that should be resubmitted
     * (i.e., verification didn't pass and it didn't timeout).
     */
    @Override
    public void onSuccess(Results results) {
      List<Dataset> datasetsToBeVerifiedAgain = Lists.newArrayList();
      for (Results.Result result : results) {
        Optional<MRCompactorJobRunner> jobRunner =
            Optional.fromNullable(MRCompactor.this.jobRunnables.get(result.dataset()));
        switch (result.status()) {
          case PASSED:
            LOG.info("Completeness verification for dataset " + result.dataset() + " passed.");
            submitVerificationSuccessSlaEvent(result);
            result.dataset().setState(VERIFIED);
            // Unblock the job runner (if already submitted) so it may publish.
            if (jobRunner.isPresent()) {
              jobRunner.get().proceed();
            }
            break;
          case FAILED:
            if (shouldGiveUpVerification()) {
              LOG.info("Completeness verification for dataset " + result.dataset() + " has timed out.");
              // BUG FIX: this branch previously emitted a verification-*success* SLA event for a
              // failed/timed-out verification; emit the failure event, matching onFailure() below.
              submitFailureSlaEvent(result.dataset(), CompactionSlaEventHelper.COMPLETION_VERIFICATION_FAILED_EVENT_NAME);
              result.dataset().setState(GIVEN_UP);
              result.dataset().addThrowable(new RuntimeException(
                  String.format("Completeness verification for dataset %s failed or timed out.", result.dataset())));
            } else {
              LOG.info("Completeness verification for dataset " + result.dataset() + " failed. Will verify again.");
              datasetsToBeVerifiedAgain.add(result.dataset());
            }
            break;
          default:
            throw new IllegalStateException("Unrecognized result status: " + result.status());
        }
      }
      if (!datasetsToBeVerifiedAgain.isEmpty()) {
        ListenableFuture<Results> future2 = MRCompactor.this.verifier.get().verify(datasetsToBeVerifiedAgain);
        addCallback(datasetsToBeVerifiedAgain, future2);
      }
    }
    /**
     * On failure, resubmit verification for all {@link Dataset}s, unless timed out.
     */
    @Override
    public void onFailure(Throwable t) {
      LOG.error("Failed to verify completeness for the following datasets: " + datasetsToBeVerified, t);
      if (shouldGiveUpVerification()) {
        for (Dataset dataset : datasetsToBeVerified) {
          LOG.warn(String.format("Completeness verification for dataset %s has timed out.", dataset));
          submitFailureSlaEvent(dataset, CompactionSlaEventHelper.COMPLETION_VERIFICATION_FAILED_EVENT_NAME);
          dataset.setState(GIVEN_UP);
          dataset.addThrowable(new RuntimeException(
              String.format("Completeness verification for dataset %s failed or timed out.", dataset)));
        }
      } else {
        ListenableFuture<Results> future2 = MRCompactor.this.verifier.get().verify(datasetsToBeVerified);
        addCallback(datasetsToBeVerified, future2);
      }
    }
  });
}
/**
* Get the number of {@link Dataset}s to be verified together. This allows multiple {@link Dataset}s
* to share the same verification job, e.g., share the same query.
*/
private int getNumDatasetsVerifiedTogether() {
return this.state.getPropAsInt(COMPACTION_COMPLETENESS_VERIFICATION_NUM_DATASETS_VERIFIED_TOGETHER,
DEFAULT_COMPACTION_COMPLETENESS_VERIFICATION_NUM_DATASETS_VERIFIED_TOGETHER);
}
/**
* Marks every still-UNVERIFIED dataset as VERIFIED; used when completeness verification is
* disabled. compareAndSetState leaves datasets in other states (e.g. GIVEN_UP) untouched.
*/
private void setAllDatasetStatesToVerified() {
for (Dataset dataset : this.datasets) {
dataset.compareAndSetState(UNVERIFIED, VERIFIED);
}
}
/**
* Data completeness verification of a folder should give up if timed out.
* Measured from compactor construction via the shared stopwatch.
*/
private boolean shouldGiveUpVerification() {
return this.stopwatch.elapsed(TimeUnit.MINUTES) >= this.dataVerifTimeoutMinutes;
}
/** Whether a dataset whose completeness cannot be verified should still be compacted and published. */
private boolean shouldPublishDataIfCannotVerifyCompl() {
return this.state.getPropAsBoolean(COMPACTION_COMPLETENESS_VERIFICATION_PUBLISH_DATA_IF_CANNOT_VERIFY,
DEFAULT_COMPACTION_COMPLETENESS_VERIFICATION_PUBLISH_DATA_IF_CANNOT_VERIFY);
}
/**
* Polling loop that submits a compaction job for each dataset as it becomes runnable and
* waits until every dataset reaches a terminal state (COMPACTION_COMPLETE or GIVEN_UP without
* publish permission), or the overall compaction timeout elapses.
*/
private void submitCompactionJobsAndWaitForCompletion() {
LOG.info("Submitting compaction jobs. Number of datasets: " + this.datasets.size());
boolean allDatasetsCompleted = false;
while (!allDatasetsCompleted) {
allDatasetsCompleted = true;
for (Dataset dataset : this.datasets) {
MRCompactorJobRunner jobRunner = MRCompactor.this.jobRunnables.get(dataset);
if (dataset.state() == VERIFIED || dataset.state() == UNVERIFIED) {
allDatasetsCompleted = false;
// Run compaction for a dataset, if it is not already running or completed
// (only VERIFIED datasets are allowed to publish immediately).
if (jobRunner == null || jobRunner.status() == ABORTED) {
runCompactionForDataset(dataset, dataset.state() == VERIFIED);
}
} else if (dataset.state() == GIVEN_UP) {
if (this.shouldPublishDataIfCannotVerifyCompl) {
// Verification gave up but publishing is still allowed: run (or unblock) the job.
allDatasetsCompleted = false;
if (jobRunner == null || jobRunner.status() == ABORTED) {
runCompactionForDataset(dataset, true);
} else {
jobRunner.proceed();
}
} else {
// Verification gave up and publishing is not allowed: abort any running job.
if (jobRunner != null) {
jobRunner.abort();
}
}
}
}
if (this.stopwatch.elapsed(TimeUnit.MINUTES) >= this.compactionTimeoutMinutes) {
// Compaction timed out. Killing all compaction jobs running
LOG.error("Compaction timed-out. Killing all running jobs");
for (MRCompactorJobRunner jobRunner : MRCompactor.this.jobRunnables.values()) {
jobRunner.abort();
}
break;
}
// Sleep for a few seconds before another round
try {
Thread.sleep(TimeUnit.SECONDS.toMillis(COMPACTION_JOB_WAIT_INTERVAL_SECONDS));
} catch (InterruptedException e) {
// Restore the interrupt flag before propagating.
Thread.currentThread().interrupt();
throw new RuntimeException("Interrupted while waiting", e);
}
}
}
  /**
   * Run compaction job for a {@link Dataset}.
   *
   * Any {@link Throwable} raised while creating or submitting the job skips only this
   * dataset (recorded via {@code Dataset#skip}); the compactor continues with others.
   *
   * @param dataset The input {@link Dataset} to run compaction for.
   * @param proceed Whether the compaction job is permitted to publish data. If data completeness verification
   * is enabled and the status of the inputFolder is UNVERIFIED, 'proceed' should be set to false.
   * Otherwise it should be set to true.
   */
  private void runCompactionForDataset(Dataset dataset, boolean proceed) {
    LOG.info("Running compaction for dataset " + dataset);
    try {
      MRCompactorJobRunner jobRunner = getMRCompactorJobRunner(dataset);
      this.jobRunnables.put(dataset, jobRunner);
      // Grant publish permission before the runner starts executing.
      if (proceed) {
        jobRunner.proceed();
      }
      this.jobExecutor.execute(jobRunner);
    } catch (Throwable t) {
      dataset.skip(t);
    }
  }
/**
* Get an instance of {@link MRCompactorJobRunner}.
*/
private MRCompactorJobRunner getMRCompactorJobRunner(Dataset dataset) {
try {
@SuppressWarnings("unchecked")
Class<? extends MRCompactorJobRunner> cls = (Class<? extends MRCompactorJobRunner>) Class
.forName(this.state.getProp(COMPACTION_JOB_RUNNER_CLASS, DEFAULT_COMPACTION_JOB_RUNNER_CLASS));
return cls.getDeclaredConstructor(Dataset.class, FileSystem.class).newInstance(dataset, this.fs);
} catch (Exception e) {
throw new RuntimeException("Cannot instantiate MRCompactorJobRunner", e);
}
}
  /**
   * Keep track of running MR jobs, so if the compaction is cancelled, the MR jobs can be killed.
   * The registered entries are iterated by {@link #cancel()} to kill incomplete jobs.
   */
  public static void addRunningHadoopJob(Dataset dataset, Job job) {
    MRCompactor.RUNNING_MR_JOBS.put(dataset, job);
  }
private long getCompactionTimeoutMinutes() {
return this.state.getPropAsLong(COMPACTION_MR_JOB_TIMEOUT_MINUTES, DEFAULT_COMPACTION_MR_JOB_TIMEOUT_MINUTES);
}
private long getDataVerifTimeoutMinutes() {
return this.state.getPropAsLong(COMPACTION_VERIFICATION_TIMEOUT_MINUTES,
DEFAULT_COMPACTION_VERIFICATION_TIMEOUT_MINUTES);
}
private void throwExceptionsIfAnyDatasetCompactionFailed() {
Set<Dataset> datasetsWithThrowables = getDatasetsWithThrowables();
int numDatasetsWithThrowables = 0;
for (Dataset dataset : datasetsWithThrowables) {
numDatasetsWithThrowables++;
for (Throwable t : dataset.throwables()) {
LOG.error("Error processing dataset " + dataset, t);
submitFailureSlaEvent(dataset, CompactionSlaEventHelper.COMPACTION_FAILED_EVENT_NAME);
}
}
if (numDatasetsWithThrowables > 0) {
throw new RuntimeException(String.format("Failed to process %d datasets.", numDatasetsWithThrowables));
}
}
/**
* Return all {@link Dataset}s where a {@link Throwable} is thrown from the compaction job.
*/
private Set<Dataset> getDatasetsWithThrowables() {
Set<Dataset> datasetsWithThrowables = Sets.newHashSet();
for (Dataset dataset : this.datasets) {
if (!dataset.throwables().isEmpty()) {
datasetsWithThrowables.add(dataset);
}
}
return datasetsWithThrowables;
}
  /**
   * Shuts down the compaction job executor, delegating shutdown semantics to
   * {@link ExecutorsUtils#shutdownExecutorService}.
   */
  private void shutdownExecutors() {
    LOG.info("Shutting down Executors");
    ExecutorsUtils.shutdownExecutorService(this.jobExecutor, Optional.of(LOG));
  }
  /**
   * Cancels a running compaction: first kills all still-running hadoop jobs, then shuts
   * down the job executor immediately, and finally closes the completeness verifier if
   * one exists. The nested try/finally blocks guarantee each cleanup step runs even if
   * an earlier one throws.
   */
  @Override
  public void cancel() throws IOException {
    try {
      for (Map.Entry<Dataset, Job> entry : MRCompactor.RUNNING_MR_JOBS.entrySet()) {
        Job hadoopJob = entry.getValue();
        if (!hadoopJob.isComplete()) {
          LOG.info(String.format("Killing hadoop job %s for dataset %s", hadoopJob.getJobID(), entry.getKey()));
          hadoopJob.killJob();
        }
      }
    } finally {
      try {
        // Do not wait for running work: the 0 ns timeout forces an immediate shutdown.
        ExecutorsUtils.shutdownExecutorService(this.jobExecutor, Optional.of(LOG), 0, TimeUnit.NANOSECONDS);
      } finally {
        if (this.verifier.isPresent()) {
          this.verifier.get().closeNow();
        }
      }
    }
  }
public static void modifyDatasetStateToRecompact (Dataset dataset) {
// Modify the dataset for recompaction
LOG.info ("{} changes to recompact mode", dataset.getDatasetName());
State recompactState = new State();
recompactState.setProp(MRCompactor.COMPACTION_RECOMPACT_FROM_DEST_PATHS, Boolean.TRUE);
recompactState.setProp(MRCompactor.COMPACTION_JOB_LATE_DATA_MOVEMENT_TASK, Boolean.FALSE);
dataset.modifyDatasetForRecompact(recompactState);
dataset.setState(VERIFIED);
}
  /**
   * A subclass of {@link ThreadPoolExecutor} for running compaction jobs, and performs necessary steps
   * after each compaction job finishes.
   */
  private class JobRunnerExecutor extends ThreadPoolExecutor {
    public JobRunnerExecutor(int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit,
        BlockingQueue<Runnable> workQueue) {
      super(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue);
    }
    /**
     * When a compaction job for a {@link Dataset} finishes, if it successfully published the data (t == null
     * && jobRunner.status() == {@link MRCompactorJobRunner.Status#COMMITTED}, or if it
     * threw any {@link Throwable} (t != null), mark the {@link Dataset} as
     * {@link Dataset.DatasetState#COMPACTION_COMPLETE}.
     * If the job failed to publish the data because the input data was not complete, reduce the priority of
     * the {@link Dataset}. A new compaction job will be submitted later with a lower priority.
     */
    @Override
    protected void afterExecute(Runnable r, Throwable t) {
      Preconditions.checkArgument(r instanceof MRCompactorJobRunner,
          String.format("Runnable expected to be instance of %s, actual %s", MRCompactorJobRunner.class.getSimpleName(),
              r.getClass().getSimpleName()));
      MRCompactorJobRunner jobRunner = (MRCompactorJobRunner) r;
      // The job is no longer running; forget its runner so the scheduling loop can resubmit.
      MRCompactor.this.jobRunnables.remove(jobRunner.getDataset());
      if (t == null) {
        if (jobRunner.status() == COMMITTED) {
          if (jobRunner.getDataset().needToRecompact()) {
            // Dataset reports it needs another pass: switch it back into recompaction mode.
            modifyDatasetStateToRecompact (jobRunner.getDataset());
          } else {
            // Set the dataset status to COMPACTION_COMPLETE if compaction is successful.
            jobRunner.getDataset().setState(COMPACTION_COMPLETE);
          }
          if (MRCompactor.this.compactorListener.isPresent()) {
            try {
              MRCompactor.this.compactorListener.get().onDatasetCompactionCompletion(jobRunner.getDataset());
            } catch (Exception e) {
              // Treat a listener failure like a job failure for this dataset (handled below).
              t = e;
            }
          }
        } else if (jobRunner.getDataset().state() == GIVEN_UP
            && !MRCompactor.this.shouldPublishDataIfCannotVerifyCompl) {
          // Compaction job of a dataset has aborted, and data completeness verification has given up.
          // This dataset will not be compacted.
          LOG.info(String.format("Dataset %s will not be compacted, since data completeness cannot be verified",
              jobRunner.getDataset()));
          jobRunner.getDataset().setState(COMPACTION_COMPLETE);
        } else {
          // Compaction job of a dataset has aborted because data completeness is not verified.
          // Reduce priority and try again.
          jobRunner.getDataset().reducePriority();
        }
      }
      if (t != null) {
        // Compaction job of a dataset has failed with a throwable.
        afterExecuteWithThrowable(jobRunner, t);
      }
    }
    // Records the failure on the dataset so it is reported at the end of the run.
    private void afterExecuteWithThrowable(MRCompactorJobRunner jobRunner, Throwable t) {
      jobRunner.getDataset().skip(t);
    }
  }
  /**
   * Submit an event when completeness verification is successful, attaching the
   * verification context as event metadata. Event-submission failures are logged and
   * swallowed so reporting problems never fail the compaction run.
   */
  private void submitVerificationSuccessSlaEvent(Results.Result result) {
    try {
      CompactionSlaEventHelper.getEventSubmitterBuilder(result.dataset(), Optional.<Job> absent(), this.fs)
          .eventSubmitter(this.eventSubmitter).eventName(CompactionSlaEventHelper.COMPLETION_VERIFICATION_SUCCESS_EVENT_NAME)
          .additionalMetadata(Maps.transformValues(result.verificationContext(), Functions.toStringFunction())).build()
          .submit();
    } catch (Throwable t) {
      LOG.warn("Failed to submit verification success event:" + t, t);
    }
  }
  /**
   * Submit a failure sla event with the given name for the given dataset. Failures to
   * submit the event itself are logged and swallowed.
   */
  private void submitFailureSlaEvent(Dataset dataset, String eventName) {
    try {
      CompactionSlaEventHelper.getEventSubmitterBuilder(dataset, Optional.<Job> absent(), this.fs)
          .eventSubmitter(this.eventSubmitter).eventName(eventName).build().submit();
    } catch (Throwable t) {
      LOG.warn("Failed to submit failure sla event:" + t, t);
    }
  }
}
| 1,850 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce/CompactionCombineFileInputFormat.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.mapreduce;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.lib.input.CombineFileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.CombineFileSplit;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.util.VersionInfo;
/**
 * Base {@link CombineFileInputFormat} for compaction jobs: enforces configurable
 * min/max combined-split sizes, recurses into input directories, and works around
 * MAPREDUCE-5186 on pre-2.3.0 Hadoop.
 */
public abstract class CompactionCombineFileInputFormat<KI, KO> extends CombineFileInputFormat<KI, KO> {
  private static final String COMPACTION_JOB_PREFIX = "compaction.job.";
  /**
   * Properties related to the input format of the compaction job of a dataset.
   */
  @VisibleForTesting
  static final String COMPACTION_JOB_MAPRED_MAX_SPLIT_SIZE = COMPACTION_JOB_PREFIX + "mapred.max.split.size";
  private static final long DEFAULT_COMPACTION_JOB_MAPRED_MAX_SPLIT_SIZE = 268435456;
  @VisibleForTesting
  static final String COMPACTION_JOB_MAPRED_MIN_SPLIT_SIZE = COMPACTION_JOB_PREFIX + "mapred.min.split.size";
  private static final long DEFAULT_COMPACTION_JOB_MAPRED_MIN_SPLIT_SIZE = 268435456;
  private static final int SPLIT_MAX_NUM_LOCATIONS = 10;
  /**
   * Computes combined splits over the recursive listing of the input directories,
   * honoring the configured min/max split sizes.
   */
  @Override
  public List<InputSplit> getSplits(JobContext cx) throws IOException {
    Job modifiedJob = Job.getInstance(cx.getConfiguration());
    setSplitSize(modifiedJob);
    FileInputFormat.setInputDirRecursive(modifiedJob, true);
    return cleanSplits(super.getSplits(modifiedJob));
  }
  private void setSplitSize(JobContext cx) {
    super.setMaxSplitSize(cx.getConfiguration().getLong(COMPACTION_JOB_MAPRED_MAX_SPLIT_SIZE,
        DEFAULT_COMPACTION_JOB_MAPRED_MAX_SPLIT_SIZE));
    super.setMinSplitSizeNode(cx.getConfiguration().getLong(COMPACTION_JOB_MAPRED_MIN_SPLIT_SIZE,
        DEFAULT_COMPACTION_JOB_MAPRED_MIN_SPLIT_SIZE));
  }
  /**
   * Set the number of locations in the split to SPLIT_MAX_NUM_LOCATIONS if it is larger than
   * SPLIT_MAX_NUM_LOCATIONS (MAPREDUCE-5186).
   */
  private static List<InputSplit> cleanSplits(List<InputSplit> splits) throws IOException {
    if (isVersionAtLeast(VersionInfo.getVersion(), 2, 3)) {
      // This issue was fixed in 2.3.0, if newer version, no need to clean up splits
      return splits;
    }
    List<InputSplit> cleanedSplits = Lists.newArrayList();
    for (InputSplit split : splits) {
      CombineFileSplit oldSplit = (CombineFileSplit) split;
      String[] locations = oldSplit.getLocations();
      Preconditions.checkNotNull(locations, "CombineFileSplit.getLocations() returned null");
      if (locations.length > SPLIT_MAX_NUM_LOCATIONS) {
        locations = Arrays.copyOf(locations, SPLIT_MAX_NUM_LOCATIONS);
      }
      cleanedSplits.add(new CombineFileSplit(oldSplit.getPaths(), oldSplit.getStartOffsets(), oldSplit.getLengths(),
          locations));
    }
    return cleanedSplits;
  }
  /**
   * Numeric major.minor comparison of a version string. The previous implementation used
   * {@code String.compareTo("2.3.0")}, which orders versions lexicographically and
   * misclassifies e.g. "2.10.0" as older than "2.3.0".
   */
  @VisibleForTesting
  static boolean isVersionAtLeast(String version, int major, int minor) {
    String[] parts = version.split("[.-]");
    try {
      int actualMajor = Integer.parseInt(parts[0]);
      int actualMinor = parts.length > 1 ? Integer.parseInt(parts[1]) : 0;
      return actualMajor > major || (actualMajor == major && actualMinor >= minor);
    } catch (NumberFormatException e) {
      // Unparseable version string: assume a modern Hadoop and skip the workaround.
      return true;
    }
  }
}
| 1,851 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce/RecordKeyMapperBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.mapreduce;
import org.apache.hadoop.mapreduce.Mapper;
/**
 * Base class for compaction job mappers. The RECORD_COUNT counter defined here is read by
 * the compaction output committer to obtain the record count of map-only jobs.
 */
public abstract class RecordKeyMapperBase<KI, VI, KO, VO> extends Mapper<KI, VI, KO, VO> {
  // NOTE(review): subclasses are expected to increment RECORD_COUNT once per input
  // record -- confirm against concrete mapper implementations.
  public enum EVENT_COUNTER {
    RECORD_COUNT
  }
}
| 1,852 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce/CompactionAvroJobConfigurator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.mapreduce;
import java.io.IOException;
import org.apache.avro.Schema;
import org.apache.avro.mapred.AvroKey;
import org.apache.avro.mapred.AvroValue;
import org.apache.avro.mapreduce.AvroJob;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Job;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Enums;
import com.google.common.base.Optional;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.compaction.mapreduce.avro.AvroKeyCompactorOutputFormat;
import org.apache.gobblin.compaction.mapreduce.avro.AvroKeyDedupReducer;
import org.apache.gobblin.compaction.mapreduce.avro.AvroKeyMapper;
import org.apache.gobblin.compaction.mapreduce.avro.AvroKeyRecursiveCombineFileInputFormat;
import org.apache.gobblin.compaction.mapreduce.avro.MRCompactorAvroKeyDedupJobRunner;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.converter.filter.AvroSchemaFieldRemover;
import org.apache.gobblin.util.AvroUtils;
/**
 * A configurator that focused on creating avro compaction map-reduce job
 */
@Slf4j
public class CompactionAvroJobConfigurator extends CompactionJobConfigurator {
  // Optional comma-separated list of fields to exclude from the dedup key schema.
  private Optional<String> keyFieldBlacklist;
  public static class Factory implements CompactionJobConfigurator.ConfiguratorFactory {
    @Override
    public CompactionJobConfigurator createConfigurator(State state) throws IOException {
      return new CompactionAvroJobConfigurator(state);
    }
  }
  @Override
  public String getFileExtension() {
    return EXTENSION.AVRO.getExtensionString();
  }
  /**
   * Constructor
   * @param state A task level state
   */
  public CompactionAvroJobConfigurator(State state) throws IOException {
    super(state);
    this.keyFieldBlacklist =
        Optional.fromNullable(state.getProp(MRCompactorAvroKeyDedupJobRunner.COMPACTION_JOB_KEY_FIELD_BLACKLIST));
  }
  /**
   * Resolves the configured dedup-key option, falling back to the runner default when the
   * property is absent or unrecognized.
   * Refer to MRCompactorAvroKeyDedupJobRunner#getDedupKeyOption()
   */
  private MRCompactorAvroKeyDedupJobRunner.DedupKeyOption getDedupKeyOption() {
    if (!this.state.contains(MRCompactorAvroKeyDedupJobRunner.COMPACTION_JOB_DEDUP_KEY)) {
      return MRCompactorAvroKeyDedupJobRunner.DEFAULT_DEDUP_KEY_OPTION;
    }
    Optional<MRCompactorAvroKeyDedupJobRunner.DedupKeyOption> option =
        Enums.getIfPresent(MRCompactorAvroKeyDedupJobRunner.DedupKeyOption.class,
            this.state.getProp(MRCompactorAvroKeyDedupJobRunner.COMPACTION_JOB_DEDUP_KEY).toUpperCase());
    return option.isPresent() ? option.get() : MRCompactorAvroKeyDedupJobRunner.DEFAULT_DEDUP_KEY_OPTION;
  }
  /**
   * Default dedup-key schema: the topic's declared key attributes with uncomparable
   * (Map/Array/Enum) fields removed. Extracted because three fallback paths in
   * {@link #getDedupKeySchema(Schema)} previously duplicated this expression.
   */
  private Schema getDefaultDedupKeySchema(Schema topicSchema) {
    return AvroUtils.removeUncomparableFields(MRCompactorAvroKeyDedupJobRunner.getKeySchema(topicSchema)).get();
  }
  /**
   * Refer to MRCompactorAvroKeyDedupJobRunner#getKeySchema(Job, Schema)
   */
  @VisibleForTesting
  Schema getDedupKeySchema(Schema topicSchema) {
    boolean keySchemaFileSpecified =
        this.state.contains(MRCompactorAvroKeyDedupJobRunner.COMPACTION_JOB_AVRO_KEY_SCHEMA_LOC);
    Schema keySchema = null;
    MRCompactorAvroKeyDedupJobRunner.DedupKeyOption dedupKeyOption = getDedupKeyOption();
    if (dedupKeyOption == MRCompactorAvroKeyDedupJobRunner.DedupKeyOption.ALL) {
      // Typo fix: log message previously said "Arrar".
      log.info("Using all attributes in the schema (except Map, Array and Enum fields) for compaction");
      keySchema = AvroUtils.removeUncomparableFields(topicSchema).get();
    } else if (dedupKeyOption == MRCompactorAvroKeyDedupJobRunner.DedupKeyOption.KEY) {
      log.info("Using key attributes in the schema for compaction");
      keySchema = getDefaultDedupKeySchema(topicSchema);
    } else if (keySchemaFileSpecified) {
      Path keySchemaFile = new Path(state.getProp(MRCompactorAvroKeyDedupJobRunner.COMPACTION_JOB_AVRO_KEY_SCHEMA_LOC));
      log.info("Using attributes specified in schema file " + keySchemaFile + " for compaction");
      try {
        keySchema = AvroUtils.parseSchemaFromFile(keySchemaFile, this.fs);
      } catch (IOException e) {
        log.error("Failed to parse avro schema from " + keySchemaFile
            + ", using key attributes in the schema for compaction");
        keySchema = getDefaultDedupKeySchema(topicSchema);
      }
      if (!MRCompactorAvroKeyDedupJobRunner.isKeySchemaValid(keySchema, topicSchema)) {
        log.warn(String.format("Key schema %s is not compatible with record schema %s.", keySchema, topicSchema)
            + " Using key attributes in the schema for compaction");
        keySchema = getDefaultDedupKeySchema(topicSchema);
      }
    } else {
      log.info("Property " + MRCompactorAvroKeyDedupJobRunner.COMPACTION_JOB_AVRO_KEY_SCHEMA_LOC
          + " not provided. Using key attributes in the schema for compaction");
      keySchema = getDefaultDedupKeySchema(topicSchema);
    }
    if (this.keyFieldBlacklist.isPresent()) {
      AvroSchemaFieldRemover fieldRemover = new AvroSchemaFieldRemover(this.keyFieldBlacklist.get());
      keySchema = fieldRemover.removeFields(keySchema);
      log.info("Adjusted key schema {}", keySchema);
    }
    return keySchema;
  }
  /**
   * Sets Avro input/map-output/output schemas on the job. The map-output key schema is the
   * dedup key schema when deduplicating, otherwise the full record schema.
   */
  @Override
  protected void configureSchema(Job job) throws IOException {
    Schema newestSchema = MRCompactorAvroKeyDedupJobRunner.getNewestSchemaFromSource(job, this.fs);
    if (newestSchema != null) {
      if (this.state.getPropAsBoolean(MRCompactorAvroKeyDedupJobRunner.COMPACTION_JOB_AVRO_SINGLE_INPUT_SCHEMA, true)) {
        AvroJob.setInputKeySchema(job, newestSchema);
      }
      AvroJob.setMapOutputKeySchema(job, this.shouldDeduplicate ? getDedupKeySchema(newestSchema) : newestSchema);
      AvroJob.setMapOutputValueSchema(job, newestSchema);
      AvroJob.setOutputKeySchema(job, newestSchema);
    }
  }
  /** Avro-specific mapper wiring: combined-file input with AvroKey/AvroValue map output. */
  @Override
  protected void configureMapper(Job job) {
    job.setInputFormatClass(AvroKeyRecursiveCombineFileInputFormat.class);
    job.setMapperClass(AvroKeyMapper.class);
    job.setMapOutputKeyClass(AvroKey.class);
    job.setMapOutputValueClass(AvroValue.class);
  }
  /** Avro-specific reducer wiring: dedup reducer plus reducer-count sizing. */
  @Override
  protected void configureReducer(Job job) throws IOException {
    job.setOutputFormatClass(AvroKeyCompactorOutputFormat.class);
    job.setReducerClass(AvroKeyDedupReducer.class);
    job.setOutputKeyClass(AvroKey.class);
    job.setOutputValueClass(NullWritable.class);
    setNumberOfReducers(job);
  }
}
| 1,853 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce/CompactorOutputCommitter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.mapreduce;
import java.io.IOException;
import java.lang.reflect.Method;
import org.apache.commons.io.FilenameUtils;
import org.apache.gobblin.compaction.mapreduce.avro.MRCompactorAvroKeyDedupJobRunner;
import org.apache.gobblin.util.recordcount.CompactionRecordCountProvider;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Class used with {@link MRCompactorAvroKeyDedupJobRunner} to rename files as they
 * are being committed. In addition to moving files from their working directory to
 * the commit output directory, the files are named to include a timestamp and a
 * count of how many records the file contains, in the format
 * {recordCount}.{timestamp}.<extensionName>(avro, orc, etc.).
 */
public class CompactorOutputCommitter extends FileOutputCommitter {
  // Counts how many output files this committer renamed during commit.
  public enum EVENT_COUNTER {
    OUTPUT_FILE_COUNT
  }
  /**
   * Note that the value of this key doesn't have dot.
   */
  public static final String COMPACTION_OUTPUT_EXTENSION = "compaction.output.extension";
  public static final String DEFAULT_COMPACTION_OUTPUT_EXTENSION = "avro";
  private static final Logger LOG = LoggerFactory.getLogger(CompactorOutputCommitter.class);
  // File extension (without the leading dot) of the output files to rename.
  private final String compactionFileExtension;
  public CompactorOutputCommitter(Path output, TaskAttemptContext context) throws IOException {
    super(output, context);
    compactionFileExtension = context.getConfiguration().get(COMPACTION_OUTPUT_EXTENSION,
        DEFAULT_COMPACTION_OUTPUT_EXTENSION);
  }
  /**
   * Commits the task, moving files to their final committed location by delegating to
   * {@link FileOutputCommitter} to perform the actual moving. First, renames the
   * files to include the count of records contained within the file and a timestamp,
   * in the form {recordCount}.{timestamp}.avro. Then, the files are moved to their
   * committed location.
   */
  @Override
  public void commitTask(TaskAttemptContext context) throws IOException {
    Path workPath = getWorkPath();
    FileSystem fs = workPath.getFileSystem(context.getConfiguration());
    if (fs.exists(workPath)) {
      long recordCount = getRecordCountFromCounter(context, RecordKeyDedupReducerBase.EVENT_COUNTER.RECORD_COUNT);
      String fileNamePrefix;
      if (recordCount == 0) {
        // recordCount == 0 indicates that it is a map-only, non-dedup job, and thus record count should
        // be obtained from mapper counter.
        fileNamePrefix = CompactionRecordCountProvider.M_OUTPUT_FILE_PREFIX;
        recordCount = getRecordCountFromCounter(context, RecordKeyMapperBase.EVENT_COUNTER.RECORD_COUNT);
      } else {
        fileNamePrefix = CompactionRecordCountProvider.MR_OUTPUT_FILE_PREFIX;
      }
      String fileName = CompactionRecordCountProvider.constructFileName(fileNamePrefix,
          "." + compactionFileExtension, recordCount);
      // Rename every matching-extension file in the task work directory; the subsequent
      // super.commitTask() call moves the renamed files to the committed location.
      for (FileStatus status : fs.listStatus(workPath, new PathFilter() {
        @Override
        public boolean accept(Path path) {
          return FilenameUtils.isExtension(path.getName(), compactionFileExtension);
        }
      })) {
        Path newPath = new Path(status.getPath().getParent(), fileName);
        LOG.info(String.format("Renaming %s to %s", status.getPath(), newPath));
        fs.rename(status.getPath(), newPath);
        context.getCounter(EVENT_COUNTER.OUTPUT_FILE_COUNT).increment(1);
      }
    }
    super.commitTask(context);
  }
  // Reads a counter via reflection. NOTE(review): presumably done reflectively because
  // TaskAttemptContext's getCounter(Enum) signature differs across Hadoop versions --
  // confirm before simplifying to a direct call.
  private static long getRecordCountFromCounter(TaskAttemptContext context, Enum<?> counterName) {
    try {
      Method getCounterMethod = context.getClass().getMethod("getCounter", Enum.class);
      return ((Counter) getCounterMethod.invoke(context, counterName)).getValue();
    } catch (Exception e) {
      throw new RuntimeException("Error reading record count counter", e);
    }
  }
}
| 1,854 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce/CompactionJobConfigurator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.mapreduce;
import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.commons.math3.primes.Primes;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.TaskCompletionEvent;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import com.google.common.collect.Sets;
import com.google.common.primitives.Ints;
import lombok.AllArgsConstructor;
import lombok.Getter;
import lombok.Setter;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.compaction.dataset.DatasetHelper;
import org.apache.gobblin.compaction.mapreduce.avro.MRCompactorAvroKeyDedupJobRunner;
import org.apache.gobblin.compaction.parser.CompactionPathParser;
import org.apache.gobblin.compaction.verify.InputRecordCountHelper;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.dataset.FileSystemDataset;
import org.apache.gobblin.hive.policy.HiveRegistrationPolicy;
import org.apache.gobblin.util.FileListUtils;
import org.apache.gobblin.util.HadoopUtils;
/**
* Configurator for compaction job.
* Different data formats should have their own impl. for this interface.
*
*/
@Slf4j
public abstract class CompactionJobConfigurator {
public static final String COMPACTION_JOB_CONFIGURATOR_FACTORY_CLASS_KEY = "compaction.jobConfiguratorFactory.class";
public static final String DEFAULT_COMPACTION_JOB_CONFIGURATOR_FACTORY_CLASS =
"org.apache.gobblin.compaction.mapreduce.CompactionAvroJobConfigurator$Factory";
  // Supported compaction output formats and the file extension each one writes.
  @Getter
  @AllArgsConstructor
  protected enum EXTENSION {
    AVRO("avro"), ORC("orc");
    private String extensionString;
  }
protected final State state;
@Getter
protected final FileSystem fs;
// Below attributes are MR related
@Getter
protected Job configuredJob;
@Getter
protected final boolean shouldDeduplicate;
@Getter
protected Path mrOutputPath = null;
@Getter
protected boolean isJobCreated = false;
@Getter
protected Collection<Path> mapReduceInputPaths = null;
//All the old files, which is needed when emit GMCE to register iceberg data
@Getter
protected Collection<String> oldFiles = null;
//All the new files in the final publish dir, which is needed when emit GMCE to register iceberg data
@Getter
@Setter
protected Collection<Path> dstNewFiles = null;
@Getter
protected long fileNameRecordCount = 0;
  // Factory SPI: each format-specific configurator provides a Factory that builds it from job state.
  public interface ConfiguratorFactory {
    CompactionJobConfigurator createConfigurator(State state) throws IOException;
  }
  /**
   * @param state task-level state; used to resolve the file system and whether output
   *        should be deduplicated (defaults to true)
   * @throws IOException if the file system cannot be opened
   */
  public CompactionJobConfigurator(State state) throws IOException {
    this.state = state;
    this.fs = getFileSystem(state);
    this.shouldDeduplicate = state.getPropAsBoolean(MRCompactor.COMPACTION_SHOULD_DEDUPLICATE, true);
  }
public static CompactionJobConfigurator instantiateConfigurator(State state) {
String compactionConfiguratorFactoryClass =
state.getProp(COMPACTION_JOB_CONFIGURATOR_FACTORY_CLASS_KEY, DEFAULT_COMPACTION_JOB_CONFIGURATOR_FACTORY_CLASS);
try {
return Class.forName(compactionConfiguratorFactoryClass)
.asSubclass(ConfiguratorFactory.class)
.newInstance()
.createConfigurator(state);
} catch (ReflectiveOperationException | IOException e) {
throw new RuntimeException("Failed to instantiate a instance of job configurator:", e);
}
}
public abstract String getFileExtension();
  /**
   * Creates and configures a compaction map-reduce job for the given dataset. Despite the
   * original "for Avro" wording, this flow is format-agnostic: format specifics are
   * delegated to the abstract configureSchema/configureMapper/configureReducer methods.
   *
   * @param dataset A path or directory which needs compaction
   * @return A configured map-reduce job for compaction
   */
  public Job createJob(FileSystemDataset dataset) throws IOException {
    Configuration conf = HadoopUtils.getConfFromState(state);
    // Turn on mapreduce output compression by default
    if (conf.get("mapreduce.output.fileoutputformat.compress") == null && conf.get("mapred.output.compress") == null) {
      conf.setBoolean("mapreduce.output.fileoutputformat.compress", true);
    }
    // Disable delegation token cancellation by default
    if (conf.get("mapreduce.job.complete.cancel.delegation.tokens") == null) {
      conf.setBoolean("mapreduce.job.complete.cancel.delegation.tokens", false);
    }
    addJars(conf, this.state, fs);
    Job job = Job.getInstance(conf);
    job.setJobName(MRCompactorJobRunner.HADOOP_JOB_NAME);
    // configureInputAndOutputPaths reports whether the input turned out to be empty.
    boolean emptyDirectoryFlag = this.configureInputAndOutputPaths(job, dataset);
    if (emptyDirectoryFlag) {
      this.state.setProp(HiveRegistrationPolicy.MAPREDUCE_JOB_INPUT_PATH_EMPTY_KEY, true);
    }
    this.configureMapper(job);
    this.configureReducer(job);
    // Empty input or no dedup requested: run the job map-only.
    if (emptyDirectoryFlag || !this.shouldDeduplicate) {
      job.setNumReduceTasks(0);
    }
    // Configure schema at the last step because FilesInputFormat will be used internally
    this.configureSchema(job);
    this.isJobCreated = true;
    this.configuredJob = job;
    return job;
  }
  /**
   * Configuring Mapper/Reducer's input/output schema for compaction MR job.
   * The input schema for Mapper should be obtained from the to-be-compacted files.
   * The output schema for Mapper is for dedup.
   * The output schema for Reducer should be identical to the input schema of Mapper.
   *
   * @param job The compaction jobConf.
   * @throws IOException if the schema cannot be derived from the input files
   */
  protected abstract void configureSchema(Job job) throws IOException;
  /**
   * Configuring Mapper class, specific to data format.
   *
   * @param job the compaction job whose mapper-side classes are being set
   */
  protected abstract void configureMapper(Job job);
  /**
   * Configuring Reducer class, specific to data format.
   *
   * @param job the compaction job whose reducer-side classes are being set
   * @throws IOException on failures while deriving reducer configuration
   */
  protected abstract void configureReducer(Job job) throws IOException;
protected FileSystem getFileSystem(State state) throws IOException {
Configuration conf = HadoopUtils.getConfFromState(state);
String uri = state.getProp(ConfigurationKeys.SOURCE_FILEBASED_FS_URI, ConfigurationKeys.LOCAL_FS_URI);
return FileSystem.get(URI.create(uri), conf);
}
/**
* Refer to {@link MRCompactorAvroKeyDedupJobRunner#setNumberOfReducers(Job)}
* Note that this method is not format specific.
*/
protected void setNumberOfReducers(Job job) throws IOException {
// get input size
long inputSize = 0;
for (Path inputPath : this.mapReduceInputPaths) {
inputSize += this.fs.getContentSummary(inputPath).getLength();
}
// get target file size
long targetFileSize =
this.state.getPropAsLong(MRCompactorAvroKeyDedupJobRunner.COMPACTION_JOB_TARGET_OUTPUT_FILE_SIZE,
MRCompactorAvroKeyDedupJobRunner.DEFAULT_COMPACTION_JOB_TARGET_OUTPUT_FILE_SIZE);
// get max reducers
int maxNumReducers = state.getPropAsInt(MRCompactorAvroKeyDedupJobRunner.COMPACTION_JOB_MAX_NUM_REDUCERS,
MRCompactorAvroKeyDedupJobRunner.DEFAULT_COMPACTION_JOB_MAX_NUM_REDUCERS);
int numReducers = Math.min(Ints.checkedCast(inputSize / targetFileSize) + 1, maxNumReducers);
// get use prime reducers
boolean usePrimeReducers =
state.getPropAsBoolean(MRCompactorAvroKeyDedupJobRunner.COMPACTION_JOB_USE_PRIME_REDUCERS,
MRCompactorAvroKeyDedupJobRunner.DEFAULT_COMPACTION_JOB_USE_PRIME_REDUCERS);
if (usePrimeReducers && numReducers != 1) {
numReducers = Primes.nextPrime(numReducers);
}
job.setNumReduceTasks(numReducers);
}
protected void addJars(Configuration conf, State state, FileSystem fs) throws IOException {
if (!state.contains(MRCompactor.COMPACTION_JARS)) {
return;
}
Path jarFileDir = new Path(state.getProp(MRCompactor.COMPACTION_JARS));
for (FileStatus status : fs.listStatus(jarFileDir)) {
DistributedCache.addFileToClassPath(status.getPath(), conf, fs);
}
}
/**
* Refer to MRCompactorAvroKeyDedupJobRunner#configureInputAndOutputPaths(Job).
* @return false if no valid input paths present for MR job to process, where a path is valid if it is
* a directory containing one or more files.
*
*/
protected boolean configureInputAndOutputPaths(Job job, FileSystemDataset dataset) throws IOException {
boolean emptyDirectoryFlag = false;
String mrOutputBase = this.state.getProp(MRCompactor.COMPACTION_JOB_DIR);
CompactionPathParser parser = new CompactionPathParser(this.state);
CompactionPathParser.CompactionParserResult rst = parser.parse(dataset);
this.mrOutputPath = concatPaths(mrOutputBase, rst.getDatasetName(), rst.getDstSubDir(), rst.getTimeString());
if(this.state.contains(ConfigurationKeys.USE_DATASET_LOCAL_WORK_DIR)) {
mrOutputBase = this.state.getProp(MRCompactor.COMPACTION_DEST_DIR);
this.mrOutputPath = concatPaths(mrOutputBase, rst.getDatasetName(),
ConfigurationKeys.TMP_DIR, rst.getDstSubDir(), rst.getTimeString());
}
log.info("Cleaning temporary MR output directory: " + mrOutputPath);
this.fs.delete(mrOutputPath, true);
this.mapReduceInputPaths = getGranularInputPaths(dataset.datasetRoot());
if (this.mapReduceInputPaths.isEmpty()) {
this.mapReduceInputPaths.add(dataset.datasetRoot());
emptyDirectoryFlag = true;
}
this.oldFiles = new HashSet<>();
for (Path path : mapReduceInputPaths) {
oldFiles.add(this.fs.makeQualified(path).toString());
FileInputFormat.addInputPath(job, path);
}
FileOutputFormat.setOutputPath(job, mrOutputPath);
return emptyDirectoryFlag;
}
/**
* Concatenate multiple directory or file names into one path
*
* @return Concatenated path or null if the parameter is empty
*/
private Path concatPaths(String... names) {
if (names == null || names.length == 0) {
return null;
}
Path cur = new Path(names[0]);
for (int i = 1; i < names.length; ++i) {
cur = new Path(cur, new Path(names[i]));
}
return cur;
}
/**
* Converts a top level input path to a group of sub-paths according to user defined granularity.
* This may be required because if upstream application generates many sub-paths but the map-reduce
* job only keeps track of the top level path, after the job is done, we won't be able to tell if
* those new arriving sub-paths is processed by previous map-reduce job or not. Hence a better way
* is to pre-define those sub-paths as input paths before we start to run MR. The implementation of
* this method should depend on the data generation granularity controlled by upstream. Here we just
* list the deepest level of containing folder as the smallest granularity.
*
* @param path top level directory needs compaction
* @return A collection of input paths which will participate in map-reduce job
*/
protected Collection<Path> getGranularInputPaths(Path path) throws IOException {
boolean appendDelta = this.state.getPropAsBoolean(MRCompactor.COMPACTION_RENAME_SOURCE_DIR_ENABLED,
MRCompactor.DEFAULT_COMPACTION_RENAME_SOURCE_DIR_ENABLED);
Set<Path> uncompacted = Sets.newHashSet();
Set<Path> total = Sets.newHashSet();
for (FileStatus fileStatus : FileListUtils.listFilesRecursively(fs, path)) {
if (appendDelta) {
// use source dir suffix to identify the delta input paths
if (!fileStatus.getPath().getParent().toString().endsWith(MRCompactor.COMPACTION_RENAME_SOURCE_DIR_SUFFIX)) {
uncompacted.add(fileStatus.getPath().getParent());
}
total.add(fileStatus.getPath().getParent());
} else {
uncompacted.add(fileStatus.getPath().getParent());
}
}
if (appendDelta) {
// When the output record count from mr counter doesn't match
// the record count from input file names, we prefer file names because
// it will be used to calculate the difference of count in next run.
this.fileNameRecordCount = new InputRecordCountHelper(this.state).calculateRecordCount(total);
log.info("{} has total input record count (based on file name) {}", path, this.fileNameRecordCount);
}
return uncompacted;
}
private static List<TaskCompletionEvent> getAllTaskCompletionEvent(Job completedJob) {
List<TaskCompletionEvent> completionEvents = new LinkedList<>();
while (true) {
try {
TaskCompletionEvent[] bunchOfEvents;
bunchOfEvents = completedJob.getTaskCompletionEvents(completionEvents.size());
if (bunchOfEvents == null || bunchOfEvents.length == 0) {
break;
}
completionEvents.addAll(Arrays.asList(bunchOfEvents));
} catch (IOException e) {
break;
}
}
return completionEvents;
}
private static List<TaskCompletionEvent> getUnsuccessfulTaskCompletionEvent(Job completedJob) {
return getAllTaskCompletionEvent(completedJob).stream()
.filter(te -> te.getStatus() != TaskCompletionEvent.Status.SUCCEEDED)
.collect(Collectors.toList());
}
private static boolean isFailedPath(Path path, List<TaskCompletionEvent> failedEvents) {
return path.toString().contains("_temporary") || failedEvents.stream()
.anyMatch(
event -> path.toString().contains(Path.SEPARATOR + event.getTaskAttemptId().toString() + Path.SEPARATOR));
}
/**
* Get good files
* The problem happens when speculative task attempt initialized but then killed in the middle of processing.
* Some partial file was generated at {tmp_output}/_temporary/1/_temporary/attempt_xxx_xxx/xxxx(Avro file
* might have .avro as extension file name), without being committed to its final destination
* at {tmp_output}/xxxx.
*
* @param job Completed MR job
* @param fs File system that can handle file system
* @param acceptableExtension file extension acceptable as "good files".
* @return all successful files that has been committed
*/
public static List<Path> getGoodFiles(Job job, Path tmpPath, FileSystem fs, List<String> acceptableExtension)
throws IOException {
List<TaskCompletionEvent> failedEvents = getUnsuccessfulTaskCompletionEvent(job);
List<Path> allFilePaths = DatasetHelper.getApplicableFilePaths(fs, tmpPath, acceptableExtension);
List<Path> goodPaths = new ArrayList<>();
for (Path filePath : allFilePaths) {
if (isFailedPath(filePath, failedEvents)) {
fs.delete(filePath, false);
log.error("{} is a bad path so it was deleted", filePath);
} else {
goodPaths.add(filePath);
}
}
return goodPaths;
}
}
| 1,855 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce/MRCompactorJobPropCreator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.mapreduce;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.joda.time.DateTime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Joiner;
import com.google.common.base.Optional;
import com.google.common.base.Predicate;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import org.apache.gobblin.compaction.dataset.Dataset;
import org.apache.gobblin.compaction.dataset.DatasetHelper;
import org.apache.gobblin.compaction.event.CompactionSlaEventHelper;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.util.FileListUtils;
/**
* This class creates the following properties for a single MapReduce job for compaction:
 * compaction.topic, compaction.job.input.dir and compaction.job.dest.dir.
*
* @author Ziyang Liu
* @deprecated Please use {@link org.apache.gobblin.compaction.mapreduce.MRCompactionTask}
* and {@link org.apache.gobblin.compaction.source.CompactionSource} to launch MR instead.
* The new way enjoys simpler logic to trigger the compaction flow and more reliable verification criteria,
* instead of using timestamp only before.
*/
public class MRCompactorJobPropCreator {
  private static final Logger LOG = LoggerFactory.getLogger(MRCompactorJobPropCreator.class);

  /** Collects the dataset, file system, job state and recompact threshold for construction. */
  static class Builder {
    Dataset dataset;
    FileSystem fs;
    State state;
    double lateDataThresholdForRecompact;

    Builder withDataset(Dataset dataset) {
      this.dataset = dataset;
      return this;
    }

    Builder withFileSystem(FileSystem fs) {
      this.fs = fs;
      return this;
    }

    Builder withState(State state) {
      // Defensive copy so later mutations of the caller's state do not leak into this creator.
      this.state = new State();
      this.state.addAll(state);
      return this;
    }

    Builder withLateDataThresholdForRecompact(double thresholdForRecompact) {
      this.lateDataThresholdForRecompact = thresholdForRecompact;
      return this;
    }

    MRCompactorJobPropCreator build() {
      return new MRCompactorJobPropCreator(this);
    }
  }

  protected final Dataset dataset;
  protected final FileSystem fs;
  protected final State state;
  // Whether the input data is already deduplicated.
  protected final boolean inputDeduplicated;
  // Whether the output data should be deduplicated.
  protected final boolean outputDeduplicated;
  protected final double lateDataThresholdForRecompact;
  // Whether source directories get a suffix rename once compacted (delta-tracking mode).
  protected final boolean renameSourceDirEnabled;

  // Whether we should recompact the input folders if new data files are found in the input folders.
  protected final boolean recompactFromInputPaths;

  // Whether we should recompact the output folders if there are late data files in the output '_late' folder.
  // If this is set to true, input folders of the datasets will be ignored. The output folders and the
  // output '_late' folders will be used as input to compaction jobs.
  protected final boolean recompactFromOutputPaths;

  private MRCompactorJobPropCreator(Builder builder) {
    this.dataset = builder.dataset;
    this.fs = builder.fs;
    this.state = builder.state;
    this.lateDataThresholdForRecompact = builder.lateDataThresholdForRecompact;
    this.inputDeduplicated = this.state.getPropAsBoolean(MRCompactor.COMPACTION_INPUT_DEDUPLICATED,
        MRCompactor.DEFAULT_COMPACTION_INPUT_DEDUPLICATED);
    this.outputDeduplicated = this.state.getPropAsBoolean(MRCompactor.COMPACTION_OUTPUT_DEDUPLICATED,
        MRCompactor.DEFAULT_COMPACTION_OUTPUT_DEDUPLICATED);
    this.recompactFromInputPaths =
        this.state.getPropAsBoolean(MRCompactor.COMPACTION_RECOMPACT_FROM_INPUT_FOR_LATE_DATA,
            MRCompactor.DEFAULT_COMPACTION_RECOMPACT_FROM_INPUT_FOR_LATE_DATA);
    this.recompactFromOutputPaths = this.state.getPropAsBoolean(MRCompactor.COMPACTION_RECOMPACT_FROM_DEST_PATHS,
        MRCompactor.DEFAULT_COMPACTION_RECOMPACT_FROM_DEST_PATHS);
    this.renameSourceDirEnabled = this.state.getPropAsBoolean(MRCompactor.COMPACTION_RENAME_SOURCE_DIR_ENABLED,
        MRCompactor.DEFAULT_COMPACTION_RENAME_SOURCE_DIR_ENABLED);
  }

  /**
   * Creates the job properties for this creator's dataset, provided at least one of its input
   * paths exists on the file system.
   *
   * @return a single-element list holding the dataset with its job properties attached, or an
   *     empty list when the dataset is skipped
   */
  protected List<Dataset> createJobProps() throws IOException {
    // Proceed only if at least one configured input path actually exists.
    if (Iterables.tryFind(this.dataset.inputPaths(), new Predicate<Path>() {
      public boolean apply(Path input) {
        try {
          return MRCompactorJobPropCreator.this.fs.exists(input);
        } catch (IOException e) {
          MRCompactorJobPropCreator.LOG.error(String.format("Failed to check if %s exits", new Object[] { input }), e);
        }
        return false;
      }
    }).isPresent()) {
      Optional<Dataset> datasetWithJobProps = createJobProps(this.dataset);
      if (datasetWithJobProps.isPresent()) {
        setCompactionSLATimestamp((Dataset) datasetWithJobProps.get());
        return ImmutableList.of(datasetWithJobProps.get());
      }
      return ImmutableList.of();
    }
    LOG.warn("Input folders " + this.dataset.inputPaths() + " do not exist. Skipping dataset " + this.dataset);
    return ImmutableList.of();
  }

  private void setCompactionSLATimestamp(Dataset dataset) {
    // Set up SLA timestamp only if this dataset will be compacted and MRCompactor.COMPACTION_INPUT_PATH_TIME is present.
    if ((this.recompactFromOutputPaths || !MRCompactor.datasetAlreadyCompacted(this.fs, dataset, this.renameSourceDirEnabled))
        && dataset.jobProps().contains(MRCompactor.COMPACTION_INPUT_PATH_TIME)) {
      long timeInMills = dataset.jobProps().getPropAsLong(MRCompactor.COMPACTION_INPUT_PATH_TIME);
      // Set the upstream time to partition + 1 day. E.g. for 2015/10/13 the upstream time is midnight of 2015/10/14
      CompactionSlaEventHelper.setUpstreamTimeStamp(this.state,
          timeInMills + TimeUnit.MILLISECONDS.convert(1, TimeUnit.DAYS));
    }
  }

  /** Returns true when any late input path of the dataset exists and contains entries. */
  private boolean latePathsFound(Dataset dataset) throws IOException, FileNotFoundException {
    for (Path lateInputPath : dataset.inputLatePaths()) {
      if ((this.fs.exists(lateInputPath)) && (this.fs.listStatus(lateInputPath).length > 0)) {
        return true;
      }
    }
    return false;
  }

  /**
   * Create MR job properties for a {@link Dataset}.
   *
   * @return the dataset carrying its job properties, or {@link Optional#absent()} when no
   *     compaction work is needed
   */
  protected Optional<Dataset> createJobProps(Dataset dataset) throws IOException {
    if (this.recompactFromOutputPaths && (!latePathsFound(dataset))) {
      LOG.info(String.format("Skipping recompaction for %s since there is no late data in %s",
          new Object[] { dataset.inputPaths(), dataset.inputLatePaths() }));
      return Optional.absent();
    }

    State jobProps = new State();
    jobProps.addAll(this.state);
    jobProps.setProp(MRCompactor.COMPACTION_ENABLE_SUCCESS_FILE, false);
    jobProps.setProp(MRCompactor.COMPACTION_INPUT_DEDUPLICATED, this.inputDeduplicated);
    jobProps.setProp(MRCompactor.COMPACTION_OUTPUT_DEDUPLICATED, this.outputDeduplicated);
    // Dedup is required only when input is not deduplicated but output must be.
    jobProps.setProp(MRCompactor.COMPACTION_SHOULD_DEDUPLICATE, !this.inputDeduplicated && this.outputDeduplicated);

    if (this.recompactFromOutputPaths || !MRCompactor.datasetAlreadyCompacted(this.fs, dataset, renameSourceDirEnabled)) {
      // First-time compaction (or forced recompaction from output paths).
      if (renameSourceDirEnabled) {
        Set<Path> newUnrenamedDirs = MRCompactor.getDeepestLevelUnrenamedDirsWithFileExistence(this.fs, dataset.inputPaths());
        if (getAllFilePathsRecursively(newUnrenamedDirs).isEmpty()) {
          return Optional.absent();
        }
        LOG.info ("[{}] has unprocessed directories for first time compaction: {}", dataset.getDatasetName(), newUnrenamedDirs);
        dataset.overwriteInputPaths(newUnrenamedDirs);
        dataset.setRenamePaths(newUnrenamedDirs);
      } else {
        addInputLateFilesForFirstTimeCompaction(jobProps, dataset);
      }

      LOG.info(String.format("Created MR job properties for input %s and output %s.", dataset.inputPaths(),
          dataset.outputPath()));
      dataset.setJobProps(jobProps);
      return Optional.of(dataset);
    } else {
      return obtainDatasetWithJobProps (jobProps, dataset);
    }
  }

  private void addInputLateFilesForFirstTimeCompaction(State jobProps, Dataset dataset) throws IOException {
    // NOTE(review): the inner outputDeduplicated check is redundant — the outer condition already
    // requires it. Confirm whether late input paths should also be added when the output is NOT
    // deduplicated (i.e. whether the outer "&& this.outputDeduplicated" is intentional).
    if ((latePathsFound(dataset)) && (this.outputDeduplicated)) {
      dataset.addAdditionalInputPaths(dataset.inputLatePaths());
      if (this.outputDeduplicated) {
        // If input contains late data (i.e., input data is not deduplicated) and output data should be deduplicated,
        // run a deduping compaction instead of non-deduping compaction.
        jobProps.setProp(MRCompactor.COMPACTION_SHOULD_DEDUPLICATE, true);
      }
    }
  }

  /** Lists every file under the given directories, recursively. */
  private Set<Path> getAllFilePathsRecursively (Set<Path> paths) throws IOException{
    Set<Path> allPaths = Sets.newHashSet();
    for (FileStatus fileStatus : FileListUtils.listFilesRecursively(fs, paths)) {
      allPaths.add(fileStatus.getPath());
    }
    return allPaths;
  }

  /**
   * Attaches job properties to a dataset that was already compacted before, deciding between a
   * late-data-movement task, a recompaction, or skipping the dataset entirely.
   */
  private Optional<Dataset> obtainDatasetWithJobProps (State jobProps, Dataset dataset) throws IOException {
    if (this.recompactFromInputPaths) {
      LOG.info(String.format("Will recompact for %s.", dataset.outputPath()));
      addInputLateFilesForFirstTimeCompaction(jobProps, dataset);
    } else {
      Set<Path> newDataFiles = new HashSet<>();
      // do { ... } while (false) is used as a breakable block: each "break" skips straight to
      // the newDataFiles.isEmpty() decision below.
      do {
        if (renameSourceDirEnabled) {
          Set<Path> newUnrenamedDirs = MRCompactor.getDeepestLevelUnrenamedDirsWithFileExistence(this.fs, dataset.inputPaths());
          if (newUnrenamedDirs.isEmpty()) {
            LOG.info("[{}] doesn't have unprocessed directories", dataset.getDatasetName());
            break;
          }
          Set<Path> allFiles = getAllFilePathsRecursively(newUnrenamedDirs);
          if (allFiles.isEmpty()) {
            LOG.info("[{}] has unprocessed directories but all empty: {}", dataset.getDatasetName(), newUnrenamedDirs);
            break;
          }
          dataset.setRenamePaths(newUnrenamedDirs);
          newDataFiles.addAll(allFiles);
          LOG.info("[{}] has unprocessed directories: {}", dataset.getDatasetName(), newUnrenamedDirs);
        } else {
          // Without rename tracking, fall back to modification-time comparison.
          newDataFiles = getNewDataInFolder(dataset.inputPaths(), dataset.outputPath());
          Set<Path> newDataFilesInLatePath = getNewDataInFolder(dataset.inputLatePaths(), dataset.outputPath());
          newDataFiles.addAll(newDataFilesInLatePath);
          if (newDataFiles.isEmpty()) {
            break;
          }
          if (!newDataFilesInLatePath.isEmpty()) {
            dataset.addAdditionalInputPaths(dataset.inputLatePaths());
          }
        }
      } while (false);
      if (newDataFiles.isEmpty()) {
        // Although no new data come in, it is needed to check if late directory has remaining data for two reasons:
        // 1) Previous compaction job may move data to the late directory but haven't compacted them before a job is killed or timed out.
        // 2) Provide a chance to look at if late data has been existed too long, so the recompact condition will be set.
        // When late data exists and it is required to move, we modify the dataset state to recompact state so only
        // re-compaction flow will run.
        if (isOutputLateDataExists (dataset)) {
          LOG.info ("{} don't have new data, but previous late data still remains, check if it requires to move", dataset.getDatasetName());
          dataset.setJobProps(jobProps);
          dataset.checkIfNeedToRecompact(new DatasetHelper(dataset, this.fs, Lists.newArrayList("avro")));
          if (dataset.needToRecompact()) {
            MRCompactor.modifyDatasetStateToRecompact (dataset);
          } else {
            return Optional.absent();
          }
        } else {
          return Optional.absent();
        }
      } else {
        LOG.info(String.format("Will copy %d new data files for %s", newDataFiles.size(), dataset.outputPath()));
        jobProps.setProp(MRCompactor.COMPACTION_JOB_LATE_DATA_MOVEMENT_TASK, true);
        jobProps.setProp(MRCompactor.COMPACTION_JOB_LATE_DATA_FILES, Joiner.on(",").join(newDataFiles));
      }
    }
    dataset.setJobProps(jobProps);
    return Optional.of(dataset);
  }

  /** Returns true when the dataset's output '_late' directory exists and is non-empty. */
  private boolean isOutputLateDataExists (Dataset dataset) throws IOException {
    if (!this.fs.exists(dataset.outputLatePath())) {
      return false;
    }
    return this.fs.listStatus(dataset.outputLatePath()).length > 0;
  }

  /** Union of {@link #getNewDataInFolder(Path, Path)} over all given input folders. */
  private Set<Path> getNewDataInFolder(Set<Path> inputFolders, Path outputFolder) throws IOException {
    Set<Path> paths = Sets.newHashSet();
    for (Path inputFolder : inputFolders) {
      paths.addAll(getNewDataInFolder(inputFolder, outputFolder));
    }
    return paths;
  }

  /**
   * Check if inputFolder contains any files which have modification times which are more
   * recent than the last compaction time as stored within outputFolder; return any files
   * which do. An empty list will be returned if all files are older than the last compaction time.
   */
  private Set<Path> getNewDataInFolder(Path inputFolder, Path outputFolder) throws IOException {
    Set<Path> newFiles = Sets.newHashSet();

    if (!this.fs.exists(inputFolder) || !this.fs.exists(outputFolder)) {
      return newFiles;
    }

    DateTime lastCompactionTime = new DateTime(MRCompactor.readCompactionTimestamp(this.fs, outputFolder));
    for (FileStatus fstat : FileListUtils.listFilesRecursively(this.fs, inputFolder)) {
      DateTime fileModificationTime = new DateTime(fstat.getModificationTime());
      if (fileModificationTime.isAfter(lastCompactionTime)) {
        LOG.info ("[" + fileModificationTime.getMillis() + "] " + fstat.getPath() + " is after " + lastCompactionTime.getMillis());
        newFiles.add(fstat.getPath());
      }
    }
    if (!newFiles.isEmpty()) {
      LOG.info(String.format("Found %d new files within folder %s which are more recent than the previous "
          + "compaction start time of %s.", newFiles.size(), inputFolder, lastCompactionTime));
    }
    return newFiles;
  }

  /**
   * Create a {@link Dataset} with the given {@link Throwable}. This {@link Dataset} will be skipped by setting
   * its state to {@link Dataset.DatasetState#COMPACTION_COMPLETE}, and the {@link Throwable} will be added to
   * the {@link Dataset}.
   */
  public Dataset createFailedJobProps(Throwable t) {
    this.dataset.setJobProps(this.state);
    this.dataset.skip(t);
    return this.dataset;
  }
}
| 1,856 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce/MRCompactorJobRunner.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.mapreduce;
import java.io.IOException;
import java.net.URI;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.apache.commons.io.FilenameUtils;
import org.apache.commons.math3.primes.Primes;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.mortbay.log.Log;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.github.rholder.retry.Retryer;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Lists;
import com.google.common.primitives.Ints;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigRenderOptions;
import org.apache.gobblin.compaction.dataset.Dataset;
import org.apache.gobblin.compaction.dataset.DatasetHelper;
import org.apache.gobblin.compaction.event.CompactionSlaEventHelper;
import org.apache.gobblin.config.ConfigBuilder;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.metrics.GobblinMetrics;
import org.apache.gobblin.metrics.event.EventSubmitter;
import org.apache.gobblin.util.ExecutorsUtils;
import org.apache.gobblin.util.FileListUtils;
import org.apache.gobblin.util.HadoopUtils;
import org.apache.gobblin.util.RecordCountProvider;
import org.apache.gobblin.util.WriterUtils;
import org.apache.gobblin.util.executors.ScalingThreadPoolExecutor;
import org.apache.gobblin.util.recordcount.LateFileRecordCountProvider;
import org.apache.gobblin.util.retry.RetryerFactory;
import static org.apache.gobblin.compaction.mapreduce.MRCompactor.COMPACTION_PREFIX;
import static org.apache.gobblin.util.retry.RetryerFactory.*;
/**
* This class is responsible for configuring and running a single MR job.
* It should be extended by a subclass that properly configures the mapper and reducer related classes.
*
* The properties that control the number of reducers are compaction.target.output.file.size and
* compaction.max.num.reducers. The number of reducers will be the smaller of
* [total input size] / [compaction.target.output.file.size] + 1 and [compaction.max.num.reducers].
*
* If {@value MRCompactor#COMPACTION_JOB_LATE_DATA_MOVEMENT_TASK} is set to true, does not
* launch an MR job. Instead, just copies the files present in
* {@value MRCompactor#COMPACTION_JOB_LATE_DATA_FILES} to a 'late' subdirectory within
* the output directory.
*
* @author Ziyang Liu
* @deprecated Please use {@link org.apache.gobblin.compaction.mapreduce.MRCompactionTask}
* and {@link org.apache.gobblin.compaction.source.CompactionSource} to launch MR instead.
* The new way enjoys simpler logic to trigger the compaction flow and more reliable verification criteria,
* instead of using timestamp only before.
*/
@SuppressWarnings("deprecation")
public abstract class MRCompactorJobRunner implements Runnable, Comparable<MRCompactorJobRunner> {
  private static final Logger LOG = LoggerFactory.getLogger(MRCompactorJobRunner.class);

  // Prefix shared by all per-job compaction property keys.
  private static final String COMPACTION_JOB_PREFIX = "compaction.job.";

  /**
   * Properties related to the compaction job of a dataset.
   */
  public static final String COMPACTION_JOB_OUTPUT_DIR_PERMISSION = COMPACTION_JOB_PREFIX + "output.dir.permission";
  public static final String COMPACTION_JOB_TARGET_OUTPUT_FILE_SIZE =
      COMPACTION_JOB_PREFIX + "target.output.file.size";
  // 536870912 bytes = 512 MB default target size per compacted output file.
  public static final long DEFAULT_COMPACTION_JOB_TARGET_OUTPUT_FILE_SIZE = 536870912;
  public static final String COMPACTION_JOB_MAX_NUM_REDUCERS = COMPACTION_JOB_PREFIX + "max.num.reducers";
  public static final int DEFAULT_COMPACTION_JOB_MAX_NUM_REDUCERS = 3600;
  private static final String COMPACTION_JOB_OVERWRITE_OUTPUT_DIR = COMPACTION_JOB_PREFIX + "overwrite.output.dir";
  private static final boolean DEFAULT_COMPACTION_JOB_OVERWRITE_OUTPUT_DIR = false;
  private static final String COMPACTION_JOB_ABORT_UPON_NEW_DATA = COMPACTION_JOB_PREFIX + "abort.upon.new.data";
  private static final boolean DEFAULT_COMPACTION_JOB_ABORT_UPON_NEW_DATA = false;
  private static final String COMPACTION_COPY_LATE_DATA_THREAD_POOL_SIZE =
      COMPACTION_JOB_PREFIX + "copy.latedata.thread.pool.size";
  private static final int DEFAULT_COMPACTION_COPY_LATE_DATA_THREAD_POOL_SIZE = 5;

  // If true, the MR job will use either 1 reducer or a prime number of reducers.
  public static final String COMPACTION_JOB_USE_PRIME_REDUCERS = COMPACTION_JOB_PREFIX + "use.prime.reducers";
  public static final boolean DEFAULT_COMPACTION_JOB_USE_PRIME_REDUCERS = true;

  public static final String HADOOP_JOB_NAME = "Gobblin MR Compaction";
  // Polling interval (ms) for checking MR job completion.
  private static final long MR_JOB_CHECK_COMPLETE_INTERVAL_MS = 5000;
  private final boolean isRetryEnabled;
  // URI of the file system used for temporary work; when null, the constructor presumably falls
  // back to a default file system — TODO confirm (constructor body not fully visible here).
  private final String tmpFsUri;

  /** Directive from the compactor telling a running job whether its output may be published. */
  public enum Policy {
    // The job runner is permitted to publish the data.
    DO_PUBLISH_DATA,
    // The job runner can proceed with the compaction for now but should not publish the data.
    DO_NOT_PUBLISH_DATA,
    // The job runner should abort asap without publishing data.
    ABORT_ASAP
  }

  /** Lifecycle state of this job runner. */
  public enum Status {
    ABORTED,
    COMMITTED,
    RUNNING
  }

  protected final Dataset dataset;
  protected final FileSystem fs;
  // File system for temporary output; may differ from fs when COMPACTION_TMP_FS is configured.
  protected final FileSystem tmpFs;
  // Permission applied to the job output directory.
  protected final FsPermission perm;
  protected final boolean shouldDeduplicate;
  protected final boolean outputDeduplicated;
  protected final boolean recompactFromDestPaths;
  protected final boolean recompactAllData;
  protected final boolean renameSourceDir;
  protected final boolean usePrimeReducers;
  protected final EventSubmitter eventSubmitter;
  private final RecordCountProvider inputRecordCountProvider;
  private final RecordCountProvider outputRecordCountProvider;
  private final LateFileRecordCountProvider lateInputRecordCountProvider;
  private final LateFileRecordCountProvider lateOutputRecordCountProvider;
  private final DatasetHelper datasetHelper;
  private final int copyLateDataThreadPoolSize;
  private final String outputExtension;
  // volatile: these are written and read across threads (visibility only, no atomicity).
  private volatile Policy policy = Policy.DO_NOT_PUBLISH_DATA;
  private volatile Status status = Status.RUNNING;
  private final Cache<Path, List<Path>> applicablePathCache;

  static final String COMPACTION_RETRY_PREFIX = COMPACTION_JOB_PREFIX + "retry.";
  static final String COMPACTION_RETRY_ENABLED = COMPACTION_RETRY_PREFIX + "enabled";
  static final String COMPACTION_TMP_FS = COMPACTION_PREFIX + "tmp.fs";
  static final Config COMPACTION_RETRY_DEFAULTS;

  // Default retry policy: exponential backoff starting at 5 seconds, doubling each attempt,
  // bounded by a 2-minute overall timeout.
  static {
    Map<String, Object> configMap =
        ImmutableMap.<String, Object>builder()
            .put(RETRY_TIME_OUT_MS, TimeUnit.MINUTES.toMillis(2L)) //Overall retry for 2 minutes
            .put(RETRY_INTERVAL_MS, TimeUnit.SECONDS.toMillis(5L)) //Try to retry 5 seconds
            .put(RETRY_MULTIPLIER, 2L) // Multiply by 2 every attempt
            .put(RETRY_TYPE, RetryType.EXPONENTIAL.name())
            .build();
    COMPACTION_RETRY_DEFAULTS = ConfigFactory.parseMap(configMap);
  };

  protected final Config retrierConfig;
protected MRCompactorJobRunner(Dataset dataset, FileSystem fs) {
this.dataset = dataset;
this.fs = fs;
this.perm = HadoopUtils.deserializeFsPermission(this.dataset.jobProps(), COMPACTION_JOB_OUTPUT_DIR_PERMISSION,
FsPermission.getDefault());
this.recompactFromDestPaths = this.dataset.jobProps().getPropAsBoolean(
MRCompactor.COMPACTION_RECOMPACT_FROM_DEST_PATHS, MRCompactor.DEFAULT_COMPACTION_RECOMPACT_FROM_DEST_PATHS);
this.recompactAllData = this.dataset.jobProps().getPropAsBoolean(
MRCompactor.COMPACTION_RECOMPACT_ALL_DATA, MRCompactor.DEFAULT_COMPACTION_RECOMPACT_ALL_DATA);
this.renameSourceDir = this.dataset.jobProps().getPropAsBoolean(
MRCompactor.COMPACTION_RENAME_SOURCE_DIR_ENABLED, MRCompactor.DEFAULT_COMPACTION_RENAME_SOURCE_DIR_ENABLED);
Preconditions.checkArgument(this.dataset.jobProps().contains(MRCompactor.COMPACTION_SHOULD_DEDUPLICATE),
String.format("Missing property %s for dataset %s", MRCompactor.COMPACTION_SHOULD_DEDUPLICATE, this.dataset));
this.shouldDeduplicate = this.dataset.jobProps().getPropAsBoolean(MRCompactor.COMPACTION_SHOULD_DEDUPLICATE);
this.outputDeduplicated = this.dataset.jobProps().getPropAsBoolean(MRCompactor.COMPACTION_OUTPUT_DEDUPLICATED,
MRCompactor.DEFAULT_COMPACTION_OUTPUT_DEDUPLICATED);
this.usePrimeReducers = this.dataset.jobProps().getPropAsBoolean(COMPACTION_JOB_USE_PRIME_REDUCERS,
DEFAULT_COMPACTION_JOB_USE_PRIME_REDUCERS);
this.eventSubmitter = new EventSubmitter.Builder(
GobblinMetrics.get(this.dataset.jobProps().getProp(ConfigurationKeys.JOB_NAME_KEY)).getMetricContext(),
MRCompactor.COMPACTION_TRACKING_EVENTS_NAMESPACE).build();
this.copyLateDataThreadPoolSize = this.dataset.jobProps().getPropAsInt(COMPACTION_COPY_LATE_DATA_THREAD_POOL_SIZE,
DEFAULT_COMPACTION_COPY_LATE_DATA_THREAD_POOL_SIZE);
this.tmpFsUri = this.dataset.jobProps().getProp(COMPACTION_TMP_FS,
null);
try {
Log.info("Tmp fs uri:"+this.tmpFsUri);
if (this.tmpFsUri != null) {
this.tmpFs = FileSystem.get(new URI(this.tmpFsUri), new Configuration());
} else {
this.tmpFs = MRCompactorJobRunner.this.fs;
}
} catch (Exception e) {
throw new RuntimeException("Failed get Filesystem from tmp fs uri", e);
}
try {
this.inputRecordCountProvider = (RecordCountProvider) Class
.forName(this.dataset.jobProps().getProp(MRCompactor.COMPACTION_INPUT_RECORD_COUNT_PROVIDER,
MRCompactor.DEFAULT_COMPACTION_INPUT_RECORD_COUNT_PROVIDER))
.newInstance();
this.outputRecordCountProvider = (RecordCountProvider) Class
.forName(this.dataset.jobProps().getProp(MRCompactor.COMPACTION_OUTPUT_RECORD_COUNT_PROVIDER,
MRCompactor.DEFAULT_COMPACTION_OUTPUT_RECORD_COUNT_PROVIDER))
.newInstance();
this.lateInputRecordCountProvider = new LateFileRecordCountProvider(this.inputRecordCountProvider);
this.lateOutputRecordCountProvider = new LateFileRecordCountProvider(this.outputRecordCountProvider);
this.isRetryEnabled= this.dataset.jobProps().getPropAsBoolean(COMPACTION_RETRY_ENABLED,
false);
} catch (Exception e) {
throw new RuntimeException("Failed to instantiate RecordCountProvider", e);
}
this.applicablePathCache = CacheBuilder.newBuilder().maximumSize(2000).build();
this.datasetHelper = new DatasetHelper(this.dataset, this.fs, this.getApplicableFileExtensions());
this.outputExtension = this.dataset.jobProps().getProp(MRCompactor.COMPACTION_FILE_EXTENSION, ".avro");
if (this.isRetryEnabled) {
this.retrierConfig = ConfigBuilder.create()
.loadProps(this.dataset.jobProps().getProperties(), COMPACTION_RETRY_PREFIX)
.build()
.withFallback(COMPACTION_RETRY_DEFAULTS);
LOG.info("Retry enabled for compaction publish :"+ retrierConfig.root().render(ConfigRenderOptions.concise()));
} else {
this.retrierConfig = WriterUtils.NO_RETRY_CONFIG;
LOG.info("Retry disabled for compaction");
}
}
  /**
   * Runs the compaction for this runner's {@link Dataset}: either copies late data files into the
   * output location (late-data-movement task), or configures, submits, and publishes an MR job.
   * The terminal state is reflected in {@link #status()}.
   */
  @Override
  public void run() {
    Configuration conf = HadoopUtils.getConfFromState(this.dataset.jobProps());

    // Turn on mapreduce output compression by default
    if (conf.get("mapreduce.output.fileoutputformat.compress") == null && conf.get("mapred.output.compress") == null) {
      conf.setBoolean("mapreduce.output.fileoutputformat.compress", true);
    }

    // Disable delegation token cancellation by default
    if (conf.get("mapreduce.job.complete.cancel.delegation.tokens") == null) {
      conf.setBoolean("mapreduce.job.complete.cancel.delegation.tokens", false);
    }

    try {
      DateTime compactionTimestamp = getCompactionTimestamp();
      LOG.info("MR Compaction Job Timestamp " + compactionTimestamp.getMillis());
      if (this.dataset.jobProps().getPropAsBoolean(MRCompactor.COMPACTION_JOB_LATE_DATA_MOVEMENT_TASK, false)) {
        // Late-data-movement mode: no MR job; copy qualifying late files into the output.
        List<Path> newLateFilePaths = Lists.newArrayList();
        for (String filePathString : this.dataset.jobProps()
            .getPropAsList(MRCompactor.COMPACTION_JOB_LATE_DATA_FILES)) {
          if (FilenameUtils.isExtension(filePathString, getApplicableFileExtensions())) {
            newLateFilePaths.add(new Path(filePathString));
          }
        }

        // Late files go to the dedicated "late" path only when the output is deduplicated.
        Path lateDataOutputPath = this.outputDeduplicated ? this.dataset.outputLatePath() : this.dataset.outputPath();
        LOG.info(String.format("Copying %d late data files to %s", newLateFilePaths.size(), lateDataOutputPath));
        if (this.outputDeduplicated) {
          if (!this.fs.exists(lateDataOutputPath)) {
            if (!this.fs.mkdirs(lateDataOutputPath)) {
              throw new RuntimeException(
                  String.format("Failed to create late data output directory: %s.", lateDataOutputPath.toString()));
            }
          }
        }
        this.copyDataFiles(lateDataOutputPath, newLateFilePaths);
        if (this.outputDeduplicated) {
          // New late data in a deduplicated output may warrant recompacting the whole output.
          dataset.checkIfNeedToRecompact (datasetHelper);
        }
        this.status = Status.COMMITTED;
      } else {
        if (this.fs.exists(this.dataset.outputPath()) && !canOverwriteOutputDir()) {
          LOG.warn(String.format("Output paths %s exists. Will not compact %s.", this.dataset.outputPath(),
              this.dataset.inputPaths()));
          this.status = Status.COMMITTED;
          return;
        }
        addJars(conf);
        Job job = Job.getInstance(conf);
        this.configureJob(job);
        this.submitAndWait(job);
        if (shouldPublishData(compactionTimestamp)) {
          // remove all invalid empty files due to speculative task execution
          // NOTE(review): the extension is hard-coded to "avro" here even though
          // this.outputExtension is configurable -- confirm whether non-avro outputs
          // should pass this.outputExtension instead.
          List<Path> goodPaths = CompactionJobConfigurator.getGoodFiles(job, this.dataset.outputTmpPath(), this.tmpFs,
              ImmutableList.of("avro"));

          if (!this.recompactAllData && this.recompactFromDestPaths) {
            // append new files without deleting output directory
            addGoodFilesToOutputPath(goodPaths);
            // clean up late data from outputLateDirectory, which has been set to inputPath
            deleteFilesByPaths(this.dataset.inputPaths());
          } else {
            moveTmpPathToOutputPath();
            if (this.recompactFromDestPaths) {
              deleteFilesByPaths(this.dataset.additionalInputPaths());
            }
          }
          submitSlaEvent(job);
          LOG.info("Successfully published data for input folder " + this.dataset.inputPaths());
          this.status = Status.COMMITTED;
        } else {
          LOG.info("Data not published for input folder " + this.dataset.inputPaths() + " due to incompleteness");
          this.status = Status.ABORTED;
          return;
        }
      }
      if (renameSourceDir) {
        // Completion is signaled by renaming the source dir instead of writing a marker file.
        MRCompactor.renameSourceDirAsCompactionComplete (this.fs, this.dataset);
      } else {
        this.markOutputDirAsCompleted(compactionTimestamp);
      }
      this.submitRecordsCountsEvent();
    } catch (Throwable t) {
      throw Throwables.propagate(t);
    }
  }
/**
* For regular compactions, compaction timestamp is the time the compaction job starts.
*
* If this is a recompaction from output paths, the compaction timestamp will remain the same as previously
* persisted compaction time. This is because such a recompaction doesn't consume input data, so next time,
* whether a file in the input folder is considered late file should still be based on the previous compaction
* timestamp.
*/
private DateTime getCompactionTimestamp() throws IOException {
DateTimeZone timeZone = DateTimeZone.forID(
this.dataset.jobProps().getProp(MRCompactor.COMPACTION_TIMEZONE, MRCompactor.DEFAULT_COMPACTION_TIMEZONE));
if (!this.recompactFromDestPaths) {
return new DateTime(timeZone);
}
Set<Path> inputPaths = getInputPaths();
long maxTimestamp = Long.MIN_VALUE;
for (FileStatus status : FileListUtils.listFilesRecursively(this.fs, inputPaths)) {
maxTimestamp = Math.max(maxTimestamp, status.getModificationTime());
}
return maxTimestamp == Long.MIN_VALUE ? new DateTime(timeZone) : new DateTime(maxTimestamp, timeZone);
}
private void copyDataFiles(final Path outputDirectory, List<Path> inputFilePaths) throws IOException {
ExecutorService executor = ScalingThreadPoolExecutor.newScalingThreadPool(0, this.copyLateDataThreadPoolSize, 100,
ExecutorsUtils.newThreadFactory(Optional.of(LOG), Optional.of(this.dataset.getName() + "-copy-data")));
List<Future<?>> futures = Lists.newArrayList();
for (final Path filePath : inputFilePaths) {
Future<Void> future = executor.submit(new Callable<Void>() {
@Override
public Void call() throws Exception {
Path convertedFilePath = MRCompactorJobRunner.this.outputRecordCountProvider.convertPath(
LateFileRecordCountProvider.restoreFilePath(filePath),
MRCompactorJobRunner.this.outputExtension,
MRCompactorJobRunner.this.inputRecordCountProvider);
String targetFileName = convertedFilePath.getName();
Path outPath = MRCompactorJobRunner.this.lateOutputRecordCountProvider.constructLateFilePath(targetFileName,
MRCompactorJobRunner.this.fs, outputDirectory);
HadoopUtils.copyPath (MRCompactorJobRunner.this.fs, filePath, MRCompactorJobRunner.this.fs, outPath, true,
MRCompactorJobRunner.this.fs.getConf());
LOG.debug(String.format("Copied %s to %s.", filePath, outPath));
return null;
}
});
futures.add(future);
}
try {
for (Future<?> future : futures) {
future.get();
}
} catch (ExecutionException | InterruptedException e) {
throw new IOException("Failed to copy file.", e);
} finally {
ExecutorsUtils.shutdownExecutorService(executor, Optional.of(LOG));
}
}
private boolean canOverwriteOutputDir() {
return this.dataset.jobProps().getPropAsBoolean(COMPACTION_JOB_OVERWRITE_OUTPUT_DIR,
DEFAULT_COMPACTION_JOB_OVERWRITE_OUTPUT_DIR) || this.recompactFromDestPaths;
}
private void addJars(Configuration conf) throws IOException {
if (!this.dataset.jobProps().contains(MRCompactor.COMPACTION_JARS)) {
return;
}
Path jarFileDir = new Path(this.dataset.jobProps().getProp(MRCompactor.COMPACTION_JARS));
for (FileStatus status : this.fs.listStatus(jarFileDir)) {
DistributedCache.addFileToClassPath(status.getPath(), conf, this.fs);
}
}
protected void configureJob(Job job) throws IOException {
job.setJobName(HADOOP_JOB_NAME + " (" + this.dataset.getDatasetName() + ")");
configureInputAndOutputPaths(job);
configureMapper(job);
configureReducer(job);
if (!this.shouldDeduplicate) {
job.setNumReduceTasks(0);
}
}
private void configureInputAndOutputPaths(Job job) throws IOException {
for (Path inputPath : getInputPaths()) {
FileInputFormat.addInputPath(job, inputPath);
}
//MR output path must not exist when MR job starts, so delete if exists.
this.tmpFs.delete(this.dataset.outputTmpPath(), true);
FileOutputFormat.setOutputPath(job, this.dataset.outputTmpPath());
}
private Set<Path> getInputPaths() {
return ImmutableSet.<Path> builder().addAll(this.dataset.inputPaths()).addAll(this.dataset.additionalInputPaths())
.build();
}
public Dataset getDataset() {
return this.dataset;
}
  /**
   * Wires up the map side of the job via the format-specific hooks below.
   */
  protected void configureMapper(Job job) {
    setInputFormatClass(job);
    setMapperClass(job);
    setMapOutputKeyClass(job);
    setMapOutputValueClass(job);
  }

  /**
   * Wires up the reduce side of the job via the format-specific hooks below, then sizes the
   * reducer count from the input size.
   */
  protected void configureReducer(Job job) throws IOException {
    setOutputFormatClass(job);
    setReducerClass(job);
    setOutputKeyClass(job);
    setOutputValueClass(job);
    setNumberOfReducers(job);
  }

  // Format-specific hooks implemented by concrete subclasses (e.g. Avro-based runners).
  protected abstract void setInputFormatClass(Job job);

  protected abstract void setMapperClass(Job job);

  protected abstract void setMapOutputKeyClass(Job job);

  protected abstract void setMapOutputValueClass(Job job);

  protected abstract void setOutputFormatClass(Job job);

  protected abstract void setReducerClass(Job job);

  protected abstract void setOutputKeyClass(Job job);

  protected abstract void setOutputValueClass(Job job);

  /** Returns the file extensions (without semantics beyond suffix match) this runner compacts. */
  protected abstract Collection<String> getApplicableFileExtensions();
protected void setNumberOfReducers(Job job) throws IOException {
long inputSize = getInputSize();
long targetFileSize = getTargetFileSize();
int numReducers = Math.min(Ints.checkedCast(inputSize / targetFileSize) + 1, getMaxNumReducers());
if (this.usePrimeReducers && numReducers != 1) {
numReducers = Primes.nextPrime(numReducers);
}
job.setNumReduceTasks(numReducers);
}
private long getInputSize() throws IOException {
long inputSize = 0;
for (Path inputPath : this.getInputPaths()) {
inputSize += this.fs.getContentSummary(inputPath).getLength();
}
return inputSize;
}
  /** Target size (in bytes) of each compacted output file; used to size the reducer count. */
  private long getTargetFileSize() {
    return this.dataset.jobProps().getPropAsLong(COMPACTION_JOB_TARGET_OUTPUT_FILE_SIZE,
        DEFAULT_COMPACTION_JOB_TARGET_OUTPUT_FILE_SIZE);
  }

  /** Upper bound on the number of reducers for the compaction job. */
  private int getMaxNumReducers() {
    return this.dataset.jobProps().getPropAsInt(COMPACTION_JOB_MAX_NUM_REDUCERS,
        DEFAULT_COMPACTION_JOB_MAX_NUM_REDUCERS);
  }
private void submitAndWait(Job job) throws ClassNotFoundException, IOException, InterruptedException {
job.submit();
MRCompactor.addRunningHadoopJob(this.dataset, job);
LOG.info(String.format("MR job submitted for dataset %s, input %s, url: %s", this.dataset, getInputPaths(),
job.getTrackingURL()));
while (!job.isComplete()) {
if (this.policy == Policy.ABORT_ASAP) {
LOG.info(String.format(
"MR job for dataset %s, input %s killed due to input data incompleteness." + " Will try again later",
this.dataset, getInputPaths()));
job.killJob();
return;
}
Thread.sleep(MR_JOB_CHECK_COMPLETE_INTERVAL_MS);
}
if (!job.isSuccessful()) {
throw new RuntimeException(String.format("MR job failed for topic %s, input %s, url: %s", this.dataset,
getInputPaths(), job.getTrackingURL()));
}
}
  /**
   * Data should be published if: (1) this.policy == {@link Policy#DO_PUBLISH_DATA}; (2) either
   * compaction.abort.upon.new.data=false, or no new data is found in the input folder since jobStartTime.
   */
  private boolean shouldPublishData(DateTime jobStartTime) throws IOException {
    if (this.policy != Policy.DO_PUBLISH_DATA) {
      return false;
    }
    // If abort-upon-new-data is disabled, publish unconditionally.
    if (!this.dataset.jobProps().getPropAsBoolean(COMPACTION_JOB_ABORT_UPON_NEW_DATA,
        DEFAULT_COMPACTION_JOB_ABORT_UPON_NEW_DATA)) {
      return true;
    }
    // Otherwise only publish when no input folder received new files after the job started.
    for (Path inputPath : getInputPaths()) {
      if (findNewDataSinceCompactionStarted(inputPath, jobStartTime)) {
        return false;
      }
    }
    return true;
  }
private boolean findNewDataSinceCompactionStarted(Path inputPath, DateTime jobStartTime) throws IOException {
for (FileStatus fstat : FileListUtils.listFilesRecursively(this.fs, inputPath)) {
DateTime fileModificationTime = new DateTime(fstat.getModificationTime());
if (fileModificationTime.isAfter(jobStartTime)) {
LOG.info(String.format("Found new file %s in input folder %s after compaction started. Will abort compaction.",
fstat.getPath(), inputPath));
return true;
}
}
return false;
}
private void markOutputDirAsCompleted(DateTime jobStartTime) throws IOException {
Path completionFilePath = new Path(this.dataset.outputPath(), MRCompactor.COMPACTION_COMPLETE_FILE_NAME);
try (FSDataOutputStream completionFileStream = this.fs.create(completionFilePath)) {
completionFileStream.writeLong(jobStartTime.getMillis());
}
}
private void moveTmpPathToOutputPath() throws IOException {
Retryer<Void> retryer = RetryerFactory.newInstance(this.retrierConfig);
LOG.info(String.format("Moving %s to %s", this.dataset.outputTmpPath(), this.dataset.outputPath()));
this.fs.delete(this.dataset.outputPath(), true);
if (this.isRetryEnabled) {
try {
retryer.call(() -> {
if (fs.exists(this.dataset.outputPath())) {
throw new IOException("Path " + this.dataset.outputPath() + " exists however it should not. Will wait more.");
}
return null;
});
} catch (Exception e) {
throw new IOException(e);
}
}
WriterUtils.mkdirsWithRecursivePermissionWithRetry(MRCompactorJobRunner.this.fs, this.dataset.outputPath().getParent(), this.perm, this.retrierConfig);
Log.info("Moving from fs: ("+MRCompactorJobRunner.this.tmpFs.getUri()+") path: "+ this.dataset.outputTmpPath() + " to "+ "fs: ("+ FileSystem.get(this.dataset.outputPath().getParent().toUri(), this.fs.getConf()).getUri()+") output path: " + this.dataset.outputPath());
HadoopUtils.movePath (MRCompactorJobRunner.this.tmpFs, this.dataset.outputTmpPath(), FileSystem.get(this.dataset.outputPath().getParent().toUri(), this.fs.getConf()), this.dataset.outputPath(), false, this.fs.getConf()) ;
}
private void addGoodFilesToOutputPath (List<Path> goodPaths) throws IOException {
for (Path path: goodPaths) {
String fileName = path.getName();
LOG.info(String.format("Adding %s to %s", path.toString(), this.dataset.outputPath()));
Path outPath = MRCompactorJobRunner.this.lateOutputRecordCountProvider.constructLateFilePath(fileName,
MRCompactorJobRunner.this.fs, this.dataset.outputPath());
HadoopUtils.movePath(MRCompactorJobRunner.this.tmpFs, path,
FileSystem.get(this.dataset.outputPath().getParent().toUri(), this.fs.getConf()), outPath, false, this.fs.getConf());
}
}
private void deleteFilesByPaths(Set<Path> paths) throws IOException {
for (Path path : paths) {
HadoopUtils.deletePathAndEmptyAncestors(this.fs, path, true);
}
}
  /**
   * Tell the {@link MRCompactorJobRunner} that it can go ahead and publish the data.
   */
  public void proceed() {
    this.policy = Policy.DO_PUBLISH_DATA;
  }

  /**
   * Ask the runner to abort as soon as possible without publishing; the running MR job, if any,
   * is killed at the next completion-poll (see submitAndWait).
   */
  public void abort() {
    this.policy = Policy.ABORT_ASAP;
  }

  /**
   * The status of the MRCompactorJobRunner.
   * @return RUNNING, COMMITTED or ABORTED.
   */
  public Status status() {
    return this.status;
  }
  /**
   * Orders runners by dataset priority in DESCENDING order (higher-priority datasets sort first).
   * NOTE(review): this ordering is not consistent with equals(); avoid using runners in sorted
   * sets/maps that rely on compareTo for element identity.
   */
  @Override
  public int compareTo(MRCompactorJobRunner o) {
    return Double.compare(o.dataset.priority(), this.dataset.priority());
  }
/**
* Get the list of file {@link Path}s in the given dataDir, which satisfy the extension requirements
* of {@link #getApplicableFileExtensions()}.
*/
private List<Path> getApplicableFilePaths(final Path dataDir, final FileSystem fs) throws IOException {
try {
return applicablePathCache.get(dataDir, new Callable<List<Path>>() {
@Override
public List<Path> call() throws Exception {
if (!MRCompactorJobRunner.this.fs.exists(dataDir)) {
return Lists.newArrayList();
}
List<Path> paths = Lists.newArrayList();
for (FileStatus fileStatus : FileListUtils.listFilesRecursively(fs, dataDir,
new PathFilter() {
@Override
public boolean accept(Path path) {
for (String validExtention : getApplicableFileExtensions()) {
if (path.getName().endsWith(validExtention)) {
return true;
}
}
return false;
}
})) {
paths.add(fileStatus.getPath());
}
return paths;
}
});
} catch (ExecutionException e) {
throw new IOException(e);
}
}
  /**
   * Submit an event when compaction MR job completes.
   * Best-effort: any failure is logged and swallowed so that event submission can never fail
   * an otherwise-successful compaction.
   */
  private void submitSlaEvent(Job job) {
    try {
      CompactionSlaEventHelper
          .getEventSubmitterBuilder(this.dataset, Optional.of(job), this.fs)
          .eventSubmitter(this.eventSubmitter)
          .eventName(CompactionSlaEventHelper.COMPACTION_COMPLETED_EVENT_NAME)
          // Late record count: records in the "late" output directory.
          .additionalMetadata(
              CompactionSlaEventHelper.LATE_RECORD_COUNT,
              Long.toString(this.lateOutputRecordCountProvider.getRecordCount(this.getApplicableFilePaths(this.dataset
                  .outputLatePath(), this.fs))))
          // Regular record count: records in the main output directory.
          .additionalMetadata(
              CompactionSlaEventHelper.REGULAR_RECORD_COUNT,
              Long.toString(this.outputRecordCountProvider.getRecordCount(this.getApplicableFilePaths(this.dataset
                  .outputPath(), this.fs))))
          .additionalMetadata(CompactionSlaEventHelper.RECOMPATED_METADATA_NAME,
              Boolean.toString(this.dataset.needToRecompact())).build().submit();
    } catch (Throwable e) {
      LOG.warn("Failed to submit compaction completed event:" + e, e);
    }
  }
  /**
   * Submit an event reporting late record counts and non-late record counts.
   * Best-effort: any failure is logged and swallowed.
   */
  private void submitRecordsCountsEvent() {
    long lateOutputRecordCount = this.datasetHelper.getLateOutputRecordCount();
    long outputRecordCount = this.datasetHelper.getOutputRecordCount();

    try {
      CompactionSlaEventHelper
          .getEventSubmitterBuilder(this.dataset, Optional.<Job> absent(), this.fs)
          .eventSubmitter(this.eventSubmitter)
          .eventName(CompactionSlaEventHelper.COMPACTION_RECORD_COUNT_EVENT)
          .additionalMetadata(CompactionSlaEventHelper.DATASET_OUTPUT_PATH, this.dataset.outputPath().toString())
          .additionalMetadata(
              CompactionSlaEventHelper.LATE_RECORD_COUNT,
              Long.toString(lateOutputRecordCount))
          .additionalMetadata(
              CompactionSlaEventHelper.REGULAR_RECORD_COUNT,
              Long.toString(outputRecordCount))
          .additionalMetadata(CompactionSlaEventHelper.NEED_RECOMPACT, Boolean.toString(this.dataset.needToRecompact()))
          .build().submit();
    } catch (Throwable e) {
      LOG.warn("Failed to submit late event count:" + e, e);
    }
  }
}
| 1,857 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce/RecordKeyDedupReducerBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.mapreduce;
import com.google.common.base.Optional;
import java.io.IOException;
import java.util.Comparator;
import lombok.Getter;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Reducer;
/**
* A base implementation of deduplication reducer that is format-unaware.
*/
/**
 * Format-agnostic base {@link Reducer} that collapses all records sharing the same shuffle key
 * into a single output record. When a delta comparator is configured, the "largest" duplicate
 * wins; otherwise the first duplicate seen is retained. Concrete subclasses bind the record
 * format by implementing the abstract hooks below.
 *
 * @param <KI> shuffle (map-output) key type
 * @param <VI> shuffle (map-output) value type
 * @param <KO> reducer output key type
 * @param <VO> reducer output value type
 */
public abstract class RecordKeyDedupReducerBase<KI, VI, KO, VO> extends Reducer<KI, VI, KO, VO> {

  /** MR counters reported by this reducer. */
  public enum EVENT_COUNTER {
    MORE_THAN_1, DEDUPED, RECORD_COUNT
  }

  /**
   * In most of cases, one of following will be {@link NullWritable}
   */
  @Getter
  protected KO outKey;

  @Getter
  protected VO outValue;

  /** When present, decides which duplicate survives; when absent, the first one seen wins. */
  protected Optional<Comparator<VI>> deltaComparatorOptional;

  /** Instantiates the reusable output key/value holders before the first reduce call. */
  protected abstract void initReusableObject();

  /**
   * Assign output value to reusable object.
   * @param valueToRetain the output value determined after dedup process.
   */
  protected abstract void setOutKey(VI valueToRetain);

  /**
   * Added to avoid loss of flexibility to put output value in key/value.
   * Usually for compaction job, either implement {@link #setOutKey} or this.
   */
  protected abstract void setOutValue(VI valueToRetain);

  /** Initializes {@link #deltaComparatorOptional} from the job configuration. */
  protected abstract void initDeltaComparator(Configuration conf);

  @Override
  protected void setup(Context context) {
    initReusableObject();
    initDeltaComparator(context.getConfiguration());
  }

  @Override
  protected void reduce(KI key, Iterable<VI> values, Context context)
      throws IOException, InterruptedException {
    VI retained = null;
    int duplicateCount = 0;

    // Keep exactly one value among all duplicates sharing this key.
    for (VI candidate : values) {
      if (retained == null) {
        retained = candidate;
      } else if (deltaComparatorOptional.isPresent()
          && deltaComparatorOptional.get().compare(retained, candidate) < 0) {
        retained = candidate;
      }
      duplicateCount++;
    }

    writeRetainedValue(retained, context);
    updateCounters(duplicateCount, context);
  }

  /** Emits the surviving value through the reusable key/value holders. */
  protected void writeRetainedValue(VI valueToRetain, Context context)
      throws IOException, InterruptedException {
    setOutKey(valueToRetain);
    setOutValue(valueToRetain);
    // Safety check: the subclass hooks must have populated both holders.
    if (outKey == null || outValue == null) {
      throw new IllegalStateException("Either outKey or outValue is not being properly initialized");
    }
    context.write(this.outKey, this.outValue);
  }

  /**
   * Update the MR counter based on input {@param numDuplicates}, which indicates the times of duplication of a
   * record seen in a reducer call.
   */
  protected void updateCounters(int numDuplicates, Context context) {
    context.getCounter(EVENT_COUNTER.RECORD_COUNT).increment(1);
    if (numDuplicates > 1) {
      context.getCounter(EVENT_COUNTER.MORE_THAN_1).increment(1);
      context.getCounter(EVENT_COUNTER.DEDUPED).increment(numDuplicates - 1);
    }
  }
}
| 1,858 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce/CompactionOrcJobConfigurator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.mapreduce;
import java.io.IOException;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.orc.OrcConf;
import org.apache.orc.TypeDescription;
import org.apache.orc.mapred.OrcKey;
import org.apache.orc.mapred.OrcValue;
import org.apache.gobblin.compaction.mapreduce.orc.OrcKeyCompactorOutputFormat;
import org.apache.gobblin.compaction.mapreduce.orc.OrcKeyComparator;
import org.apache.gobblin.compaction.mapreduce.orc.OrcKeyDedupReducer;
import org.apache.gobblin.compaction.mapreduce.orc.OrcUtils;
import org.apache.gobblin.compaction.mapreduce.orc.OrcValueCombineFileInputFormat;
import org.apache.gobblin.compaction.mapreduce.orc.OrcValueMapper;
import org.apache.gobblin.configuration.State;
import static org.apache.gobblin.compaction.mapreduce.CompactorOutputCommitter.COMPACTION_OUTPUT_EXTENSION;
import static org.apache.gobblin.compaction.mapreduce.orc.OrcUtils.eligibleForUpConvert;
import static org.apache.gobblin.writer.GobblinOrcWriterConfigs.DEFAULT_ORC_WRITER_BATCH_SIZE;
import static org.apache.gobblin.writer.GobblinOrcWriterConfigs.ORC_WRITER_BATCH_SIZE;
/**
 * Configures a compaction MR job that reads and writes ORC, deduplicating records by shuffling
 * on an ORC key. Input/shuffle/output schemas are derived from the newest schema found in the
 * input, with an optional user-supplied shuffle-key schema.
 */
public class CompactionOrcJobConfigurator extends CompactionJobConfigurator {
  /**
   * The key schema for the shuffle output.
   */
  public static final String ORC_MAPPER_SHUFFLE_KEY_SCHEMA = "orcMapperShuffleSchema";

  private String orcMapperShuffleSchemaString;

  /** Factory hook used to instantiate this configurator from a {@link State}. */
  public static class Factory implements CompactionJobConfigurator.ConfiguratorFactory {
    @Override
    public CompactionJobConfigurator createConfigurator(State state) throws IOException {
      return new CompactionOrcJobConfigurator(state);
    }
  }

  public CompactionOrcJobConfigurator(State state) throws IOException {
    super(state);
    this.orcMapperShuffleSchemaString = state.getProp(ORC_MAPPER_SHUFFLE_KEY_SCHEMA, StringUtils.EMPTY);
  }

  /** Output file extension, defaulting to ORC unless overridden by configuration. */
  @Override
  public String getFileExtension() {
    return this.state.getProp(COMPACTION_OUTPUT_EXTENSION, EXTENSION.ORC.getExtensionString());
  }

  /**
   * Derives the job's ORC schemas from the newest schema in the input. The user-specified
   * shuffle-key schema is honored only when the input schema can be safely up-converted to it
   * (see OrcUtils#eligibleForUpConvert); otherwise the full input schema is used.
   */
  protected void configureSchema(Job job) throws IOException {
    TypeDescription newestSchema = OrcUtils.getNewestSchemaFromSource(job, this.fs);
    String newestSchemaString = newestSchema.toString();

    job.getConfiguration().set(OrcConf.MAPRED_INPUT_SCHEMA.getAttribute(), newestSchemaString);

    boolean useCustomShuffleKey = !orcMapperShuffleSchemaString.isEmpty()
        && eligibleForUpConvert(newestSchema, TypeDescription.fromString(orcMapperShuffleSchemaString));
    job.getConfiguration().set(OrcConf.MAPRED_SHUFFLE_KEY_SCHEMA.getAttribute(),
        useCustomShuffleKey ? orcMapperShuffleSchemaString : newestSchemaString);

    job.getConfiguration().set(OrcConf.MAPRED_SHUFFLE_VALUE_SCHEMA.getAttribute(), newestSchemaString);
    job.getConfiguration().set(OrcConf.MAPRED_OUTPUT_SCHEMA.getAttribute(), newestSchemaString);
  }

  /** The configured ORC writer row-batch size, falling back to the writer default. */
  private int getWriterRowBatchSize() {
    return this.state.getPropAsInt(ORC_WRITER_BATCH_SIZE, DEFAULT_ORC_WRITER_BATCH_SIZE);
  }

  /** Propagates the ORC writer row-batch size into the job configuration. */
  protected void setOrcWriterBatchSize(Job job) {
    job.getConfiguration().setInt(ORC_WRITER_BATCH_SIZE, getWriterRowBatchSize());
  }

  /** Map side: combine-file ORC input, OrcKey/OrcValue shuffle, ORC key comparators. */
  protected void configureMapper(Job job) {
    job.setInputFormatClass(OrcValueCombineFileInputFormat.class);
    job.setMapperClass(OrcValueMapper.class);
    job.setMapOutputKeyClass(OrcKey.class);
    job.setMapOutputValueClass(OrcValue.class);
    job.setGroupingComparatorClass(OrcKeyComparator.class);
    job.setSortComparatorClass(OrcKeyComparator.class);
  }

  /** Reduce side: dedup reducer writing ORC values; reducer count sized from the input. */
  protected void configureReducer(Job job) throws IOException {
    job.setReducerClass(OrcKeyDedupReducer.class);
    job.setOutputFormatClass(OrcKeyCompactorOutputFormat.class);
    job.setOutputKeyClass(NullWritable.class);
    job.setOutputValueClass(OrcValue.class);
    setNumberOfReducers(job);
    setOrcWriterBatchSize(job);
  }
}
| 1,859 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce/MRCompactionRunner.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.mapreduce;
import java.io.IOException;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.Properties;
import org.apache.commons.cli.ParseException;
import org.apache.commons.configuration.ConfigurationException;
import org.apache.hadoop.conf.Configuration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Optional;
import org.apache.gobblin.compaction.CliOptions;
import org.apache.gobblin.compaction.Compactor;
import org.apache.gobblin.compaction.CompactorCreationException;
import org.apache.gobblin.compaction.CompactorFactory;
import org.apache.gobblin.compaction.ReflectionCompactorFactory;
import org.apache.gobblin.compaction.listeners.CompactorListener;
import org.apache.gobblin.compaction.listeners.CompactorListenerCreationException;
import org.apache.gobblin.compaction.listeners.CompactorListenerFactory;
import org.apache.gobblin.compaction.listeners.ReflectionCompactorListenerFactory;
import org.apache.gobblin.metrics.Tag;
/**
* A class for launching a Gobblin MR job for compaction through command line.
*
* @author Lorand Bendig
* @deprecated Please use {@link org.apache.gobblin.compaction.mapreduce.MRCompactionTask}
* and {@link org.apache.gobblin.compaction.source.CompactionSource} to launch MR instead.
* The new way enjoys simpler logic to trigger the compaction flow and more reliable verification criteria,
* instead of using timestamp only before.
*
*/
@Deprecated
public class MRCompactionRunner {

  private static final Logger LOG = LoggerFactory.getLogger(MRCompactionRunner.class);

  private final Properties properties;
  private final Compactor compactor;

  public MRCompactionRunner(Properties properties) {
    this.properties = properties;
    this.compactor = getCompactor(getCompactorFactory(), getCompactorListener(getCompactorListenerFactory()));
  }

  /**
   * Command-line entry point: parses job properties from the CLI args and runs the compaction.
   */
  public static void main(String[] args)
      throws IOException, ConfigurationException, ParseException, URISyntaxException {
    Properties jobProperties = CliOptions.parseArgs(MRCompactionRunner.class, args, new Configuration());
    MRCompactionRunner compactionRunner = new MRCompactionRunner(jobProperties);
    compactionRunner.compact();
  }

  /**
   * Runs the compaction. A shutdown hook cancels outstanding compaction jobs if the JVM exits
   * mid-run. On failure the compaction jobs are cancelled and the error is logged (it was
   * previously swallowed silently); the method still returns normally to preserve the existing
   * best-effort contract for callers.
   */
  public void compact() throws IOException {
    Runtime.getRuntime().addShutdownHook(new Thread() {
      @Override
      public void run() {
        try {
          compactor.cancel();
        } catch (IOException e) {
          LOG.warn("Unable to cancel the compactor jobs!", e);
        }
      }
    });
    try {
      compactor.compact();
    } catch (Exception e) {
      // Surface the failure instead of silently swallowing it, then cancel any leftover jobs.
      LOG.error("Compaction failed. Cancelling the compactor jobs.", e);
      compactor.cancel();
    }
  }

  /** Override point for supplying a different listener factory. */
  protected CompactorListenerFactory getCompactorListenerFactory() {
    return new ReflectionCompactorListenerFactory();
  }

  /** Override point for supplying a different compactor factory. */
  protected CompactorFactory getCompactorFactory() {
    return new ReflectionCompactorFactory();
  }

  private Compactor getCompactor(CompactorFactory compactorFactory, Optional<CompactorListener> compactorListener) {
    try {
      return compactorFactory.createCompactor(this.properties, new ArrayList<Tag<String>>(), compactorListener);
    } catch (CompactorCreationException e) {
      throw new RuntimeException("Unable to create compactor", e);
    }
  }

  private Optional<CompactorListener> getCompactorListener(CompactorListenerFactory compactorListenerFactory) {
    try {
      return compactorListenerFactory.createCompactorListener(this.properties);
    } catch (CompactorListenerCreationException e) {
      throw new RuntimeException("Unable to create compactor listener", e);
    }
  }
}
| 1,860 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce/MRCompactionTask.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.mapreduce;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormatCounter;
import com.google.common.collect.ImmutableMap;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.compaction.action.CompactionCompleteAction;
import org.apache.gobblin.compaction.event.CompactionSlaEventHelper;
import org.apache.gobblin.compaction.suite.CompactionSuite;
import org.apache.gobblin.compaction.suite.CompactionSuiteUtils;
import org.apache.gobblin.compaction.verify.CompactionVerifier;
import org.apache.gobblin.dataset.Dataset;
import org.apache.gobblin.dataset.FileSystemDataset;
import org.apache.gobblin.metrics.event.EventSubmitter;
import org.apache.gobblin.runtime.TaskContext;
import org.apache.gobblin.runtime.TaskState;
import org.apache.gobblin.runtime.mapreduce.MRTask;
/**
* Customized task of type {@link MRTask}, which runs MR job to compact dataset.
* The job creation is delegated to {@link CompactionSuite#createJob(Dataset)}
* After job is created, {@link MRCompactionTask#run()} is invoked and after compaction is finished.
* a callback {@link CompactionSuite#getCompactionCompleteActions()} will be invoked
*/
@Slf4j
public class MRCompactionTask extends MRTask {
public static final String RECORD_COUNT = "counter.recordCount";
public static final String FILE_COUNT = "counter.fileCount";
public static final String BYTE_COUNT = "counter.byteCount";
protected final CompactionSuite suite;
protected final Dataset dataset;
protected final EventSubmitter eventSubmitter;
/**
* Constructor
*/
public MRCompactionTask(TaskContext taskContext) throws IOException {
super(taskContext);
this.suite = CompactionSuiteUtils.getCompactionSuiteFactory (taskContext.getTaskState()).
createSuite(taskContext.getTaskState());
this.dataset = this.suite.load(taskContext.getTaskState());
this.eventSubmitter = new EventSubmitter.Builder(this.metricContext, MRCompactor.COMPACTION_TRACKING_EVENTS_NAMESPACE)
.addMetadata(additionalEventMetadata()).build();
}
/**
* Below three steps are performed for a compaction task:
* Do verifications before a map-reduce job is launched.
* Start a map-reduce job and wait until it is finished
* Do post-actions after map-reduce job is finished
*/
@Override
public void run() {
List<CompactionVerifier> verifiers = this.suite.getMapReduceVerifiers();
for (CompactionVerifier verifier : verifiers) {
if (!verifier.verify(dataset).isSuccessful()) {
log.error("Verification {} for {} is not passed.", verifier.getName(), dataset.getUrn());
this.onMRTaskComplete (false, new IOException("Compaction verification for MR is failed"));
return;
}
}
if (dataset instanceof FileSystemDataset
&& ((FileSystemDataset)dataset).isVirtual()) {
log.info("A trivial compaction job as there is no physical data for {}."
+ "Will trigger a success complete directly", dataset.getUrn());
this.onMRTaskComplete(true, null);
return;
}
super.run();
}
public void onMRTaskComplete (boolean isSuccess, Throwable throwable) {
if (isSuccess) {
try {
setCounterInfo(taskContext.getTaskState());
List<CompactionCompleteAction> actions = this.suite.getCompactionCompleteActions();
for (CompactionCompleteAction action: actions) {
action.addEventSubmitter(eventSubmitter);
action.onCompactionJobComplete(dataset);
}
submitEvent(CompactionSlaEventHelper.COMPACTION_COMPLETED_EVENT_NAME);
super.onMRTaskComplete(true, null);
} catch (IOException e) {
submitEvent(CompactionSlaEventHelper.COMPACTION_FAILED_EVENT_NAME);
super.onMRTaskComplete(false, e);
}
} else {
submitEvent(CompactionSlaEventHelper.COMPACTION_FAILED_EVENT_NAME);
super.onMRTaskComplete(false, throwable);
}
}
private void setCounterInfo(TaskState taskState)
throws IOException {
if (mrJob == null) {
return;
}
long recordCount = getCounterValue(mrJob, RecordKeyDedupReducerBase.EVENT_COUNTER.RECORD_COUNT);
if (recordCount == 0) {
// map only job
recordCount = getCounterValue(mrJob, RecordKeyMapperBase.EVENT_COUNTER.RECORD_COUNT);
}
taskState.setProp(RECORD_COUNT, recordCount);
taskState.setProp(FILE_COUNT, getCounterValue(mrJob, CompactorOutputCommitter.EVENT_COUNTER.OUTPUT_FILE_COUNT));
taskState.setProp(BYTE_COUNT, getCounterValue(mrJob, FileOutputFormatCounter.BYTES_WRITTEN));
}
private long getCounterValue(Job job, Enum<?> key)
throws IOException {
return job.getCounters().findCounter(key).getValue();
}
private void submitEvent(String eventName) {
Map<String, String> eventMetadataMap = ImmutableMap.of(CompactionSlaEventHelper.DATASET_URN, this.dataset.datasetURN());
this.eventSubmitter.submit(eventName, eventMetadataMap);
}
/**
* Create a map-reduce job
* The real job configuration is delegated to {@link CompactionSuite#createJob(Dataset)}
*
* @return a map-reduce job
*/
protected Job createJob() throws IOException {
return this.suite.createJob(dataset);
}
}
| 1,861 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce/MRCompactionTaskFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.mapreduce;
import java.io.IOException;
import org.apache.gobblin.runtime.TaskContext;
import org.apache.gobblin.runtime.mapreduce.MRTaskFactory;
import org.apache.gobblin.runtime.task.TaskIFace;
/**
* A subclass of {@link MRTaskFactory} which provides a customized {@link MRCompactionTask} instance
*/
public class MRCompactionTaskFactory extends MRTaskFactory {

  /**
   * Creates an {@link MRCompactionTask} for the given context.
   *
   * @throws RuntimeException wrapping any {@link IOException} raised while the task loads its
   *         compaction suite and dataset
   */
  @Override
  public TaskIFace createTask(TaskContext taskContext) {
    try {
      return new MRCompactionTask(taskContext);
    } catch (IOException ioe) {
      throw new RuntimeException(ioe);
    }
  }
}
| 1,862 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce/test/TestCompactionOrcJobConfigurator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.mapreduce.test;
import java.io.IOException;
import org.apache.hadoop.mapreduce.Job;
import org.apache.gobblin.compaction.mapreduce.CompactionJobConfigurator;
import org.apache.gobblin.compaction.mapreduce.CompactionOrcJobConfigurator;
import org.apache.gobblin.configuration.State;
public class TestCompactionOrcJobConfigurator extends CompactionOrcJobConfigurator {
public static class Factory implements CompactionJobConfigurator.ConfiguratorFactory {
@Override
public TestCompactionOrcJobConfigurator createConfigurator(State state) throws IOException {
return new TestCompactionOrcJobConfigurator(state);
}
}
@Override
protected void setNumberOfReducers(Job job) {
job.setNumReduceTasks(1);
}
public TestCompactionOrcJobConfigurator(State state) throws IOException {
super(state);
}
} | 1,863 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce/test/TestCompactionTaskUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.mapreduce.test;
import org.apache.hadoop.fs.Path;
import org.apache.gobblin.compaction.dataset.TimeBasedSubDirDatasetsFinder;
import org.apache.gobblin.compaction.mapreduce.MRCompactor;
import org.apache.gobblin.compaction.source.CompactionSource;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.data.management.retention.profile.ConfigurableGlobDatasetFinder;
import org.apache.gobblin.runtime.embedded.EmbeddedGobblin;
public class TestCompactionTaskUtils {
public static final String PATH_SEPARATOR = "/";
public static final String DEFAULT_INPUT_SUBDIR_TYPE = "minutely";
public static EmbeddedGobblin createEmbeddedGobblinCompactionJob(String name, String basePath) {
return createEmbeddedGobblinCompactionJob(name, basePath, DEFAULT_INPUT_SUBDIR_TYPE);
}
public static EmbeddedGobblin createEmbeddedGobblinCompactionJob(String name, String basePath, String inputSubdirType) {
String pattern;
String outputSubdirType;
if (inputSubdirType.equals(DEFAULT_INPUT_SUBDIR_TYPE)) {
pattern = new Path(basePath, "*/*/minutely/*/*/*/*").toString();
outputSubdirType = "hourly";
} else {
pattern = new Path(basePath, "*/*/hourly/*/*/*").toString();
outputSubdirType = "daily";
}
return new EmbeddedGobblin(name)
.setConfiguration(ConfigurationKeys.SOURCE_CLASS_KEY, CompactionSource.class.getName())
.setConfiguration(ConfigurableGlobDatasetFinder.DATASET_FINDER_PATTERN_KEY, pattern)
.setConfiguration(MRCompactor.COMPACTION_INPUT_DIR, basePath)
.setConfiguration(MRCompactor.COMPACTION_INPUT_SUBDIR, inputSubdirType)
.setConfiguration(MRCompactor.COMPACTION_DEST_DIR, basePath)
.setConfiguration(MRCompactor.COMPACTION_DEST_SUBDIR, outputSubdirType)
.setConfiguration(MRCompactor.COMPACTION_TMP_DEST_DIR, "/tmp/compaction/" + name)
.setConfiguration(TimeBasedSubDirDatasetsFinder.COMPACTION_TIMEBASED_MAX_TIME_AGO, "3000d")
.setConfiguration(TimeBasedSubDirDatasetsFinder.COMPACTION_TIMEBASED_MIN_TIME_AGO, "1d")
.setConfiguration(ConfigurationKeys.MAX_TASK_RETRIES_KEY, "0");
}
} | 1,864 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce/test/TestOrcCompactionTask.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.mapreduce.test;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.Reader;
import java.net.URL;
import java.nio.charset.Charset;
import java.util.Calendar;
import org.apache.commons.cli.ParseException;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.orc.tools.convert.ConvertTool;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.io.Files;
import com.google.gson.JsonElement;
import com.google.gson.JsonParser;
import org.apache.gobblin.compaction.mapreduce.CompactionJobConfigurator;
import org.apache.gobblin.runtime.embedded.EmbeddedGobblin;
import static org.apache.gobblin.compaction.mapreduce.test.TestCompactionTaskUtils.PATH_SEPARATOR;
/**
* A convenience class for running ORC compaction locally. Particularly useful for local debugging. The method takes as
* input a resource folder containing Json files containing input records, and an ORC data1.schema file, and generates
* input ORC files in hourly folders under ${HOME_DIRECTORY}/data using the ORC {@link ConvertTool}. The ORC input data
* for compaction job is written under: ${HOME_DIRECTORY}/data/tracking/testTopic/hourly/YYYY/MM/DD/HH.
* The output of the compaction job is written under: ${HOME_DIRECTORY}/data/tracking/testTopic/daily/YYYY/MM/DD.
* The year, month, day is derived from the header.time field in the input records.
*
* Assumptions:
* <ul>
* <li>The input data has the header.time field, which is assumed to be the epoch time in millis</li>
* <li>Associated with each json file, the must be a corresponding schema file containing the ORC schema definition. The schema file
* must have the same filename (without extension) as the corresponding json file. See the orcCompactionTest resource folder for
* an example. </li>
* </ul>
*
* When running the main() method in your IDE, make sure to remove the hive-exec, log4j-over-slf4j and xerces jars from
* the Project's External Libraries.
*
*/
public class TestOrcCompactionTask {
  private static final JsonParser PARSER = new JsonParser();
  private static final String HOURLY_SUBDIR = "tracking/testTopic/hourly";
  private static final String JSON_FILE_EXTENSION = "json";
  private static final String TEST_RESOURCE_FOLDER_NAME = "orcCompactionTest";

  /**
   * Generates hourly ORC input data from the bundled JSON fixtures, then runs a local
   * hourly-to-daily ORC compaction job over it.
   */
  public static void main(String[] args) throws Exception {
    File basePath = new File(System.getProperty("user.home"), TEST_RESOURCE_FOLDER_NAME);
    if (basePath.exists()) {
      FileUtils.deleteDirectory(basePath);
    }
    boolean mkdirs = basePath.mkdirs();
    Preconditions.checkArgument(mkdirs, "Unable to create: " + basePath.getAbsolutePath());

    URL resourceURL = TestOrcCompactionTask.class.getClassLoader().getResource(TEST_RESOURCE_FOLDER_NAME);
    Preconditions.checkArgument(resourceURL != null, "Could not find resource: " + TEST_RESOURCE_FOLDER_NAME);
    File resourceDirectory = new File(resourceURL.getFile());
    // Fix: File.listFiles() returns null when the path is not a readable directory; fail fast
    // with a clear message instead of an NPE in the for-loop below.
    File[] resourceFiles = resourceDirectory.listFiles();
    Preconditions.checkArgument(resourceFiles != null,
        "Not a readable directory: " + resourceDirectory.getAbsolutePath());
    for (File file : resourceFiles) {
      if (isJsonFile(file)) {
        createOrcFile(file, basePath.getAbsolutePath());
      }
    }

    EmbeddedGobblin embeddedGobblin =
        TestCompactionTaskUtils.createEmbeddedGobblinCompactionJob("basic", basePath.getAbsolutePath(), "hourly")
            .setConfiguration(CompactionJobConfigurator.COMPACTION_JOB_CONFIGURATOR_FACTORY_CLASS_KEY,
                TestCompactionOrcJobConfigurator.Factory.class.getName());
    embeddedGobblin.run();
  }

  /**
   * Converts a single JSON fixture into an ORC file under the hourly folder derived from its
   * {@code header.time} field, using the sibling {@code .schema} file for the ORC schema.
   */
  private static void createOrcFile(File file, String basePath)
      throws IOException, ParseException {
    JsonElement jsonElement;
    try (Reader reader = new InputStreamReader(new FileInputStream(file), Charset.defaultCharset())) {
      jsonElement = PARSER.parse(reader);
    }
    // header.time is the record's epoch time in millis; it decides the hourly target folder.
    long timestamp = jsonElement.getAsJsonObject().get("header").getAsJsonObject().get("time").getAsLong();
    File hourlyPath = new File(getPath(basePath, timestamp));
    if (!hourlyPath.exists()) {
      boolean result = hourlyPath.mkdirs();
      Preconditions.checkArgument(result, "Unable to create: " + hourlyPath.getAbsolutePath());
    }
    String fileNameWithoutExtensions = Files.getNameWithoutExtension(file.getName());
    File schemaFile = new File(file.getParent(), fileNameWithoutExtensions + ".schema");
    String orcSchema = FileUtils.readFileToString(schemaFile, Charset.defaultCharset());
    String orcFileName = hourlyPath.getAbsolutePath() + PATH_SEPARATOR + fileNameWithoutExtensions + ".orc";
    File orcFile = new File(orcFileName);
    // Delete if file already exists
    if (orcFile.exists()) {
      boolean result = orcFile.delete();
      Preconditions.checkArgument(result, "Unable to delete: " + orcFile.getAbsolutePath());
    }
    // Convert to ORC using the corresponding schema
    String[] convertToolArgs = new String[]{"-s", orcSchema, file.getAbsolutePath(), "-o", orcFileName};
    ConvertTool.main(new Configuration(), convertToolArgs);
  }

  /**
   * A helper method that returns the absolute path of the hourly folder given a timestamp and a basePath.
   * @param basePath e.g. /Users/foo/orcCompactionTaskTest
   * @param timestamp the unix timestamp in milliseconds
   * @return the output path of the hourly folder e.g. /Users/foo/orcCompactionTaskTest/hourly/2020/08/20/12
   */
  private static String getPath(String basePath, Long timestamp) {
    Calendar calendar = Calendar.getInstance();
    calendar.setTimeInMillis(timestamp);
    String year = Integer.toString(calendar.get(Calendar.YEAR));
    // Fix: Calendar.MONTH is zero-based (JANUARY == 0), so add 1 to produce the conventional
    // 01-12 month number; previously August produced "07" instead of the documented "08".
    String month = String.format("%02d", calendar.get(Calendar.MONTH) + 1);
    String day = String.format("%02d", calendar.get(Calendar.DAY_OF_MONTH));
    String hour = String.format("%02d", calendar.get(Calendar.HOUR_OF_DAY));
    return Joiner.on(PATH_SEPARATOR).join(basePath, HOURLY_SUBDIR, year, month, day, hour);
  }

  /** Returns true iff the file has a ".json" extension. */
  private static boolean isJsonFile(File file) {
    return Files.getFileExtension(file.getName()).equals(JSON_FILE_EXTENSION);
  }
}
| 1,865 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce/avro/FieldAttributeBasedDeltaFieldsProvider.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.mapreduce.avro;
import com.linkedin.avroutil1.compatibility.AvroCompatibilityHelper;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutionException;
import org.apache.avro.Schema;
import org.apache.avro.Schema.Field;
import org.apache.avro.generic.GenericRecord;
import org.apache.hadoop.conf.Configuration;
import org.codehaus.jackson.JsonFactory;
import org.codehaus.jackson.JsonNode;
import org.codehaus.jackson.JsonParser;
import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.node.ObjectNode;
import com.google.common.base.Preconditions;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import lombok.extern.slf4j.Slf4j;
/**
* Extends {@link AvroDeltaFieldNameProvider}, which relies on field {@link #ATTRIBUTE_FIELD} in record schema to get the delta schema.
*/
@Slf4j
public class FieldAttributeBasedDeltaFieldsProvider implements AvroDeltaFieldNameProvider {

  /**
   * Config key naming the Avro field property that carries the delta marker for each field
   * (required; the constructor fails if it is absent from the configuration).
   */
  public static final String ATTRIBUTE_FIELD =
      "org.apache.gobblin.compaction." + FieldAttributeBasedDeltaFieldsProvider.class.getSimpleName() + ".deltaAttributeField";

  /** Config key for the property name looked up inside the attribute's JSON object. */
  public static final String DELTA_PROP_NAME =
      "org.apache.gobblin.compaction." + FieldAttributeBasedDeltaFieldsProvider.class.getSimpleName() + ".deltaPropName";

  /** Default value for {@link #DELTA_PROP_NAME} when no explicit name is configured. */
  public static final String DEFAULT_DELTA_PROP_NAME = "delta";

  // Name of the Avro field property inspected on every schema field.
  private final String attributeField;
  // Key looked up inside that property's JSON value to decide whether the field is a delta field.
  private final String deltaPropName;
  // Caches the computed delta-field list per schema: records sharing a schema share the result,
  // so the per-field JSON parsing only happens once per distinct schema (up to 100 schemas).
  private final LoadingCache<Schema, List<String>> recordSchemaToDeltaSchemaCache;

  /**
   * @param conf Hadoop configuration; must contain {@link #ATTRIBUTE_FIELD}, and may override
   *             the delta property name via {@link #DELTA_PROP_NAME}
   * @throws IllegalArgumentException if {@link #ATTRIBUTE_FIELD} is not configured
   */
  public FieldAttributeBasedDeltaFieldsProvider (Configuration conf) {
    this.attributeField = conf.get(ATTRIBUTE_FIELD);
    Preconditions.checkArgument(attributeField != null, "Missing config " + ATTRIBUTE_FIELD);
    this.deltaPropName = conf.get(DELTA_PROP_NAME, DEFAULT_DELTA_PROP_NAME);
    this.recordSchemaToDeltaSchemaCache=
        CacheBuilder.newBuilder().maximumSize(100).build(new CacheLoader<Schema, List<String>>() {
          @Override
          public List<String> load(Schema schema)
              throws Exception {
            return getDeltaFieldNamesForNewSchema(schema);
          }
        });
  }

  /**
   * Returns the delta field names for the given record, derived from (and cached by) its schema.
   *
   * @throws RuntimeException wrapping any failure raised while computing the list for a schema
   *         seen for the first time
   */
  @Override
  public List<String> getDeltaFieldNames(GenericRecord record) {
    try {
      return recordSchemaToDeltaSchemaCache.get(record.getSchema());
    } catch (ExecutionException e) {
      throw new RuntimeException(e);
    }
  }

  // Scans every field of the schema and collects the names of fields whose attribute JSON
  // carries a truthy delta property.
  private List<String> getDeltaFieldNamesForNewSchema(Schema originalSchema) {
    List<String> deltaFields = new ArrayList<>();
    for (Field field : originalSchema.getFields()) {
      // Avro 1.9 compatible change - replaced deprecated public api getJsonProp with AvroCompatibilityHelper methods
      String deltaAttributeField = AvroCompatibilityHelper.getFieldPropAsJsonString(field, this.attributeField,
          true, false);
      ObjectNode objectNode = getDeltaPropValue(deltaAttributeField);
      // Fields without the attribute, or without the delta property inside it, are skipped.
      if (objectNode == null || objectNode.get(this.deltaPropName) == null) {
        continue;
      }
      if (Boolean.parseBoolean(objectNode.get(this.deltaPropName).toString())) {
        deltaFields.add(field.name());
      }
    }
    log.info("Will use delta fields: " + deltaFields);
    return deltaFields;
  }

  // Parses the attribute value in two steps: the value is a JSON string literal whose text is
  // itself a JSON object, so the outer parse yields a text node and the inner readTree parses
  // that text into the ObjectNode callers inspect. Returns null on any parse failure, which
  // callers treat as "no delta marker" (intentional best-effort behavior).
  private ObjectNode getDeltaPropValue(String json) {
    try {
      JsonFactory jf = new JsonFactory();
      JsonParser jp = jf.createJsonParser(json);
      ObjectMapper objMap = new ObjectMapper(jf);
      jp.setCodec(objMap);
      JsonNode jsonNode = jp.readValueAsTree();
      return (ObjectNode) objMap.readTree(jsonNode.asText());
    } catch (IOException e) {
      return null;
    }
  }
}
| 1,866 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce/avro/AvroKeyMapper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.mapreduce.avro;
import java.io.IOException;
import org.apache.avro.AvroRuntimeException;
import org.apache.avro.Schema;
import org.apache.avro.Schema.Field;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.mapred.AvroKey;
import org.apache.avro.mapred.AvroValue;
import org.apache.avro.mapreduce.AvroJob;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.lib.input.CombineFileSplit;
import org.apache.gobblin.compaction.mapreduce.RecordKeyMapperBase;
/**
* Mapper class for compaction MR job for Avro data.
*
* For each input Avro record, it emits a key-value pair, where key is the projection of the input record
* on the attributes on which we de-duplicate, and value is the original record.
*
* If the number of reducers is set to 0, then it is an identity mapper.
*
* @author Ziyang Liu
*/
public class AvroKeyMapper extends
    RecordKeyMapperBase<AvroKey<GenericRecord>, NullWritable, AvroKey<GenericRecord>, Object> {

  // Reused output key: a record of the (projected) map-output key schema, repopulated per input.
  private AvroKey<GenericRecord> outKey;
  // Reused output value wrapper around the full input record.
  private AvroValue<GenericRecord> outValue;
  // Map-output key schema, read from the job configuration; a projection of the record schema
  // onto the dedup attributes.
  private Schema keySchema;

  /** Reads the map-output key schema from the job config and allocates the reusable key/value. */
  @Override
  protected void setup(Context context) throws IOException, InterruptedException {
    this.keySchema = AvroJob.getMapOutputKeySchema(context.getConfiguration());
    this.outKey = new AvroKey<>();
    this.outKey.datum(new GenericData.Record(this.keySchema));
    this.outValue = new AvroValue<>();
  }

  /**
   * Emits (projected key, full record) for the reduce-side dedup, or acts as an identity mapper
   * when the job has no reducers. Every processed record increments the RECORD_COUNT counter.
   */
  @Override
  protected void map(AvroKey<GenericRecord> key, NullWritable value, Context context)
      throws IOException, InterruptedException {
    if (context.getNumReduceTasks() == 0) {
      // Map-only job: pass the record through unchanged.
      context.write(key, NullWritable.get());
    } else {
      populateComparableKeyRecord(key.datum(), this.outKey.datum());
      this.outValue.datum(key.datum());
      try {
        context.write(this.outKey, this.outValue);
      } catch (AvroRuntimeException e) {
        // Surface which input files produced the bad record, for debuggability.
        final Path[] paths = ((CombineFileSplit) context.getInputSplit()).getPaths();
        throw new IOException("Unable to process paths " + StringUtils.join(paths, ','), e);
      }
    }
    context.getCounter(EVENT_COUNTER.RECORD_COUNT).increment(1);
  }

  /**
   * Populate the target record, based on the field values in the source record.
   * Target record's schema should be a subset of source record's schema.
   * Target record's schema cannot have MAP, ARRAY or ENUM fields, or UNION fields that
   * contain these fields.
   */
  private void populateComparableKeyRecord(GenericRecord source, GenericRecord target) {
    for (Field field : target.getSchema().getFields()) {
      if (field.schema().getType() == Schema.Type.UNION) {
        // Since a UNION has multiple types, we need to use induce() to get the actual type in the record.
        Object fieldData = source.get(field.name());
        Schema actualFieldSchema = GenericData.get().induce(fieldData);
        if (actualFieldSchema.getType() == Schema.Type.RECORD) {
          // If the actual type is RECORD (which may contain another UNION), we need to recursively
          // populate it.
          for (Schema candidateType : field.schema().getTypes()) {
            // Match the union branch by full name before recursing into it.
            if (candidateType.getFullName().equals(actualFieldSchema.getFullName())) {
              GenericRecord record = new GenericData.Record(candidateType);
              target.put(field.name(), record);
              populateComparableKeyRecord((GenericRecord) fieldData, record);
              break;
            }
          }
        } else {
          // Non-record union branch (e.g. a primitive): copy the value directly.
          target.put(field.name(), source.get(field.name()));
        }
      } else if (field.schema().getType() == Schema.Type.RECORD) {
        // Nested record field: reuse the target's sub-record if present, else allocate one.
        GenericRecord record = (GenericRecord) target.get(field.name());
        if (record == null) {
          record = new GenericData.Record(field.schema());
          target.put(field.name(), record);
        }
        populateComparableKeyRecord((GenericRecord) source.get(field.name()), record);
      } else {
        // Plain field: straight copy.
        target.put(field.name(), source.get(field.name()));
      }
    }
  }
}
| 1,867 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce/avro/AvroKeyDedupReducer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.mapreduce.avro;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import java.util.Comparator;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.mapred.AvroKey;
import org.apache.avro.mapred.AvroValue;
import org.apache.gobblin.compaction.mapreduce.RecordKeyDedupReducerBase;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.NullWritable;
/**
* Reducer class for compaction MR job for Avro data.
*
* If there are multiple values of the same key, it keeps the last value read.
*
* @author Ziyang Liu
*/
public class AvroKeyDedupReducer extends RecordKeyDedupReducerBase<AvroKey<GenericRecord>, AvroValue<GenericRecord>,
    AvroKey<GenericRecord>, NullWritable> {

  /** Config key for the {@link AvroDeltaFieldNameProvider} class name; absent means no delta comparison. */
  public static final String DELTA_SCHEMA_PROVIDER =
      "org.apache.gobblin.compaction." + AvroKeyDedupReducer.class.getSimpleName() + ".deltaFieldsProvider";

  @Override
  protected void initReusableObject() {
    outKey = new AvroKey<>();
    outValue = NullWritable.get();
  }

  @Override
  protected void setOutKey(AvroValue<GenericRecord> valueToRetain) {
    outKey.datum(valueToRetain.datum());
  }

  @Override
  protected void setOutValue(AvroValue<GenericRecord> valueToRetain) {
    // do nothing since initReusableObject has assigned value for outValue.
  }

  /**
   * Builds the optional delta comparator from the configured provider class; when no provider is
   * configured, dedup keeps the last value read for each key.
   */
  @Override
  protected void initDeltaComparator(Configuration conf) {
    deltaComparatorOptional = Optional.absent();
    String deltaSchemaProviderClassName = conf.get(DELTA_SCHEMA_PROVIDER);
    if (deltaSchemaProviderClassName != null) {
      deltaComparatorOptional = Optional.of(new AvroValueDeltaSchemaComparator(
          GobblinConstructorUtils.invokeConstructor(AvroDeltaFieldNameProvider.class, deltaSchemaProviderClassName,
              conf)));
    }
  }

  /**
   * Orders two records by their delta fields (as reported by the provider for the first record's
   * schema): the first differing delta field decides; records equal on all delta fields compare
   * equal. Null delta values sort before non-null ones.
   */
  @VisibleForTesting
  protected static class AvroValueDeltaSchemaComparator implements Comparator<AvroValue<GenericRecord>> {
    private final AvroDeltaFieldNameProvider deltaSchemaProvider;

    public AvroValueDeltaSchemaComparator(AvroDeltaFieldNameProvider provider) {
      this.deltaSchemaProvider = provider;
    }

    @Override
    public int compare(AvroValue<GenericRecord> o1, AvroValue<GenericRecord> o2) {
      GenericRecord record1 = o1.datum();
      GenericRecord record2 = o2.datum();
      for (String deltaFieldName : this.deltaSchemaProvider.getDeltaFieldNames(record1)) {
        // Fix: look each value up once and handle nulls explicitly. Avro fields whose schema is
        // a union with null can legitimately hold null; the previous code NPE'd on
        // record1.get(...).equals(...) in that case.
        Object value1 = record1.get(deltaFieldName);
        Object value2 = record2.get(deltaFieldName);
        if (value1 == null && value2 == null) {
          continue;
        }
        if (value1 == null) {
          return -1; // null sorts before any non-null delta value
        }
        if (value2 == null) {
          return 1;
        }
        if (value1.equals(value2)) {
          continue;
        }
        return ((Comparable) value1).compareTo(value2);
      }
      return 0;
    }
  }
}
| 1,868 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce/avro/ConfBasedDeltaFieldProvider.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.mapreduce.avro;
import java.util.ArrayList;
import java.util.List;
import org.apache.avro.generic.GenericRecord;
import org.apache.hadoop.conf.Configuration;
import org.testng.annotations.Test;
import com.google.common.base.Splitter;
/**
* Job config based {@link AvroDeltaFieldNameProvider}, which reads delta fields from config properties.
*/
@Test(groups = {"gobblin.compaction"})
public class ConfBasedDeltaFieldProvider implements AvroDeltaFieldNameProvider {
public static final String DELTA_FIELDS_KEY =
"org.apache.gobblin.compaction." + ConfBasedDeltaFieldProvider.class.getSimpleName() + ".deltaFields";
private final List<String> deltaFields;
public ConfBasedDeltaFieldProvider(Configuration conf) {
String deltaConfValue = conf.get(DELTA_FIELDS_KEY);
if (deltaConfValue == null) {
this.deltaFields = new ArrayList<>();
} else {
this.deltaFields = Splitter.on(',').omitEmptyStrings().trimResults().splitToList(deltaConfValue);
}
}
/**
* Return delta fields specified by {@link #DELTA_FIELDS_KEY}.
* The order of the returned list is consistent with the order in job conf.
*/
public List<String> getDeltaFieldNames(GenericRecord record) {
return this.deltaFields;
}
}
| 1,869 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce/avro/AvroKeyCompactorOutputFormat.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.mapreduce.avro;
import java.io.IOException;
import org.apache.avro.mapreduce.AvroKeyOutputFormat;
import org.apache.gobblin.compaction.mapreduce.CompactorOutputCommitter;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
/**
 * Class used with {@link MRCompactorAvroKeyDedupJobRunner} as an entirely normal
 * {@link AvroKeyOutputFormat}, except that the outputted file names contain
 * a timestamp and a count of how many records the file contains in the form:
 * {recordCount}.{timestamp}.avro
 */
public class AvroKeyCompactorOutputFormat<T> extends AvroKeyOutputFormat<T> {

  // Lazily created committer, shared across calls; creation is serialized by the
  // synchronized accessor below.
  private FileOutputCommitter committer = null;

  @Override
  public synchronized OutputCommitter getOutputCommitter(TaskAttemptContext context) throws IOException {
    if (this.committer != null) {
      return this.committer;
    }
    this.committer = new CompactorOutputCommitter(FileOutputFormat.getOutputPath(context), context);
    return this.committer;
  }
}
| 1,870 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce/avro/AvroDeltaFieldNameProvider.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.mapreduce.avro;
import java.util.List;
import org.apache.avro.generic.GenericRecord;
/**
 * Provides the delta field names used by {@link AvroKeyDedupReducer} to order duplicate records.
 */
public interface AvroDeltaFieldNameProvider {

  /**
   * Returns the delta field names for the given record.
   *
   * @param record the record whose delta fields are requested
   * @return the list of delta field names, in comparison order
   */
  List<String> getDeltaFieldNames(GenericRecord record);
}
| 1,871 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce/avro/AvroKeyCombineFileRecordReader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.mapreduce.avro;
import java.io.IOException;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.mapreduce.AvroJob;
import org.apache.avro.mapreduce.AvroKeyRecordReader;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.CombineFileSplit;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import org.apache.gobblin.util.AvroUtils;
/**
 * A subclass of {@link org.apache.avro.mapreduce.AvroKeyRecordReader}. The purpose is to add a constructor
 * with signature (CombineFileSplit, TaskAttemptContext, Integer), which is required in order to use
 * {@link org.apache.hadoop.mapreduce.lib.input.CombineFileRecordReader}.
 *
 * @author Ziyang Liu
 */
public class AvroKeyCombineFileRecordReader extends AvroKeyRecordReader<GenericRecord> {

  // The combined split this reader belongs to, and the index of the single file
  // within that split that this reader instance reads.
  private final CombineFileSplit split;
  private final Integer idx;

  @SuppressFBWarnings("BC_UNCONFIRMED_CAST")
  public AvroKeyCombineFileRecordReader(CombineFileSplit split, TaskAttemptContext cx, Integer idx) throws IOException {
    // Resolve the reader schema eagerly so the superclass constructor can receive it.
    this(split, getSchema(split, cx, idx), idx);
  }

  private AvroKeyCombineFileRecordReader(CombineFileSplit split, Schema inputKeySchema, Integer idx) {
    super(inputKeySchema);
    this.split = split;
    this.idx = idx;
  }

  @Override
  public void initialize(InputSplit unusedSplit, TaskAttemptContext cx) throws IOException, InterruptedException {
    // Delegate to the parent with a single-file FileSplit carved out of the combine split at idx;
    // the split passed in by the framework is ignored (hence "unusedSplit").
    super.initialize(
        new FileSplit(this.split.getPath(this.idx), this.split.getOffset(this.idx), this.split.getLength(this.idx),
            null), cx);
  }

  /**
   * Resolves the reader schema for the file at {@code idx}: the job-level input key schema when
   * one is configured, otherwise the schema embedded in the Avro data file itself.
   */
  private static Schema getSchema(CombineFileSplit split, TaskAttemptContext cx, Integer idx) throws IOException {
    Schema schema = AvroJob.getInputKeySchema(cx.getConfiguration());
    if (schema != null) {
      return schema;
    }
    Path path = split.getPath(idx);
    FileSystem fs = path.getFileSystem(cx.getConfiguration());
    return AvroUtils.getSchemaFromDataFile(path, fs);
  }
}
| 1,872 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce/avro/AvroKeyRecursiveCombineFileInputFormat.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.mapreduce.avro;
import java.io.IOException;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.mapred.AvroKey;
import org.apache.gobblin.compaction.mapreduce.CompactionCombineFileInputFormat;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.CombineFileRecordReader;
import org.apache.hadoop.mapreduce.lib.input.CombineFileSplit;
/**
 * A subclass of {@link org.apache.hadoop.mapreduce.lib.input.CombineFileInputFormat} for Avro inputfiles.
 * This class is able to handle the case where the input path has subdirs which contain data files, which
 * is not the case with {@link org.apache.hadoop.mapreduce.lib.input.CombineFileInputFormat}.
 *
 * @author Ziyang Liu
 */
public class AvroKeyRecursiveCombineFileInputFormat
    extends CompactionCombineFileInputFormat<AvroKey<GenericRecord>, NullWritable> {

  @Override
  public RecordReader<AvroKey<GenericRecord>, NullWritable> createRecordReader(InputSplit split, TaskAttemptContext cx)
      throws IOException {
    // The framework hands us the CombineFileSplit produced by getSplits(); wrap it in a
    // CombineFileRecordReader that instantiates one AvroKeyCombineFileRecordReader per file.
    CombineFileSplit combineSplit = (CombineFileSplit) split;
    return new CombineFileRecordReader<>(combineSplit, cx, AvroKeyCombineFileRecordReader.class);
  }
}
| 1,873 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce/avro/MRCompactorAvroKeyDedupJobRunner.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.mapreduce.avro;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Enums;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import java.io.IOException;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import org.apache.avro.Schema;
import org.apache.avro.Schema.Field;
import org.apache.avro.SchemaCompatibility;
import org.apache.avro.SchemaCompatibility.SchemaCompatibilityType;
import org.apache.avro.mapred.AvroKey;
import org.apache.avro.mapred.AvroValue;
import org.apache.avro.mapreduce.AvroJob;
import com.linkedin.avroutil1.compatibility.AvroCompatibilityHelper;
import org.apache.commons.io.FilenameUtils;
import org.apache.gobblin.compaction.dataset.Dataset;
import org.apache.gobblin.compaction.mapreduce.MRCompactorJobRunner;
import org.apache.gobblin.util.AvroUtils;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * A subclass of {@link org.apache.gobblin.compaction.mapreduce.MRCompactorJobRunner} that configures
 * and runs the MR compaction job for Avro data.
 *
 * <p>To dedup using entire records set compaction.use.all.attributes=true. Otherwise, a schema needs
 * to be provided by compaction.avro.key.schema.loc, based on which the dedup is performed.
 *
 * @author Ziyang Liu
 */
public class MRCompactorAvroKeyDedupJobRunner extends MRCompactorJobRunner {

  private static final Logger LOG = LoggerFactory.getLogger(MRCompactorAvroKeyDedupJobRunner.class);

  private static final String COMPACTION_JOB_PREFIX = "compaction.job.";

  /**
   * If true, the latest schema, determined from the input files, will be used as the single schema
   * for all input files; otherwise the Avro schema of each input file will be determined and splits
   * will be created with respect to that file's schema.
   */
  public static final String COMPACTION_JOB_AVRO_SINGLE_INPUT_SCHEMA =
      COMPACTION_JOB_PREFIX + "avro.single.input.schema";

  /**
   * Properties related to the avro dedup compaction job of a dataset.
   */
  public static final String COMPACTION_JOB_AVRO_KEY_SCHEMA_LOC = COMPACTION_JOB_PREFIX + "avro.key.schema.loc";
  public static final String COMPACTION_JOB_DEDUP_KEY = COMPACTION_JOB_PREFIX + "dedup.key";
  public static final String COMPACTION_JOB_KEY_FIELD_BLACKLIST = COMPACTION_JOB_PREFIX + "key.fieldBlacklist";

  private static final String AVRO = "avro";
  // Field-doc suffix that marks a field as part of the dedup key (matched case-insensitively).
  private static final String SCHEMA_DEDUP_FIELD_ANNOTATOR = "primarykey";

  public enum DedupKeyOption {
    // Use all fields in the topic schema
    ALL,
    // Use fields in the topic schema whose docs match "(?i).*primarykey".
    // If there's no such field, option ALL will be used.
    KEY,
    // Provide a custom dedup schema through property "avro.key.schema.loc"
    CUSTOM
  }

  public static final DedupKeyOption DEFAULT_DEDUP_KEY_OPTION = DedupKeyOption.KEY;

  private final boolean useSingleInputSchema;

  public MRCompactorAvroKeyDedupJobRunner(Dataset dataset, FileSystem fs) {
    super(dataset, fs);
    this.useSingleInputSchema = this.dataset.jobProps().getPropAsBoolean(COMPACTION_JOB_AVRO_SINGLE_INPUT_SCHEMA, true);
  }

  @Override
  protected void configureJob(Job job) throws IOException {
    super.configureJob(job);
    configureSchema(job);
  }

  /** Configures the Avro input, map-output and output schemas on the job. */
  private void configureSchema(Job job) throws IOException {
    Schema newestSchema = getNewestSchemaFromSource(job, this.fs);
    if (this.useSingleInputSchema) {
      AvroJob.setInputKeySchema(job, newestSchema);
    }
    // When deduplicating, the map output key carries only the dedup-key fields.
    AvroJob.setMapOutputKeySchema(job, this.shouldDeduplicate ? getKeySchema(job, newestSchema) : newestSchema);
    AvroJob.setMapOutputValueSchema(job, newestSchema);
    AvroJob.setOutputKeySchema(job, newestSchema);
  }

  /**
   * Obtain the schema used for compaction. If compaction.dedup.key=all, it returns topicSchema.
   * If compaction.dedup.key=key, it returns a schema composed of all fields in topicSchema
   * whose doc matches "(?i).*primarykey". If there's no such field, option "all" will be used.
   * If compaction.dedup.key=custom, it reads the schema from compaction.avro.key.schema.loc.
   * If the read fails, or if the custom key schema is incompatible with topicSchema, option "key" will be used.
   */
  @VisibleForTesting
  Schema getKeySchema(Job job, Schema topicSchema) throws IOException {
    Schema keySchema = null;
    DedupKeyOption dedupKeyOption = getDedupKeyOption();
    if (dedupKeyOption == DedupKeyOption.ALL) {
      LOG.info("Using all attributes in the schema (except Map, Array and Enum fields) for compaction");
      keySchema = AvroUtils.removeUncomparableFields(topicSchema).get();
    } else if (dedupKeyOption == DedupKeyOption.KEY) {
      LOG.info("Using key attributes in the schema for compaction");
      keySchema = AvroUtils.removeUncomparableFields(getKeySchema(topicSchema)).get();
    } else if (keySchemaFileSpecified()) {
      // DedupKeyOption.CUSTOM with an explicit key-schema file.
      Path keySchemaFile = getKeySchemaFile();
      LOG.info("Using attributes specified in schema file " + keySchemaFile + " for compaction");
      try {
        keySchema = AvroUtils.parseSchemaFromFile(keySchemaFile, this.fs);
      } catch (IOException e) {
        // Fall back to the KEY behavior rather than failing the job.
        LOG.error("Failed to parse avro schema from " + keySchemaFile
            + ", using key attributes in the schema for compaction");
        keySchema = AvroUtils.removeUncomparableFields(getKeySchema(topicSchema)).get();
      }
      if (!isKeySchemaValid(keySchema, topicSchema)) {
        LOG.warn(String.format("Key schema %s is not compatible with record schema %s.", keySchema, topicSchema)
            + "Using key attributes in the schema for compaction");
        keySchema = AvroUtils.removeUncomparableFields(getKeySchema(topicSchema)).get();
      }
    } else {
      LOG.info("Property " + COMPACTION_JOB_AVRO_KEY_SCHEMA_LOC
          + " not provided. Using key attributes in the schema for compaction");
      keySchema = AvroUtils.removeUncomparableFields(getKeySchema(topicSchema)).get();
    }
    return keySchema;
  }

  /**
   * Returns a schema composed of all fields in topicSchema whose doc match "(?i).*primarykey".
   * If there's no such field, topicSchema itself will be returned.
   */
  public static Schema getKeySchema(Schema topicSchema) {
    Preconditions.checkArgument(topicSchema.getType() == Schema.Type.RECORD);
    Optional<Schema> newSchema = getKeySchemaFromRecord(topicSchema);
    if (newSchema.isPresent()) {
      return newSchema.get();
    } else {
      LOG.warn(String.format("No field in the schema of %s is annotated as primarykey. Using all fields for deduping",
          topicSchema.getName()));
      return topicSchema;
    }
  }

  /**
   * Returns the key sub-schema of a single field: for record fields the key fields are extracted
   * recursively; for other types the field's own schema is returned iff its doc ends with
   * {@code primarykey} (case-insensitive).
   */
  public static Optional<Schema> getKeySchema(Field field) {
    switch (field.schema().getType()) {
      case RECORD:
        return getKeySchemaFromRecord(field.schema());
      default:
        if (field.doc() != null && field.doc().toLowerCase().endsWith(SCHEMA_DEDUP_FIELD_ANNOTATOR)) {
          return Optional.of(field.schema());
        } else {
          return Optional.absent();
        }
    }
  }

  /**
   * Builds a record schema containing only the fields of {@code record} annotated as primarykey
   * (directly or within nested records). Absent when no such field exists.
   */
  public static Optional<Schema> getKeySchemaFromRecord(Schema record) {
    Preconditions.checkArgument(record.getType() == Schema.Type.RECORD);
    List<Field> fields = Lists.newArrayList();
    for (Field field : record.getFields()) {
      Optional<Schema> newFieldSchema = getKeySchema(field);
      if (newFieldSchema.isPresent()) {
        fields.add(AvroCompatibilityHelper.createSchemaField(field.name(), newFieldSchema.get(), field.doc(),
            AvroUtils.getCompatibleDefaultValue(field)));
      }
    }
    if (!fields.isEmpty()) {
      Schema newSchema = Schema.createRecord(record.getName(), record.getDoc(), record.getName(), false);
      newSchema.setFields(fields);
      return Optional.of(newSchema);
    } else {
      return Optional.absent();
    }
  }

  /**
   * keySchema is valid if a record with newestSchema can be converted to a record with keySchema.
   */
  public static boolean isKeySchemaValid(Schema keySchema, Schema topicSchema) {
    return SchemaCompatibility.checkReaderWriterCompatibility(keySchema, topicSchema).getType()
        .equals(SchemaCompatibilityType.COMPATIBLE);
  }

  /**
   * Returns the schema of the most recently modified Avro file under the job's input paths,
   * or null if none is found.
   */
  public static Schema getNewestSchemaFromSource(Job job, FileSystem fs) throws IOException {
    Path[] sourceDirs = FileInputFormat.getInputPaths(job);
    List<FileStatus> files = new ArrayList<>();
    for (Path sourceDir : sourceDirs) {
      files.addAll(Arrays.asList(fs.listStatus(sourceDir)));
    }
    Collections.sort(files, new LastModifiedDescComparator());
    for (FileStatus file : files) {
      Schema schema = getNewestSchemaFromSource(file.getPath(), fs);
      if (schema != null) {
        return schema;
      }
    }
    return null;
  }

  /**
   * Returns the schema of the most recently modified Avro file under {@code sourceDir},
   * searching subdirectories recursively; null if no Avro file is found.
   */
  public static Schema getNewestSchemaFromSource(Path sourceDir, FileSystem fs) throws IOException {
    FileStatus[] files = fs.listStatus(sourceDir);
    Arrays.sort(files, new LastModifiedDescComparator());
    for (FileStatus status : files) {
      if (status.isDirectory()) {
        Schema schema = getNewestSchemaFromSource(status.getPath(), fs);
        if (schema != null) {
          return schema;
        }
      } else if (FilenameUtils.isExtension(status.getPath().getName(), AVRO)) {
        return AvroUtils.getSchemaFromDataFile(status.getPath(), fs);
      }
    }
    return null;
  }

  /** Reads {@link #COMPACTION_JOB_DEDUP_KEY} from the dataset job props, defaulting to KEY. */
  private DedupKeyOption getDedupKeyOption() {
    if (!this.dataset.jobProps().contains(COMPACTION_JOB_DEDUP_KEY)) {
      return DEFAULT_DEDUP_KEY_OPTION;
    }
    Optional<DedupKeyOption> option = Enums.getIfPresent(DedupKeyOption.class,
        this.dataset.jobProps().getProp(COMPACTION_JOB_DEDUP_KEY).toUpperCase());
    return option.isPresent() ? option.get() : DEFAULT_DEDUP_KEY_OPTION;
  }

  private boolean keySchemaFileSpecified() {
    return this.dataset.jobProps().contains(COMPACTION_JOB_AVRO_KEY_SCHEMA_LOC);
  }

  private Path getKeySchemaFile() {
    return new Path(this.dataset.jobProps().getProp(COMPACTION_JOB_AVRO_KEY_SCHEMA_LOC));
  }

  @Override
  protected void setInputFormatClass(Job job) {
    job.setInputFormatClass(AvroKeyRecursiveCombineFileInputFormat.class);
  }

  @Override
  protected void setMapperClass(Job job) {
    job.setMapperClass(AvroKeyMapper.class);
  }

  @Override
  protected void setMapOutputKeyClass(Job job) {
    job.setMapOutputKeyClass(AvroKey.class);
  }

  @Override
  protected void setMapOutputValueClass(Job job) {
    job.setMapOutputValueClass(AvroValue.class);
  }

  @Override
  protected void setOutputFormatClass(Job job) {
    job.setOutputFormatClass(AvroKeyCompactorOutputFormat.class);
  }

  @Override
  protected void setReducerClass(Job job) {
    job.setReducerClass(AvroKeyDedupReducer.class);
  }

  @Override
  protected void setOutputKeyClass(Job job) {
    job.setOutputKeyClass(AvroKey.class);
  }

  @Override
  protected void setOutputValueClass(Job job) {
    job.setOutputValueClass(NullWritable.class);
  }

  @Override
  protected Collection<String> getApplicableFileExtensions() {
    return Lists.newArrayList(AVRO);
  }

  /**
   * A Comparator for reverse order comparison of modification time of two FileStatus.
   */
  public static class LastModifiedDescComparator implements Comparator<FileStatus>, Serializable {
    private static final long serialVersionUID = 1L;

    @Override
    public int compare(FileStatus fs1, FileStatus fs2) {
      // Descending by modification time: newer files sort first. Long.compare avoids the
      // hand-rolled three-way branch (and any subtraction-overflow temptation).
      return Long.compare(fs2.getModificationTime(), fs1.getModificationTime());
    }
  }
}
| 1,874 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce/orc/GobblinOrcMapreduceRecordWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.mapreduce.orc;
import java.io.IOException;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.orc.StripeInformation;
import org.apache.orc.Writer;
import org.apache.orc.mapreduce.OrcMapreduceRecordWriter;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.util.reflection.RestrictedFieldAccessingUtils;
/**
 * A thin extension to {@link OrcMapreduceRecordWriter} for obtaining a vector of stripe information.
 */
@Slf4j
public class GobblinOrcMapreduceRecordWriter extends OrcMapreduceRecordWriter {

  public GobblinOrcMapreduceRecordWriter(Writer writer) {
    super(writer);
  }

  public GobblinOrcMapreduceRecordWriter(Writer writer, int rowBatchSize) {
    super(writer, rowBatchSize);
  }

  /**
   * Closes the underlying writer, then logs the data length of each written stripe as a
   * comma-separated vector.
   */
  @Override
  public void close(TaskAttemptContext taskAttemptContext)
      throws IOException {
    super.close(taskAttemptContext);
    // TODO: Emit this information as kafka events for ease for populating dashboard.
    try {
      // The superclass keeps its Writer in a private field, so access it reflectively.
      String stripeSizeVec = ((Writer) RestrictedFieldAccessingUtils.getRestrictedFieldByReflection(
          this, "writer", this.getClass())).getStripes()
          .stream()
          .mapToLong(StripeInformation::getDataLength).mapToObj(String::valueOf)
          .reduce((x, y) -> x.concat(",").concat(y))
          // An empty file has no stripes; orElse avoids the NoSuchElementException that an
          // unchecked Optional.get() would throw out of close() for zero-record outputs.
          .orElse("");
      log.info("The vector of Stripe-Size in enclosing writer is:" + stripeSizeVec);
    } catch (NoSuchFieldException | IllegalAccessException e) {
      log.error("Failed to access writer object from super class to obtain stripe information");
    }
  }
}
| 1,875 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce/orc/OrcValueCombineFileInputFormat.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.mapreduce.orc;
import java.io.IOException;
import org.apache.gobblin.compaction.mapreduce.CompactionCombineFileInputFormat;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.CombineFileRecordReader;
import org.apache.hadoop.mapreduce.lib.input.CombineFileSplit;
import org.apache.orc.mapred.OrcValue;
/**
 * A {@link CompactionCombineFileInputFormat} for ORC input files; each file in a combined split is
 * read through an {@link OrcValueCombineFileRecordReader}.
 */
public class OrcValueCombineFileInputFormat extends CompactionCombineFileInputFormat<NullWritable, OrcValue> {
  @Override
  public RecordReader<NullWritable, OrcValue> createRecordReader(InputSplit split, TaskAttemptContext context)
      throws IOException {
    // Diamond form instead of the raw CombineFileRecordReader type, eliminating the unchecked
    // conversion warning while producing the same reader.
    return new CombineFileRecordReader<>((CombineFileSplit) split, context, OrcValueCombineFileRecordReader.class);
  }
}
| 1,876 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce/orc/OrcKeyCompactorOutputFormat.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.mapreduce.orc;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.orc.OrcFile;
import org.apache.orc.Writer;
import org.apache.orc.mapreduce.OrcMapreduceRecordWriter;
import org.apache.orc.mapreduce.OrcOutputFormat;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.compaction.mapreduce.CompactorOutputCommitter;
import org.apache.gobblin.writer.GobblinOrcMemoryManager;
import org.apache.gobblin.writer.GobblinOrcWriterConfigs;
import static org.apache.gobblin.compaction.mapreduce.CompactorOutputCommitter.COMPACTION_OUTPUT_EXTENSION;
/**
 * Extension of {@link OrcOutputFormat} for customized {@link CompactorOutputCommitter}
 */
@Slf4j
public class OrcKeyCompactorOutputFormat extends OrcOutputFormat {

  // Lazily instantiated committer; creation is serialized by the synchronized accessor.
  private FileOutputCommitter committer = null;

  @Override
  public synchronized OutputCommitter getOutputCommitter(TaskAttemptContext context) throws IOException {
    if (this.committer != null) {
      return this.committer;
    }
    this.committer = new CompactorOutputCommitter(FileOutputFormat.getOutputPath(context), context);
    return this.committer;
  }

  /**
   * Required for extension since super method hard-coded file extension as ".orc". To keep flexibility
   * of extension name, we made it configuration driven.
   * @param taskAttemptContext The source of configuration that determines the file extension
   * @return The {@link RecordWriter} that write out Orc object.
   * @throws IOException
   */
  @Override
  public RecordWriter getRecordWriter(TaskAttemptContext taskAttemptContext) throws IOException {
    Configuration jobConf = taskAttemptContext.getConfiguration();
    String fileExtension = "." + jobConf.get(COMPACTION_OUTPUT_EXTENSION, "orc");
    Path workFile = getDefaultWorkFile(taskAttemptContext, fileExtension);
    Writer orcWriter = OrcFile.createWriter(workFile,
        org.apache.orc.mapred.OrcOutputFormat.buildOptions(jobConf).memory(new GobblinOrcMemoryManager(jobConf)));
    int batchSize =
        jobConf.getInt(GobblinOrcWriterConfigs.ORC_WRITER_BATCH_SIZE, GobblinOrcWriterConfigs.DEFAULT_ORC_WRITER_BATCH_SIZE);
    log.info("Creating OrcMapreduceRecordWriter with row batch size = {}", batchSize);
    return new OrcMapreduceRecordWriter(orcWriter, batchSize);
  }
}
| 1,877 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce/orc/OrcValueMapper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.mapreduce.orc;
import java.io.IOException;
import java.lang.reflect.Field;
import java.util.Arrays;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.lib.input.CombineFileSplit;
import org.apache.hadoop.mapreduce.lib.map.WrappedMapper;
import org.apache.hadoop.mapreduce.task.MapContextImpl;
import org.apache.orc.OrcConf;
import org.apache.orc.TypeDescription;
import org.apache.orc.mapred.OrcKey;
import org.apache.orc.mapred.OrcStruct;
import org.apache.orc.mapred.OrcValue;
import org.apache.orc.mapreduce.OrcMapreduceRecordReader;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.compaction.mapreduce.RecordKeyMapperBase;
import static org.apache.orc.OrcConf.MAPRED_SHUFFLE_KEY_SCHEMA;
/**
* To keep consistent with {@link OrcMapreduceRecordReader}'s decision on implementing
* {@link RecordReader} with {@link NullWritable} as the key and generic type of value, the ORC Mapper will
* read in the record as the input value.
*/
@Slf4j
public class OrcValueMapper extends RecordKeyMapperBase<NullWritable, OrcStruct, Object, OrcValue> {

  // This key will only be initialized lazily when dedup is enabled.
  private OrcKey outKey;
  private OrcValue outValue;
  // The consistent input schema shared by all mappers; records are up-converted to it when they differ.
  private TypeDescription mrInputSchema;
  // Schema of the shuffle (dedup) key.
  private TypeDescription shuffleKeySchema;
  private JobConf jobConf;

  // Guard so the schema-difference message is logged at most once per mapper instance.
  // Fix: the original logged on every mismatching record, flooding the task log on hot paths.
  private boolean schemaMismatchLogged = false;

  // This is added mostly for debuggability: running count of records written by this task JVM.
  private static int writeCount = 0;

  @Override
  protected void setup(Context context)
      throws IOException, InterruptedException {
    super.setup(context);
    this.jobConf = new JobConf(context.getConfiguration());
    this.outKey = new OrcKey();
    this.outKey.configure(jobConf);
    this.outValue = new OrcValue();
    this.outValue.configure(jobConf);
    // This is the consistent input-schema among all mappers.
    this.mrInputSchema =
        TypeDescription.fromString(context.getConfiguration().get(OrcConf.MAPRED_INPUT_SCHEMA.getAttribute()));
    this.shuffleKeySchema =
        TypeDescription.fromString(context.getConfiguration().get(MAPRED_SHUFFLE_KEY_SCHEMA.getAttribute()));
  }

  /**
   * Emits each input record. When reducers are present the record is paired with its dedup key;
   * in a map-only job the record is written directly with a {@link NullWritable} key.
   */
  @Override
  protected void map(NullWritable key, OrcStruct orcStruct, Context context)
      throws IOException, InterruptedException {
    // Up-convert OrcStruct only if schema differs.
    if (!orcStruct.getSchema().equals(this.mrInputSchema)) {
      if (!schemaMismatchLogged) {
        // Log only once: this branch executes per record and would otherwise spam the log.
        log.info("There's a schema difference between output schema and input schema");
        schemaMismatchLogged = true;
      }
      // Note that outValue.value is being re-used across records.
      OrcUtils.upConvertOrcStruct(orcStruct, (OrcStruct) outValue.value, mrInputSchema);
    } else {
      this.outValue.value = orcStruct;
    }

    try {
      if (context.getNumReduceTasks() == 0) {
        // Map-only job: no shuffle, so no dedup key is required.
        context.write(NullWritable.get(), this.outValue);
      } else {
        fillDedupKey(orcStruct);
        context.write(this.outKey, this.outValue);
      }
    } catch (Exception e) {
      // Include the input split's file paths to make write failures diagnosable.
      String inputPathInString = getInputsplitHelper(context);
      throw new RuntimeException("Failure in write record no." + writeCount + " the processing split is:" + inputPathInString, e);
    }
    writeCount += 1;

    context.getCounter(EVENT_COUNTER.RECORD_COUNT).increment(1);
  }

  /**
   * Digs the {@link CombineFileSplit} out of the wrapped map context via reflection so that its
   * file paths can be reported in error messages.
   */
  private String getInputsplitHelper(Context context) {
    try {
      Field mapContextField = WrappedMapper.Context.class.getDeclaredField("mapContext");
      mapContextField.setAccessible(true);
      Path[] inputPaths = ((CombineFileSplit) ((MapContextImpl) mapContextField.get((WrappedMapper.Context) context))
          .getInputSplit()).getPaths();
      return Arrays.toString(inputPaths);
    } catch (NoSuchFieldException | IllegalAccessException ie) {
      throw new RuntimeException(ie);
    }
  }

  /**
   * By default, dedup key contains the whole ORC record, except MAP since {@link org.apache.orc.mapred.OrcMap} is
   * an implementation of {@link java.util.TreeMap} which doesn't accept difference of records within the map in comparison.
   * Note: This method should have no side-effect on input record.
   */
  private void fillDedupKey(OrcStruct originalRecord) {
    if (!originalRecord.getSchema().equals(this.shuffleKeySchema)) {
      OrcUtils.upConvertOrcStruct(originalRecord, (OrcStruct) this.outKey.key, this.shuffleKeySchema);
    } else {
      this.outKey.key = originalRecord;
    }
  }
}
| 1,878 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce/orc/OrcKeyComparator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.mapreduce.orc;
import java.io.DataInput;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.RawComparator;
import org.apache.orc.OrcConf;
import org.apache.orc.TypeDescription;
import org.apache.orc.mapred.OrcKey;
import org.apache.orc.mapred.OrcStruct;
/**
* Compare {@link OrcKey} in shuffle of MapReduce.
* Delegate byte decoding to underlying {@link OrcStruct#readFields(DataInput)} method to simplify comparison.
*/
public class OrcKeyComparator extends Configured implements RawComparator<OrcKey> {

  private TypeDescription schema;
  private OrcKey key1;
  private OrcKey key2;
  private DataInputBuffer buffer;

  @Override
  public void setConf(Configuration conf) {
    super.setConf(conf);
    if (conf == null) {
      return;
    }
    // The MapReduce framework sorts OrcKey objects emitted by the map phase, so decode using the
    // schema configured for the map-output (shuffle) key and delegate to the data model's
    // non-raw compare() implementation.
    schema = TypeDescription.fromString(conf.get(OrcConf.MAPRED_SHUFFLE_KEY_SCHEMA.getAttribute()));
    if (key1 == null) {
      key1 = new OrcKey();
    }
    if (key2 == null) {
      key2 = new OrcKey();
    }
    if (buffer == null) {
      buffer = new DataInputBuffer();
    }
    // Reusable record holders the raw comparator deserializes into.
    key1.key = (OrcStruct) OrcStruct.createValue(schema);
    key2.key = (OrcStruct) OrcStruct.createValue(schema);
  }

  @Override
  public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
    try {
      // Deserialize both raw byte ranges into the reusable key holders.
      buffer.reset(b1, s1, l1);
      key1.readFields(buffer);
      buffer.reset(b2, s2, l2);
      key2.readFields(buffer);
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
    // Then compare the decoded objects.
    return compare(key1, key2);
  }

  @Override
  public int compare(OrcKey o1, OrcKey o2) {
    if (o1.key instanceof OrcStruct && o2.key instanceof OrcStruct) {
      return ((OrcStruct) o1.key).compareTo((OrcStruct) o2.key);
    }
    throw new IllegalStateException("OrcKey should have its key value be instance of OrcStruct");
  }
}
| 1,879 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce/orc/OrcKeyDedupReducer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.mapreduce.orc;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import org.apache.gobblin.compaction.mapreduce.RecordKeyDedupReducerBase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.NullWritable;
import org.apache.orc.mapred.OrcKey;
import org.apache.orc.mapred.OrcStruct;
import org.apache.orc.mapred.OrcValue;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import lombok.extern.slf4j.Slf4j;
/**
* Check record duplicates in reducer-side.
*/
@Slf4j
public class OrcKeyDedupReducer extends RecordKeyDedupReducerBase<OrcKey, OrcValue, NullWritable, OrcValue> {

  @VisibleForTesting
  public static final String ORC_DELTA_SCHEMA_PROVIDER =
      "org.apache.gobblin.compaction." + OrcKeyDedupReducer.class.getSimpleName() + ".deltaFieldsProvider";
  public static final String USING_WHOLE_RECORD_FOR_COMPARE = "usingWholeRecordForCompareInReducer";

  // Total number of values consumed across all reduce() calls; used only for progress logging.
  private int recordCounter = 0;

  @Override
  protected void setOutValue(OrcValue valueToRetain) {
    // Better to copy instead reassigning reference.
    outValue.value = valueToRetain.value;
  }

  @Override
  protected void setOutKey(OrcValue valueToRetain) {
    // do nothing since initReusableObject has assigned value for outKey.
  }

  /**
   * For each group of records sharing a shuffle key, emits one record per distinct value and
   * tallies duplicate counts into job counters.
   */
  @Override
  protected void reduce(OrcKey key, Iterable<OrcValue> values, Context context)
      throws IOException, InterruptedException {
    /* Map from hash of value (typed in OrcStruct) object to its times of duplication. */
    Map<Integer, Integer> valuesToRetain = new HashMap<>();
    int valueHash = 0;

    if (recordCounter == 0) {
      log.info("Starting to reduce values for the first key {}", key);
    }
    for (OrcValue value : values) {
      if (recordCounter == 1) {
        log.info("Reduced first value");
      }
      recordCounter++;
      // Periodic progress logging; cheap relative to per-record work.
      if (recordCounter % 1000 == 0) {
        log.info("Reduced {} values so far", recordCounter);
      }
      // NOTE(review): dedup is keyed on hashCode() alone. Two distinct records that collide on
      // hashCode within the same key group would be treated as duplicates and the later one
      // silently dropped. Confirm this memory-vs-correctness trade-off is intended.
      valueHash = ((OrcStruct) value.value).hashCode();
      if (valuesToRetain.containsKey(valueHash)) {
        valuesToRetain.put(valueHash, valuesToRetain.get(valueHash) + 1);
      } else {
        // First occurrence of this hash: emit the record and start its duplicate count at 1.
        valuesToRetain.put(valueHash, 1);
        writeRetainedValue(value, context);
      }
    }
    /* At this point, keyset of valuesToRetain should contains all different OrcValue. */
    for (Map.Entry<Integer, Integer> entry : valuesToRetain.entrySet()) {
      updateCounters(entry.getValue(), context);
    }
  }

  @Override
  protected void initDeltaComparator(Configuration conf) {
    // Delta-field comparison is not used for ORC dedup; whole-record hashing is applied instead.
    deltaComparatorOptional = Optional.absent();
  }

  @Override
  protected void initReusableObject() {
    outKey = NullWritable.get();
    outValue = new OrcValue();
  }
}
| 1,880 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce/orc/OrcValueCombineFileRecordReader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.mapreduce.orc;
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.CombineFileSplit;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.orc.Reader;
import org.apache.orc.RecordReader;
import org.apache.orc.TypeDescription;
import org.apache.orc.mapreduce.OrcMapreduceRecordReader;
public class OrcValueCombineFileRecordReader extends OrcMapreduceRecordReader {

  /** The combined split this reader belongs to. */
  private final CombineFileSplit split;
  /** Index of the single file within {@link #split} this reader is responsible for. */
  private final Integer splitIdx;

  /**
   * Creates a reader for the {@code idx}-th file of the given combined split, loading that file's
   * schema and a range-limited row reader for it.
   */
  public OrcValueCombineFileRecordReader(CombineFileSplit split, TaskAttemptContext context, Integer idx)
      throws IOException {
    this(getRecordReaderFromFile(split, context, idx), getSchema(split, context, idx), split, idx);
  }

  public OrcValueCombineFileRecordReader(RecordReader reader, TypeDescription schema, CombineFileSplit split,
      Integer splitIdx)
      throws IOException {
    super(reader, schema);
    this.split = split;
    this.splitIdx = splitIdx;
  }

  @Override
  public void initialize(InputSplit inputSplit, TaskAttemptContext taskAttemptContext) {
    // Narrow initialization to the single file (and byte range) this reader handles.
    FileSplit singleFileSplit = new FileSplit(split.getPath(splitIdx), split.getOffset(splitIdx),
        split.getLength(splitIdx), null);
    super.initialize(singleFileSplit, taskAttemptContext);
  }

  /** Reads the ORC schema from the {@code idx}-th file of the combined split. */
  private static TypeDescription getSchema(CombineFileSplit split, TaskAttemptContext context, Integer idx)
      throws IOException {
    return OrcUtils.getTypeDescriptionFromFile(context.getConfiguration(), split.getPath(idx));
  }

  /** Opens a row reader over the byte range the combined split assigns to the {@code idx}-th file. */
  private static RecordReader getRecordReaderFromFile(CombineFileSplit split, TaskAttemptContext context, Integer idx)
      throws IOException {
    // One should avoid using rows() without passing a Reader.Options object as the configuration
    // for the RecordReader. Note that it is different from the OrcFile Reader that getFileReader returns.
    Reader.Options options = new Reader.Options(context.getConfiguration())
        .range(split.getOffset(idx), split.getLength(idx));
    return OrcUtils.getFileReader(context.getConfiguration(), split.getPath(idx)).rows(options);
  }
}
| 1,881 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce/orc/OrcUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.mapreduce.orc;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.serde2.io.DateWritable;
import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
import org.apache.hadoop.io.BooleanWritable;
import org.apache.hadoop.io.ByteWritable;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.FloatWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.ShortWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.orc.OrcFile;
import org.apache.orc.Reader;
import org.apache.orc.TypeDescription;
import org.apache.orc.impl.ConvertTreeReaderFactory;
import org.apache.orc.impl.SchemaEvolution;
import org.apache.orc.mapred.OrcList;
import org.apache.orc.mapred.OrcMap;
import org.apache.orc.mapred.OrcStruct;
import org.apache.orc.mapred.OrcTimestamp;
import org.apache.orc.mapred.OrcUnion;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.compaction.mapreduce.avro.MRCompactorAvroKeyDedupJobRunner;
import org.apache.gobblin.util.FileListUtils;
@Slf4j
public class OrcUtils {
  // Private constructor: utility class, never instantiated.
  private OrcUtils() {
  }
  /**
   * Reads the ORC schema ({@link TypeDescription}) recorded in the footer of the given file.
   *
   * @param conf Hadoop configuration used to open the file
   * @param orcFilePath path of an ORC file
   * @return the schema stored in the file
   * @throws IOException if the file cannot be opened or its footer read
   */
  public static TypeDescription getTypeDescriptionFromFile(Configuration conf, Path orcFilePath)
      throws IOException {
    return getFileReader(conf, orcFilePath).getSchema();
  }
  /**
   * @deprecated The method name is inaccurate: this returns an ORC file {@link Reader}, not a
   * record reader. Please call {@link #getFileReader(Configuration, Path)} directly.
   */
  @Deprecated
  public static Reader getRecordReaderFromFile(Configuration conf, Path orcFilePath) throws IOException {
    return getFileReader(conf, orcFilePath);
  }
  /**
   * Opens an ORC file {@link Reader} for the given path using the supplied configuration.
   *
   * @param conf Hadoop configuration used to open the file
   * @param orcFilePath path of an ORC file
   * @return a reader over the file's metadata and rows
   * @throws IOException if the file cannot be opened
   */
  public static Reader getFileReader(Configuration conf, Path orcFilePath)
      throws IOException {
    return OrcFile.createReader(orcFilePath, new OrcFile.ReaderOptions(conf));
  }
public static TypeDescription getNewestSchemaFromSource(Job job, FileSystem fs)
throws IOException {
Path[] sourceDirs = FileInputFormat.getInputPaths(job);
if (sourceDirs.length == 0) {
throw new IllegalStateException("There should be at least one directory specified for the MR job");
}
List<FileStatus> files = new ArrayList<FileStatus>();
for (Path sourceDir : sourceDirs) {
files.addAll(FileListUtils.listFilesRecursively(fs, sourceDir));
}
Collections.sort(files, new MRCompactorAvroKeyDedupJobRunner.LastModifiedDescComparator());
TypeDescription resultSchema;
for (FileStatus status : files) {
resultSchema = getTypeDescriptionFromFile(job.getConfiguration(), status.getPath());
if (resultSchema != null) {
return resultSchema;
}
}
throw new IllegalStateException(String
.format("There's no file carrying orc file schema in the list of directories: %s",
Arrays.toString(sourceDirs)));
}
  /**
   * Determine if two types are following valid evolution.
   * Implementation taken and manipulated from {@link SchemaEvolution} as that was package-private.
   *
   * @param fileType the writer's (file-side) type
   * @param readerType the reader's (target) type
   * @return true if data written as {@code fileType} can be read as {@code readerType}
   */
  static boolean isEvolutionValid(TypeDescription fileType, TypeDescription readerType) {
    boolean isOk = true;
    if (fileType.getCategory() == readerType.getCategory()) {
      switch (readerType.getCategory()) {
        case BOOLEAN:
        case BYTE:
        case SHORT:
        case INT:
        case LONG:
        case DOUBLE:
        case FLOAT:
        case STRING:
        case TIMESTAMP:
        case BINARY:
        case DATE:
          // these are always a match
          break;
        case CHAR:
        case VARCHAR:
          break;
        case DECIMAL:
          break;
        case UNION:
        case MAP:
        case LIST: {
          // these must be an exact match: same child count, each child pair valid recursively
          List<TypeDescription> fileChildren = fileType.getChildren();
          List<TypeDescription> readerChildren = readerType.getChildren();
          if (fileChildren.size() == readerChildren.size()) {
            for (int i = 0; i < fileChildren.size(); ++i) {
              isOk &= isEvolutionValid(fileChildren.get(i), readerChildren.get(i));
            }
            return isOk;
          } else {
            return false;
          }
        }
        case STRUCT: {
          // Structs are matched by field NAME, not position. Reader-only fields are allowed
          // (they simply have no file counterpart); shared fields must evolve validly.
          List<TypeDescription> readerChildren = readerType.getChildren();
          List<TypeDescription> fileChildren = fileType.getChildren();
          List<String> readerFieldNames = readerType.getFieldNames();
          List<String> fileFieldNames = fileType.getFieldNames();
          // Index the file-side fields by name for O(1) lookup below.
          final Map<String, TypeDescription> fileTypesIdx = new HashMap();
          for (int i = 0; i < fileFieldNames.size(); i++) {
            final String fileFieldName = fileFieldNames.get(i);
            fileTypesIdx.put(fileFieldName, fileChildren.get(i));
          }
          for (int i = 0; i < readerFieldNames.size(); i++) {
            final String readerFieldName = readerFieldNames.get(i);
            TypeDescription readerField = readerChildren.get(i);
            TypeDescription fileField = fileTypesIdx.get(readerFieldName);
            if (fileField == null) {
              // Field exists only on the reader side: acceptable evolution, skip.
              continue;
            }
            isOk &= isEvolutionValid(fileField, readerField);
          }
          return isOk;
        }
        default:
          throw new IllegalArgumentException("Unknown type " + readerType);
      }
      return isOk;
    } else {
      /*
       * Categories differ: delegate to ORC's conversion rules to decide whether an implicit
       * conversion (e.g. numeric widening) is supported between the two types.
       */
      return ConvertTreeReaderFactory.canConvert(fileType, readerType);
    }
  }
  /**
   * This method copies value in object {@param w} into object {@param v} recursively even if the schema of w and v
   * differs in a compatible way, meaning if there's a field existing in v but not in w, the null value will be filled.
   * It served as a helper method for {@link #upConvertOrcStruct(OrcStruct, OrcStruct, TypeDescription)} when OrcStruct
   * contains nested structure as a member.
   *
   * Suppress the warning of type checking: All casts are clearly valid as they are all (sub)elements Orc types.
   * Check failure will trigger Cast exception and blow up the process.
   *
   * @param w source value; not modified by this method
   * @param v destination container whose schema is {@code targetSchema}; populated in place
   * @param targetSchema schema of {@code v}
   * @return {@code v}, populated with the (possibly type-widened) content of {@code w}
   */
  @SuppressWarnings("unchecked")
  private static WritableComparable structConversionHelper(WritableComparable w, WritableComparable v,
      TypeDescription targetSchema) {
    if (w instanceof OrcStruct) {
      upConvertOrcStruct((OrcStruct) w, (OrcStruct) v, targetSchema);
    } else if (w instanceof OrcList) {
      // Clear the target list, then convert each source element into a fresh container of the
      // target element type.
      OrcList castedList = (OrcList) w;
      OrcList targetList = (OrcList) v;
      TypeDescription elementType = targetSchema.getChildren().get(0);
      targetList.clear();
      for (int i = 0; i < castedList.size(); i++) {
        WritableComparable targetListRecordContainer = createValueRecursively(elementType, 0);
        targetList.add(i,
            structConversionHelper((WritableComparable) castedList.get(i), targetListRecordContainer, elementType));
      }
    } else if (w instanceof OrcMap) {
      // Keys are copied as-is (key up-conversion is unsupported); values are converted recursively.
      OrcMap castedMap = (OrcMap) w;
      OrcMap targetMap = (OrcMap) v;
      TypeDescription valueSchema = targetSchema.getChildren().get(1);
      targetMap.clear();
      for (Object entry : castedMap.entrySet()) {
        Map.Entry<WritableComparable, WritableComparable> castedEntry =
            (Map.Entry<WritableComparable, WritableComparable>) entry;
        WritableComparable targetMapRecordContainer = createValueRecursively(valueSchema);
        targetMapRecordContainer =
            structConversionHelper(castedEntry.getValue(), targetMapRecordContainer, valueSchema);
        targetMap.put(castedEntry.getKey(), targetMapRecordContainer);
      }
    } else if (w instanceof OrcUnion) {
      OrcUnion castedUnion = (OrcUnion) w;
      OrcUnion targetUnion = (OrcUnion) v;
      byte tag = castedUnion.getTag();
      // ORC doesn't support Union type widening
      // Avro doesn't allow it either, reference: https://avro.apache.org/docs/current/spec.html#Schema+Resolution
      // As a result, member schema within source and target should be identical.
      TypeDescription targetMemberSchema = targetSchema.getChildren().get(tag);
      targetUnion.set(tag, structConversionHelper((WritableComparable) castedUnion.getObject(),
          (WritableComparable) OrcUtils.createValueRecursively(targetMemberSchema), targetMemberSchema));
    } else {
      // Regardless whether type-widening is happening or not, this method copy the value of w into v.
      handlePrimitiveWritableComparable(w, v);
    }
    // If non-primitive or type-widening is required, v should already be populated by w's value recursively.
    return v;
  }
/**
* Recursively convert the {@param oldStruct} into {@param newStruct} whose schema is {@param targetSchema}.
* This serves similar purpose like GenericDatumReader for Avro, which accepts an reader schema and writer schema
* to allow users convert bytes into reader's schema in a compatible approach.
* Calling this method SHALL NOT cause any side-effect for {@param oldStruct}, also it will copy value of each fields
* in {@param oldStruct} into {@param newStruct} recursively. Please ensure avoiding unnecessary call as it could
* be pretty expensive if the struct schema is complicated, or contains container objects like array/map.
*
* Note that if newStruct containing things like List/Map (container-type), the up-conversion is doing two things:
* 1. Clear all elements in original containers.
* 2. Make value of container elements in {@param oldStruct} is populated into {@param newStruct} with element-type
* in {@param newStruct} if compatible.
*
* Limitation:
* 1. Does not support up-conversion of key types in Maps. The underlying reasoning is because of the primary format
* from upstream is Avro, which enforces key-type to be string only.
* 2. Conversion from a field A to field B only happens if
* org.apache.gobblin.compaction.mapreduce.orc.OrcValueMapper#isEvolutionValid(A,B) return true.
*/
@VisibleForTesting
public static void upConvertOrcStruct(OrcStruct oldStruct, OrcStruct newStruct, TypeDescription targetSchema) {
// If target schema is not equal to newStruct's schema, it is a illegal state and doesn't make sense to work through.
Preconditions.checkArgument(newStruct.getSchema().equals(targetSchema));
int indexInNewSchema = 0;
List<String> oldSchemaFieldNames = oldStruct.getSchema().getFieldNames();
/* Construct a fieldName -> Index map to efficient access within the loop below. */
Map<String, Integer> oldSchemaIndex = IntStream.range(0, oldSchemaFieldNames.size()).boxed()
.collect(Collectors.toMap(oldSchemaFieldNames::get, Function.identity()));
List<TypeDescription> oldSchemaTypes = oldStruct.getSchema().getChildren();
List<TypeDescription> newSchemaTypes = targetSchema.getChildren();
for (String fieldName : targetSchema.getFieldNames()) {
if (oldSchemaFieldNames.contains(fieldName) && oldStruct.getFieldValue(fieldName) != null) {
int fieldIndex = oldSchemaIndex.get(fieldName);
TypeDescription oldFieldSchema = oldSchemaTypes.get(fieldIndex);
TypeDescription newFieldSchema = newSchemaTypes.get(indexInNewSchema);
if (isEvolutionValid(oldFieldSchema, newFieldSchema)) {
WritableComparable oldField = oldStruct.getFieldValue(fieldName);
WritableComparable newField = newStruct.getFieldValue(fieldName);
newField = (newField == null) ? OrcUtils.createValueRecursively(newFieldSchema) : newField;
newStruct.setFieldValue(fieldName, structConversionHelper(oldField, newField, newFieldSchema));
} else {
throw new SchemaEvolution.IllegalEvolutionException(String
.format("ORC does not support type conversion from file" + " type %s to reader type %s ",
oldFieldSchema.toString(), newFieldSchema.toString()));
}
} else {
newStruct.setFieldValue(fieldName, null);
}
indexInNewSchema++;
}
}
  /**
   * Copy the value of {@param from} object into {@param to} with supporting of type-widening that ORC allowed
   * (byte -> short -> int -> long -> double; numeric identity copies for the remaining types).
   *
   * @param from source primitive writable; not modified
   * @param to destination primitive writable; overwritten with the (possibly widened) value
   * @throws UnsupportedOperationException if the {@code from} -> {@code to} pair is not supported
   */
  public static void handlePrimitiveWritableComparable(WritableComparable from, WritableComparable to) {
    // Integer widening chain: byte may widen to short/int/long/double.
    if (from instanceof ByteWritable) {
      if (to instanceof ByteWritable) {
        ((ByteWritable) to).set(((ByteWritable) from).get());
        return;
      } else if (to instanceof ShortWritable) {
        ((ShortWritable) to).set(((ByteWritable) from).get());
        return;
      } else if (to instanceof IntWritable) {
        ((IntWritable) to).set(((ByteWritable) from).get());
        return;
      } else if (to instanceof LongWritable) {
        ((LongWritable) to).set(((ByteWritable) from).get());
        return;
      } else if (to instanceof DoubleWritable) {
        ((DoubleWritable) to).set(((ByteWritable) from).get());
        return;
      }
    } else if (from instanceof ShortWritable) {
      if (to instanceof ShortWritable) {
        ((ShortWritable) to).set(((ShortWritable) from).get());
        return;
      } else if (to instanceof IntWritable) {
        ((IntWritable) to).set(((ShortWritable) from).get());
        return;
      } else if (to instanceof LongWritable) {
        ((LongWritable) to).set(((ShortWritable) from).get());
        return;
      } else if (to instanceof DoubleWritable) {
        ((DoubleWritable) to).set(((ShortWritable) from).get());
        return;
      }
    } else if (from instanceof IntWritable) {
      if (to instanceof IntWritable) {
        ((IntWritable) to).set(((IntWritable) from).get());
        return;
      } else if (to instanceof LongWritable) {
        ((LongWritable) to).set(((IntWritable) from).get());
        return;
      } else if (to instanceof DoubleWritable) {
        ((DoubleWritable) to).set(((IntWritable) from).get());
        return;
      }
    } else if (from instanceof LongWritable) {
      if (to instanceof LongWritable) {
        ((LongWritable) to).set(((LongWritable) from).get());
        return;
      } else if (to instanceof DoubleWritable) {
        ((DoubleWritable) to).set(((LongWritable) from).get());
        return;
      }
      // Following from this branch, type-widening is not allowed and only value-copy will happen.
    } else if (from instanceof DoubleWritable) {
      if (to instanceof DoubleWritable) {
        ((DoubleWritable) to).set(((DoubleWritable) from).get());
        return;
      }
    } else if (from instanceof BytesWritable) {
      if (to instanceof BytesWritable) {
        ((BytesWritable) to).set((BytesWritable) from);
        return;
      }
    } else if (from instanceof FloatWritable) {
      // NOTE(review): float -> double widening is not handled here; confirm whether that is intentional.
      if (to instanceof FloatWritable) {
        ((FloatWritable) to).set(((FloatWritable) from).get());
        return;
      }
    } else if (from instanceof Text) {
      if (to instanceof Text) {
        ((Text) to).set((Text) from);
        return;
      }
    } else if (from instanceof DateWritable) {
      if (to instanceof DateWritable) {
        ((DateWritable) to).set(((DateWritable) from).get());
        return;
      }
    } else if (from instanceof OrcTimestamp && to instanceof OrcTimestamp) {
      ((OrcTimestamp) to).set(((OrcTimestamp) from).toString());
      return;
    } else if (from instanceof HiveDecimalWritable && to instanceof HiveDecimalWritable) {
      ((HiveDecimalWritable) to).set(((HiveDecimalWritable) from).getHiveDecimal());
      return;
    } else if (from instanceof BooleanWritable && to instanceof BooleanWritable) {
      ((BooleanWritable) to).set(((BooleanWritable) from).get());
      return;
    }
    // Any pair that did not match (and return) above is an unsupported conversion.
    throw new UnsupportedOperationException(String
        .format("The conversion of primitive-type WritableComparable object from %s to %s is not supported",
            from.getClass(), to.getClass()));
  }
  /**
   * For nested structure like struct&lt;a:array&lt;struct&lt;int,string&gt;&gt;&gt;, calling OrcStruct.createValue doesn't create entry for the inner
   * list, which would be required to assign a value if the entry-type has nested structure, or it just cannot see the
   * entry's nested structure.
   *
   * This function should be fed back to open-source ORC.
   *
   * @param schema the ORC schema to create an empty value object tree for
   * @param elemNum number of recursively created placeholder entries in each LIST/MAP container
   * @return a writable object tree matching {@code schema}
   */
  public static WritableComparable createValueRecursively(TypeDescription schema, int elemNum) {
    switch (schema.getCategory()) {
      case BOOLEAN:
        return new BooleanWritable();
      case BYTE:
        return new ByteWritable();
      case SHORT:
        return new ShortWritable();
      case INT:
        return new IntWritable();
      case LONG:
        return new LongWritable();
      case FLOAT:
        return new FloatWritable();
      case DOUBLE:
        return new DoubleWritable();
      case BINARY:
        return new BytesWritable();
      case CHAR:
      case VARCHAR:
      case STRING:
        return new Text();
      case DATE:
        return new DateWritable();
      case TIMESTAMP:
      case TIMESTAMP_INSTANT:
        return new OrcTimestamp();
      case DECIMAL:
        return new HiveDecimalWritable();
      case STRUCT: {
        // Recurse into each child so nested containers are materialized, unlike OrcStruct.createValue.
        OrcStruct result = new OrcStruct(schema);
        int c = 0;
        for (TypeDescription child : schema.getChildren()) {
          result.setFieldValue(c++, createValueRecursively(child, elemNum));
        }
        return result;
      }
      case UNION: {
        // For union, there's no way to determine which tag's object type to create with only schema.
        // It can be determined in the cases when a OrcUnion's value needs to be copied to another object recursively,
        // and the source OrcUnion can provide this information.
        return new OrcUnion(schema);
      }
      case LIST: {
        OrcList result = new OrcList(schema);
        for (int i = 0; i < elemNum; i++) {
          result.add(createValueRecursively(schema.getChildren().get(0), elemNum));
        }
        return result;
      }
      case MAP: {
        OrcMap result = new OrcMap(schema);
        for (int i = 0; i < elemNum; i++) {
          result.put(createValueRecursively(schema.getChildren().get(0), elemNum),
              createValueRecursively(schema.getChildren().get(1), elemNum));
        }
        return result;
      }
      default:
        throw new IllegalArgumentException("Unknown type " + schema);
    }
  }
  /**
   * Convenience overload of {@link #createValueRecursively(TypeDescription, int)} that pre-populates
   * each LIST/MAP encountered with a single element.
   */
  public static WritableComparable createValueRecursively(TypeDescription schema) {
    return createValueRecursively(schema, 1);
  }
/**
* Check recursively if owning schema is eligible to be up-converted to targetSchema if
* TargetSchema is a subset of originalSchema
*/
public static boolean eligibleForUpConvertHelper(TypeDescription originalSchema, TypeDescription targetSchema) {
if (!targetSchema.getCategory().isPrimitive()) {
if (originalSchema.getCategory() != targetSchema.getCategory()) {
return false;
}
if (targetSchema.getCategory().equals(TypeDescription.Category.LIST)) {
Preconditions
.checkArgument(originalSchema.getChildren() != null, "Illegal format of ORC schema as:" + originalSchema);
return eligibleForUpConvertHelper(originalSchema.getChildren().get(0), targetSchema.getChildren().get(0));
} else if (targetSchema.getCategory().equals(TypeDescription.Category.MAP)) {
Preconditions
.checkArgument(originalSchema.getChildren() != null, "Illegal format of ORC schema as:" + originalSchema);
return eligibleForUpConvertHelper(originalSchema.getChildren().get(0), targetSchema.getChildren().get(0))
&& eligibleForUpConvertHelper(originalSchema.getChildren().get(1), targetSchema.getChildren().get(1));
} else if (targetSchema.getCategory().equals(TypeDescription.Category.UNION)) {
// we don't project into union as shuffle key.
return true;
} else if (targetSchema.getCategory().equals(TypeDescription.Category.STRUCT)) {
if (!originalSchema.getFieldNames().containsAll(targetSchema.getFieldNames())) {
return false;
}
boolean result = true;
for (int i = 0; i < targetSchema.getFieldNames().size(); i++) {
String subSchemaFieldName = targetSchema.getFieldNames().get(i);
result &= eligibleForUpConvertHelper(originalSchema.findSubtype(subSchemaFieldName),
targetSchema.getChildren().get(i));
}
return result;
} else {
// There are totally 5 types of non-primitive. If falling into this branch, it means it is a TIMESTAMP_INSTANT
// and we will by default treated it as eligible.
return true;
}
} else {
// Check the unit type: Only for the category.
return originalSchema.getCategory().equals(targetSchema.getCategory());
}
}
  /**
   * Eligibility for up-conversion: true if {@code targetSchema} is a subset of
   * {@code originalSchema} (schema projection) or vice-versa (schema expansion).
   */
  public static boolean eligibleForUpConvert(TypeDescription originalSchema, TypeDescription targetSchema) {
    return eligibleForUpConvertHelper(originalSchema, targetSchema) || eligibleForUpConvertHelper(targetSchema,
        originalSchema);
  }
}
| 1,882 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/event/CompactionSlaEventHelper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.event;
import com.google.common.base.Optional;
import java.io.IOException;
import org.apache.gobblin.compaction.dataset.Dataset;
import org.apache.gobblin.compaction.mapreduce.MRCompactor;
import org.apache.gobblin.compaction.mapreduce.RecordKeyDedupReducerBase;
import org.apache.gobblin.compaction.mapreduce.RecordKeyMapperBase;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.metrics.event.sla.SlaEventKeys;
import org.apache.gobblin.metrics.event.sla.SlaEventSubmitter;
import org.apache.gobblin.metrics.event.sla.SlaEventSubmitter.SlaEventSubmitterBuilder;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.Job;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Helper class to build compaction SLA event metadata.
 *
 * <p>All methods are static; the long value {@code -1L} is used throughout as a sentinel
 * meaning "value unavailable".
 */
public class CompactionSlaEventHelper {
  private static final Logger LOG = LoggerFactory.getLogger(CompactionSlaEventHelper.class);
  // NOTE(review): the misspelled identifier ("RECOMPATED") is retained because renaming a public
  // constant would break existing callers.
  public static final String RECOMPATED_METADATA_NAME = "recompacted";
  public static final String DATASET_URN = "datasetUrn";
  public static final String DATASET_OUTPUT_PATH = "datasetOutputPath";
  public static final String LATE_RECORD_COUNT = "lateRecordCount";
  public static final String REGULAR_RECORD_COUNT = "regularRecordCount";
  public static final String NEED_RECOMPACT = "needRecompact";
  public static final String PREV_RECORD_COUNT_TOTAL = "prevRecordCountTotal";
  public static final String LAST_RUN_START_TIME = "lastRunStartTime";
  public static final String EXEC_COUNT_TOTAL = "executionCountTotal";
  public static final String MR_JOB_ID = "mrJobId";
  public static final String RECORD_COUNT_TOTAL = "recordCountTotal";
  public static final String DUPLICATE_COUNT_TOTAL = "DuplicateRecordCount";
  public static final String HIVE_REGISTRATION_PATHS = "hiveRegistrationPaths";
  public static final String RENAME_DIR_PATHS = "renameDirPaths";
  public static final String COMPACTION_COMPLETED_EVENT_NAME = "CompactionCompleted";
  public static final String COMPACTION_FAILED_EVENT_NAME = "CompactionFailed";
  public static final String COMPLETION_VERIFICATION_FAILED_EVENT_NAME = "CompletenessCannotBeVerified";
  public static final String COMPLETION_VERIFICATION_SUCCESS_EVENT_NAME = "CompletenessVerified";
  public static final String COMPACTION_RECORD_COUNT_EVENT = "CompactionRecordCounts";
  public static final String COMPACTION_HIVE_REGISTRATION_EVENT = "CompactionHiveRegistration";
  public static final String COMPACTION_MARK_DIR_EVENT = "CompactionMarkDirComplete";

  /**
   * Get an {@link SlaEventSubmitterBuilder} that has dataset urn, partition, record count, previous publish timestamp
   * and dedupe status set.
   * The caller MUST set eventSubmitter, eventname before submitting.
   */
  public static SlaEventSubmitterBuilder getEventSubmitterBuilder(Dataset dataset, Optional<Job> job, FileSystem fs) {
    SlaEventSubmitterBuilder builder =
        SlaEventSubmitter.builder().datasetUrn(dataset.getUrn())
            .partition(dataset.jobProps().getProp(MRCompactor.COMPACTION_JOB_DEST_PARTITION, ""))
            .dedupeStatus(getOutputDedupeStatus(dataset.jobProps()));
    long previousPublishTime = getPreviousPublishTime(dataset, fs);
    long upstreamTime = dataset.jobProps().getPropAsLong(SlaEventKeys.UPSTREAM_TS_IN_MILLI_SECS_KEY, -1L);
    long recordCount = getRecordCount(job);

    // Previous publish only exists when this is a recompact job
    if (previousPublishTime != -1L) {
      builder.previousPublishTimestamp(Long.toString(previousPublishTime));
    }
    // Upstream time is the logical time represented by the compaction input directory
    if (upstreamTime != -1L) {
      builder.upstreamTimestamp(Long.toString(upstreamTime));
    }
    if (recordCount != -1L) {
      builder.recordCount(Long.toString(recordCount));
    }
    return builder;
  }

  /**
   * {@link Deprecated} use {@link #getEventSubmitterBuilder(Dataset, Optional, FileSystem)}
   */
  @Deprecated
  public static void populateState(Dataset dataset, Optional<Job> job, FileSystem fs) {
    dataset.jobProps().setProp(SlaEventKeys.DATASET_URN_KEY, dataset.getUrn());
    dataset.jobProps().setProp(SlaEventKeys.PARTITION_KEY,
        dataset.jobProps().getProp(MRCompactor.COMPACTION_JOB_DEST_PARTITION, ""));
    dataset.jobProps().setProp(SlaEventKeys.DEDUPE_STATUS_KEY, getOutputDedupeStatus(dataset.jobProps()));
    dataset.jobProps().setProp(SlaEventKeys.PREVIOUS_PUBLISH_TS_IN_MILLI_SECS_KEY, getPreviousPublishTime(dataset, fs));
    dataset.jobProps().setProp(SlaEventKeys.RECORD_COUNT_KEY, getRecordCount(job));
  }

  /** Stores {@code time} as the upstream timestamp SLA property on {@code state}. */
  public static void setUpstreamTimeStamp(State state, long time) {
    state.setProp(SlaEventKeys.UPSTREAM_TS_IN_MILLI_SECS_KEY, Long.toString(time));
  }

  /**
   * @return the modification time of the compaction-complete marker file under the dataset's
   *         output path, or {@code -1L} if it cannot be read (e.g. this is not a recompact run).
   */
  private static long getPreviousPublishTime(Dataset dataset, FileSystem fs) {
    Path compactionCompletePath = new Path(dataset.outputPath(), MRCompactor.COMPACTION_COMPLETE_FILE_NAME);
    try {
      return fs.getFileStatus(compactionCompletePath).getModificationTime();
    } catch (IOException e) {
      // Best effort: a missing marker file simply means there was no previous publish.
      LOG.debug("Failed to get previous publish time.", e);
    }
    return -1L;
  }

  /** @return the dedupe status string derived from the compaction output configuration. */
  private static String getOutputDedupeStatus(State state) {
    return state.getPropAsBoolean(MRCompactor.COMPACTION_OUTPUT_DEDUPLICATED,
        MRCompactor.DEFAULT_COMPACTION_OUTPUT_DEDUPLICATED) ? DedupeStatus.DEDUPED.toString()
        : DedupeStatus.NOT_DEDUPED.toString();
  }

  /**
   * Extracts the record count from the MR job's counters, preferring the reducer counter and
   * falling back to the mapper counter.
   *
   * @return the first non-zero record count found, or {@code -1L} if the job or its counters
   *         are unavailable or both counters are zero/absent.
   */
  private static long getRecordCount(Optional<Job> job) {
    if (!job.isPresent()) {
      return -1L;
    }

    Counters counters;
    try {
      counters = job.get().getCounters();
    } catch (IOException e) {
      LOG.debug("Failed to get job counters. Record count will not be set. ", e);
      return -1L;
    }

    Counter recordCounter = counters.findCounter(RecordKeyDedupReducerBase.EVENT_COUNTER.RECORD_COUNT);
    if (recordCounter != null && recordCounter.getValue() != 0) {
      return recordCounter.getValue();
    }

    recordCounter = counters.findCounter(RecordKeyMapperBase.EVENT_COUNTER.RECORD_COUNT);
    if (recordCounter != null && recordCounter.getValue() != 0) {
      return recordCounter.getValue();
    }

    LOG.debug("Non zero record count not found in both mapper and reducer counters");
    return -1L;
  }
}
| 1,883 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/event/DedupeStatus.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.event;
/**
 * Whether the output of a compaction run was deduplicated; used as the dedupe-status value
 * in compaction SLA event metadata.
 */
public enum DedupeStatus {
  /** Output records were deduplicated during compaction. */
  DEDUPED,
  /** Output records were compacted without deduplication. */
  NOT_DEDUPED
}
| 1,884 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/listeners/CompactorListenerFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.listeners;
import java.util.Properties;
import com.google.common.base.Optional;
import org.apache.gobblin.annotation.Alpha;
/**
 * A factory for creating {@link CompactorListener}s.
 */
@Alpha
public interface CompactorListenerFactory {

  /**
   * Creates a {@link CompactorListener}, if none are specified returns {@link Optional#absent()}.
   *
   * @param properties a {@link Properties} object used to create a {@link CompactorListener}.
   *
   * @return {@link Optional#absent()} if no {@link CompactorListener} is present, else returns a {@link CompactorListener}.
   *
   * @throws CompactorListenerCreationException if there is a problem creating the {@link CompactorListener}.
   */
  // The "public" modifier is redundant on interface members and has been dropped.
  Optional<CompactorListener> createCompactorListener(Properties properties)
      throws CompactorListenerCreationException;
}
| 1,885 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/listeners/ReflectionCompactorListenerFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.listeners;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import org.apache.commons.lang3.reflect.ConstructorUtils;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Strings;
import org.apache.gobblin.configuration.State;
/**
 * {@link CompactorListenerFactory} implementation that instantiates {@link CompactorListener}s
 * reflectively. The config key {@link #COMPACTOR_LISTENERS} holds a comma-separated list of
 * listener class names; the resulting listeners are invoked serially.
 */
public class ReflectionCompactorListenerFactory implements CompactorListenerFactory {

  @VisibleForTesting
  static final String COMPACTOR_LISTENERS = "compactor.listeners";

  @Override
  public Optional<CompactorListener> createCompactorListener(Properties properties)
      throws CompactorListenerCreationException {
    State state = new State(properties);
    String configuredListeners = state.getProp(COMPACTOR_LISTENERS);
    if (Strings.isNullOrEmpty(configuredListeners)) {
      // Nothing configured: signal "no listener" rather than an empty composite.
      return Optional.absent();
    }
    List<CompactorListener> compactorListeners = new ArrayList<>();
    for (String className : state.getPropAsList(COMPACTOR_LISTENERS)) {
      try {
        CompactorListener listener =
            (CompactorListener) ConstructorUtils.invokeConstructor(Class.forName(className), properties);
        compactorListeners.add(listener);
      } catch (ReflectiveOperationException e) {
        throw new CompactorListenerCreationException(String
            .format("Unable to create CompactorListeners from key \"%s\" with value \"%s\"", COMPACTOR_LISTENERS,
                properties.getProperty(COMPACTOR_LISTENERS)), e);
      }
    }
    return Optional.<CompactorListener>of(new SerialCompactorListener(compactorListeners));
  }
}
| 1,886 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/listeners/CompactorListenerCreationException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.listeners;
/**
 * Thrown by {@link CompactorListenerFactory} if there is a problem creating a {@link CompactorListener}.
 */
public class CompactorListenerCreationException extends Exception {
  private static final long serialVersionUID = 1L;
  /**
   * @param message description of the creation failure
   * @param cause the underlying reflective/instantiation failure
   */
  public CompactorListenerCreationException(String message, Throwable cause) {
    super(message, cause);
  }
}
| 1,887 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/listeners/SerialCompactorListener.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.listeners;
import java.util.List;
import lombok.AllArgsConstructor;
import org.apache.gobblin.compaction.dataset.Dataset;
/**
 * A {@link CompactorListener} that fans each notification out to an ordered list of delegate
 * listeners, invoking them one after another. Any exception from a delegate stops the chain.
 */
@AllArgsConstructor
public class SerialCompactorListener implements CompactorListener {
  private final List<CompactorListener> listeners;

  @Override
  public void onDatasetCompactionCompletion(Dataset dataset) throws Exception {
    for (CompactorListener delegate : this.listeners) {
      delegate.onDatasetCompactionCompletion(dataset);
    }
  }
}
| 1,888 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/listeners/CompactorListener.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.listeners;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.compaction.Compactor;
import org.apache.gobblin.compaction.dataset.Dataset;
/**
 * A listener for a {@link Compactor}.
 */
@Alpha
public interface CompactorListener {

  /**
   * Invoked after the compaction for a {@link Dataset} has been completed.
   *
   * @param dataset the {@link Dataset} whose compaction completed
   */
  // The "public" modifier is redundant on interface members and has been dropped.
  void onDatasetCompactionCompletion(Dataset dataset) throws Exception;
}
| 1,889 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/listeners/SimpleCompactorCompletionListener.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.listeners;
import java.util.Set;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.compaction.dataset.Dataset;
import org.apache.gobblin.compaction.mapreduce.MRCompactor;
import org.apache.gobblin.configuration.State;
/**
 * A {@link CompactorCompletionListener} that logs compaction completion and flags, at ERROR level,
 * any dataset whose state is not {@code COMPACTION_COMPLETE}.
 */
public class SimpleCompactorCompletionListener implements CompactorCompletionListener {
  private static final Logger logger = LoggerFactory.getLogger(SimpleCompactorCompletionListener.class);

  private SimpleCompactorCompletionListener(State state) {
    // No configuration needed; instances are created via the Factory below.
  }

  @Override
  public void onCompactionCompletion(MRCompactor compactor) {
    // SLF4J parameterized logging instead of eager String.format / string concatenation.
    logger.info("Compaction (started on : {}) is finished", compactor.getInitializeTime());
    Set<Dataset> datasets = compactor.getDatasets();
    for (Dataset dataset : datasets) {
      if (dataset.state() != Dataset.DatasetState.COMPACTION_COMPLETE) {
        // Surface datasets that ended in an unexpected state so operators can investigate.
        logger.error("Dataset {} {}", dataset.getDatasetName(), dataset.state().name());
      }
    }
  }

  /** Factory registered under the "SimpleCompactorCompletionHook" alias. */
  @Alias("SimpleCompactorCompletionHook")
  public static class Factory implements CompactorCompletionListenerFactory {
    @Override
    public CompactorCompletionListener createCompactorCompactionListener(State state) {
      return new SimpleCompactorCompletionListener(state);
    }
  }
}
| 1,890 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/listeners/CompactorCompletionListenerFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.listeners;
import org.apache.gobblin.configuration.State;
/**
 * Factory for creating {@link CompactorCompletionListener} instances from job {@link State}.
 */
public interface CompactorCompletionListenerFactory {
  // NOTE(review): the method name says "CompactionListener" while the return type is a
  // "CompletionListener" — presumably a historical typo; renaming would break implementers.
  CompactorCompletionListener createCompactorCompactionListener (State state);
}
| 1,891 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/listeners/CompactorCompletionListener.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.listeners;
import org.apache.gobblin.compaction.mapreduce.MRCompactor;
/**
 * Callback invoked when an {@link MRCompactor} run finishes.
 */
public interface CompactorCompletionListener {
  /**
   * @param compactor the compactor whose run has completed
   */
  void onCompactionCompletion(MRCompactor compactor);
}
| 1,892 |
0 | Create_ds/gobblin/gobblin-yarn/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-yarn/src/test/java/org/apache/gobblin/yarn/GobblinYarnTestUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.yarn;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.yarn.api.records.Resource;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
/**
 * Utility class with static helpers for Gobblin-on-YARN tests: mock filesystems that issue
 * delegation tokens, token files on disk, and container request bundles.
 */
public class GobblinYarnTestUtils {
  /**
   * A utility method for generating a {@link org.apache.hadoop.fs.FileSystemTestHelper.MockFileSystem} instance
   * that can return a delegation token on {@link org.apache.hadoop.fs.FileSystem#getDelegationToken(String)}.
   *
   * @param service the canonical service name the mock filesystem reports and stamps on each token
   * @return a Mockito-stubbed mock filesystem whose {@code getDelegationToken} yields a fresh token per call
   * @throws IOException declared for compatibility with the filesystem methods being stubbed
   */
  public static FileSystemTestHelper.MockFileSystem createFileSystemForServiceName(final String service)
      throws IOException {
    FileSystemTestHelper.MockFileSystem mockFs = new FileSystemTestHelper.MockFileSystem();
    Mockito.when(mockFs.getCanonicalServiceName()).thenReturn(service);
    Mockito.when(mockFs.getDelegationToken(Mockito.any(String.class))).thenAnswer(new Answer<Token<?>>() {
      // Monotonically increasing counter; each stubbed call produces a token with a distinct kind.
      int unique = 0;
      @Override
      public Token<?> answer(InvocationOnMock invocation) throws Throwable {
        Token<?> token = new Token<TokenIdentifier>();
        token.setService(new Text(service));
        // use unique value so when we restore from token storage, we can
        // tell if it's really the same token
        token.setKind(new Text("token" + unique++));
        return token;
      }
    });
    return mockFs;
  }
  /**
   * Writes a token storage file containing one delegation token for {@code serviceName} to {@code path}.
   *
   * @param path destination of the serialized token storage file
   * @param serviceName the service the generated delegation token is issued for
   * @throws IOException if the token cannot be obtained or the file cannot be written
   */
  public static void createTokenFileForService(Path path, String serviceName)
      throws IOException {
    FileSystem fileSystem = createFileSystemForServiceName(serviceName);
    Token<?> token = fileSystem.getDelegationToken(serviceName);
    Credentials credentials = new Credentials();
    credentials.addToken(token.getService(), token);
    credentials.writeTokenStorageFile(path, new Configuration());
  }
  /**
   * Builds a {@link YarnContainerRequestBundle} requesting {@code n} containers with the given
   * {@code resource} profile, registered under the "GobblinKafkaStreaming" name.
   */
  public static YarnContainerRequestBundle createYarnContainerRequest(int n, Resource resource) {
    YarnContainerRequestBundle yarnContainerRequestBundle = new YarnContainerRequestBundle();
    yarnContainerRequestBundle.add("GobblinKafkaStreaming", n, resource);
    return yarnContainerRequestBundle;
  }
}
0 | Create_ds/gobblin/gobblin-yarn/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-yarn/src/test/java/org/apache/gobblin/yarn/YarnAutoScalingManagerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.yarn;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.HelixProperty;
import org.apache.helix.PropertyKey;
import org.apache.helix.task.JobConfig;
import org.apache.helix.task.JobContext;
import org.apache.helix.task.JobDag;
import org.apache.helix.task.TargetState;
import org.apache.helix.task.TaskDriver;
import org.apache.helix.task.TaskState;
import org.apache.helix.task.WorkflowConfig;
import org.apache.helix.task.WorkflowContext;
import org.mockito.ArgumentCaptor;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import org.apache.gobblin.cluster.GobblinClusterConfigurationKeys;
import static org.mockito.Mockito.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
/**
* Unit tests for {@link YarnAutoScalingManager}
*/
@Test(groups = { "gobblin.yarn" })
public class YarnAutoScalingManagerTest {
// A queue within size == 1 and upperBound == "infinite" should not impact on the execution.
private final static YarnAutoScalingManager.SlidingWindowReservoir noopQueue =
new YarnAutoScalingManager.SlidingWindowReservoir(1, Integer.MAX_VALUE);
private final static int defaultContainerMemory = 1024;
private final static int defaultContainerCores = 2;
private final static String defaultHelixTag = "DefaultHelixTag";
  /**
   * Test for one workflow with one job: the auto-scaling runnable should request 2 containers
   * (one per partition of job1) while reporting the single in-use task-runner instance.
   */
  @Test
  public void testOneJob() {
    YarnService mockYarnService = mock(YarnService.class);
    TaskDriver mockTaskDriver = mock(TaskDriver.class);
    // One START-ed workflow ("workflow1") containing a single IN_PROGRESS job.
    WorkflowConfig mockWorkflowConfig =
        getWorkflowConfig(mockTaskDriver, ImmutableSet.of("job1"), TaskState.IN_PROGRESS, TargetState.START, "workflow1");
    Mockito.when(mockTaskDriver.getWorkflows()).thenReturn(ImmutableMap.of("workflow1", mockWorkflowConfig));
    // job1 has partitions 1 and 2; partition 2 is assigned to "GobblinYarnTaskRunner-1".
    getJobContext(mockTaskDriver, ImmutableMap.of(2, "GobblinYarnTaskRunner-1"), "job1", ImmutableSet.of(1, 2));
    // Live Helix instances include the cluster manager and one task runner.
    HelixDataAccessor helixDataAccessor = getHelixDataAccessor(Arrays.asList("GobblinClusterManager", "GobblinYarnTaskRunner-1"));
    YarnAutoScalingManager.YarnAutoScalingRunnable runnable =
        new YarnAutoScalingManager.YarnAutoScalingRunnable(mockTaskDriver, mockYarnService, 1,
            1.0, noopQueue, helixDataAccessor, defaultHelixTag, defaultContainerMemory, defaultContainerCores);
    runnable.run();
    ArgumentCaptor<YarnContainerRequestBundle> argument = ArgumentCaptor.forClass(YarnContainerRequestBundle.class);
    // 2 containers requested and one worker in use
    Mockito.verify(mockYarnService, times(1)).
        requestTargetNumberOfContainers(argument.capture(),
            eq(ImmutableSet.of("GobblinYarnTaskRunner-1")));
    Assert.assertEquals(argument.getValue().getTotalContainers(), 2);
  }
/**
* Test for one workflow with two jobs
*/
@Test
public void testTwoJobs() {
YarnService mockYarnService = mock(YarnService.class);
TaskDriver mockTaskDriver = mock(TaskDriver.class);
WorkflowConfig mockWorkflowConfig =
getWorkflowConfig(mockTaskDriver, ImmutableSet.of("job1", "job2"), TaskState.IN_PROGRESS, TargetState.START, "workflow1");
Mockito.when(mockTaskDriver.getWorkflows())
.thenReturn(ImmutableMap.of("workflow1", mockWorkflowConfig));
getJobContext(mockTaskDriver, ImmutableMap.of(2, "GobblinYarnTaskRunner-1"), "job1", ImmutableSet.of(1, 2));
getJobContext(mockTaskDriver, ImmutableMap.of(3, "GobblinYarnTaskRunner-2"), "job2");
HelixDataAccessor helixDataAccessor = getHelixDataAccessor(Arrays.asList("GobblinYarnTaskRunner-1", "GobblinYarnTaskRunner-2"));
YarnAutoScalingManager.YarnAutoScalingRunnable runnable =
new YarnAutoScalingManager.YarnAutoScalingRunnable(mockTaskDriver, mockYarnService, 1,
1.0, noopQueue, helixDataAccessor, defaultHelixTag, defaultContainerMemory, defaultContainerCores);
runnable.run();
// 3 containers requested and 2 workers in use
ArgumentCaptor<YarnContainerRequestBundle> argument = ArgumentCaptor.forClass(YarnContainerRequestBundle.class);
Mockito.verify(mockYarnService, times(1)).
requestTargetNumberOfContainers(argument.capture(),
eq(ImmutableSet.of("GobblinYarnTaskRunner-1", "GobblinYarnTaskRunner-2")));
Assert.assertEquals(argument.getValue().getTotalContainers(), 3);
}
/**
* Test for two workflows
*/
@Test
public void testTwoWorkflows() {
YarnService mockYarnService = mock(YarnService.class);
TaskDriver mockTaskDriver = mock(TaskDriver.class);
WorkflowConfig mockWorkflowConfig1 =
getWorkflowConfig(mockTaskDriver, ImmutableSet.of("job1", "job2"), TaskState.IN_PROGRESS, TargetState.START, "workflow1");
WorkflowConfig mockWorkflowConfig2 =
getWorkflowConfig(mockTaskDriver, ImmutableSet.of("job3"), TaskState.IN_PROGRESS, TargetState.START, "workflow2");
Mockito.when(mockTaskDriver.getWorkflows())
.thenReturn(ImmutableMap.of("workflow1", mockWorkflowConfig1, "workflow2", mockWorkflowConfig2));
getJobContext(mockTaskDriver, ImmutableMap.of(2, "GobblinYarnTaskRunner-1"), "job1", ImmutableSet.of(1, 2));
getJobContext(mockTaskDriver, ImmutableMap.of(3, "GobblinYarnTaskRunner-2"), "job2");
getJobContext(mockTaskDriver, ImmutableMap.of(4, "GobblinYarnTaskRunner-3"), "job3", ImmutableSet.of(4,5));
HelixDataAccessor helixDataAccessor = getHelixDataAccessor(Arrays.asList(
"GobblinYarnTaskRunner-1", "GobblinYarnTaskRunner-2","GobblinYarnTaskRunner-3"));
YarnAutoScalingManager.YarnAutoScalingRunnable runnable =
new YarnAutoScalingManager.YarnAutoScalingRunnable(mockTaskDriver, mockYarnService, 1,
1.0, noopQueue, helixDataAccessor, defaultHelixTag, defaultContainerMemory, defaultContainerCores);
runnable.run();
// 5 containers requested and 3 workers in use
assertContainerRequest(mockYarnService, 5,
ImmutableSet.of("GobblinYarnTaskRunner-1", "GobblinYarnTaskRunner-2", "GobblinYarnTaskRunner-3"));
}
/**
* Test for three workflows with one not in progress and one marked for delete.
* The partitions for the workflow that is not in progress or is marked for delete should not be counted.
*/
@Test
public void testNotInProgressOrBeingDeleted() {
YarnService mockYarnService = mock(YarnService.class);
TaskDriver mockTaskDriver = mock(TaskDriver.class);
WorkflowConfig workflowInProgress = getWorkflowConfig(mockTaskDriver, ImmutableSet.of("job-inProgress-1", "job-inProgress-2"), TaskState.IN_PROGRESS, TargetState.START, "workflowInProgress");
WorkflowConfig workflowCompleted = getWorkflowConfig(mockTaskDriver, ImmutableSet.of("job-complete-1"), TaskState.COMPLETED, TargetState.STOP, "workflowCompleted");
WorkflowConfig workflowSetToBeDeleted = getWorkflowConfig(mockTaskDriver, ImmutableSet.of("job-setToDelete-1"), TaskState.IN_PROGRESS, TargetState.DELETE, "workflowSetToBeDeleted");
Mockito.when(mockTaskDriver.getWorkflows()).thenReturn(ImmutableMap.of(
"workflowInProgress", workflowInProgress,
"workflowCompleted", workflowCompleted,
"workflowSetToBeDeleted", workflowSetToBeDeleted));
getJobContext(mockTaskDriver, ImmutableMap.of(1, "GobblinYarnTaskRunner-1"), "job-inProgress-1",
ImmutableSet.of(1,2));
getJobContext(mockTaskDriver, ImmutableMap.of(2, "GobblinYarnTaskRunner-2"), "job-inProgress-2");
getJobContext(mockTaskDriver, ImmutableMap.of(1, "GobblinYarnTaskRunner-3"), "job-setToDelete-1");
getJobContext(mockTaskDriver, ImmutableMap.of(1, "GobblinYarnTaskRunner-4"), "job-complete-1",
ImmutableSet.of(1, 5));
HelixDataAccessor helixDataAccessor = getHelixDataAccessor(
Arrays.asList("GobblinClusterManager",
"GobblinYarnTaskRunner-1", "GobblinYarnTaskRunner-2", "GobblinYarnTaskRunner-3", "GobblinYarnTaskRunner-4"));
YarnAutoScalingManager.YarnAutoScalingRunnable runnable =
new YarnAutoScalingManager.YarnAutoScalingRunnable(mockTaskDriver, mockYarnService, 1,
1.0, noopQueue, helixDataAccessor, defaultHelixTag, defaultContainerMemory, defaultContainerCores);
runnable.run();
// 3 containers requested and 4 workers in use
assertContainerRequest(mockYarnService, 3,
ImmutableSet.of("GobblinYarnTaskRunner-1", "GobblinYarnTaskRunner-2", "GobblinYarnTaskRunner-3", "GobblinYarnTaskRunner-4"));
}
/**
* Test multiple partitions to one container
*/
@Test
public void testMultiplePartitionsPerContainer() {
YarnService mockYarnService = mock(YarnService.class);
TaskDriver mockTaskDriver = mock(TaskDriver.class);
WorkflowConfig mockWorkflowConfig =
getWorkflowConfig(mockTaskDriver, ImmutableSet.of("job1"), TaskState.IN_PROGRESS, TargetState.START, "workflow1");
Mockito.when(mockTaskDriver.getWorkflows())
.thenReturn(ImmutableMap.of("workflow1", mockWorkflowConfig));
getJobContext(mockTaskDriver, ImmutableMap.of(2, "GobblinYarnTaskRunner-1"), "job1");
HelixDataAccessor helixDataAccessor = getHelixDataAccessor(Arrays.asList("GobblinYarnTaskRunner-1"));
YarnAutoScalingManager.YarnAutoScalingRunnable runnable =
new YarnAutoScalingManager.YarnAutoScalingRunnable(mockTaskDriver, mockYarnService, 2,
1.0, noopQueue, helixDataAccessor, defaultHelixTag, defaultContainerMemory, defaultContainerCores);
runnable.run();
// 1 container requested since 2 partitions and limit is 2 partitions per container. One worker in use.
assertContainerRequest(mockYarnService, 1, ImmutableSet.of("GobblinYarnTaskRunner-1"));
}
@Test
public void testOverprovision() {
YarnService mockYarnService = mock(YarnService.class);
TaskDriver mockTaskDriver = mock(TaskDriver.class);
WorkflowConfig mockWorkflowConfig =
getWorkflowConfig(mockTaskDriver, ImmutableSet.of("job1"), TaskState.IN_PROGRESS, TargetState.START, "workflow1");
Mockito.when(mockTaskDriver.getWorkflows())
.thenReturn(ImmutableMap.of("workflow1", mockWorkflowConfig));
getJobContext(mockTaskDriver, ImmutableMap.of(2, "GobblinYarnTaskRunner-1"), "job1", ImmutableSet.of(1, 2));
HelixDataAccessor helixDataAccessor = getHelixDataAccessor(Arrays.asList("GobblinYarnTaskRunner-1"));
YarnAutoScalingManager.YarnAutoScalingRunnable runnable1 =
new YarnAutoScalingManager.YarnAutoScalingRunnable(mockTaskDriver, mockYarnService, 1,
1.2, noopQueue, helixDataAccessor, defaultHelixTag, defaultContainerMemory, defaultContainerCores);
runnable1.run();
// 3 containers requested to max and one worker in use
// NumPartitions = 2, Partitions per container = 1 and overprovision = 1.2
// so targetNumContainers = Ceil((2/1) * 1.2)) = 3.
assertContainerRequest(mockYarnService, 3, ImmutableSet.of("GobblinYarnTaskRunner-1"));
Mockito.reset(mockYarnService);
YarnAutoScalingManager.YarnAutoScalingRunnable runnable2 =
new YarnAutoScalingManager.YarnAutoScalingRunnable(mockTaskDriver, mockYarnService, 1,
0.1, noopQueue, helixDataAccessor, defaultHelixTag, defaultContainerMemory, defaultContainerCores);
runnable2.run();
// 3 containers requested to max and one worker in use
// NumPartitions = 2, Partitions per container = 1 and overprovision = 1.2
// so targetNumContainers = Ceil((2/1) * 0.1)) = 1.
assertContainerRequest(mockYarnService, 1, ImmutableSet.of("GobblinYarnTaskRunner-1"));
Mockito.reset(mockYarnService);
YarnAutoScalingManager.YarnAutoScalingRunnable runnable3 =
new YarnAutoScalingManager.YarnAutoScalingRunnable(mockTaskDriver, mockYarnService, 1,
6.0, noopQueue, helixDataAccessor, defaultHelixTag, defaultContainerMemory, defaultContainerCores);
runnable3.run();
// 3 containers requested to max and one worker in use
// NumPartitions = 2, Partitions per container = 1 and overprovision = 6.0,
// so targetNumContainers = Ceil((2/1) * 6.0)) = 12.
assertContainerRequest(mockYarnService, 12, ImmutableSet.of("GobblinYarnTaskRunner-1"));
}
/**
* Test suppressed exception
*/
@Test
public void testSuppressedException() {
YarnService mockYarnService = mock(YarnService.class);
TaskDriver mockTaskDriver = mock(TaskDriver.class);
WorkflowConfig mockWorkflowConfig = getWorkflowConfig(mockTaskDriver, ImmutableSet.of("job1"), TaskState.IN_PROGRESS, TargetState.START, "workflow1");
Mockito.when(mockTaskDriver.getWorkflows()).thenReturn(ImmutableMap.of("workflow1", mockWorkflowConfig));
getJobContext(mockTaskDriver, ImmutableMap.of(2, "GobblinYarnTaskRunner-1"), "job1", ImmutableSet.of(1, 2));
HelixDataAccessor helixDataAccessor = getHelixDataAccessor(Arrays.asList("GobblinYarnTaskRunner-1"));
TestYarnAutoScalingRunnable runnable =
new TestYarnAutoScalingRunnable(mockTaskDriver, mockYarnService, 1, helixDataAccessor);
runnable.setRaiseException(true);
runnable.run();
ArgumentCaptor<YarnContainerRequestBundle> argument = ArgumentCaptor.forClass(YarnContainerRequestBundle.class);
Mockito.verify(mockYarnService, times(0)).
requestTargetNumberOfContainers(argument.capture(),
eq(ImmutableSet.of("GobblinYarnTaskRunner-1")));
Mockito.reset(mockYarnService);
runnable.setRaiseException(false);
runnable.run();
// 2 container requested
assertContainerRequest(mockYarnService, 2, ImmutableSet.of("GobblinYarnTaskRunner-1"));
}
public void testMaxValueEvictingQueue() {
Resource resource = Resource.newInstance(16, 1);
YarnAutoScalingManager.SlidingWindowReservoir window = new YarnAutoScalingManager.SlidingWindowReservoir(3, 10);
// Normal insertion with eviction of originally largest value
window.add(GobblinYarnTestUtils.createYarnContainerRequest(3, resource));
window.add(GobblinYarnTestUtils.createYarnContainerRequest(1, resource));
window.add(GobblinYarnTestUtils.createYarnContainerRequest(2, resource));
// Now it contains [3,1,2]
Assert.assertEquals(window.getMax().getTotalContainers(), 3);
window.add(GobblinYarnTestUtils.createYarnContainerRequest(1, resource));
// Now it contains [1,2,1]
Assert.assertEquals(window.getMax().getTotalContainers(), 2);
window.add(GobblinYarnTestUtils.createYarnContainerRequest(5, resource));
Assert.assertEquals(window.getMax().getTotalContainers(), 5);
// Now it contains [2,1,5]
window.add(GobblinYarnTestUtils.createYarnContainerRequest(11, resource));
// Still [2,1,5] as 11 > 10 thereby being rejected.
Assert.assertEquals(window.getMax().getTotalContainers(), 5);
}
/**
* Test the scenarios when an instance in cluster has no participants assigned for too long and got tagged as the
* candidate for scaling-down.
*/
@Test
public void testInstanceIdleBeyondTolerance() {
YarnService mockYarnService = mock(YarnService.class);
TaskDriver mockTaskDriver = mock(TaskDriver.class);
WorkflowConfig mockWorkflowConfig = getWorkflowConfig(mockTaskDriver, ImmutableSet.of("job1"), TaskState.IN_PROGRESS, TargetState.START, "workflow1");
Mockito.when(mockTaskDriver.getWorkflows()).thenReturn(ImmutableMap.of("workflow1", mockWorkflowConfig));
// Having both partition assigned to single instance initially, in this case, GobblinYarnTaskRunner-2
getJobContext(mockTaskDriver, ImmutableMap.of(1,"GobblinYarnTaskRunner-2", 2, "GobblinYarnTaskRunner-2"), "job1");
HelixDataAccessor helixDataAccessor = getHelixDataAccessor(Arrays.asList("GobblinYarnTaskRunner-1", "GobblinYarnTaskRunner-2"));
TestYarnAutoScalingRunnable runnable = new TestYarnAutoScalingRunnable(mockTaskDriver, mockYarnService,
1, helixDataAccessor);
runnable.run();
// 2 containers requested and one worker in use, while the evaluation will hold for true if not set externally,
// still tell YarnService there are two instances being used.
assertContainerRequest(mockYarnService, 2, ImmutableSet.of("GobblinYarnTaskRunner-1", "GobblinYarnTaskRunner-2"));
// Set failEvaluation which simulates the "beyond tolerance" case.
Mockito.reset(mockYarnService);
runnable.setAlwaysTagUnused(true);
runnable.run();
assertContainerRequest(mockYarnService, 2, ImmutableSet.of("GobblinYarnTaskRunner-2"));
}
@Test
public void testFlowsWithHelixTags() {
YarnService mockYarnService = mock(YarnService.class);
TaskDriver mockTaskDriver = mock(TaskDriver.class);
WorkflowConfig mockWorkflowConfig1 =
getWorkflowConfig(mockTaskDriver, ImmutableSet.of("job1", "job2"), TaskState.IN_PROGRESS, TargetState.START, "workflow1");
WorkflowConfig mockWorkflowConfig2 =
getWorkflowConfig(mockTaskDriver, ImmutableSet.of("job3"), TaskState.IN_PROGRESS, TargetState.START, "workflow2");
Mockito.when(mockTaskDriver.getWorkflows())
.thenReturn(ImmutableMap.of("workflow1", mockWorkflowConfig1, "workflow2", mockWorkflowConfig2));
getJobContext(mockTaskDriver, ImmutableMap.of(2, "GobblinYarnTaskRunner-1"), "job1", ImmutableSet.of(1, 2));
getJobContext(mockTaskDriver, ImmutableMap.of(3, "GobblinYarnTaskRunner-2"), "job2");
getJobContext(mockTaskDriver, ImmutableMap.of(4, "GobblinYarnTaskRunner-3"), "job3", ImmutableSet.of(4, 5));
JobConfig mockJobConfig3 = mock(JobConfig.class);
Mockito.when(mockTaskDriver.getJobConfig("job3")).thenReturn(mockJobConfig3);
String helixTag = "test-Tag1";
Map<String, String> resourceMap = ImmutableMap.of(
GobblinClusterConfigurationKeys.HELIX_JOB_CONTAINER_MEMORY_MBS, "512",
GobblinClusterConfigurationKeys.HELIX_JOB_CONTAINER_CORES, "8"
);
Mockito.when(mockJobConfig3.getInstanceGroupTag()).thenReturn(helixTag);
Mockito.when(mockJobConfig3.getJobCommandConfigMap()).thenReturn(resourceMap);
HelixDataAccessor helixDataAccessor = getHelixDataAccessor(
Arrays.asList("GobblinYarnTaskRunner-1", "GobblinYarnTaskRunner-2", "GobblinYarnTaskRunner-3"));
YarnAutoScalingManager.YarnAutoScalingRunnable runnable =
new YarnAutoScalingManager.YarnAutoScalingRunnable(mockTaskDriver, mockYarnService, 1,
1.0, noopQueue, helixDataAccessor, defaultHelixTag, defaultContainerMemory, defaultContainerCores);
runnable.run();
// 5 containers requested and 3 workers in use
ArgumentCaptor<YarnContainerRequestBundle> argument = ArgumentCaptor.forClass(YarnContainerRequestBundle.class);
assertContainerRequest(argument, mockYarnService, 5, ImmutableSet.of("GobblinYarnTaskRunner-1", "GobblinYarnTaskRunner-2", "GobblinYarnTaskRunner-3"));
// Verify that 3 containers requested with default tag and resource setting,
// while 2 with specific helix tag and resource requirement
Map<String, Set<String>> resourceHelixTagMap = argument.getValue().getResourceHelixTagMap();
Map<String, Resource> helixTagResourceMap = argument.getValue().getHelixTagResourceMap();
Map<String, Integer> helixTagContainerCountMap = argument.getValue().getHelixTagContainerCountMap();
Assert.assertEquals(resourceHelixTagMap.size(), 2);
Assert.assertEquals(helixTagResourceMap.get(helixTag), Resource.newInstance(512, 8));
Assert.assertEquals(helixTagResourceMap.get(defaultHelixTag), Resource.newInstance(defaultContainerMemory, defaultContainerCores));
Assert.assertEquals((int) helixTagContainerCountMap.get(helixTag), 2);
Assert.assertEquals((int) helixTagContainerCountMap.get(defaultHelixTag), 3);
}
private HelixDataAccessor getHelixDataAccessor(List<String> taskRunners) {
HelixDataAccessor helixDataAccessor = mock(HelixDataAccessor.class);
Mockito.when(helixDataAccessor.keyBuilder()).thenReturn(new PropertyKey.Builder("cluster"));
Mockito.when(helixDataAccessor.getChildValuesMap(Mockito.any())).thenReturn(
taskRunners.stream().collect(Collectors.toMap((name) -> name, (name) -> new HelixProperty(""))));
return helixDataAccessor;
}
private WorkflowConfig getWorkflowConfig(TaskDriver mockTaskDriver, ImmutableSet<String> jobNames,
TaskState taskState, TargetState targetState, String workflowName) {
WorkflowConfig mockWorkflowConfig1 = mock(WorkflowConfig.class);
JobDag mockJobDag1 = mock(JobDag.class);
Mockito.when(mockJobDag1.getAllNodes()).thenReturn(jobNames);
Mockito.when(mockWorkflowConfig1.getJobDag()).thenReturn(mockJobDag1);
Mockito.when(mockWorkflowConfig1.getTargetState()).thenReturn(targetState);
WorkflowContext mockWorkflowContext1 = mock(WorkflowContext.class);
Mockito.when(mockWorkflowContext1.getWorkflowState()).thenReturn(taskState);
Mockito.when(mockTaskDriver.getWorkflowContext(workflowName)).thenReturn(mockWorkflowContext1);
return mockWorkflowConfig1;
}
private JobContext getJobContext(TaskDriver mockTaskDriver, Map<Integer, String> assignedParticipantMap, String jobName) {
return getJobContext(mockTaskDriver, assignedParticipantMap, jobName, assignedParticipantMap.keySet());
}
private JobContext getJobContext(
TaskDriver mockTaskDriver,
Map<Integer, String> assignedParticipantMap,
String jobName,
Set<Integer> partitionSet) {
JobContext mockJobContext = mock(JobContext.class);
Mockito.when(mockJobContext.getPartitionSet()).thenReturn(ImmutableSet.copyOf(partitionSet));
for (Map.Entry<Integer, String> entry : assignedParticipantMap.entrySet()) {
Mockito.when(mockJobContext.getAssignedParticipant(entry.getKey())).thenReturn(entry.getValue());
}
Mockito.when(mockTaskDriver.getJobContext(jobName)).thenReturn(mockJobContext);
return mockJobContext;
}
private void assertContainerRequest(ArgumentCaptor<YarnContainerRequestBundle> argument, YarnService mockYarnService, int expectedNumberOfContainers,
ImmutableSet<String> expectedInUseInstances) {
ArgumentCaptor.forClass(YarnContainerRequestBundle.class);
Mockito.verify(mockYarnService, times(1)).
requestTargetNumberOfContainers(argument.capture(),
eq(expectedInUseInstances));
Assert.assertEquals(argument.getValue().getTotalContainers(), expectedNumberOfContainers);
}
private void assertContainerRequest(YarnService mockYarnService, int expectedNumberOfContainers,
ImmutableSet<String> expectedInUseInstances) {
assertContainerRequest(ArgumentCaptor.forClass(YarnContainerRequestBundle.class), mockYarnService, expectedNumberOfContainers, expectedInUseInstances);
}
private static class TestYarnAutoScalingRunnable extends YarnAutoScalingManager.YarnAutoScalingRunnable {
boolean raiseException = false;
boolean alwaysUnused = false;
public TestYarnAutoScalingRunnable(TaskDriver taskDriver, YarnService yarnService, int partitionsPerContainer,
HelixDataAccessor helixDataAccessor) {
super(taskDriver, yarnService, partitionsPerContainer, 1.0,
noopQueue, helixDataAccessor, defaultHelixTag, defaultContainerMemory, defaultContainerCores);
}
@Override
void runInternal() {
if (this.raiseException) {
throw new RuntimeException("Test exception");
} else {
super.runInternal();
}
}
void setRaiseException(boolean raiseException) {
this.raiseException = raiseException;
}
void setAlwaysTagUnused(boolean alwaysUnused) {
this.alwaysUnused = alwaysUnused;
}
@Override
boolean isInstanceUnused(String participant) {
return alwaysUnused || super.isInstanceUnused(participant);
}
}
}
| 1,894 |
0 | Create_ds/gobblin/gobblin-yarn/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-yarn/src/test/java/org/apache/gobblin/yarn/GobblinApplicationMasterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.yarn;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;
import org.apache.helix.HelixAdmin;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.HelixManager;
import org.apache.helix.HelixProperty;
import org.apache.helix.PropertyKey;
import org.mockito.Mockito;
import org.testng.annotations.Test;
import junit.framework.TestCase;
import org.apache.gobblin.cluster.GobblinHelixMultiManager;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.when;
public class GobblinApplicationMasterTest extends TestCase {
@Test
public void testDisableTaskRunnersFromPreviousExecutions() {
GobblinHelixMultiManager mockMultiManager = Mockito.mock(GobblinHelixMultiManager.class);
HelixManager mockHelixManager = Mockito.mock(HelixManager.class);
when(mockMultiManager.getJobClusterHelixManager()).thenReturn(mockHelixManager);
HelixAdmin mockHelixAdmin = Mockito.mock(HelixAdmin.class);
when(mockHelixManager.getClusterManagmentTool()).thenReturn(mockHelixAdmin);
when(mockHelixManager.getClusterName()).thenReturn("mockCluster");
HelixDataAccessor mockAccessor = Mockito.mock(HelixDataAccessor.class);
when(mockHelixManager.getHelixDataAccessor()).thenReturn(mockAccessor);
PropertyKey.Builder mockBuilder = Mockito.mock(PropertyKey.Builder.class);
when(mockAccessor.keyBuilder()).thenReturn(mockBuilder);
PropertyKey mockLiveInstancesKey = Mockito.mock(PropertyKey.class);
when(mockBuilder.liveInstances()).thenReturn(mockLiveInstancesKey);
int instanceCount = 3;
// GobblinYarnTaskRunner prefix would be disabled, while GobblinClusterManager prefix will not
ArrayList<String> gobblinYarnTaskRunnerPrefix = new ArrayList<String>();
ArrayList<String> gobblinClusterManagerPrefix = new ArrayList<String>();
for (int i = 0; i < instanceCount; i++) {
gobblinYarnTaskRunnerPrefix.add("GobblinYarnTaskRunner_TestInstance_" + i);
gobblinClusterManagerPrefix.add("GobblinClusterManager_TestInstance_" + i);
}
Map<String, HelixProperty> mockChildValues = new HashMap<>();
for (int i = 0; i < instanceCount; i++) {
mockChildValues.put(gobblinYarnTaskRunnerPrefix.get(i), Mockito.mock(HelixProperty.class));
mockChildValues.put(gobblinClusterManagerPrefix.get(i), Mockito.mock(HelixProperty.class));
}
when(mockAccessor.getChildValuesMap(mockLiveInstancesKey)).thenReturn(mockChildValues);
GobblinApplicationMaster.disableTaskRunnersFromPreviousExecutions(mockMultiManager);
for (int i = 0; i < instanceCount; i++) {
Mockito.verify(mockHelixAdmin).enableInstance("mockCluster", gobblinYarnTaskRunnerPrefix.get(i), false);
Mockito.verify(mockHelixAdmin, times(0)).enableInstance("mockCluster", gobblinClusterManagerPrefix.get(i), false);
}
}
} | 1,895 |
0 | Create_ds/gobblin/gobblin-yarn/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-yarn/src/test/java/org/apache/gobblin/yarn/YarnServiceTestWithExpiration.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.yarn;
import java.io.IOException;
import java.lang.reflect.Field;
import java.net.URL;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.Map;
import java.util.concurrent.TimeoutException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.server.MiniYARNCluster;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.google.common.base.Predicate;
import com.google.common.eventbus.EventBus;
import com.google.common.io.Closer;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.testing.AssertWithBackoff;
/**
* Tests for {@link YarnService}.
*/
@Test(groups = {"gobblin.yarn", "disabledOnCI"})
public class YarnServiceTestWithExpiration {
final Logger LOG = LoggerFactory.getLogger(YarnServiceIT.class);
private YarnClient yarnClient;
private MiniYARNCluster yarnCluster;
private TestExpiredYarnService expiredYarnService;
private Config config;
private YarnConfiguration clusterConf;
private ApplicationId applicationId;
private ApplicationAttemptId applicationAttemptId;
private final EventBus eventBus = new EventBus("YarnServiceIT");
private final Closer closer = Closer.create();
private static void setEnv(String key, String value) {
try {
Map<String, String> env = System.getenv();
Class<?> cl = env.getClass();
Field field = cl.getDeclaredField("m");
field.setAccessible(true);
Map<String, String> writableEnv = (Map<String, String>) field.get(env);
writableEnv.put(key, value);
} catch (Exception e) {
throw new IllegalStateException("Failed to set environment variable", e);
}
}
@BeforeClass
public void setUp() throws Exception {
// Set java home in environment since it isn't set on some systems
String javaHome = System.getProperty("java.home");
setEnv("JAVA_HOME", javaHome);
this.clusterConf = new YarnConfiguration();
this.clusterConf.set(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS, "100");
this.clusterConf.set(YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS, "10000");
this.clusterConf.set(YarnConfiguration.YARN_CLIENT_APPLICATION_CLIENT_PROTOCOL_POLL_TIMEOUT_MS, "60000");
this.clusterConf.set(YarnConfiguration.RM_CONTAINER_ALLOC_EXPIRY_INTERVAL_MS, "1000");
this.yarnCluster =
this.closer.register(new MiniYARNCluster("YarnServiceTestCluster", 4, 1,
1));
this.yarnCluster.init(this.clusterConf);
this.yarnCluster.start();
// YARN client should not be started before the Resource Manager is up
AssertWithBackoff.create().logger(LOG).timeoutMs(10000)
.assertTrue(new Predicate<Void>() {
@Override public boolean apply(Void input) {
return !clusterConf.get(YarnConfiguration.RM_ADDRESS).contains(":0");
}
}, "Waiting for RM");
this.yarnClient = this.closer.register(YarnClient.createYarnClient());
this.yarnClient.init(this.clusterConf);
this.yarnClient.start();
URL url = YarnServiceIT.class.getClassLoader()
.getResource(YarnServiceIT.class.getSimpleName() + ".conf");
Assert.assertNotNull(url, "Could not find resource " + url);
this.config = ConfigFactory.parseURL(url).resolve();
// Start a dummy application manager so that the YarnService can use the AM-RM token.
startApp();
// create and start the test yarn service
this.expiredYarnService = new TestExpiredYarnService(this.config, "testApp", "appId",
this.clusterConf,
FileSystem.getLocal(new Configuration()), this.eventBus);
this.expiredYarnService.startUp();
}
private void startApp() throws Exception {
// submit a dummy app
ApplicationSubmissionContext appSubmissionContext =
yarnClient.createApplication().getApplicationSubmissionContext();
this.applicationId = appSubmissionContext.getApplicationId();
ContainerLaunchContext containerLaunchContext =
BuilderUtils.newContainerLaunchContext(Collections.emptyMap(), Collections.emptyMap(),
Arrays.asList("sleep", "100"), Collections.emptyMap(), null, Collections.emptyMap());
// Setup the application submission context
appSubmissionContext.setApplicationName("TestApp");
appSubmissionContext.setResource(Resource.newInstance(128, 1));
appSubmissionContext.setPriority(Priority.newInstance(0));
appSubmissionContext.setAMContainerSpec(containerLaunchContext);
this.yarnClient.submitApplication(appSubmissionContext);
// wait for application to be accepted
int i;
RMAppAttempt attempt = null;
for (i = 0; i < 120; i++) {
ApplicationReport appReport = yarnClient.getApplicationReport(applicationId);
if (appReport.getYarnApplicationState() == YarnApplicationState.ACCEPTED) {
this.applicationAttemptId = appReport.getCurrentApplicationAttemptId();
attempt = yarnCluster.getResourceManager().getRMContext().getRMApps()
.get(appReport.getCurrentApplicationAttemptId().getApplicationId()).getCurrentAppAttempt();
break;
}
Thread.sleep(1000);
}
Assert.assertTrue(i < 120, "timed out waiting for ACCEPTED state");
// Set the AM-RM token in the UGI for access during testing
UserGroupInformation.setLoginUser(UserGroupInformation.createRemoteUser(UserGroupInformation.getCurrentUser()
.getUserName()));
UserGroupInformation.getCurrentUser().addToken(attempt.getAMRMToken());
}
@AfterClass
public void tearDown() throws IOException, TimeoutException, YarnException {
try {
this.yarnClient.killApplication(this.applicationAttemptId.getApplicationId());
this.expiredYarnService.shutDown();
Assert.assertEquals(this.expiredYarnService.getContainerMap().size(), 0);
} finally {
this.closer.close();
}
}
/**
* Test that the yarn service can handle onStartContainerError right
*/
@Test(groups = {"gobblin.yarn", "disabledOnCI"})
public void testStartError() throws Exception{
Resource resource = Resource.newInstance(16, 1);
this.expiredYarnService.requestTargetNumberOfContainers(
GobblinYarnTestUtils.createYarnContainerRequest(10, resource), Collections.EMPTY_SET);
Assert.assertFalse(this.expiredYarnService.getMatchingRequestsList(resource).isEmpty());
AssertWithBackoff.create().logger(LOG).timeoutMs(60000).maxSleepMs(2000).backoffFactor(1.5)
.assertTrue(new Predicate<Void>() {
@Override
public boolean apply(Void input) {
//Since it may retry to request the container and start again, so the number may lager than 10
return expiredYarnService.completedContainers.size() >= 10
&& expiredYarnService.startErrorContainers.size() >= 10;
}
}, "Waiting for container completed");
}
/**
 * Test yarn service that records container start errors and completions so the
 * enclosing test can assert on them.
 */
private static class TestExpiredYarnService extends YarnServiceIT.TestYarnService {
  // ContainerIds whose start failed (populated by TestNMClientCallbackHandler).
  public HashSet<ContainerId> startErrorContainers = new HashSet<>();
  // Statuses of all containers reported complete by the RM.
  public HashSet<ContainerStatus> completedContainers = new HashSet<>();

  public TestExpiredYarnService(Config config, String applicationName, String applicationId, YarnConfiguration yarnConfiguration,
      FileSystem fs, EventBus eventBus) throws Exception {
    super(config, applicationName, applicationId, yarnConfiguration, fs, eventBus);
  }

  /** Installs the test NM callback handler that records start errors. */
  @Override
  protected NMClientCallbackHandler getNMClientCallbackHandler() {
    return new TestNMClientCallbackHandler();
  }

  /** Records the completed container status on top of the default handling. */
  @Override
  protected void handleContainerCompletion(ContainerStatus containerStatus){
    super.handleContainerCompletion(containerStatus);
    completedContainers.add(containerStatus);
  }

  // Delays each container launch by one second — presumably to widen the
  // timing window the test exercises (TODO confirm intent) — then launches a
  // long-running "sleep" so the container stays alive.
  protected ContainerLaunchContext newContainerLaunchContext(ContainerInfo containerInfo)
      throws IOException {
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      // Restore the interrupt flag; the launch proceeds regardless.
      Thread.currentThread().interrupt();
    }
    return BuilderUtils.newContainerLaunchContext(Collections.emptyMap(), Collections.emptyMap(),
        Arrays.asList("sleep", "600"), Collections.emptyMap(), null, Collections.emptyMap());
  }

  /** NM callback handler that records which containers failed to start. */
  private class TestNMClientCallbackHandler extends YarnService.NMClientCallbackHandler {
    @Override
    public void onStartContainerError(ContainerId containerId, Throwable t) {
      startErrorContainers.add(containerId);
    }
  }
}
} | 1,896 |
0 | Create_ds/gobblin/gobblin-yarn/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-yarn/src/test/java/org/apache/gobblin/yarn/YarnServiceTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.yarn;
import java.io.IOException;
import java.net.URL;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.Collections;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.client.api.async.AMRMClientAsync;
import org.apache.hadoop.yarn.client.api.async.impl.AMRMClientAsyncImpl;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;
import org.apache.helix.HelixAdmin;
import org.apache.helix.HelixManager;
import org.mockito.MockedStatic;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;

import com.google.common.eventbus.EventBus;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;

import org.apache.gobblin.cluster.GobblinClusterConfigurationKeys;

import static org.apache.gobblin.cluster.GobblinClusterConfigurationKeys.HELIX_INSTANCE_NAME_OPTION_NAME;
import static org.apache.gobblin.cluster.GobblinClusterConfigurationKeys.HELIX_INSTANCE_TAGS_OPTION_NAME;
import static org.mockito.Mockito.*;
/**
* Tests for {@link YarnService}.
*/
/**
 * Tests for {@link YarnService}.
 *
 * <p>The AM-RM client factory is replaced with Mockito static mocks so no real
 * YARN cluster is needed; {@link TestYarnService} additionally stubs out Helix
 * and the security tokens.</p>
 */
public class YarnServiceTest {
  final Logger LOG = LoggerFactory.getLogger(YarnServiceTest.class);

  private Config config;
  private YarnConfiguration clusterConf = new YarnConfiguration();
  private final EventBus eventBus = new EventBus("YarnServiceTest");

  AMRMClientAsync mockAMRMClient;
  RegisterApplicationMasterResponse mockRegisterApplicationMasterResponse;
  Resource mockResource;
  FileSystem mockFs;

  // Held as fields so tearDown() can deregister them: an unclosed MockedStatic
  // remains active for the rest of the JVM and can break unrelated tests.
  private MockedStatic<AMRMClientAsync> amrmClientAsyncMockStatic;
  private MockedStatic<AMRMClientAsyncImpl> amrmClientAsyncImplMockStatic;

  @BeforeClass
  public void setUp() throws Exception {
    mockAMRMClient = mock(AMRMClientAsync.class);
    mockRegisterApplicationMasterResponse = mock(RegisterApplicationMasterResponse.class);
    mockResource = mock(Resource.class);
    mockFs = mock(FileSystem.class);

    // Build the message from the resource name, not the URL: if the resource
    // is missing, url is null and the old message printed "null".
    String configResourceName = YarnServiceTest.class.getSimpleName() + ".conf";
    URL url = YarnServiceTest.class.getClassLoader().getResource(configResourceName);
    Assert.assertNotNull(url, "Could not find resource " + configResourceName);
    this.config = ConfigFactory.parseURL(url).resolve();

    // Redirect the static AM-RM client factory calls to the mock client.
    this.amrmClientAsyncMockStatic = mockStatic(AMRMClientAsync.class);
    this.amrmClientAsyncImplMockStatic = mockStatic(AMRMClientAsyncImpl.class);
    this.amrmClientAsyncMockStatic.when(() -> AMRMClientAsync.createAMRMClientAsync(anyInt(), any(AMRMClientAsync.CallbackHandler.class)))
        .thenReturn(mockAMRMClient);
    doNothing().when(mockAMRMClient).init(any(YarnConfiguration.class));
    when(mockAMRMClient.registerApplicationMaster(anyString(), anyInt(), anyString()))
        .thenReturn(mockRegisterApplicationMasterResponse);
    when(mockRegisterApplicationMasterResponse.getMaximumResourceCapability())
        .thenReturn(mockResource);
  }

  @AfterClass
  public void tearDown() {
    // Deregister the static mocks so they cannot leak into other test classes
    // running in the same JVM.
    if (this.amrmClientAsyncMockStatic != null) {
      this.amrmClientAsyncMockStatic.close();
    }
    if (this.amrmClientAsyncImplMockStatic != null) {
      this.amrmClientAsyncImplMockStatic.close();
    }
  }

  /**
   * Testing the race condition between the yarn start up and creating yarn container request
   * Block on creating new yarn containers until start up of the yarn service and purging is complete
   */
  @Test(groups = {"gobblin.yarn"})
  public void testYarnStartUpFirst() throws Exception{
    // Create the test yarn service, but don't start yet
    YarnService yarnService = new TestYarnService(this.config, "testApp", "appId",
        this.clusterConf, mockFs, this.eventBus);

    // Not allowed to request target number of containers since yarnService hasn't started up yet.
    Assert.assertFalse(yarnService.requestTargetNumberOfContainers(new YarnContainerRequestBundle(), Collections.emptySet()));

    // Start the yarn service
    yarnService.startUp();

    // Allowed to request target number of containers after yarnService is started up.
    Assert.assertTrue(yarnService.requestTargetNumberOfContainers(new YarnContainerRequestBundle(), Collections.emptySet()));
  }

  /**
   * Verifies the generated container startup command: the JVM heap is sized
   * from the container memory, the Xmx ratio and the overhead, and the command
   * carries the Helix instance/tag options plus stdout/stderr redirection.
   */
  @Test(groups = {"gobblin.yarn"})
  public void testYarnContainerStartupCommand() throws Exception{
    final int resourceMemoryMB = 2048;
    final int jvmMemoryOverheadMB = 10;
    final double jvmXmxRatio = 0.8;
    // Heap size = containerMemory * xmxRatio - overhead.
    final int expectedJavaHeapSizeMB = (int)(resourceMemoryMB * jvmXmxRatio) - jvmMemoryOverheadMB;

    final String helixInstance = "helixInstance1";
    final String helixTag = "helixTag";
    Config modifiedConfig = this.config
        .withValue(GobblinYarnConfigurationKeys.CONTAINER_JVM_MEMORY_OVERHEAD_MBS_KEY, ConfigValueFactory.fromAnyRef("10"))
        .withValue(GobblinYarnConfigurationKeys.CONTAINER_JVM_MEMORY_XMX_RATIO_KEY, ConfigValueFactory.fromAnyRef("0.8"));
    TestYarnService yarnService = new TestYarnService(modifiedConfig, "testApp2", "appId2",
        this.clusterConf, FileSystem.getLocal(new Configuration()), this.eventBus);

    ContainerId containerId = ContainerId.newInstance(ApplicationAttemptId.newInstance(ApplicationId.newInstance(1, 0),
        0), 0);
    Resource resource = Resource.newInstance(resourceMemoryMB, 1);
    Container container = Container.newInstance(containerId, null, null, resource, null, null);
    YarnService.ContainerInfo containerInfo =
        yarnService.new ContainerInfo(container, helixInstance, helixTag);

    String command = containerInfo.getStartupCommand();
    LOG.info(command);
    Assert.assertTrue(command.contains("-Xmx" + expectedJavaHeapSizeMB +"M"));
    Assert.assertTrue(command.contains(String.format("--%s %s", HELIX_INSTANCE_NAME_OPTION_NAME, helixInstance)));
    Assert.assertTrue(command.contains(String.format("--%s %s", HELIX_INSTANCE_TAGS_OPTION_NAME, helixTag)));
    Assert.assertTrue(command.endsWith("1><LOG_DIR>/GobblinYarnTaskRunner.stdout 2><LOG_DIR>/GobblinYarnTaskRunner.stderr"));
  }

  /**
   * A {@link YarnService} whose Helix dependencies are mocked and whose
   * containers just run a long "sleep", so no real cluster interaction occurs.
   */
  static class TestYarnService extends YarnService {
    public TestYarnService(Config config, String applicationName, String applicationId, YarnConfiguration yarnConfiguration,
        FileSystem fs, EventBus eventBus) throws Exception {
      super(config, applicationName, applicationId, yarnConfiguration, fs, eventBus, getMockHelixManager(config), getMockHelixAdmin());
    }

    private static HelixManager getMockHelixManager(Config config) {
      HelixManager helixManager = mock(HelixManager.class);
      // Only the cluster name and metadata-store string are read by YarnService.
      when(helixManager.getClusterName()).thenReturn(config.getString(GobblinClusterConfigurationKeys.HELIX_CLUSTER_NAME_KEY));
      when(helixManager.getMetadataStoreConnectionString()).thenReturn("stub");
      return helixManager;
    }

    private static HelixAdmin getMockHelixAdmin() { return mock(HelixAdmin.class); }

    // Launch a harmless long-running "sleep" instead of a real task runner.
    protected ContainerLaunchContext newContainerLaunchContext(ContainerInfo containerInfo)
        throws IOException {
      return BuilderUtils.newContainerLaunchContext(Collections.emptyMap(), Collections.emptyMap(),
          Arrays.asList("sleep", "60000"), Collections.emptyMap(), null, Collections.emptyMap());
    }

    @Override
    protected ByteBuffer getSecurityTokens() throws IOException { return mock(ByteBuffer.class); }
  }
}
| 1,897 |
0 | Create_ds/gobblin/gobblin-yarn/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-yarn/src/test/java/org/apache/gobblin/yarn/YarnSecurityManagerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.yarn;
import java.io.IOException;
import java.net.URL;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.retry.RetryOneTime;
import org.apache.curator.test.TestingServer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.helix.HelixManager;
import org.apache.helix.HelixManagerFactory;
import org.apache.helix.InstanceType;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.google.common.base.Function;
import com.google.common.eventbus.EventBus;
import com.google.common.io.Closer;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;
import org.apache.gobblin.cluster.GobblinClusterConfigurationKeys;
import org.apache.gobblin.cluster.HelixUtils;
import org.apache.gobblin.cluster.TestHelper;
import org.apache.gobblin.testing.AssertWithBackoff;
import static org.mockito.Mockito.any;
/**
* Unit tests for {@link YarnAppSecurityManagerWithKeytabs} and {@link YarnContainerSecurityManager}.
*
* <p>
* This class tests {@link YarnAppSecurityManagerWithKeytabs} and {@link YarnContainerSecurityManager} together
* as it is more convenient to test both here where all dependencies are setup between the two.
* </p>
*
* <p>
* This class uses a {@link TestingServer} as an embedded ZooKeeper server for testing. The Curator
* framework is used to provide a ZooKeeper client. This class also uses a {@link HelixManager} as
* being required by {@link YarnAppSecurityManagerWithKeytabs}. The local file system as returned by
* {@link FileSystem#getLocal(Configuration)} is used for writing the testing delegation token, which
* is acquired by mocking the method {@link FileSystem#getDelegationToken(String)} on the local
* {@link FileSystem} instance.
* </p>
* @author Yinan Li
*/
@Test(groups = { "gobblin.yarn" })
public class YarnSecurityManagerTest {
  final Logger LOG = LoggerFactory.getLogger(YarnSecurityManagerTest.class);

  // Name of the Helix PARTICIPANT instance that receives token-refresh messages.
  private static final String HELIX_TEST_INSTANCE_PARTICIPANT = HelixUtils.getHelixInstanceName("TestInstance", 1);

  private CuratorFramework curatorFramework;
  private HelixManager helixManager;            // SPECTATOR used by the app security manager
  private HelixManager helixManagerParticipant; // PARTICIPANT addressed by the messages

  private Configuration configuration;
  private FileSystem localFs;    // Mockito spy on the local FS
  private Path baseDir;          // scratch dir holding the token file
  private Path tokenFilePath;
  private Token<?> fsToken;      // the fake HDFS delegation token under test
  private List<Token<?>> allTokens;

  private YarnAppSecurityManagerWithKeytabs _yarnAppYarnAppSecurityManagerWithKeytabs;
  private YarnContainerSecurityManager yarnContainerSecurityManager;

  private final Closer closer = Closer.create();

  @BeforeClass
  public void setUp() throws Exception {
    // Use a random ZK port
    TestingServer testingZKServer = this.closer.register(new TestingServer(-1));
    LOG.info("Testing ZK Server listening on: " + testingZKServer.getConnectString());

    this.curatorFramework = this.closer.register(
        CuratorFrameworkFactory.newClient(testingZKServer.getConnectString(), new RetryOneTime(2000)));
    this.curatorFramework.start();

    URL url = YarnSecurityManagerTest.class.getClassLoader().getResource(
        YarnSecurityManagerTest.class.getSimpleName() + ".conf");
    // NOTE(review): if the resource is missing, url is null and this message
    // prints "null" rather than the resource name.
    Assert.assertNotNull(url, "Could not find resource " + url);

    // Point the cluster config at the embedded ZK server started above.
    Config config = ConfigFactory.parseURL(url)
        .withValue("gobblin.cluster.zk.connection.string",
            ConfigValueFactory.fromAnyRef(testingZKServer.getConnectString()))
        .resolve();

    String zkConnectingString = config.getString(GobblinClusterConfigurationKeys.ZK_CONNECTION_STRING_KEY);
    String helixClusterName = config.getString(GobblinClusterConfigurationKeys.HELIX_CLUSTER_NAME_KEY);

    HelixUtils.createGobblinHelixCluster(zkConnectingString, helixClusterName);

    // SPECTATOR manager through which the app security manager sends messages.
    this.helixManager = HelixManagerFactory.getZKHelixManager(
        helixClusterName, TestHelper.TEST_HELIX_INSTANCE_NAME, InstanceType.SPECTATOR, zkConnectingString);
    this.helixManager.connect();

    // PARTICIPANT manager that can be targeted by token-file-updated messages.
    this.helixManagerParticipant = HelixManagerFactory.getZKHelixManager(
        helixClusterName, HELIX_TEST_INSTANCE_PARTICIPANT, InstanceType.PARTICIPANT, zkConnectingString);
    this.helixManagerParticipant.connect();

    this.configuration = new Configuration();
    // Spy on the local FS so delegation-token acquisition can be stubbed below.
    this.localFs = Mockito.spy(FileSystem.getLocal(this.configuration));

    this.fsToken = new Token<>();
    this.fsToken.setKind(new Text("HDFS_DELEGATION_TOKEN"));
    this.fsToken.setService(new Text("HDFS"));
    this.allTokens = new ArrayList<>();
    allTokens.add(fsToken);

    // NOTE(review): only index 0 is populated; index 1 stays null. Confirm the
    // consumers of addDelegationTokens() tolerate a null array element.
    Token<?>[] allTokenArray = new Token<?>[2];
    allTokenArray[0]= fsToken;
    Mockito.<Token<?>[]>when(localFs.addDelegationTokens(any(String.class), any(Credentials.class)))
        .thenReturn(allTokenArray);

    this.baseDir = new Path(YarnSecurityManagerTest.class.getSimpleName());
    this.tokenFilePath = new Path(this.baseDir, GobblinYarnConfigurationKeys.TOKEN_FILE_NAME);
    this._yarnAppYarnAppSecurityManagerWithKeytabs = Mockito.spy(new YarnAppSecurityManagerWithKeytabs(config, this.helixManager, this.localFs, this.tokenFilePath));
    this.yarnContainerSecurityManager = new YarnContainerSecurityManager(config, this.localFs, new EventBus());

    // Stub token renewal: instead of contacting a real NameNode, inject the
    // pre-built fsToken into the security manager's credentials.
    Mockito.doAnswer(new Answer<Void>() {
      public Void answer(InvocationOnMock invocation) throws Throwable {
        _yarnAppYarnAppSecurityManagerWithKeytabs.credentials.addToken(new Text("HDFS_DELEGATION_TOKEN"), fsToken);
        return null;
      }
    }).when(_yarnAppYarnAppSecurityManagerWithKeytabs).getNewDelegationTokenForLoginUser();
  }

  // Exercises the (stubbed) delegation-token renewal path.
  @Test
  public void testGetNewDelegationTokenForLoginUser() throws IOException, InterruptedException {
    this._yarnAppYarnAppSecurityManagerWithKeytabs.getNewDelegationTokenForLoginUser();
  }

  // The renewed token must round-trip through the token file on the local FS.
  @Test(dependsOnMethods = "testGetNewDelegationTokenForLoginUser")
  public void testWriteDelegationTokenToFile() throws IOException {
    this._yarnAppYarnAppSecurityManagerWithKeytabs.writeDelegationTokenToFile();
    Assert.assertTrue(this.localFs.exists(this.tokenFilePath));
    assertToken(YarnHelixUtils.readTokensFromFile(this.tokenFilePath, this.configuration));
  }

  /** Counts the Helix messages queued in ZK for a controller or participant instance. */
  static class GetHelixMessageNumFunc implements Function<Void, Integer> {
    private final CuratorFramework curatorFramework;
    private final String testName;
    private final String instanceName;
    private final InstanceType instanceType;
    private final String path;

    public GetHelixMessageNumFunc(String testName, InstanceType instanceType, String instanceName, CuratorFramework curatorFramework) {
      this.curatorFramework = curatorFramework;
      this.testName = testName;
      this.instanceType = instanceType;
      this.instanceName = instanceName;
      // The ZK message-queue path layout differs per instance type.
      switch (instanceType) {
        case CONTROLLER:
          this.path = String.format("/%s/CONTROLLER/MESSAGES", this.testName);
          break;
        case PARTICIPANT:
          this.path = String.format("/%s/INSTANCES/%s/MESSAGES", this.testName, this.instanceName);
          break;
        default:
          throw new RuntimeException("Invalid instance type " + instanceType.name());
      }
    }

    @Override
    public Integer apply(Void input) {
      try {
        return this.curatorFramework.getChildren().forPath(this.path).size();
      } catch (Exception e) {
        throw new RuntimeException(e);
      }
    }
  }

  // A token-file-updated message must be queued in ZK for both the controller
  // and the participant instance.
  @Test
  public void testSendTokenFileUpdatedMessage() throws Exception {
    Logger log = LoggerFactory.getLogger("testSendTokenFileUpdatedMessage");
    this._yarnAppYarnAppSecurityManagerWithKeytabs.sendTokenFileUpdatedMessage(InstanceType.CONTROLLER);
    Assert.assertEquals(this.curatorFramework.checkExists().forPath(
        String.format("/%s/CONTROLLER/MESSAGES", YarnSecurityManagerTest.class.getSimpleName())).getVersion(), 0);
    // Message delivery through ZK is asynchronous, so poll with backoff.
    AssertWithBackoff.create().logger(log).timeoutMs(20000)
        .assertEquals(new GetHelixMessageNumFunc(YarnSecurityManagerTest.class.getSimpleName(), InstanceType.CONTROLLER, "",
            this.curatorFramework), 1, "1 controller message queued");

    this._yarnAppYarnAppSecurityManagerWithKeytabs.sendTokenFileUpdatedMessage(InstanceType.PARTICIPANT, HELIX_TEST_INSTANCE_PARTICIPANT);
    Assert.assertEquals(this.curatorFramework.checkExists().forPath(
        String.format("/%s/INSTANCES/%s/MESSAGES", YarnSecurityManagerTest.class.getSimpleName(), HELIX_TEST_INSTANCE_PARTICIPANT)).getVersion(), 0);
    AssertWithBackoff.create().logger(log).timeoutMs(20000)
        .assertEquals(new GetHelixMessageNumFunc(YarnSecurityManagerTest.class.getSimpleName(), InstanceType.PARTICIPANT, HELIX_TEST_INSTANCE_PARTICIPANT,
            this.curatorFramework), 1, "1 controller message queued");
  }

  // The container-side manager must be able to read the token file written in
  // testWriteDelegationTokenToFile and install the credentials for the user.
  @Test(dependsOnMethods = "testWriteDelegationTokenToFile")
  public void testYarnContainerSecurityManager() throws IOException {
    Credentials credentials = this.yarnContainerSecurityManager.readCredentials(this.tokenFilePath);
    assertToken(credentials.getAllTokens());
    this.yarnContainerSecurityManager.addCredentials(credentials);
    assertToken(UserGroupInformation.getCurrentUser().getTokens());
  }

  @AfterClass
  public void tearDown() throws IOException {
    try {
      if (this.helixManager.isConnected()) {
        this.helixManager.disconnect();
      }
      if (this.helixManagerParticipant.isConnected()) {
        this.helixManagerParticipant.disconnect();
      }
      // Remove the scratch directory holding the token file.
      this.localFs.delete(this.baseDir, true);
    } catch (Throwable t) {
      throw this.closer.rethrow(t);
    } finally {
      this.closer.close();
    }
  }

  /** Asserts every given token is one of the tokens this test created. */
  private void assertToken(Collection<Token<?>> tokens) {
    tokens.forEach( token -> org.junit.Assert.assertTrue(allTokens.contains(token)));
  }
}
| 1,898 |
0 | Create_ds/gobblin/gobblin-yarn/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-yarn/src/test/java/org/apache/gobblin/yarn/YarnHelixUtilsTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.yarn;
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.junit.Assert;
import org.testng.annotations.Test;
public class YarnHelixUtilsTest {
/**
* Uses the token file created using {@link GobblinYarnTestUtils#createTokenFileForService(Path, String)} method and
* added to the resources folder.
* @throws IOException
*/
@Test
public void testUpdateToken()
throws IOException {
//Ensure the credentials is empty on start
Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
Assert.assertNull(credentials.getToken(new Text("testService")));
//Attempt reading a non-existent token file and ensure credentials object has no tokens
YarnHelixUtils.updateToken(".token1");
credentials = UserGroupInformation.getCurrentUser().getCredentials();
Assert.assertNull(credentials.getToken(new Text("testService")));
//Read a valid token file and ensure the credentials object has a valid token
YarnHelixUtils.updateToken(GobblinYarnConfigurationKeys.TOKEN_FILE_NAME);
credentials = UserGroupInformation.getCurrentUser().getCredentials();
Token<?> readToken = credentials.getToken(new Text("testService"));
Assert.assertNotNull(readToken);
}
} | 1,899 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.