index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/source/HiveSource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.source;
import com.google.common.util.concurrent.UncheckedExecutionException;
import java.io.IOException;
import java.net.URI;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.TimeUnit;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.serde2.avro.AvroSerDe;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.joda.time.DateTime;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Splitter;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.gson.Gson;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.data.management.conversion.hive.avro.AvroSchemaManager;
import org.apache.gobblin.data.management.conversion.hive.avro.SchemaNotFoundException;
import org.apache.gobblin.data.management.conversion.hive.events.EventConstants;
import org.apache.gobblin.data.management.conversion.hive.events.EventWorkunitUtils;
import org.apache.gobblin.data.management.conversion.hive.provider.HiveUnitUpdateProvider;
import org.apache.gobblin.data.management.conversion.hive.provider.UpdateNotFoundException;
import org.apache.gobblin.data.management.conversion.hive.provider.UpdateProviderFactory;
import org.apache.gobblin.data.management.conversion.hive.watermarker.HiveSourceWatermarker;
import org.apache.gobblin.data.management.conversion.hive.watermarker.HiveSourceWatermarkerFactory;
import org.apache.gobblin.data.management.conversion.hive.watermarker.PartitionLevelWatermarker;
import org.apache.gobblin.data.management.copy.hive.HiveDataset;
import org.apache.gobblin.data.management.copy.hive.HiveDatasetFinder;
import org.apache.gobblin.data.management.copy.hive.HiveUtils;
import org.apache.gobblin.data.management.copy.hive.PartitionFilterGenerator;
import org.apache.gobblin.data.management.copy.hive.filter.LookbackPartitionFilterGenerator;
import org.apache.gobblin.dataset.IterableDatasetFinder;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.event.EventSubmitter;
import org.apache.gobblin.source.Source;
import org.apache.gobblin.source.extractor.Extractor;
import org.apache.gobblin.source.extractor.WatermarkInterval;
import org.apache.gobblin.source.extractor.extract.LongWatermark;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.util.AutoReturnableObject;
import org.apache.gobblin.util.HadoopUtils;
import org.apache.gobblin.util.io.GsonInterfaceAdapter;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
import org.apache.gobblin.util.ClassAliasResolver;
import org.apache.gobblin.data.management.conversion.hive.extractor.HiveBaseExtractorFactory;
import org.apache.gobblin.data.management.conversion.hive.extractor.HiveConvertExtractorFactory;
/**
* <p>
* A {@link Source} that creates generic workunits for a hive table or a hive partition.
* </p>
* <ul>
* <li>It uses the {@link HiveDatasetFinder} to find all hive tables and partitions
* <li>The update time of a hive {@link Table} or a hive {@link Partition} is found using {@link HiveUnitUpdateProvider}
* <li>The update time from the previous run is used as previous hive watermark.{@link HiveSourceWatermarker} is
* used to get previous hive watermarks
* </ul>
*
* {@link WorkUnit}s are created if the previous high watermark of a {@link Partition}
* or a {@link Table} is lower than the latest update time.
*
* <p>
* The {@link WorkUnit}s contain a serialized json of the {@link SerializableHiveTable} or {@link SerializableHivePartition}
* This is later deserialized by the extractor.
* </p>
*/
@Slf4j
@SuppressWarnings("rawtypes")
@Alpha
public class HiveSource implements Source {

  /** Config key: when {@code true}, skip the Avro serde check before attaching schema URLs to workunits. */
  public static final String DISABLE_AVRO_CHECK = "hive.source.disable.avro.check";
  public static final boolean DEFAULT_DISABLE_AVRO_CHECK = false;

  /** @deprecated misspelled; use {@link #DISABLE_AVRO_CHECK} instead (same config key value). */
  @Deprecated
  public static final String DISABLE_AVRO_CHAECK = DISABLE_AVRO_CHECK;
  /** @deprecated misspelled; use {@link #DEFAULT_DISABLE_AVRO_CHECK} instead. */
  @Deprecated
  public static final boolean DEFAULT_DISABLE_AVRO_CHAECK = DEFAULT_DISABLE_AVRO_CHECK;

  public static final String HIVE_SOURCE_MAXIMUM_LOOKBACK_DAYS_KEY = "hive.source.maximum.lookbackDays";
  public static final int DEFAULT_HIVE_SOURCE_MAXIMUM_LOOKBACK_DAYS = 3;

  public static final String HIVE_SOURCE_DATASET_FINDER_CLASS_KEY = "hive.dataset.finder.class";
  public static final String DEFAULT_HIVE_SOURCE_DATASET_FINDER_CLASS = HiveDatasetFinder.class.getName();

  public static final String HIVE_SOURCE_DATASET_FINDER_PARTITION_FILTER_KEY = "hive.dataset.finder.partitionfilter.class";
  public static final String DEFAULT_HIVE_SOURCE_DATASET_FINDER_PARTITION_FILTER_CLASS = LookbackPartitionFilterGenerator.class.getName();

  /** Partition parameter written by distcp-ng at registration time; used as a createTime fallback. */
  public static final String DISTCP_REGISTRATION_GENERATION_TIME_KEY = "registrationGenerationTimeMillis";

  public static final String HIVE_SOURCE_WATERMARKER_FACTORY_CLASS_KEY = "hive.source.watermarker.factoryClass";
  public static final String DEFAULT_HIVE_SOURCE_WATERMARKER_FACTORY_CLASS = PartitionLevelWatermarker.Factory.class.getName();

  public static final String HIVE_SOURCE_EXTRACTOR_TYPE = "hive.source.extractorType";
  public static final String DEFAULT_HIVE_SOURCE_EXTRACTOR_TYPE = HiveConvertExtractorFactory.class.getName();

  public static final String HIVE_SOURCE_CREATE_WORKUNITS_FOR_PARTITIONS = "hive.source.createWorkunitsForPartitions";
  public static final boolean DEFAULT_HIVE_SOURCE_CREATE_WORKUNITS_FOR_PARTITIONS = true;

  public static final String HIVE_SOURCE_FS_URI = "hive.source.fs.uri";

  /***
   * Comma separated list of keywords to look for in the path of a table (in the non-partitioned case) /
   * partition (in the partitioned case); if a keyword is found then the table / partition is ignored
   * for processing.
   *
   * This is useful in scenarios like:
   * - when the user wants to ignore hourly partitions and only process daily partitions and the only way
   *   to identify that is by the path where they store their data. eg: /foo/bar/2016/12/01/hourly/00
   * - when the user wants to ignore partitions pointing to a /tmp location (for debug reasons)
   */
  public static final String HIVE_SOURCE_IGNORE_DATA_PATH_IDENTIFIER_KEY = "hive.source.ignoreDataPathIdentifier";
  public static final String DEFAULT_HIVE_SOURCE_IGNORE_DATA_PATH_IDENTIFIER = StringUtils.EMPTY;

  public static final Gson GENERICS_AWARE_GSON = GsonInterfaceAdapter.getGson(Object.class);
  public static final Splitter COMMA_BASED_SPLITTER = Splitter.on(",").omitEmptyStrings().trimResults();

  protected MetricContext metricContext;
  protected EventSubmitter eventSubmitter;
  protected AvroSchemaManager avroSchemaManager;
  protected HiveUnitUpdateProvider updateProvider;
  protected HiveSourceWatermarker watermarker;
  protected IterableDatasetFinder<HiveDataset> datasetFinder;
  protected List<WorkUnit> workunits;
  protected PartitionFilterGenerator partitionFilterGenerator;
  // Units with an update time before this instant (millis since epoch) are skipped; derived from lookback-days config.
  protected long maxLookBackTime;
  protected long beginGetWorkunitsTime;
  protected List<String> ignoreDataPathIdentifierList;

  protected final ClassAliasResolver<HiveBaseExtractorFactory> classAliasResolver =
      new ClassAliasResolver<>(HiveBaseExtractorFactory.class);

  /**
   * Finds all hive datasets via the configured {@link HiveDatasetFinder} and creates workunits for each
   * table (or each of its partitions) whose update time is newer than the previous run's watermark.
   */
  @Override
  public List<WorkUnit> getWorkunits(SourceState state) {
    try {
      this.beginGetWorkunitsTime = System.currentTimeMillis();

      initialize(state);

      EventSubmitter.submit(Optional.of(this.eventSubmitter), EventConstants.CONVERSION_FIND_HIVE_TABLES_EVENT);
      Iterator<HiveDataset> iterator = this.datasetFinder.getDatasetsIterator();
      boolean disableAvroCheck = state.getPropAsBoolean(DISABLE_AVRO_CHECK, DEFAULT_DISABLE_AVRO_CHECK);

      while (iterator.hasNext()) {
        HiveDataset hiveDataset = iterator.next();
        try (AutoReturnableObject<IMetaStoreClient> client = hiveDataset.getClientPool().getClient()) {
          log.debug(String.format("Processing dataset: %s", hiveDataset));

          // Create workunits for partitions
          if (hiveDataset.getTable().isPartitioned()
              && state.getPropAsBoolean(HIVE_SOURCE_CREATE_WORKUNITS_FOR_PARTITIONS,
              DEFAULT_HIVE_SOURCE_CREATE_WORKUNITS_FOR_PARTITIONS)) {
            createWorkunitsForPartitionedTable(hiveDataset, client, disableAvroCheck);
          } else {
            createWorkunitForNonPartitionedTable(hiveDataset, disableAvroCheck);
          }
        }
      }
    } catch (IOException e) {
      throw new RuntimeException(e);
    }

    // The watermarker may append dummy "watermark workunits" that only carry watermark state.
    int realWorkunits = this.workunits.size();

    this.watermarker.onGetWorkunitsEnd(this.workunits);

    log.info(String.format("Created %s real workunits and %s watermark workunits", realWorkunits,
        (this.workunits.size() - realWorkunits)));

    return this.workunits;
  }

  /**
   * Initializes all collaborators (update provider, metrics, watermarker, dataset finder, partition filter)
   * from job configuration. Called once at the start of {@link #getWorkunits(SourceState)}.
   */
  @VisibleForTesting
  public void initialize(SourceState state) throws IOException {
    this.updateProvider = UpdateProviderFactory.create(state);
    this.metricContext = Instrumented.getMetricContext(state, HiveSource.class);
    this.eventSubmitter = new EventSubmitter.Builder(this.metricContext, EventConstants.CONVERSION_NAMESPACE).build();
    this.avroSchemaManager = new AvroSchemaManager(getSourceFs(state), state);
    this.workunits = Lists.newArrayList();
    this.watermarker =
        GobblinConstructorUtils.invokeConstructor(HiveSourceWatermarkerFactory.class,
            state.getProp(HIVE_SOURCE_WATERMARKER_FACTORY_CLASS_KEY, DEFAULT_HIVE_SOURCE_WATERMARKER_FACTORY_CLASS))
            .createFromState(state);

    EventSubmitter.submit(Optional.of(this.eventSubmitter), EventConstants.CONVERSION_SETUP_EVENT);
    this.datasetFinder = GobblinConstructorUtils.invokeConstructor(HiveDatasetFinder.class,
        state.getProp(HIVE_SOURCE_DATASET_FINDER_CLASS_KEY, DEFAULT_HIVE_SOURCE_DATASET_FINDER_CLASS), getSourceFs(state), state.getProperties(),
        this.eventSubmitter);
    int maxLookBackDays = state.getPropAsInt(HIVE_SOURCE_MAXIMUM_LOOKBACK_DAYS_KEY, DEFAULT_HIVE_SOURCE_MAXIMUM_LOOKBACK_DAYS);
    this.maxLookBackTime = new DateTime().minusDays(maxLookBackDays).getMillis();
    this.ignoreDataPathIdentifierList = COMMA_BASED_SPLITTER.splitToList(state.getProp(HIVE_SOURCE_IGNORE_DATA_PATH_IDENTIFIER_KEY,
        DEFAULT_HIVE_SOURCE_IGNORE_DATA_PATH_IDENTIFIER));
    this.partitionFilterGenerator = GobblinConstructorUtils.invokeConstructor(PartitionFilterGenerator.class,
        state.getProp(HIVE_SOURCE_DATASET_FINDER_PARTITION_FILTER_KEY,
            DEFAULT_HIVE_SOURCE_DATASET_FINDER_PARTITION_FILTER_CLASS), state.getProperties());
    silenceHiveLoggers();
  }

  /** @deprecated use {@link #createWorkunitForNonPartitionedTable(HiveDataset, boolean)}. */
  @Deprecated
  protected void createWorkunitForNonPartitionedTable(HiveDataset hiveDataset) throws IOException {
    this.createWorkunitForNonPartitionedTable(hiveDataset, false);
  }

  /**
   * Creates (at most) one workunit for a non-partitioned table, guarded by the ignore-path check and
   * the watermark comparison. Missing update times or schemas are logged and skipped, not fatal.
   */
  protected void createWorkunitForNonPartitionedTable(HiveDataset hiveDataset, boolean disableAvroCheck) throws IOException {
    // Create workunits for tables
    try {
      long tableProcessTime = new DateTime().getMillis();
      long updateTime = this.updateProvider.getUpdateTime(hiveDataset.getTable());
      this.watermarker.onTableProcessBegin(hiveDataset.getTable(), tableProcessTime);

      LongWatermark lowWatermark = this.watermarker.getPreviousHighWatermark(hiveDataset.getTable());

      if (!shouldCreateWorkUnit(hiveDataset.getTable().getPath())) {
        log.info(String.format(
            "Not creating workunit for table %s as partition path %s contains data path tokens to ignore %s",
            hiveDataset.getTable().getCompleteName(), hiveDataset.getTable().getPath(), this.ignoreDataPathIdentifierList));
        return;
      }

      if (shouldCreateWorkunit(hiveDataset.getTable(), lowWatermark)) {
        log.info(String.format(
            "Creating workunit for table %s as updateTime %s or createTime %s is greater than low watermark %s",
            hiveDataset.getTable().getCompleteName(), updateTime, hiveDataset.getTable().getTTable().getCreateTime(),
            lowWatermark.getValue()));

        HiveWorkUnit hiveWorkUnit = workUnitForTable(hiveDataset, disableAvroCheck);

        LongWatermark expectedDatasetHighWatermark =
            this.watermarker.getExpectedHighWatermark(hiveDataset.getTable(), tableProcessTime);
        hiveWorkUnit.setWatermarkInterval(new WatermarkInterval(lowWatermark, expectedDatasetHighWatermark));

        EventWorkunitUtils.setTableSlaEventMetadata(hiveWorkUnit, hiveDataset.getTable(), updateTime, lowWatermark.getValue(),
            this.beginGetWorkunitsTime);

        this.workunits.add(hiveWorkUnit);
        log.debug(String.format("Workunit added for table: %s", hiveWorkUnit));
      } else {
        log.info(String
            .format(
                "Not creating workunit for table %s as updateTime %s and createTime %s is not greater than low watermark %s",
                hiveDataset.getTable().getCompleteName(), updateTime, hiveDataset.getTable().getTTable()
                    .getCreateTime(), lowWatermark.getValue()));
      }
    } catch (UpdateNotFoundException e) {
      log.error(String.format("Not Creating workunit for %s as update time was not found. %s", hiveDataset.getTable()
          .getCompleteName(), e.getMessage()), e);
    } catch (SchemaNotFoundException e) {
      log.error(String.format("Not Creating workunit for %s as schema was not found. %s", hiveDataset.getTable()
          .getCompleteName(), e.getMessage()), e);
    }
  }

  /** @deprecated use {@link #workUnitForTable(HiveDataset, boolean)}. */
  @Deprecated
  protected HiveWorkUnit workUnitForTable(HiveDataset hiveDataset) throws IOException {
    return this.workUnitForTable(hiveDataset, false);
  }

  /**
   * Builds a table-level {@link HiveWorkUnit}; the table schema URL is attached only for Avro tables
   * (or unconditionally when the Avro check is disabled).
   */
  protected HiveWorkUnit workUnitForTable(HiveDataset hiveDataset, boolean disableAvroCheck) throws IOException {
    HiveWorkUnit hiveWorkUnit = new HiveWorkUnit(hiveDataset);
    if (disableAvroCheck || isAvro(hiveDataset.getTable())) {
      hiveWorkUnit.setTableSchemaUrl(this.avroSchemaManager.getSchemaUrl(hiveDataset.getTable()));
    }
    return hiveWorkUnit;
  }

  /** @deprecated use {@link #createWorkunitsForPartitionedTable(HiveDataset, AutoReturnableObject, boolean)}. */
  @Deprecated
  protected void createWorkunitsForPartitionedTable(HiveDataset hiveDataset, AutoReturnableObject<IMetaStoreClient> client) throws IOException {
    this.createWorkunitsForPartitionedTable(hiveDataset, client, false);
  }

  /**
   * Creates one workunit per partition of the table, skipping partitions that are older than the
   * lookback window, match an ignore-path token, or are not newer than the previous watermark.
   * Per-partition failures (missing update time / schema / cache errors) are logged and skipped.
   */
  protected void createWorkunitsForPartitionedTable(HiveDataset hiveDataset, AutoReturnableObject<IMetaStoreClient> client, boolean disableAvroCheck) throws IOException {

    long tableProcessTime = new DateTime().getMillis();
    this.watermarker.onTableProcessBegin(hiveDataset.getTable(), tableProcessTime);

    Optional<String> partitionFilter = Optional.fromNullable(this.partitionFilterGenerator.getFilter(hiveDataset));

    List<Partition> sourcePartitions = HiveUtils.getPartitions(client.get(), hiveDataset.getTable(), partitionFilter);

    for (Partition sourcePartition : sourcePartitions) {
      if (isOlderThanLookback(sourcePartition)) {
        continue;
      }

      LongWatermark lowWatermark = watermarker.getPreviousHighWatermark(sourcePartition);

      try {
        if (!shouldCreateWorkUnit(new Path(sourcePartition.getLocation()))) {
          log.info(String.format(
              "Not creating workunit for partition %s as partition path %s contains data path tokens to ignore %s",
              sourcePartition.getCompleteName(), sourcePartition.getLocation(), this.ignoreDataPathIdentifierList));
          continue;
        }

        long updateTime = this.updateProvider.getUpdateTime(sourcePartition);
        if (shouldCreateWorkunit(sourcePartition, lowWatermark)) {
          log.debug(String.format("Processing partition: %s", sourcePartition));

          long partitionProcessTime = new DateTime().getMillis();
          this.watermarker.onPartitionProcessBegin(sourcePartition, partitionProcessTime, updateTime);

          LongWatermark expectedPartitionHighWatermark = this.watermarker.getExpectedHighWatermark(sourcePartition,
              tableProcessTime, partitionProcessTime);

          HiveWorkUnit hiveWorkUnit = workUnitForPartition(hiveDataset, sourcePartition, disableAvroCheck);
          hiveWorkUnit.setWatermarkInterval(new WatermarkInterval(lowWatermark, expectedPartitionHighWatermark));

          EventWorkunitUtils.setPartitionSlaEventMetadata(hiveWorkUnit, hiveDataset.getTable(), sourcePartition, updateTime,
              lowWatermark.getValue(), this.beginGetWorkunitsTime);

          workunits.add(hiveWorkUnit);
          log.info(String.format("Creating workunit for partition %s as updateTime %s is greater than low watermark %s",
              sourcePartition.getCompleteName(), updateTime, lowWatermark.getValue()));
        } else {
          // If watermark tracking at a partition level is necessary, create a dummy workunit for this partition here.
          log.info(String.format(
              "Not creating workunit for partition %s as updateTime %s is lesser than low watermark %s",
              sourcePartition.getCompleteName(), updateTime, lowWatermark.getValue()));
        }
      } catch (UpdateNotFoundException e) {
        // Pass the exception so the stack trace is logged (consistent with the non-partitioned path).
        log.error(String.format("Not creating workunit for %s as update time was not found. %s",
            sourcePartition.getCompleteName(), e.getMessage()), e);
      } catch (SchemaNotFoundException e) {
        log.error(String.format("Not creating workunit for %s as schema was not found. %s",
            sourcePartition.getCompleteName(), e.getMessage()), e);
      } catch (UncheckedExecutionException e) {
        log.error(String.format("Not creating workunit for %s because an unchecked exception occurred. %s",
            sourcePartition.getCompleteName(), e.getMessage()), e);
      }
    }
  }

  /** @deprecated use {@link #workUnitForPartition(HiveDataset, Partition, boolean)}. */
  @Deprecated
  protected HiveWorkUnit workUnitForPartition(HiveDataset hiveDataset, Partition partition) throws IOException {
    return this.workUnitForPartition(hiveDataset, partition, false);
  }

  /**
   * Builds a partition-level {@link HiveWorkUnit}; table and partition schema URLs are attached only
   * for Avro tables (or unconditionally when the Avro check is disabled).
   */
  protected HiveWorkUnit workUnitForPartition(HiveDataset hiveDataset, Partition partition, boolean disableAvroCheck) throws IOException {
    HiveWorkUnit hiveWorkUnit = new HiveWorkUnit(hiveDataset, partition);
    if (disableAvroCheck || isAvro(hiveDataset.getTable())) {
      hiveWorkUnit.setTableSchemaUrl(this.avroSchemaManager.getSchemaUrl(hiveDataset.getTable()));
      hiveWorkUnit.setPartitionSchemaUrl(this.avroSchemaManager.getSchemaUrl(partition));
    }
    return hiveWorkUnit;
  }

  /***
   * Check if path of Hive entity (table / partition) contains location token that should be ignored. If so, ignore
   * the partition.
   */
  protected boolean shouldCreateWorkUnit(Path dataLocation) {
    if (null == this.ignoreDataPathIdentifierList || this.ignoreDataPathIdentifierList.size() == 0) {
      return true;
    }
    // Case-insensitive substring match; lowercase the location once outside the loop.
    // (Default-locale toLowerCase kept for backward-compatible behavior.)
    String location = dataLocation.toString().toLowerCase();
    for (String pathToken : this.ignoreDataPathIdentifierList) {
      if (location.contains(pathToken.toLowerCase())) {
        return false;
      }
    }
    return true;
  }

  /** @return true if the partition's update time passes the watermark / lookback checks. */
  protected boolean shouldCreateWorkunit(Partition sourcePartition, LongWatermark lowWatermark) throws UpdateNotFoundException {
    long updateTime = this.updateProvider.getUpdateTime(sourcePartition);
    long createTime = getCreateTime(sourcePartition);
    return shouldCreateWorkunit(createTime, updateTime, lowWatermark);
  }

  /** @return true if the table's update time passes the watermark / lookback checks. */
  protected boolean shouldCreateWorkunit(Table table, LongWatermark lowWatermark)
      throws UpdateNotFoundException {
    long updateTime = this.updateProvider.getUpdateTime(table);
    long createTime = getCreateTime(table);
    return shouldCreateWorkunit(createTime, updateTime, lowWatermark);
  }

  /**
   * Check if workunit needs to be created. Returns <code>true</code> If the
   * <code>updateTime</code> is greater than the <code>lowWatermark</code> and <code>maxLookBackTime</code>
   * <code>createTime</code> is not used. It exists for backward compatibility
   */
  protected boolean shouldCreateWorkunit(long createTime, long updateTime, LongWatermark lowWatermark) {
    if (new DateTime(updateTime).isBefore(this.maxLookBackTime)) {
      return false;
    }
    return new DateTime(updateTime).isAfter(lowWatermark.getValue());
  }

  /**
   * Do not create workunit if a partition was created before the lookbackTime
   */
  @VisibleForTesting
  public boolean isOlderThanLookback(Partition partition) {
    return new DateTime(getCreateTime(partition)).isBefore(this.maxLookBackTime);
  }

  /**
   * Best-effort create time of a partition in milliseconds since epoch; 0 if it cannot be determined.
   */
  @VisibleForTesting
  public static long getCreateTime(Partition partition) {
    // If create time is set, use it.
    // .. this is always set if HiveJDBC or Hive metastore is used to create partition.
    // .. it might not be set (ie. equals 0) if Thrift API call is used to create partition.
    if (partition.getTPartition().getCreateTime() > 0) {
      return TimeUnit.MILLISECONDS.convert(partition.getTPartition().getCreateTime(), TimeUnit.SECONDS);
    }
    // Try to use distcp-ng registration generation time if it is available
    else if (partition.getTPartition().isSetParameters()
        && partition.getTPartition().getParameters().containsKey(DISTCP_REGISTRATION_GENERATION_TIME_KEY)) {
      log.debug("Did not find createTime in Hive partition, used distcp registration generation time.");
      return Long.parseLong(partition.getTPartition().getParameters().get(DISTCP_REGISTRATION_GENERATION_TIME_KEY));
    } else {
      log.warn(String.format("Could not find create time for partition %s. Will return createTime as 0",
          partition.getCompleteName()));
      return 0;
    }
  }

  // Convert createTime from seconds to milliseconds
  protected static long getCreateTime(Table table) {
    return TimeUnit.MILLISECONDS.convert(table.getTTable().getCreateTime(), TimeUnit.SECONDS);
  }

  /**
   * Instantiates the configured extractor factory and delegates extractor creation to it.
   * Any reflection / factory failure is wrapped in an {@link IOException}.
   */
  @Override
  public Extractor getExtractor(WorkUnitState state) throws IOException {
    try {
      return classAliasResolver.resolveClass(state.getProp(HIVE_SOURCE_EXTRACTOR_TYPE, DEFAULT_HIVE_SOURCE_EXTRACTOR_TYPE))
          .newInstance().createExtractor(state, getSourceFs(state));
    } catch (Exception e) {
      throw new IOException(e);
    }
  }

  @Override
  public void shutdown(SourceState state) {
  }

  /** Returns the source {@link FileSystem}, honoring an explicit {@value #HIVE_SOURCE_FS_URI} if set. */
  public static FileSystem getSourceFs(State state) throws IOException {
    if (state.contains(HIVE_SOURCE_FS_URI)) {
      return FileSystem.get(URI.create(state.getProp(HIVE_SOURCE_FS_URI)), HadoopUtils.getConfFromState(state));
    }
    return FileSystem.get(HadoopUtils.getConfFromState(state));
  }

  /**
   * Hive logging is too verbose at INFO level. Currently hive does not have a way to set log level.
   * This is a workaround to set log level to WARN for hive loggers only
   */
  private void silenceHiveLoggers() {
    List<String> loggers = ImmutableList.of("org.apache.hadoop.hive", "org.apache.hive", "hive.ql.parse");
    for (String name : loggers) {
      Logger logger = Logger.getLogger(name);
      if (logger != null) {
        logger.setLevel(Level.WARN);
      }
    }
  }

  /** @return true if the table's serde library is the Avro serde. */
  private boolean isAvro(Table table) {
    return AvroSerDe.class.getName().equals(table.getSd().getSerdeInfo().getSerializationLib());
  }
}
| 2,500 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/source/BackfillHiveSource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.source;
import java.util.List;
import java.util.Set;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hive.ql.metadata.Partition;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Splitter;
import com.google.common.collect.Sets;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.source.extractor.extract.LongWatermark;
import org.apache.gobblin.source.workunit.WorkUnit;
/**
* A {@link HiveSource} used to create workunits without a watermark check.
* {@link #shouldCreateWorkunit(long, long, LongWatermark)} will always return <code>true</code>
*/
public class BackfillHiveSource extends HiveAvroToOrcSource {

  /**
   * A comma separated list of {@link Partition#getCompleteName()}s that need backfill.
   * If not set, all partitions will be backfilled
   * <p>
   * E.g. service@logEvent@datepartition=2016-08-04-00,service@logEvent@datepartition=2016-08-05-00
   * </p>
   *
   */
  @VisibleForTesting
  public static final String BACKFILL_SOURCE_PARTITION_WHITELIST_KEY = "hive.backfillSource.partitions.whitelist";

  // Partitions explicitly requested for backfill; an empty set means "backfill everything".
  private Set<String> partitionsWhitelist;

  /** Parses the whitelist config into a set of complete partition names. */
  @VisibleForTesting
  public void initBackfillHiveSource(SourceState state) {
    String configuredWhitelist = state.getProp(BACKFILL_SOURCE_PARTITION_WHITELIST_KEY, StringUtils.EMPTY);
    Iterable<String> partitionNames = Splitter.on(",").omitEmptyStrings().trimResults().split(configuredWhitelist);
    this.partitionsWhitelist = Sets.newHashSet(partitionNames);
  }

  @Override
  public List<WorkUnit> getWorkunits(SourceState state) {
    initBackfillHiveSource(state);
    return super.getWorkunits(state);
  }

  /** Non partitioned tables: always backfill, regardless of watermarks. */
  @Override
  public boolean shouldCreateWorkunit(long createTime, long updateTime, LongWatermark lowWatermark) {
    return true;
  }

  /** Partitioned tables: honor the whitelist when one is set, otherwise backfill every partition. */
  @Override
  public boolean shouldCreateWorkunit(Partition sourcePartition, LongWatermark lowWatermark) {
    if (this.partitionsWhitelist.isEmpty()) {
      // No whitelist configured: all partitions of a dataset are backfilled
      return true;
    }
    // Only create workunits for the whitelisted partitions
    return this.partitionsWhitelist.contains(sourcePartition.getCompleteName());
  }

  @Override
  public boolean isOlderThanLookback(Partition partition) {
    // An explicit whitelist overrides the lookback window entirely.
    return this.partitionsWhitelist.isEmpty() && super.isOlderThanLookback(partition);
  }
}
| 2,501 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/watermarker/TableLevelWatermarker.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.watermarker;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import lombok.extern.slf4j.Slf4j;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;
import com.google.common.base.Function;
import com.google.common.base.Predicates;
import com.google.common.collect.FluentIterable;
import com.google.common.collect.Maps;
import com.google.gson.Gson;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.source.extractor.Watermark;
import org.apache.gobblin.source.extractor.extract.LongWatermark;
import org.apache.gobblin.source.workunit.WorkUnit;
/**
* A {@link HiveSourceWatermarker} that manages {@link Watermark} at a per {@link Table} basis.
* <ul>
* <li>One {@link Watermark} per table exists.
* <li>All {@link Partition}s of a {@link Table} have the same {@link Watermark}.
* <li>The time at which the job processed a {@link Table} for workunit creation is used as {@link Watermark}
* </ul>
*
*/
@Slf4j
public class TableLevelWatermarker implements HiveSourceWatermarker {
public static final Gson GSON = new Gson();
// Table complete name db@tb - list of previous workunitState
protected Map<String, LongWatermark> tableWatermarks;
public TableLevelWatermarker(State state) {
this.tableWatermarks = Maps.newHashMap();
// Load previous watermarks in case of sourceState
if (state instanceof SourceState) {
SourceState sourceState = (SourceState)state;
for (Map.Entry<String, Iterable<WorkUnitState>> datasetWorkUnitStates : sourceState
.getPreviousWorkUnitStatesByDatasetUrns().entrySet()) {
// Use the minimum of all previous watermarks for this dataset
List<LongWatermark> previousWatermarks = FluentIterable.from(datasetWorkUnitStates.getValue())
.filter(Predicates.not(PartitionLevelWatermarker.WATERMARK_WORKUNIT_PREDICATE))
.transform(new Function<WorkUnitState, LongWatermark>() {
@Override
public LongWatermark apply(WorkUnitState w) {
return w.getActualHighWatermark(LongWatermark.class);
}
}).toList();
if (!previousWatermarks.isEmpty()) {
this.tableWatermarks.put(datasetWorkUnitStates.getKey(), Collections.min(previousWatermarks));
}
}
log.debug("Loaded table watermarks from previous state " + this.tableWatermarks);
}
}
@Override
public LongWatermark getPreviousHighWatermark(Table table) {
if (this.tableWatermarks.containsKey(table.getCompleteName())) {
return this.tableWatermarks.get(table.getCompleteName());
}
return new LongWatermark(0);
}
@Override
public LongWatermark getPreviousHighWatermark(Partition partition) {
return getPreviousHighWatermark(partition.getTable());
}
@Override
public void onTableProcessBegin(Table table, long tableProcessTime) {}
@Override
public void onPartitionProcessBegin(Partition partition, long partitionProcessTime, long partitionUpdateTime) {}
@Override
public void onGetWorkunitsEnd(List<WorkUnit> workunits) {}
@Override
public LongWatermark getExpectedHighWatermark(Table table, long tableProcessTime) {
return new LongWatermark(tableProcessTime);
}
@Override
public LongWatermark getExpectedHighWatermark(Partition partition, long tableProcessTime, long partitionProcessTime) {
return getExpectedHighWatermark(partition.getTable(), tableProcessTime);
}
@Override
public void setActualHighWatermark(WorkUnitState wus) {
wus.setActualHighWatermark(wus.getWorkunit().getExpectedHighWatermark(LongWatermark.class));
}
/**
* Factory to create a {@link TableLevelWatermarker}
*/
public static class Factory implements HiveSourceWatermarkerFactory {
@Override
public TableLevelWatermarker createFromState(State state) {
return new TableLevelWatermarker(state);
}
}
}
| 2,502 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/watermarker/PartitionLevelWatermarker.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.watermarker;
import java.io.IOException;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.thrift.TException;
import org.joda.time.DateTime;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Function;
import com.google.common.base.Joiner;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Predicate;
import com.google.common.base.Throwables;
import com.google.common.collect.Collections2;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import javax.annotation.Nonnull;
import lombok.AccessLevel;
import lombok.Getter;
import lombok.Setter;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.data.management.conversion.hive.converter.AbstractAvroToOrcConverter;
import org.apache.gobblin.data.management.conversion.hive.provider.HiveUnitUpdateProvider;
import org.apache.gobblin.data.management.conversion.hive.provider.UpdateNotFoundException;
import org.apache.gobblin.data.management.conversion.hive.provider.UpdateProviderFactory;
import org.apache.gobblin.data.management.conversion.hive.source.HiveSource;
import org.apache.gobblin.data.management.copy.hive.HiveDatasetFinder;
import org.apache.gobblin.hive.HiveMetastoreClientPool;
import org.apache.gobblin.source.extractor.Watermark;
import org.apache.gobblin.source.extractor.WatermarkInterval;
import org.apache.gobblin.source.extractor.extract.LongWatermark;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.util.AutoReturnableObject;
/**
* A {@link HiveSourceWatermarker} that maintains watermarks for each {@link Partition}. For every {@link Table} it creates
* a NoOp {@link WorkUnit} that stores watermarks for all its {@link Partition}s. The Noop workunit is identified by the
* property {@link #IS_WATERMARK_WORKUNIT_KEY} being set to true.
* <p>
* The watermark used is the update time of a {@link Partition} (for partitioned tables)
* or update time of a {@link Table} (for non partitioned tables).
* </p>
* <p>
* The watermark is stored as a {@link MultiKeyValueLongWatermark} which is an extension to {@link Map} that implements
* gobblin's {@link Watermark}. The key is a {@link Partition} identifier and value is the watermark for this {@link Partition}
* </p>
* <p>
* Watermarks for all {@link Partition}s modified after {@link HiveSource#HIVE_SOURCE_MAXIMUM_LOOKBACK_DAYS_KEY} are retained.
* The default is {@link HiveSource#DEFAULT_HIVE_SOURCE_MAXIMUM_LOOKBACK_DAYS}. If a previous watermark is not found for
* as partition, it returns 0 as the watermark
* </p>
* <p>
* Watermark workunits are not created for non partitioned tables
* </p>
*/
@Slf4j
public class PartitionLevelWatermarker implements HiveSourceWatermarker {
public static final String IS_WATERMARK_WORKUNIT_KEY = "hive.source.watermark.isWatermarkWorkUnit";
private static final Joiner PARTITION_VALUES_JOINER = Joiner.on(",");
static final Predicate<WorkUnitState> WATERMARK_WORKUNIT_PREDICATE = new Predicate<WorkUnitState>() {
@Override
public boolean apply(@Nonnull WorkUnitState input) {
return input.contains(IS_WATERMARK_WORKUNIT_KEY);
}
};
@Setter(AccessLevel.PACKAGE)
@VisibleForTesting
protected long leastWatermarkToPersistInState;
// Keep an additional 2 days of updates
protected static final int BUFFER_WATERMARK_DAYS_TO_PERSIST = 2;
/**
* Watermarks from previous state
*/
@Getter(AccessLevel.PACKAGE)
@VisibleForTesting
protected final TableWatermarks previousWatermarks;
/**
* Current expected watermarks
*/
@Getter(AccessLevel.PACKAGE)
@VisibleForTesting
protected final TableWatermarks expectedHighWatermarks;
protected final HiveMetastoreClientPool pool;
/**
* Delegates watermarking logic to {@link TableLevelWatermarker} for Non partitioned tables
*/
protected final TableLevelWatermarker tableLevelWatermarker;
protected final HiveUnitUpdateProvider updateProvider;
/**
* Reads and initialized the previous high watermarks from {@link SourceState#getPreviousDatasetStatesByUrns()}
*/
public PartitionLevelWatermarker(State state) {
this.expectedHighWatermarks = new TableWatermarks();
this.previousWatermarks = new TableWatermarks();
this.tableLevelWatermarker = new TableLevelWatermarker(state);
this.updateProvider = UpdateProviderFactory.create(state);
try {
this.pool =
HiveMetastoreClientPool.get(state.getProperties(),
Optional.fromNullable(state.getProp(HiveDatasetFinder.HIVE_METASTORE_URI_KEY)));
} catch (IOException e) {
throw new RuntimeException("Could not initialize metastore client pool", e);
}
int maxLookBackDays =
state.getPropAsInt(HiveSource.HIVE_SOURCE_MAXIMUM_LOOKBACK_DAYS_KEY,
HiveSource.DEFAULT_HIVE_SOURCE_MAXIMUM_LOOKBACK_DAYS) + BUFFER_WATERMARK_DAYS_TO_PERSIST;
this.leastWatermarkToPersistInState = new DateTime().minusDays(maxLookBackDays).getMillis();
// Load previous watermarks in case of sourceState
if (state instanceof SourceState) {
SourceState sourceState = (SourceState) state;
for (Map.Entry<String, Iterable<WorkUnitState>> datasetWorkUnitStates : sourceState
.getPreviousWorkUnitStatesByDatasetUrns().entrySet()) {
List<WorkUnitState> watermarkWorkUnits =
Lists.newArrayList(Iterables.filter(datasetWorkUnitStates.getValue(), WATERMARK_WORKUNIT_PREDICATE));
if (watermarkWorkUnits.isEmpty()) {
log.info(String.format("No previous partition watermarks for table %s", datasetWorkUnitStates.getKey()));
continue;
} else if (watermarkWorkUnits.size() > 1) {
throw new IllegalStateException(
String
.format(
"Each table should have only 1 watermark workunit that contains watermarks for all its partitions. Found %s",
watermarkWorkUnits.size()));
} else {
MultiKeyValueLongWatermark multiKeyValueLongWatermark =
watermarkWorkUnits.get(0).getActualHighWatermark(MultiKeyValueLongWatermark.class);
if (multiKeyValueLongWatermark != null) {
this.previousWatermarks.setPartitionWatermarks(datasetWorkUnitStates.getKey(),
multiKeyValueLongWatermark.getWatermarks());
} else {
log.warn(String.format("Previous workunit for %s has %s set but null MultiKeyValueLongWatermark found",
datasetWorkUnitStates.getKey(), IS_WATERMARK_WORKUNIT_KEY));
}
}
}
log.debug("Loaded partition watermarks from previous state " + this.previousWatermarks);
for (String tableKey : this.previousWatermarks.keySet()) {
this.expectedHighWatermarks.setPartitionWatermarks(tableKey,
Maps.newHashMap(this.previousWatermarks.getPartitionWatermarks(tableKey)));
}
}
}
/**
* Initializes the expected high watermarks for a {@link Table}
* {@inheritDoc}
* @see org.apache.gobblin.data.management.conversion.hive.watermarker.HiveSourceWatermarker#onTableProcessBegin(org.apache.hadoop.hive.ql.metadata.Table, long)
*/
@Override
public void onTableProcessBegin(Table table, long tableProcessTime) {
Preconditions.checkNotNull(table);
if (!this.expectedHighWatermarks.hasPartitionWatermarks(tableKey(table))) {
this.expectedHighWatermarks.setPartitionWatermarks(tableKey(table), Maps.<String, Long> newHashMap());
}
}
/**
* Adds an expected high watermark for this {@link Partition}. Also removes any watermarks for partitions being replaced.
* Replace partitions are read using partition parameter {@link AbstractAvroToOrcConverter#REPLACED_PARTITIONS_HIVE_METASTORE_KEY}.
* Uses the <code>partitionUpdateTime</code> as the high watermark for this <code>partition</code>
*
* {@inheritDoc}
* @see org.apache.gobblin.data.management.conversion.hive.watermarker.HiveSourceWatermarker#onPartitionProcessBegin(org.apache.hadoop.hive.ql.metadata.Partition, long, long)
*/
@Override
public void onPartitionProcessBegin(Partition partition, long partitionProcessTime, long partitionUpdateTime) {
Preconditions.checkNotNull(partition);
Preconditions.checkNotNull(partition.getTable());
if (!this.expectedHighWatermarks.hasPartitionWatermarks(tableKey(partition.getTable()))) {
throw new IllegalStateException(String.format(
"onPartitionProcessBegin called before onTableProcessBegin for table: %s, partitions: %s",
tableKey(partition.getTable()), partitionKey(partition)));
}
// Remove dropped partitions
Collection<String> droppedPartitions =
Collections2.transform(AbstractAvroToOrcConverter.getDropPartitionsDDLInfo(partition),
new Function<Map<String, String>, String>() {
@Override
public String apply(Map<String, String> input) {
return PARTITION_VALUES_JOINER.join(input.values());
}
});
this.expectedHighWatermarks.removePartitionWatermarks(tableKey(partition.getTable()), droppedPartitions);
this.expectedHighWatermarks.addPartitionWatermark(tableKey(partition.getTable()), partitionKey(partition),
partitionUpdateTime);
}
/**
* Delegates to {@link TableLevelWatermarker#getPreviousHighWatermark(Table)}
*
* {@inheritDoc}
* @see org.apache.gobblin.data.management.conversion.hive.watermarker.HiveSourceWatermarker#getPreviousHighWatermark(org.apache.hadoop.hive.ql.metadata.Table)
*/
@Override
public LongWatermark getPreviousHighWatermark(Table table) {
return this.tableLevelWatermarker.getPreviousHighWatermark(table);
}
/**
* Return the previous high watermark if found in previous state. Else returns 0
* {@inheritDoc}
* @see org.apache.gobblin.data.management.conversion.hive.watermarker.HiveSourceWatermarker#getPreviousHighWatermark(org.apache.hadoop.hive.ql.metadata.Partition)
*/
@Override
public LongWatermark getPreviousHighWatermark(Partition partition) {
if (this.previousWatermarks.hasPartitionWatermarks(tableKey(partition.getTable()))) {
// If partition has a watermark return.
if (this.previousWatermarks.get(tableKey(partition.getTable())).containsKey(partitionKey(partition))) {
return new LongWatermark(this.previousWatermarks.getPartitionWatermark(tableKey(partition.getTable()),
partitionKey(partition)));
}
}
return new LongWatermark(0);
}
/**
* Adds watermark workunits to <code>workunits</code>. A watermark workunit is a dummy workunit that is skipped by extractor/converter/writer.
* It stores a map of watermarks. The map has one entry per partition with partition watermark as value.
* <ul>
* <li>Add one NoOp watermark workunit for each {@link Table}
* <li>The workunit has an identifier property {@link #IS_WATERMARK_WORKUNIT_KEY} set to true.
* <li>Watermarks for all {@link Partition}s that belong to this {@link Table} are added as {@link Map}
* <li>A maximum of {@link #maxPartitionsPerDataset} are persisted. Watermarks are ordered by most recently modified {@link Partition}s
*
* </ul>
* {@inheritDoc}
* @see org.apache.gobblin.data.management.conversion.hive.watermarker.HiveSourceWatermarker#onGetWorkunitsEnd(java.util.List)
*/
@Override
public void onGetWorkunitsEnd(List<WorkUnit> workunits) {
try (AutoReturnableObject<IMetaStoreClient> client = this.pool.getClient()) {
for (Map.Entry<String, Map<String, Long>> tableWatermark : this.expectedHighWatermarks.entrySet()) {
String tableKey = tableWatermark.getKey();
Map<String, Long> partitionWatermarks = tableWatermark.getValue();
// Watermark workunits are required only for Partitioned tables
// tableKey is table complete name in the format db@table
if (!(new org.apache.hadoop.hive.ql.metadata.Table(client.get().getTable(
tableKey.split("@")[0], tableKey.split("@")[1])).isPartitioned())) {
continue;
}
// We only keep watermarks for partitions that were updated after leastWatermarkToPersistInState
Map<String, Long> expectedPartitionWatermarks =
ImmutableMap.copyOf(Maps.filterEntries(partitionWatermarks, new Predicate<Map.Entry<String, Long>>() {
@Override
public boolean apply(@Nonnull Map.Entry<String, Long> input) {
return Long.compare(input.getValue(), PartitionLevelWatermarker.this.leastWatermarkToPersistInState) >= 0;
}
}));
// Create dummy workunit to track all the partition watermarks for this table
WorkUnit watermarkWorkunit = WorkUnit.createEmpty();
watermarkWorkunit.setProp(IS_WATERMARK_WORKUNIT_KEY, true);
watermarkWorkunit.setProp(ConfigurationKeys.DATASET_URN_KEY, tableKey);
watermarkWorkunit.setWatermarkInterval(new WatermarkInterval(new MultiKeyValueLongWatermark(
this.previousWatermarks.get(tableKey)), new MultiKeyValueLongWatermark(expectedPartitionWatermarks)));
workunits.add(watermarkWorkunit);
}
} catch (IOException | TException e) {
Throwables.propagate(e);
}
}
/**
*
* {@inheritDoc}
*
* Uses the <code>table</code>'s modified time as watermark. The modified time is read using
* {@link HiveUnitUpdateProvider#getUpdateTime(Table)}
* @throws UpdateNotFoundException if there was an error fetching update time using {@link HiveUnitUpdateProvider#getUpdateTime(Table)}
* @see org.apache.gobblin.data.management.conversion.hive.watermarker.HiveSourceWatermarker#getExpectedHighWatermark(org.apache.hadoop.hive.ql.metadata.Table, long)
*/
@Override
public LongWatermark getExpectedHighWatermark(Table table, long tableProcessTime) {
return new LongWatermark(this.updateProvider.getUpdateTime(table));
}
/**
* Get the expected high watermark for this partition
* {@inheritDoc}
* @see org.apache.gobblin.data.management.conversion.hive.watermarker.HiveSourceWatermarker#getExpectedHighWatermark(org.apache.hadoop.hive.ql.metadata.Partition, long, long)
*/
@Override
public LongWatermark getExpectedHighWatermark(Partition partition, long tableProcessTime, long partitionProcessTime) {
return new LongWatermark(this.expectedHighWatermarks.getPartitionWatermark(tableKey(partition.getTable()),
partitionKey(partition)));
}
/**
* Sets the actual high watermark by reading the expected high watermark
* {@inheritDoc}
* @see org.apache.gobblin.data.management.conversion.hive.watermarker.HiveSourceWatermarker#setActualHighWatermark(org.apache.gobblin.configuration.WorkUnitState)
*/
@Override
public void setActualHighWatermark(WorkUnitState wus) {
if (Boolean.valueOf(wus.getPropAsBoolean(IS_WATERMARK_WORKUNIT_KEY))) {
wus.setActualHighWatermark(wus.getWorkunit().getExpectedHighWatermark(MultiKeyValueLongWatermark.class));
} else {
wus.setActualHighWatermark(wus.getWorkunit().getExpectedHighWatermark(LongWatermark.class));
}
}
@VisibleForTesting
public static String tableKey(Table table) {
return table.getCompleteName();
}
@VisibleForTesting
public static String partitionKey(Partition partition) {
return PARTITION_VALUES_JOINER.join(partition.getValues());
}
/**
* An extension to standard java map with some accessors
*/
@VisibleForTesting
static class TableWatermarks extends ConcurrentHashMap<String, Map<String, Long>> {
private static final long serialVersionUID = 1L;
public TableWatermarks() {
super();
}
void setPartitionWatermarks(String tableKey, Map<String, Long> partitionWatermarks) {
this.put(tableKey, partitionWatermarks);
}
boolean hasPartitionWatermarks(String tableKey) {
return this.containsKey(tableKey);
}
void removePartitionWatermarks(String tableKey, Collection<String> partitionKeys) {
this.get(tableKey).keySet().removeAll(partitionKeys);
}
void addPartitionWatermark(String tableKey, String partitionKey, Long watermark) {
this.get(tableKey).put(partitionKey, watermark);
}
Long getPartitionWatermark(String tableKey, String partitionKey) {
return this.get(tableKey).get(partitionKey);
}
Map<String, Long> getPartitionWatermarks(String tableKey) {
return this.get(tableKey);
}
}
/**
* Factory to create a {@link PartitionLevelWatermarker}
*/
public static class Factory implements HiveSourceWatermarkerFactory {
@Override
public PartitionLevelWatermarker createFromState(State state) {
return new PartitionLevelWatermarker(state);
}
}
}
| 2,503 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/watermarker/MultiKeyValueLongWatermark.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.watermarker;
import java.math.RoundingMode;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import lombok.AccessLevel;
import lombok.Getter;
import com.google.common.base.Preconditions;
import com.google.common.collect.Maps;
import com.google.common.math.LongMath;
import com.google.gson.Gson;
import com.google.gson.JsonElement;
import org.apache.gobblin.source.extractor.Watermark;
/**
 * A {@link Watermark} that holds multiple key value watermarks. Neither the key nor the value can be null
 * It is backed by a {@link ConcurrentHashMap}.
 */
public class MultiKeyValueLongWatermark implements Watermark {
  private static final Gson GSON = new Gson();

  // Key -> watermark value; concurrent map, null-hostile.
  @Getter(AccessLevel.PACKAGE)
  private final Map<String, Long> watermarks;

  public MultiKeyValueLongWatermark() {
    this.watermarks = Maps.newConcurrentMap();
  }

  /**
   * Copies the given watermarks into a new concurrent map; a null argument yields an empty map.
   */
  public MultiKeyValueLongWatermark(Map<String, Long> watermarks) {
    this.watermarks = watermarks == null ? Maps.<String, Long> newConcurrentMap() : new ConcurrentHashMap<>(watermarks);
  }

  @Override
  public JsonElement toJson() {
    return GSON.toJsonTree(this);
  }

  /**
   * Computes percent completion as the pulled range (this - low) over the total
   * expected range (high - low), summed across all keys present in the low watermark.
   *
   * @throws IllegalArgumentException if either argument is not a {@link MultiKeyValueLongWatermark}
   */
  @Override
  public short calculatePercentCompletion(Watermark lowWatermark, Watermark highWatermark) {
    Preconditions.checkArgument(
        lowWatermark instanceof MultiKeyValueLongWatermark && highWatermark instanceof MultiKeyValueLongWatermark,
        String.format("lowWatermark and highWatermark are not instances of %s",
            MultiKeyValueLongWatermark.class.getSimpleName()));

    MultiKeyValueLongWatermark low = (MultiKeyValueLongWatermark) lowWatermark;
    MultiKeyValueLongWatermark high = (MultiKeyValueLongWatermark) highWatermark;

    long total = 0;
    long pulled = 0;

    for (Map.Entry<String, Long> entry : low.watermarks.entrySet()) {
      if (high.watermarks.containsKey(entry.getKey())) {
        total += (high.watermarks.get(entry.getKey()) - entry.getValue());
      }
    }

    for (Map.Entry<String, Long> entry : low.watermarks.entrySet()) {
      if (this.watermarks.containsKey(entry.getKey())) {
        pulled += (this.watermarks.get(entry.getKey()) - entry.getValue());
      }
    }

    if (pulled > total) {
      return 100;
    }
    // Guard against division by zero (e.g. low == high, or no overlapping keys).
    // An empty/zero expected range means there is no measurable progress.
    if (total <= 0) {
      return 0;
    }
    return (short) LongMath.divide(pulled * 100, total, RoundingMode.CEILING);
  }
}
| 2,504 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/watermarker/HiveSourceWatermarkerFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.watermarker;
import org.apache.gobblin.configuration.State;
/**
 * An interface for creating new {@link HiveSourceWatermarker}s
 */
public interface HiveSourceWatermarkerFactory {

  /**
   * Create a new {@link HiveSourceWatermarker} configured from the given {@link State}.
   *
   * @param state job state used to configure the watermarker
   * @return a newly created {@link HiveSourceWatermarker}
   */
  HiveSourceWatermarker createFromState(State state);
}
| 2,505 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/watermarker/HiveSourceWatermarker.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.watermarker;
import java.util.List;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.data.management.conversion.hive.source.HiveSource;
import org.apache.gobblin.publisher.DataPublisher;
import org.apache.gobblin.source.extractor.extract.LongWatermark;
import org.apache.gobblin.source.workunit.WorkUnit;
/**
 * An interface to read previous high watermarks and write new high watermarks to state.
 */
public interface HiveSourceWatermarker {

  /**
   * Get high watermark for a {@link Table}. This API is used by the {@link HiveSource} for Non Partitioned hive tables
   *
   * @param table for which a high watermark needs to be returned
   */
  LongWatermark getPreviousHighWatermark(Table table);

  /**
   * Get high watermark for a {@link Partition}. This API is used by the {@link HiveSource} for Partitioned hive tables
   *
   * @param partition for which a high watermark needs to be returned
   */
  LongWatermark getPreviousHighWatermark(Partition partition);

  /**
   * Get the expected high watermark for a {@link Table}. This API is used by the {@link HiveSource} to get Expected
   * high watermark for Non Partitioned hive tables
   *
   * @param table for which a high watermark needs to be returned
   * @param tableProcessTime time at which workunit creation started for this table
   */
  LongWatermark getExpectedHighWatermark(Table table, long tableProcessTime);

  /**
   * Get the expected high watermark for a {@link Partition}. This API is used by the {@link HiveSource} for Partitioned hive tables
   *
   * @param partition for which a high watermark needs to be returned
   * @param tableProcessTime time at which workunit creation started for table this partition belongs to
   * @param partitionProcessTime time at which workunit creation started for this partition
   */
  LongWatermark getExpectedHighWatermark(Partition partition, long tableProcessTime, long partitionProcessTime);

  /**
   * A callback method that {@link HiveSource} executes when workunit creation for a {@link Table} is started.
   *
   * @param table for which {@link WorkUnit}s will be created
   * @param tableProcessTime time at which this callback was called
   */
  void onTableProcessBegin(Table table, long tableProcessTime);

  /**
   * A callback method that {@link HiveSource} executes when workunit creation for a {@link Partition} is started.
   *
   * @param partition for which {@link WorkUnit} will be created
   * @param partitionProcessTime time at which this callback was executed
   * @param partitionUpdateTime time at which this partition was updated
   */
  void onPartitionProcessBegin(Partition partition, long partitionProcessTime, long partitionUpdateTime);

  /**
   * A callback method executed before a list of workunits is returned by the
   * {@link HiveSource#getWorkunits(org.apache.gobblin.configuration.SourceState)} to the caller
   *
   * @param workunits constructed by {@link HiveSource#getWorkunits(org.apache.gobblin.configuration.SourceState)}
   */
  void onGetWorkunitsEnd(List<WorkUnit> workunits);

  /**
   * Sets the actual high watermark after data has been published by the {@link DataPublisher}
   *
   * @param wus to set the watermark
   */
  void setActualHighWatermark(WorkUnitState wus);
}
| 2,506 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/provider/DatePatternUpdateProvider.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.provider;
import java.util.Arrays;
import lombok.ToString;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.joda.time.DateTimeZone;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import org.apache.gobblin.configuration.ConfigurationKeys;
/**
 * An update provider that parses the date pattern in the {@link Partition} or {@link Table}s physical data location.
 * This parsed date is used as update time.
 */
public class DatePatternUpdateProvider implements HiveUnitUpdateProvider {

  // Derives the partition's update time from the date encoded in its storage location.
  @Override
  public long getUpdateTime(Partition partition) throws UpdateNotFoundException {
    return parseDateForLocation(partition.getTPartition().getSd().getLocation());
  }

  // Derives the table's update time from the date encoded in its storage location.
  @Override
  public long getUpdateTime(Table table) throws UpdateNotFoundException {
    return parseDateForLocation(table.getTTable().getSd().getLocation());
  }

  /**
   * Extracts the date substring that follows a known prefix (e.g. "/daily/") in
   * <code>location</code> and parses it to epoch millis in the PST timezone.
   *
   * @throws UpdateNotFoundException if no known prefix is present in the location,
   *         or the trailing date string fails to parse
   */
  private long parseDateForLocation(String location) throws UpdateNotFoundException {
    for (Patterns pattern : Patterns.values()) {
      // Everything after the LAST occurrence of the prefix is treated as the date path,
      // e.g. ".../hourly/2016/05/12/04" -> "2016/05/12/04".
      String dateString = StringUtils.substringAfterLast(location, pattern.prefix);
      if (StringUtils.isNotBlank(dateString)) {
        try {
          return pattern.dateFormat.parseMillis(dateString);
        } catch (IllegalArgumentException | UnsupportedOperationException e) {
          throw new UpdateNotFoundException(String.format("Failed parsing date string %s", dateString));
        }
      }
    }
    throw new UpdateNotFoundException(String.format("Path %s does not match any date pattern %s", location,
        Arrays.toString(Patterns.values())));
  }

  /**
   * Known location layouts: each entry maps a path prefix to the Joda date format
   * of the path segments that follow it.
   */
  @ToString
  private enum Patterns {
    // NOTE(review): Joda-Time 'hh' is clockhour-of-halfday (1-12). If hourly paths encode
    // hours 00-23, 'HH' (hourOfDay) would be required -- confirm against the actual layout.
    DAILY("/daily/", "yyyy/MM/dd"),
    DAILY_LATE("/daily_late/", "yyyy/MM/dd"),
    HOURLY("/hourly/", "yyyy/MM/dd/hh"),
    HOURLY_LATE("/hourly_late/", "yyyy/MM/dd/hh"),
    HOURLY_DEDUPED("/hourly_deduped/", "yyyy/MM/dd/hh");

    private final String prefix;
    private final DateTimeFormatter dateFormat;

    private Patterns(String prefix, String patternString) {
      this.prefix = prefix;
      this.dateFormat =
          DateTimeFormat.forPattern(patternString).withZone(DateTimeZone.forID(ConfigurationKeys.PST_TIMEZONE_NAME));
    }
  }
}
| 2,507 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/provider/HdfsBasedUpdateProvider.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.provider;
import java.io.IOException;
import java.util.concurrent.Callable;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import lombok.AllArgsConstructor;
import org.apache.gobblin.hive.HivePartition;
import org.apache.gobblin.hive.HiveTable;
/**
 * Uses the file modification time of the data location of a {@link HiveTable} or {@link HivePartition} on HDFS
 * as the update time.
 */
@AllArgsConstructor
public class HdfsBasedUpdateProvider implements HiveUnitUpdateProvider {

  private final FileSystem fs;

  // Cache modification times of data location to reduce the number of HDFS calls
  private static final Cache<Path, Long> PATH_TO_MOD_TIME_CACHE = CacheBuilder.newBuilder().maximumSize(2000).build();

  /**
   * Get the update time of a {@link Partition}.
   *
   * @return the modification time (epoch millis) of the partition's data location
   * @throws UpdateNotFoundException if the data location does not exist or the lookup failed
   *
   * {@inheritDoc}
   * @see HiveUnitUpdateProvider#getUpdateTime(org.apache.hadoop.hive.ql.metadata.Partition)
   */
  @Override
  public long getUpdateTime(Partition partition) throws UpdateNotFoundException {
    try {
      return getUpdateTime(partition.getDataLocation());
    } catch (IOException e) {
      throw new UpdateNotFoundException(String.format("Failed to get update time for %s", partition.getCompleteName()),
          e);
    }
  }

  /**
   * Get the update time of a {@link Table}.
   *
   * @return the modification time (epoch millis) of the table's data location
   * @throws UpdateNotFoundException if the data location does not exist or the lookup failed
   *
   * {@inheritDoc}
   * @see HiveUnitUpdateProvider#getUpdateTime(org.apache.hadoop.hive.ql.metadata.Table)
   */
  @Override
  public long getUpdateTime(Table table) throws UpdateNotFoundException {
    try {
      return getUpdateTime(table.getDataLocation());
    } catch (IOException e) {
      throw new UpdateNotFoundException(String.format("Failed to get update time for %s.", table.getCompleteName()), e);
    }
  }

  /**
   * Looks up (and caches) the modification time of <code>path</code>.
   *
   * @throws UpdateNotFoundException if the path does not exist
   * @throws IOException for any other failure while talking to the file system
   */
  private long getUpdateTime(final Path path) throws IOException, UpdateNotFoundException {
    try {
      return PATH_TO_MOD_TIME_CACHE.get(path, new Callable<Long>() {
        @Override
        public Long call() throws Exception {
          if (HdfsBasedUpdateProvider.this.fs.exists(path)) {
            return HdfsBasedUpdateProvider.this.fs.getFileStatus(path).getModificationTime();
          }
          throw new UpdateNotFoundException(String.format("Data file does not exist at path %s", path));
        }
      });
    } catch (Exception e) {
      // Cache.get wraps Callable failures in (Unchecked)ExecutionException. Unwrap so a
      // missing path surfaces as the declared UpdateNotFoundException instead of being
      // buried inside a generic IOException.
      if (e.getCause() instanceof UpdateNotFoundException) {
        throw (UpdateNotFoundException) e.getCause();
      }
      throw new IOException(e);
    }
  }
}
| 2,508 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/provider/HiveUnitUpdateProvider.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.provider;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;
/**
* An interface that finds when new data was added into a {@link Partition} or a {@link Table}
*/
public interface HiveUnitUpdateProvider {

  /**
   * Get the data update time of a {@link Partition}, in epoch milliseconds.
   *
   * @throws UpdateNotFoundException if no update time can be determined for the partition
   */
  long getUpdateTime(Partition partition) throws UpdateNotFoundException;

  /**
   * Get the data update time of a {@link Table}, in epoch milliseconds.
   *
   * @throws UpdateNotFoundException if no update time can be determined for the table
   */
  long getUpdateTime(Table table) throws UpdateNotFoundException;
}
| 2,509 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/provider/UpdateNotFoundException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.provider;
/**
* An exception when {@link HiveUnitUpdateProvider} can not find updates
*/
public class UpdateNotFoundException extends RuntimeException {

  private static final long serialVersionUID = -3750962295968867238L;

  /** Creates an exception with neither detail message nor cause. */
  public UpdateNotFoundException() {
    super();
  }

  /** Creates an exception carrying only a cause. */
  public UpdateNotFoundException(Throwable cause) {
    super(cause);
  }

  /** Creates an exception carrying only a detail message. */
  public UpdateNotFoundException(String message) {
    super(message);
  }

  /** Creates an exception carrying both a detail message and a cause. */
  public UpdateNotFoundException(String message, Throwable cause) {
    super(message, cause);
  }
}
| 2,510 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/provider/UpdateProviderFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.provider;
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.net.URI;
import java.util.Properties;
import lombok.extern.slf4j.Slf4j;
import org.apache.hadoop.fs.FileSystem;
import com.google.common.collect.ImmutableList;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.util.HadoopUtils;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
/**
* A factory class to create {@link HiveUnitUpdateProvider}s
*/
@Alpha
@Slf4j
public class UpdateProviderFactory {

  private static final String OPTIONAL_HIVE_UNIT_UPDATE_PROVIDER_CLASS_KEY = "hive.unit.updateProvider.class";
  private static final String DEFAULT_HIVE_UNIT_UPDATE_PROVIDER_CLASS = HdfsBasedUpdateProvider.class
      .getName();
  static final String UPDATE_PROVIDER_FS_URI = "hive.unit.updateProvider.fs.uri";

  /**
   * Creates the {@link HiveUnitUpdateProvider} configured in {@code state}, defaulting to
   * {@link HdfsBasedUpdateProvider}.
   *
   * @throws RuntimeException wrapping any reflection or filesystem failure
   */
  public static HiveUnitUpdateProvider create(State state) {
    // Resolve the class name via State#getProp (which may consult layered properties),
    // then share the instantiation logic with the Properties-based overload.
    return createInstance(
        state.getProp(OPTIONAL_HIVE_UNIT_UPDATE_PROVIDER_CLASS_KEY, DEFAULT_HIVE_UNIT_UPDATE_PROVIDER_CLASS),
        state.getProperties());
  }

  /**
   * Creates the {@link HiveUnitUpdateProvider} configured in {@code properties}, defaulting to
   * {@link HdfsBasedUpdateProvider}.
   *
   * @throws RuntimeException wrapping any reflection or filesystem failure
   */
  public static HiveUnitUpdateProvider create(Properties properties) {
    return createInstance(
        properties.getProperty(OPTIONAL_HIVE_UNIT_UPDATE_PROVIDER_CLASS_KEY, DEFAULT_HIVE_UNIT_UPDATE_PROVIDER_CLASS),
        properties);
  }

  /**
   * Instantiates {@code providerClassName}, preferring a constructor taking the resolved
   * {@link FileSystem}, falling back to a no-arg constructor.
   */
  private static HiveUnitUpdateProvider createInstance(String providerClassName, Properties properties) {
    try {
      return (HiveUnitUpdateProvider) GobblinConstructorUtils.invokeFirstConstructor(
          Class.forName(providerClassName),
          ImmutableList.<Object>of(getFileSystem(properties)), ImmutableList.of());
    } catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException | InstantiationException
        | ClassNotFoundException | IOException e) {
      throw new RuntimeException("Failed to instantiate HiveUnitUpdateProvider " + providerClassName, e);
    }
  }

  /**
   * Builds the {@link FileSystem} the provider should inspect: the URI from
   * {@value #UPDATE_PROVIDER_FS_URI} if set, otherwise the configured default filesystem.
   */
  private static FileSystem getFileSystem(Properties properties) throws IOException {
    String uri = properties.getProperty(UPDATE_PROVIDER_FS_URI);
    if (uri == null) {
      return FileSystem.get(HadoopUtils.getConfFromProperties(properties));
    }
    log.info("Using file system URI {}", uri);
    return FileSystem.get(URI.create(uri), HadoopUtils.getConfFromProperties(properties));
  }
}
| 2,511 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/provider/HiveMetastoreBasedUpdateProvider.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.provider;
import java.util.concurrent.TimeUnit;
import lombok.NoArgsConstructor;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.gobblin.annotation.Alpha;
/**
* An update provider that uses update metadata from Hive metastore
*/
@Alpha
@NoArgsConstructor
public class HiveMetastoreBasedUpdateProvider implements HiveUnitUpdateProvider {

  /**
   * Returns the partition's metastore creation time, converted from epoch seconds to epoch
   * milliseconds.
   */
  @Override
  public long getUpdateTime(Partition partition) throws UpdateNotFoundException {
    // TODO if a table/partition is registered by gobblin an update time will be made available in table properties
    // Use the update time instead of create time
    return TimeUnit.SECONDS.toMillis(partition.getTPartition().getCreateTime());
  }

  /**
   * Returns the table's metastore creation time, converted from epoch seconds to epoch
   * milliseconds.
   */
  @Override
  public long getUpdateTime(Table table) throws UpdateNotFoundException {
    // TODO if a table/partition is registered by gobblin an update time will be made available in table properties
    // Use the update time instead of create time
    return TimeUnit.SECONDS.toMillis(table.getTTable().getCreateTime());
  }
}
| 2,512 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/materializer/MaterializeTableQueryGenerator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.materializer;
import java.io.IOException;
import java.util.List;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.data.management.conversion.hive.task.HiveConverterUtils;
import org.apache.gobblin.data.management.copy.hive.HiveDatasetFinder;
import com.google.common.collect.Lists;
/**
* A {@link org.apache.gobblin.data.management.conversion.hive.task.QueryGenerator} to materialize a copy of an existing
* Hive table / partition.
*/
public class MaterializeTableQueryGenerator extends HiveMaterializerFromEntityQueryGenerator {

  /** Storage format the materialized table should be written in, read from the work unit. */
  private final HiveConverterUtils.StorageFormat storageFormat;

  public MaterializeTableQueryGenerator(WorkUnitState workUnitState) throws IOException {
    super(workUnitState, false);
    this.storageFormat =
        HiveConverterUtils.StorageFormat.valueOf(workUnitState.getProp(HiveMaterializer.STORAGE_FORMAT_KEY));
  }

  /**
   * Builds the single CTAS statement that materializes the source table into the staging table.
   */
  @Override
  public List<String> generateQueries() {
    ensureParentOfStagingPathExists();
    HiveDatasetFinder.DbAndTable stagingTable =
        new HiveDatasetFinder.DbAndTable(this.outputDatabaseName, this.stagingTableName);
    HiveDatasetFinder.DbAndTable sourceTable =
        new HiveDatasetFinder.DbAndTable(this.inputDbName, this.inputTableName);
    String ctasStatement = HiveConverterUtils.generateStagingCTASStatementFromSelectStar(
        stagingTable, sourceTable, this.partitionsDMLInfo, this.storageFormat, this.stagingDataLocation);
    return Lists.newArrayList(ctasStatement);
  }
}
| 2,513 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/materializer/HiveMaterializerTaskFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.materializer;
import org.apache.gobblin.publisher.DataPublisher;
import org.apache.gobblin.publisher.NoopPublisher;
import org.apache.gobblin.runtime.JobState;
import org.apache.gobblin.runtime.TaskContext;
import org.apache.gobblin.runtime.task.TaskFactory;
import org.apache.gobblin.runtime.task.TaskIFace;
/**
* A {@link TaskFactory} that runs a {@link HiveMaterializer} task.
* This factory is intended to publish data in the task directly, and
* uses a {@link NoopPublisher}.
*/
public class HiveMaterializerTaskFactory implements TaskFactory {

  /**
   * Builds the {@link HiveMaterializer} task that runs the materialization queries.
   * Any construction failure is rethrown unchecked.
   */
  @Override
  public TaskIFace createTask(TaskContext taskContext) {
    try {
      return new HiveMaterializer(taskContext);
    } catch (Exception e) {
      throw new RuntimeException(e);
    }
  }

  /**
   * Returns a {@link NoopPublisher}: the task publishes its own data, so the job-level
   * publish phase has nothing to do.
   */
  @Override
  public DataPublisher createDataPublisher(JobState.DatasetState datasetState) {
    return new NoopPublisher(datasetState);
  }
}
| 2,514 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/materializer/HiveMaterializer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.materializer;
import java.io.IOException;
import java.util.List;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.data.management.conversion.hive.entities.QueryBasedHivePublishEntity;
import org.apache.gobblin.data.management.conversion.hive.entities.StageableTableMetadata;
import org.apache.gobblin.data.management.conversion.hive.source.HiveSource;
import org.apache.gobblin.data.management.conversion.hive.source.HiveWorkUnit;
import org.apache.gobblin.data.management.conversion.hive.task.HiveConverterUtils;
import org.apache.gobblin.data.management.conversion.hive.task.HiveTask;
import org.apache.gobblin.data.management.conversion.hive.task.QueryGenerator;
import org.apache.gobblin.data.management.copy.hive.HiveDataset;
import org.apache.gobblin.runtime.TaskContext;
import org.apache.gobblin.runtime.task.TaskUtils;
import org.apache.gobblin.source.workunit.WorkUnit;
import com.google.common.base.Strings;
import javax.annotation.Nullable;
import lombok.extern.slf4j.Slf4j;
/**
 * A simple {@link HiveTask} for Hive view materialization.
 */
@Slf4j
public class HiveMaterializer extends HiveTask {

  // Work-unit keys used to pass materialization configuration from the source to the task.
  protected static final String STAGEABLE_TABLE_METADATA_KEY = "internal.hiveMaterializer.stageableTableMetadata";
  protected static final String MATERIALIZER_MODE_KEY = "internal.hiveMaterializer.materializerMode";
  protected static final String STORAGE_FORMAT_KEY = "internal.hiveMaterializer.storageFormat";
  protected static final String QUERY_RESULT_TO_MATERIALIZE_KEY = "internal.hiveMaterializer.queryResultToMaterialize";

  /**
   * Create a work unit to copy a source table to a target table using a staging table in between.
   * @param dataset {@link HiveDataset} for the source table.
   * @param destinationTable {@link StageableTableMetadata} specifying staging and target tables metadata.
   * @param partitionName if non-null and non-empty, restrict the copy to this source partition.
   */
  public static HiveWorkUnit tableCopyWorkUnit(HiveDataset dataset, StageableTableMetadata destinationTable,
      @Nullable String partitionName) {
    HiveWorkUnit workUnit = new HiveWorkUnit(dataset);
    workUnit.setProp(MATERIALIZER_MODE_KEY, MaterializerMode.TABLE_COPY.name());
    // Destination metadata is serialized as JSON; parseStageableTableMetadata() deserializes it in the task.
    workUnit.setProp(STAGEABLE_TABLE_METADATA_KEY, HiveSource.GENERICS_AWARE_GSON.toJson(destinationTable));
    if (!Strings.isNullOrEmpty(partitionName)) {
      workUnit.setPartitionName(partitionName);
    }
    TaskUtils.setTaskFactoryClass(workUnit, HiveMaterializerTaskFactory.class);
    return workUnit;
  }

  /**
   * Create a work unit to materialize a table / view to a target table using a staging table in between.
   * @param dataset {@link HiveDataset} for the source table.
   * @param storageFormat format in which target table should be written.
   * @param destinationTable {@link StageableTableMetadata} specifying staging and target tables metadata.
   * @param partitionName if non-null and non-empty, restrict the materialization to this source partition.
   */
  public static HiveWorkUnit viewMaterializationWorkUnit(HiveDataset dataset, HiveConverterUtils.StorageFormat storageFormat,
      StageableTableMetadata destinationTable, @Nullable String partitionName) {
    HiveWorkUnit workUnit = new HiveWorkUnit(dataset);
    workUnit.setProp(MATERIALIZER_MODE_KEY, MaterializerMode.TABLE_MATERIALIZATION.name());
    workUnit.setProp(STORAGE_FORMAT_KEY, storageFormat.name());
    workUnit.setProp(STAGEABLE_TABLE_METADATA_KEY, HiveSource.GENERICS_AWARE_GSON.toJson(destinationTable));
    if (!Strings.isNullOrEmpty(partitionName)) {
      workUnit.setPartitionName(partitionName);
    }
    TaskUtils.setTaskFactoryClass(workUnit, HiveMaterializerTaskFactory.class);
    return workUnit;
  }

  /**
   * Create a work unit to materialize a query to a target table using a staging table in between.
   * @param query the query to materialize.
   * @param storageFormat format in which target table should be written.
   * @param destinationTable {@link StageableTableMetadata} specifying staging and target tables metadata.
   */
  public static WorkUnit queryResultMaterializationWorkUnit(String query, HiveConverterUtils.StorageFormat storageFormat,
      StageableTableMetadata destinationTable) {
    // Note: this is a plain WorkUnit (not a HiveWorkUnit) since there is no source Hive dataset.
    WorkUnit workUnit = new WorkUnit();
    workUnit.setProp(MATERIALIZER_MODE_KEY, MaterializerMode.QUERY_RESULT_MATERIALIZATION.name());
    workUnit.setProp(STORAGE_FORMAT_KEY, storageFormat.name());
    workUnit.setProp(QUERY_RESULT_TO_MATERIALIZE_KEY, query);
    workUnit.setProp(STAGEABLE_TABLE_METADATA_KEY, HiveSource.GENERICS_AWARE_GSON.toJson(destinationTable));
    TaskUtils.setTaskFactoryClass(workUnit, HiveMaterializerTaskFactory.class);
    HiveTask.disableHiveWatermarker(workUnit);
    return workUnit;
  }

  /** Deserializes the {@link StageableTableMetadata} stored in the work unit by the factory methods above. */
  public static StageableTableMetadata parseStageableTableMetadata(WorkUnit workUnit) {
    return HiveSource.GENERICS_AWARE_GSON.fromJson(workUnit.getProp(STAGEABLE_TABLE_METADATA_KEY), StageableTableMetadata.class);
  }

  /** Supported materialization strategies; each builds its own {@link QueryGenerator}. */
  private enum MaterializerMode {
    /** Materialize a table or view into a new table possibly with a new storage format. */
    TABLE_MATERIALIZATION {
      @Override
      public QueryGenerator createQueryGenerator(WorkUnitState state) throws IOException {
        return new MaterializeTableQueryGenerator(state);
      }
    },
    /** Copy a table into a new table with the same properties. */
    TABLE_COPY {
      @Override
      public QueryGenerator createQueryGenerator(WorkUnitState state) throws IOException {
        return new CopyTableQueryGenerator(state);
      }
    },
    /** Materialize a query into a table. */
    QUERY_RESULT_MATERIALIZATION {
      @Override
      public QueryGenerator createQueryGenerator(WorkUnitState state) throws IOException {
        return new QueryBasedMaterializerQueryGenerator(state);
      }
    };

    /** Builds the {@link QueryGenerator} implementing this materialization mode. */
    public abstract QueryGenerator createQueryGenerator(WorkUnitState state) throws IOException;
  }

  // Generates both the task-time queries and the publish-time queries for this work unit.
  private final QueryGenerator queryGenerator;

  /** Reads the materializer mode from the work unit and builds the matching query generator. */
  public HiveMaterializer(TaskContext taskContext) throws IOException {
    super(taskContext);
    MaterializerMode materializerMode = MaterializerMode.valueOf(this.workUnitState.getProp(MATERIALIZER_MODE_KEY));
    this.queryGenerator = materializerMode.createQueryGenerator(this.workUnitState);
  }

  /** Queries executed during the task run (staging-table population). */
  @Override
  public List<String> generateHiveQueries() {
    return queryGenerator.generateQueries();
  }

  /** Queries executed at publish time (move staging output into place, clean up). */
  @Override
  public QueryBasedHivePublishEntity generatePublishQueries() throws Exception {
    return queryGenerator.generatePublishQueries();
  }
}
| 2,515 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/materializer/HiveMaterializerQueryGenerator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.materializer;
import java.io.IOException;
import java.util.List;
import org.apache.gobblin.data.management.conversion.hive.entities.StageableTableMetadata;
import org.apache.gobblin.data.management.conversion.hive.source.HiveSource;
import org.apache.gobblin.data.management.conversion.hive.task.HiveConverterUtils;
import org.apache.gobblin.data.management.conversion.hive.task.QueryGenerator;
import org.apache.hadoop.fs.FileSystem;
import com.google.common.base.Optional;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.DataConversionException;
import org.apache.gobblin.data.management.conversion.hive.entities.QueryBasedHivePublishEntity;
import org.apache.gobblin.data.management.conversion.hive.source.HiveWorkUnit;
import org.apache.gobblin.data.management.copy.hive.HiveDatasetFinder;
import org.apache.gobblin.hive.HiveMetastoreClientPool;
import org.apache.hadoop.fs.Path;
import lombok.extern.slf4j.Slf4j;
/**
 * A base abstract query generator for {@link HiveMaterializer}.
 */
@Slf4j
public abstract class HiveMaterializerQueryGenerator implements QueryGenerator {

  // Filesystem hosting the staging and output data locations.
  protected final FileSystem fs;

  // Destination table metadata deserialized from the work unit.
  protected final StageableTableMetadata outputTableMetadata;
  protected final String outputDatabaseName;
  protected final String outputTableName;
  protected final String outputDataLocation;

  // Staging table written first, then published into the output location.
  protected final String stagingTableName;
  protected final String stagingDataLocation;

  // Metastore entry of the destination table, if it already exists.
  protected final Optional<org.apache.hadoop.hive.metastore.api.Table> destinationTableMeta;

  protected final HiveWorkUnit workUnit;
  protected final HiveMetastoreClientPool pool;
  protected final WorkUnitState workUnitState;

  /**
   * Resolves filesystem, metastore pool, and staging/output table names and locations from the
   * work unit's serialized {@link StageableTableMetadata}.
   */
  public HiveMaterializerQueryGenerator(WorkUnitState workUnitState) throws IOException {
    this.fs = HiveSource.getSourceFs(workUnitState);
    this.pool = HiveMetastoreClientPool.get(workUnitState.getJobState().getProperties(),
        Optional.fromNullable(workUnitState.getJobState().getProp(HiveDatasetFinder.HIVE_METASTORE_URI_KEY)));
    this.workUnitState = workUnitState;
    this.workUnit = new HiveWorkUnit(workUnitState.getWorkunit());
    this.outputTableMetadata = HiveMaterializer.parseStageableTableMetadata(this.workUnit);
    this.outputDatabaseName = outputTableMetadata.getDestinationDbName();
    this.outputTableName = outputTableMetadata.getDestinationTableName();
    this.outputDataLocation = HiveConverterUtils.getOutputDataLocation(outputTableMetadata.getDestinationDataPath());
    this.destinationTableMeta = HiveConverterUtils.getDestinationTableMeta(this.outputTableMetadata.getDestinationDbName(),
        this.outputTableMetadata.getDestinationTableName(), workUnitState.getProperties()).getLeft();
    this.stagingTableName = HiveConverterUtils.getStagingTableName(this.outputTableMetadata.getDestinationStagingTableName());
    this.stagingDataLocation = HiveConverterUtils.getStagingDataLocation(this.outputTableMetadata.getDestinationDataPath(), this.stagingTableName);
  }

  /**
   * Returns hive queries to be run as a part of a hive task.
   * This does not include publish queries.
   */
  @Override
  public abstract List<String> generateQueries();

  /** Creates the parent directory of the staging data location if it does not already exist. */
  protected void ensureParentOfStagingPathExists() {
    try {
      Path parentStagingPath = new Path(this.stagingDataLocation).getParent();
      if (!this.fs.exists(parentStagingPath)) {
        this.fs.mkdirs(parentStagingPath);
      }
    } catch (IOException ioe) {
      // Callers treat a missing staging directory as unrecoverable; fail the task.
      throw new RuntimeException(ioe);
    }
  }

  /**
   * Returns a QueryBasedHivePublishEntity which includes publish level queries and cleanup commands.
   * @return QueryBasedHivePublishEntity
   * @throws DataConversionException
   */
  public abstract QueryBasedHivePublishEntity generatePublishQueries() throws DataConversionException;
}
| 2,516 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/materializer/HiveMaterializerFromEntityQueryGenerator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.materializer;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import org.apache.avro.Schema;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.DataConversionException;
import org.apache.gobblin.data.management.conversion.hive.converter.AbstractAvroToOrcConverter;
import org.apache.gobblin.data.management.conversion.hive.entities.HiveProcessingEntity;
import org.apache.gobblin.data.management.conversion.hive.entities.QueryBasedHivePublishEntity;
import org.apache.gobblin.data.management.conversion.hive.query.HiveAvroORCQueryGenerator;
import org.apache.gobblin.data.management.conversion.hive.source.HiveWorkUnit;
import org.apache.gobblin.data.management.conversion.hive.task.HiveConverterUtils;
import org.apache.gobblin.data.management.copy.hive.HiveDataset;
import org.apache.gobblin.data.management.copy.hive.HiveDatasetFinder;
import org.apache.gobblin.util.AutoReturnableObject;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.thrift.TException;
import com.google.common.base.Optional;
import com.google.common.collect.Maps;
import lombok.extern.slf4j.Slf4j;
/**
* An abstract {@link org.apache.gobblin.data.management.conversion.hive.task.QueryGenerator} containing common methods
* for materializing existing tables / partitions / views.
*/
@Slf4j
public abstract class HiveMaterializerFromEntityQueryGenerator extends HiveMaterializerQueryGenerator {

  // Source (input) table identity, resolved from the metastore.
  protected final String inputDbName;
  protected final String inputTableName;

  // Configuration used to derive the staging partition directory name.
  protected final List<String> sourceDataPathIdentifier;
  protected final String stagingDataPartitionDirName;
  protected final String stagingDataPartitionLocation;

  // DDL info holds partition column types; DML info holds partition column values.
  protected final Map<String, String> partitionsDDLInfo;
  protected final Map<String, String> partitionsDMLInfo;

  // The source table (and optional partition) this generator materializes.
  protected final HiveProcessingEntity conversionEntity;
  protected final Table sourceTable;

  // When false, the output is always published as an unpartitioned snapshot.
  protected final boolean supportTargetPartitioning;

  public HiveMaterializerFromEntityQueryGenerator(WorkUnitState workUnitState, boolean supportTargetPartitioning)
      throws IOException {
    super(workUnitState);
    try {
      this.conversionEntity = getConversionEntity(this.workUnit);
    } catch (TException | HiveException ex) {
      throw new IOException(ex);
    }
    this.sourceTable = this.conversionEntity.getTable();
    this.inputDbName = this.sourceTable.getDbName();
    this.inputTableName = this.sourceTable.getTableName();
    this.sourceDataPathIdentifier = this.outputTableMetadata.getSourceDataPathIdentifier();
    this.stagingDataPartitionDirName = HiveConverterUtils.getStagingDataPartitionDirName(conversionEntity, sourceDataPathIdentifier);
    this.stagingDataPartitionLocation = stagingDataLocation + Path.SEPARATOR + stagingDataPartitionDirName;
    this.partitionsDDLInfo = Maps.newHashMap();
    this.partitionsDMLInfo = Maps.newHashMap();
    // Fills both maps from the conversion entity's partition spec (empty for unpartitioned tables).
    HiveConverterUtils.populatePartitionInfo(conversionEntity, partitionsDDLInfo, partitionsDMLInfo);
    this.supportTargetPartitioning = supportTargetPartitioning;
  }

  /**
   * Looks up the work unit's table (and partition, if named) in the metastore and wraps them in a
   * {@link HiveProcessingEntity}.
   */
  private HiveProcessingEntity getConversionEntity(HiveWorkUnit hiveWorkUnit) throws IOException, TException,
      HiveException {
    try (AutoReturnableObject<IMetaStoreClient> client = this.pool.getClient()) {
      HiveDataset dataset = hiveWorkUnit.getHiveDataset();
      HiveDatasetFinder.DbAndTable dbAndTable = dataset.getDbAndTable();
      Table table = new Table(client.get().getTable(dbAndTable.getDb(), dbAndTable.getTable()));
      Partition partition = null;
      if (hiveWorkUnit.getPartitionName().isPresent()) {
        partition = new Partition(table, client.get()
            .getPartition(dbAndTable.getDb(), dbAndTable.getTable(), hiveWorkUnit.getPartitionName().get()));
      }
      return new HiveProcessingEntity(dataset, table, Optional.fromNullable(partition));
    }
  }

  /**
   * Returns a QueryBasedHivePublishEntity which includes publish level queries and cleanup commands.
   * @return QueryBasedHivePublishEntity
   * @throws DataConversionException
   */
  public QueryBasedHivePublishEntity generatePublishQueries() throws DataConversionException {

    QueryBasedHivePublishEntity publishEntity = new QueryBasedHivePublishEntity();
    List<String> publishQueries = publishEntity.getPublishQueries();
    Map<String, String> publishDirectories = publishEntity.getPublishDirectories();
    List<String> cleanupQueries = publishEntity.getCleanupQueries();
    List<String> cleanupDirectories = publishEntity.getCleanupDirectories();

    // Optionally carry the output Avro schema through to the published table's properties.
    Optional<Schema> avroSchema = Optional.absent();
    if(workUnitState.contains(AbstractAvroToOrcConverter.OUTPUT_AVRO_SCHEMA_KEY)) {
      avroSchema = Optional.fromNullable(new Schema.Parser().parse(workUnitState.getProp(AbstractAvroToOrcConverter.OUTPUT_AVRO_SCHEMA_KEY)));
    }

    // The final table is created as a structural duplicate of the staging table.
    String createFinalTableDDL =
        HiveConverterUtils.generateCreateDuplicateTableDDL(outputDatabaseName, stagingTableName, outputTableName,
            outputDataLocation, Optional.of(outputDatabaseName));
    publishQueries.add(createFinalTableDDL);
    if(avroSchema.isPresent()) {
      String alterSchemaDml = HiveConverterUtils.generateAlterTblPropsDML(outputTableName, Optional.of(outputDatabaseName), avroSchema.get());
      publishQueries.add(alterSchemaDml);
    }
    log.debug("Create final table DDL:\n" + createFinalTableDDL);

    if (!this.supportTargetPartitioning || partitionsDDLInfo.size() == 0) {
      // Unpartitioned (snapshot) publish: move the whole staging directory into place.
      log.debug("Snapshot directory to move: " + stagingDataLocation + " to: " + outputDataLocation);
      publishDirectories.put(stagingDataLocation, outputDataLocation);
      String dropStagingTableDDL = HiveAvroORCQueryGenerator.generateDropTableDDL(outputDatabaseName, stagingTableName);
      log.debug("Drop staging table DDL: " + dropStagingTableDDL);
      cleanupQueries.add(dropStagingTableDDL);
      log.debug("Staging table directory to delete: " + stagingDataLocation);
      cleanupDirectories.add(stagingDataLocation);
    } else {
      // Partitioned publish: move only the staged partition directory, then re-register the partition.
      String finalDataPartitionLocation = outputDataLocation + Path.SEPARATOR + stagingDataPartitionDirName;
      Optional<Path> destPartitionLocation =
          HiveConverterUtils.getDestinationPartitionLocation(destinationTableMeta, this.workUnitState,
              conversionEntity.getPartition().get().getName());
      finalDataPartitionLocation = HiveConverterUtils.updatePartitionLocation(finalDataPartitionLocation, this.workUnitState,
          destPartitionLocation);
      log.debug("Partition directory to move: " + stagingDataPartitionLocation + " to: " + finalDataPartitionLocation);
      publishDirectories.put(stagingDataPartitionLocation, finalDataPartitionLocation);
      // Drop any pre-existing registration of this partition before re-creating it.
      List<String> dropPartitionsDDL =
          HiveAvroORCQueryGenerator.generateDropPartitionsDDL(outputDatabaseName, outputTableName, partitionsDMLInfo);
      log.debug("Drop partitions if exist in final table: " + dropPartitionsDDL);
      publishQueries.addAll(dropPartitionsDDL);
      List<String> createFinalPartitionDDL =
          HiveAvroORCQueryGenerator.generateCreatePartitionDDL(outputDatabaseName, outputTableName,
              finalDataPartitionLocation, partitionsDMLInfo, Optional.<String>absent());
      log.debug("Create final partition DDL: " + createFinalPartitionDDL);
      publishQueries.addAll(createFinalPartitionDDL);
      String dropStagingTableDDL =
          HiveAvroORCQueryGenerator.generateDropTableDDL(outputDatabaseName, stagingTableName);
      log.debug("Drop staging table DDL: " + dropStagingTableDDL);
      cleanupQueries.add(dropStagingTableDDL);
      log.debug("Staging table directory to delete: " + stagingDataLocation);
      cleanupDirectories.add(stagingDataLocation);
      publishQueries.addAll(HiveAvroORCQueryGenerator.generateDropPartitionsDDL(outputDatabaseName, outputTableName,
          AbstractAvroToOrcConverter.getDropPartitionsDDLInfo(conversionEntity)));
    }
    log.info("Publish partition entity: " + publishEntity);
    return publishEntity;
  }
}
| 2,517 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/materializer/CopyTableQueryGenerator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.materializer;
import java.io.IOException;
import java.util.List;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.data.management.conversion.hive.events.EventWorkunitUtils;
import org.apache.gobblin.data.management.conversion.hive.task.HiveConverterUtils;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import lombok.extern.slf4j.Slf4j;
/**
 * A {@link org.apache.gobblin.data.management.conversion.hive.task.QueryGenerator} that produces the Hive
 * statements needed to copy an input table / partition verbatim into a staging table.
 */
@Slf4j
public class CopyTableQueryGenerator extends HiveMaterializerFromEntityQueryGenerator {

  public CopyTableQueryGenerator(WorkUnitState workUnitState) throws IOException {
    super(workUnitState, true);
  }

  /**
   * Builds the non-publish Hive statements for this task: a session setting, the staging-table
   * DDL, and the DML that copies the source data into the staging table.
   *
   * @return the ordered list of Hive queries to execute
   */
  @Override
  public List<String> generateQueries() {
    ensureParentOfStagingPathExists();

    List<String> queries = Lists.newArrayList();

    // Dynamic-partition inserts via a plain SELECT * require nonstrict mode; otherwise every
    // partition value (and, since Hive treats partitions as virtual columns, every column name)
    // would have to be written out explicitly in the copy query.
    queries.add("SET hive.exec.dynamic.partition.mode=nonstrict");

    Preconditions.checkNotNull(this.workUnit, "Workunit must not be null");
    EventWorkunitUtils.setBeginDDLBuildTimeMetadata(this.workUnit, System.currentTimeMillis());

    HiveConverterUtils.createStagingDirectory(fs, outputTableMetadata.getDestinationDataPath(),
        conversionEntity, this.workUnitState);

    // DDL declaring the staging table as an exact structural duplicate of the input table.
    String stagingTableDdl = HiveConverterUtils.generateCreateDuplicateTableDDL(
        inputDbName,
        inputTableName,
        stagingTableName,
        stagingDataLocation,
        Optional.of(outputDatabaseName));
    queries.add(stagingTableDdl);
    log.debug("Create staging table DDL:\n" + stagingTableDdl);

    // DML copying the requested partitions (or the whole table) into the staging table.
    String stagingCopyDml = HiveConverterUtils.generateTableCopy(
        inputTableName,
        stagingTableName,
        conversionEntity.getTable().getDbName(),
        outputDatabaseName,
        Optional.of(partitionsDMLInfo));
    queries.add(stagingCopyDml);
    log.debug("Conversion staging DML: " + stagingCopyDml);

    log.info("Conversion Queries {}\n", queries);
    EventWorkunitUtils.setEndDDLBuildTimeMetadata(workUnit, System.currentTimeMillis());
    return queries;
  }
}
| 2,518 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/materializer/QueryBasedMaterializerQueryGenerator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.materializer;
import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.DataConversionException;
import org.apache.gobblin.data.management.conversion.hive.entities.QueryBasedHivePublishEntity;
import org.apache.gobblin.data.management.conversion.hive.query.HiveAvroORCQueryGenerator;
import org.apache.gobblin.data.management.conversion.hive.task.HiveConverterUtils;
import org.apache.gobblin.data.management.copy.hive.HiveDatasetFinder;
import com.google.common.base.Optional;
import com.google.common.collect.Lists;
import lombok.extern.slf4j.Slf4j;
/**
* A {@link org.apache.gobblin.data.management.conversion.hive.task.QueryGenerator} to materialize the result of a Hive
* query.
*/
@Slf4j
public class QueryBasedMaterializerQueryGenerator extends HiveMaterializerQueryGenerator {
private final String sourceQuery;
private final HiveConverterUtils.StorageFormat storageFormat;
public QueryBasedMaterializerQueryGenerator(WorkUnitState workUnitState) throws IOException {
super(workUnitState);
this.sourceQuery = workUnitState.getProp(HiveMaterializer.QUERY_RESULT_TO_MATERIALIZE_KEY);
this.storageFormat = HiveConverterUtils.StorageFormat.valueOf(workUnitState.getProp(HiveMaterializer.STORAGE_FORMAT_KEY));
}
@Override
public List<String> generateQueries() {
ensureParentOfStagingPathExists();
return Lists.newArrayList(HiveConverterUtils.generateStagingCTASStatement(
new HiveDatasetFinder.DbAndTable(this.outputDatabaseName, this.stagingTableName),
this.sourceQuery,
this.storageFormat,
this.stagingDataLocation));
}
@Override
public QueryBasedHivePublishEntity generatePublishQueries() throws DataConversionException {
QueryBasedHivePublishEntity publishEntity = new QueryBasedHivePublishEntity();
List<String> publishQueries = publishEntity.getPublishQueries();
Map<String, String> publishDirectories = publishEntity.getPublishDirectories();
List<String> cleanupQueries = publishEntity.getCleanupQueries();
List<String> cleanupDirectories = publishEntity.getCleanupDirectories();
String createFinalTableDDL =
HiveConverterUtils.generateCreateDuplicateTableDDL(outputDatabaseName, stagingTableName, outputTableName,
outputDataLocation, Optional.of(outputDatabaseName));
publishQueries.add(createFinalTableDDL);
log.debug("Create final table DDL:\n" + createFinalTableDDL);
log.debug("Snapshot directory to move: " + stagingDataLocation + " to: " + outputDataLocation);
publishDirectories.put(stagingDataLocation, outputDataLocation);
String dropStagingTableDDL = HiveAvroORCQueryGenerator.generateDropTableDDL(outputDatabaseName, stagingTableName);
log.debug("Drop staging table DDL: " + dropStagingTableDDL);
cleanupQueries.add(dropStagingTableDDL);
log.debug("Staging table directory to delete: " + stagingDataLocation);
cleanupDirectories.add(stagingDataLocation);
publishQueries.addAll(HiveAvroORCQueryGenerator.generateDropPartitionsDDL(outputDatabaseName, outputTableName,
new HashMap<>()));
log.info("Publish partition entity: " + publishEntity);
return publishEntity;
}
}
| 2,519 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/utils/LineageUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.utils;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.data.management.conversion.hive.dataset.ConvertibleHiveDataset;
import org.apache.gobblin.data.management.conversion.hive.source.HiveWorkUnit;
import org.apache.gobblin.data.management.conversion.hive.watermarker.PartitionLevelWatermarker;
import org.apache.gobblin.data.management.copy.hive.HiveDataset;
import org.apache.gobblin.source.workunit.WorkUnit;
/**
 * Utility functions for tracking lineage in hive conversion workflows
 */
public class LineageUtils {

  private LineageUtils() {
    // utility class; not meant to be instantiated
  }

  /**
   * Decides whether lineage info should be recorded for the given work unit.
   * Watermark-only work units never get lineage; otherwise lineage is set only when the
   * work unit's dataset is a {@link ConvertibleHiveDataset}.
   */
  public static boolean shouldSetLineageInfo(WorkUnit workUnit) {
    HiveWorkUnit hiveWorkUnit = new HiveWorkUnit(workUnit);
    // Short-circuit evaluation preserves the original behavior: the dataset is only
    // deserialized and inspected for non-watermark work units.
    return !hiveWorkUnit.getPropAsBoolean(PartitionLevelWatermarker.IS_WATERMARK_WORKUNIT_KEY, false)
        && hiveWorkUnit.getHiveDataset() instanceof ConvertibleHiveDataset;
  }

  /** Convenience overload delegating to {@link #shouldSetLineageInfo(WorkUnit)}. */
  public static boolean shouldSetLineageInfo(WorkUnitState workUnitState) {
    return shouldSetLineageInfo(workUnitState.getWorkunit());
  }
}
| 2,520 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/utils/AvroHiveTypeUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.utils;
import com.linkedin.avroutil1.compatibility.AvroCompatibilityHelper;
import java.util.List;
import java.util.Map;
import org.apache.avro.AvroRuntimeException;
import org.apache.avro.LogicalType;
import org.apache.avro.LogicalTypes;
import org.apache.avro.Schema;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hive.serde2.avro.AvroSerDe;
import org.apache.hadoop.hive.serde2.avro.AvroSerdeException;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.util.HiveAvroTypeConstants;
/**
 * Utility class that deals with type conversion between Avro and Hive.
 */
@Slf4j
public class AvroHiveTypeUtils {

  // Utility class: no instances.
  private AvroHiveTypeUtils() {
  }

  /**
   * Recursively renders an Avro {@link Schema} as the corresponding Hive column type string
   * (e.g. {@code struct<...>}, {@code array<...>}, {@code decimal(p, s)}).
   *
   * @param schema the Avro schema; must be of type RECORD when {@code topLevel} is true
   * @param hiveColumns optional map that, when present, is populated (as a side effect) with
   *        top-level column-name to hive-type entries
   * @param topLevel whether {@code schema} is the table-level record schema
   * @param datasetName dataset name, used only in error messages
   * @return the Hive type string for {@code schema}
   * @throws IllegalArgumentException if {@code topLevel} is true and the schema is not a RECORD
   * @throws AvroRuntimeException for unsupported Avro schema types
   */
  public static String generateAvroToHiveColumnMapping(Schema schema, Optional<Map<String, String>> hiveColumns,
      boolean topLevel, String datasetName) {
    if (topLevel && !schema.getType().equals(Schema.Type.RECORD)) {
      throw new IllegalArgumentException(String
          .format("Schema for table must be of type RECORD. Received type: %s for dataset %s", schema.getType(),
              datasetName));
    }
    StringBuilder columns = new StringBuilder();
    boolean isFirst;
    switch (schema.getType()) {
      case RECORD:
        isFirst = true;
        if (topLevel) {
          // Top level: one "`name` type COMMENT ..." entry per field, comma/newline separated.
          for (Schema.Field field : schema.getFields()) {
            if (isFirst) {
              isFirst = false;
            } else {
              columns.append(", \n");
            }
            String type = generateAvroToHiveColumnMapping(field.schema(), hiveColumns, false, datasetName);
            if (hiveColumns.isPresent()) {
              // Side effect: record the top-level column-name -> hive-type mapping for the caller.
              hiveColumns.get().put(field.name(), type);
            }
            // "flatten_source" records the original field path before schema flattening; fall back
            // to the plain field name when the property is absent or blank.
            String flattenSource = field.getProp("flatten_source");
            if (StringUtils.isBlank(flattenSource)) {
              flattenSource = field.name();
            }
            columns
                .append(String.format(" `%s` %s COMMENT 'from flatten_source %s'", field.name(), type, flattenSource));
          }
        } else {
          // Nested record maps to a Hive struct<`name`:type,...>.
          columns.append(HiveAvroTypeConstants.AVRO_TO_HIVE_COLUMN_MAPPING_V_12.get(schema.getType())).append("<");
          for (Schema.Field field : schema.getFields()) {
            if (isFirst) {
              isFirst = false;
            } else {
              columns.append(",");
            }
            String type = generateAvroToHiveColumnMapping(field.schema(), hiveColumns, false, datasetName);
            columns.append("`").append(field.name()).append("`").append(":").append(type);
          }
          columns.append(">");
        }
        break;
      case UNION:
        // A [null, T] (or [T, null]) union is an "option" and maps directly to T's hive type;
        // any other union maps to a Hive union type with NULL members skipped.
        Optional<Schema> optionalType = isOfOptionType(schema);
        if (optionalType.isPresent()) {
          Schema optionalTypeSchema = optionalType.get();
          columns.append(generateAvroToHiveColumnMapping(optionalTypeSchema, hiveColumns, false, datasetName));
        } else {
          columns.append(HiveAvroTypeConstants.AVRO_TO_HIVE_COLUMN_MAPPING_V_12.get(schema.getType())).append("<");
          isFirst = true;
          for (Schema unionMember : schema.getTypes()) {
            if (Schema.Type.NULL.equals(unionMember.getType())) {
              continue;
            }
            if (isFirst) {
              isFirst = false;
            } else {
              columns.append(",");
            }
            columns.append(generateAvroToHiveColumnMapping(unionMember, hiveColumns, false, datasetName));
          }
          columns.append(">");
        }
        break;
      case MAP:
        // Avro map keys are always strings; only the value type needs recursion.
        columns.append(HiveAvroTypeConstants.AVRO_TO_HIVE_COLUMN_MAPPING_V_12.get(schema.getType())).append("<");
        columns.append("string,")
            .append(generateAvroToHiveColumnMapping(schema.getValueType(), hiveColumns, false, datasetName));
        columns.append(">");
        break;
      case ARRAY:
        columns.append(HiveAvroTypeConstants.AVRO_TO_HIVE_COLUMN_MAPPING_V_12.get(schema.getType())).append("<");
        columns.append(generateAvroToHiveColumnMapping(schema.getElementType(), hiveColumns, false, datasetName));
        columns.append(">");
        break;
      case NULL:
        // A bare NULL schema contributes nothing to the type string.
        break;
      case BYTES:
      case DOUBLE:
      case ENUM:
      case FIXED:
      case FLOAT:
      case INT:
      case LONG:
      case STRING:
      case BOOLEAN:
        // Handling Avro Logical Types which should always sit in leaf-level.
        boolean isLogicalTypeSet = false;
        try {
          // Hive-specific logical types (e.g. varchar) take precedence over Avro logical types.
          String hiveSpecificLogicalType = generateHiveSpecificLogicalType(schema);
          if (StringUtils.isNoneEmpty(hiveSpecificLogicalType)) {
            isLogicalTypeSet = true;
            columns.append(hiveSpecificLogicalType);
            break;
          }
        } catch (AvroSerdeException ae) {
          // Best effort: log and fall through to Avro logical types / the physical type.
          log.error("Failed to generate logical type string for field" + schema.getName() + " due to:", ae);
        }
        LogicalType logicalType = LogicalTypes.fromSchemaIgnoreInvalid(schema);
        if (logicalType != null) {
          switch (logicalType.getName().toLowerCase()) {
            case HiveAvroTypeConstants.DATE:
              LogicalTypes.Date dateType = (LogicalTypes.Date) logicalType;
              dateType.validate(schema);
              columns.append("date");
              isLogicalTypeSet = true;
              break;
            case HiveAvroTypeConstants.DECIMAL:
              LogicalTypes.Decimal decimalType = (LogicalTypes.Decimal) logicalType;
              decimalType.validate(schema);
              columns.append(String.format("decimal(%s, %s)", decimalType.getPrecision(), decimalType.getScale()));
              isLogicalTypeSet = true;
              break;
            case HiveAvroTypeConstants.TIME_MILLIS:
              // NOTE(review): time-millis is mapped to Hive "timestamp" here — presumably
              // intentional, since Hive has no time-of-day type; confirm with consumers.
              LogicalTypes.TimeMillis timeMillsType = (LogicalTypes.TimeMillis) logicalType;
              timeMillsType.validate(schema);
              columns.append("timestamp");
              isLogicalTypeSet = true;
              break;
            default:
              log.error("Unsupported logical type" + schema.getLogicalType().getName() + ", fallback to physical type");
          }
        }
        if (!isLogicalTypeSet) {
          // No recognized logical type: use the plain physical-type mapping.
          columns.append(HiveAvroTypeConstants.AVRO_TO_HIVE_COLUMN_MAPPING_V_12.get(schema.getType()));
        }
        break;
      default:
        String exceptionMessage =
            String.format("DDL query generation failed for \"%s\" of dataset %s", schema, datasetName);
        log.error(exceptionMessage);
        throw new AvroRuntimeException(exceptionMessage);
    }
    return columns.toString();
  }

  /**
   * Referencing org.apache.hadoop.hive.serde2.avro.SchemaToTypeInfo#generateTypeInfo(org.apache.avro.Schema) on
   * how to deal with logical types that supported by Hive but not by Avro(e.g. VARCHAR).
   *
   * If unsupported logical types found, return empty string as a result.
   * @param schema Avro schema
   * @return the Hive type string (e.g. {@code varchar(n)}), or the empty string if none applies
   * @throws AvroSerdeException if the varchar maxLength property cannot be read from the schema
   */
  public static String generateHiveSpecificLogicalType(Schema schema)
      throws AvroSerdeException {
    // For bytes type, it can be mapped to decimal.
    Schema.Type type = schema.getType();
    // Only string + logicalType=varchar is handled here; everything else returns "".
    if (type == Schema.Type.STRING && AvroSerDe.VARCHAR_TYPE_NAME
        .equalsIgnoreCase(schema.getProp(AvroSerDe.AVRO_PROP_LOGICAL_TYPE))) {
      int maxLength = 0;
      try {
        maxLength = Integer.parseInt(AvroCompatibilityHelper.getSchemaPropAsJsonString(schema,
            AvroSerDe.AVRO_PROP_MAX_LENGTH, false, false));
      } catch (Exception ex) {
        throw new AvroSerdeException("Failed to obtain maxLength value from file schema: " + schema, ex);
      }
      return String.format("varchar(%s)", maxLength);
    } else {
      return StringUtils.EMPTY;
    }
  }

  /***
   * Check if the Avro Schema is of type OPTION
   * ie. [null, TYPE] or [TYPE, null]
   * @param schema Avro Schema to check
   * @return Optional Avro Typed data if schema is of type OPTION
   */
  private static Optional<Schema> isOfOptionType(Schema schema) {
    Preconditions.checkNotNull(schema);
    // If not of type UNION, cant be an OPTION
    if (!Schema.Type.UNION.equals(schema.getType())) {
      return Optional.<Schema>absent();
    }
    // If has more than two members, can't be an OPTION
    List<Schema> types = schema.getTypes();
    if (null != types && types.size() == 2) {
      Schema first = types.get(0);
      Schema second = types.get(1);
      // One member should be of type NULL and other of non NULL type
      if (Schema.Type.NULL.equals(first.getType()) && !Schema.Type.NULL.equals(second.getType())) {
        return Optional.of(second);
      } else if (!Schema.Type.NULL.equals(first.getType()) && Schema.Type.NULL.equals(second.getType())) {
        return Optional.of(first);
      }
    }
    return Optional.<Schema>absent();
  }
}
| 2,521 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/extractor/HiveBaseExtractorFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.extractor;
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.thrift.TException;
import org.apache.gobblin.configuration.WorkUnitState;
/**
 * Factory interface for {@link HiveBaseExtractor}
 */
public interface HiveBaseExtractorFactory {

  /**
   * Creates a {@link HiveBaseExtractor} for the given work unit.
   *
   * @param state the work unit state describing the hive entity to extract
   * @param sourceFs the filesystem on which the source data resides
   * @return a new extractor instance
   * @throws IOException on filesystem errors
   * @throws TException on hive metastore thrift errors
   * @throws HiveException on hive-side errors
   */
  HiveBaseExtractor createExtractor(WorkUnitState state, FileSystem sourceFs)
      throws IOException, TException, HiveException;
}
| 2,522 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/extractor/HiveBaseExtractor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.extractor;
import java.io.IOException;
import com.google.common.base.Optional;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.data.management.conversion.hive.source.HiveWorkUnit;
import org.apache.gobblin.data.management.conversion.hive.watermarker.PartitionLevelWatermarker;
import org.apache.gobblin.data.management.copy.hive.HiveDataset;
import org.apache.gobblin.data.management.copy.hive.HiveDatasetFinder;
import org.apache.gobblin.hive.HiveMetastoreClientPool;
import org.apache.gobblin.source.extractor.Extractor;
/**
 * Base {@link Extractor} for extracting from {@link org.apache.gobblin.data.management.conversion.hive.source.HiveSource}
 */
public abstract class HiveBaseExtractor<S, D> implements Extractor<S, D> {

  protected HiveWorkUnit hiveWorkUnit;
  protected HiveDataset hiveDataset;
  protected String dbName;
  protected String tableName;
  protected HiveMetastoreClientPool pool;

  /**
   * Initializes hive work unit, dataset, db/table names and the metastore client pool.
   * Watermark-only work units carry no hive entity, so all fields are left unset for them.
   *
   * @param state work unit state carrying the serialized hive work unit
   * @throws IOException on failure to deserialize the hive work unit
   */
  public HiveBaseExtractor(WorkUnitState state) throws IOException {
    // getPropAsBoolean returns a primitive boolean; the previous Boolean.valueOf(...) wrapper
    // only added a redundant box/unbox cycle.
    if (state.getPropAsBoolean(PartitionLevelWatermarker.IS_WATERMARK_WORKUNIT_KEY)) {
      return;
    }
    this.hiveWorkUnit = new HiveWorkUnit(state.getWorkunit());
    this.hiveDataset = hiveWorkUnit.getHiveDataset();
    this.dbName = hiveDataset.getDbAndTable().getDb();
    this.tableName = hiveDataset.getDbAndTable().getTable();
    // Pool lookup is keyed by the (optional) metastore URI from the job state.
    this.pool = HiveMetastoreClientPool.get(state.getJobState().getProperties(),
        Optional.fromNullable(state.getJobState().getProp(HiveDatasetFinder.HIVE_METASTORE_URI_KEY)));
  }

  /** A whole table/partition counts as exactly one record. */
  @Override
  public long getExpectedRecordCount() {
    return 1;
  }

  /**
   * Watermark is not managed by this extractor.
   */
  @Override
  public long getHighWatermark() {
    return 0;
  }

  /** No per-extractor resources to release. */
  @Override
  public void close() throws IOException {}
}
| 2,523 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/extractor/HiveConvertExtractor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.extractor;
import java.io.IOException;
import java.util.List;
import lombok.extern.slf4j.Slf4j;
import org.apache.avro.Schema;
import org.apache.avro.Schema.Type;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.thrift.TException;
import com.google.common.base.Optional;
import com.google.common.collect.Lists;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.data.management.conversion.hive.avro.AvroSchemaManager;
import org.apache.gobblin.data.management.conversion.hive.dataset.ConvertibleHiveDataset;
import org.apache.gobblin.data.management.conversion.hive.entities.QueryBasedHiveConversionEntity;
import org.apache.gobblin.data.management.conversion.hive.entities.SchemaAwareHivePartition;
import org.apache.gobblin.data.management.conversion.hive.entities.SchemaAwareHiveTable;
import org.apache.gobblin.data.management.conversion.hive.watermarker.PartitionLevelWatermarker;
import org.apache.gobblin.source.extractor.DataRecordException;
import org.apache.gobblin.util.AutoReturnableObject;
/**
 * <p>
 * Extracts {@link QueryBasedHiveConversionEntity}s. A {@link QueryBasedHiveConversionEntity} can represent a
 * hive table or a hive partition. Note that this extractor does not extract rows of a partition or a table. Entire
 * table or partition is considered as a record.
 * </p>
 * <p>
 * From the {@link WorkUnitState} this extractor deserializes the {@link SerializableHiveTable} and optionally a {@link SerializableHivePartition}.
 * For these {@link SerializableHiveTable} and {@link SerializableHivePartition}'s the extractor makes a call to the Hive metastore
 * to get the corresponding hive {@link org.apache.hadoop.hive.ql.metadata.Table} and hive {@link org.apache.hadoop.hive.ql.metadata.Partition}
 * </p>
 */
@Slf4j
public class HiveConvertExtractor extends HiveBaseExtractor<Schema, QueryBasedHiveConversionEntity> {

  // Holds at most one entity; readRecord() drains it so the second call returns null (end of data).
  private List<QueryBasedHiveConversionEntity> conversionEntities = Lists.newArrayList();

  public HiveConvertExtractor(WorkUnitState state, FileSystem fs) throws IOException, TException, HiveException {
    super(state);
    // getPropAsBoolean returns a primitive boolean; the previous Boolean.valueOf(...) wrapper
    // only added a redundant box/unbox cycle.
    if (state.getPropAsBoolean(PartitionLevelWatermarker.IS_WATERMARK_WORKUNIT_KEY)) {
      log.info("Ignoring Watermark workunit for {}", state.getProp(ConfigurationKeys.DATASET_URN_KEY));
      return;
    }
    if (!(this.hiveDataset instanceof ConvertibleHiveDataset)) {
      throw new IllegalStateException("HiveConvertExtractor is only compatible with ConvertibleHiveDataset");
    }
    ConvertibleHiveDataset convertibleHiveDataset = (ConvertibleHiveDataset) this.hiveDataset;
    // Resolve the live table (and optionally partition) from the metastore, attaching the avro
    // schemas read from the schema URLs carried in the work unit.
    try (AutoReturnableObject<IMetaStoreClient> client = this.pool.getClient()) {
      Table table = client.get().getTable(this.dbName, this.tableName);
      SchemaAwareHiveTable schemaAwareHiveTable = new SchemaAwareHiveTable(table, AvroSchemaManager.getSchemaFromUrl(this.hiveWorkUnit.getTableSchemaUrl(), fs));
      SchemaAwareHivePartition schemaAwareHivePartition = null;
      // Partition is only resolved when both its name and its schema URL are present.
      if (this.hiveWorkUnit.getPartitionName().isPresent() && this.hiveWorkUnit.getPartitionSchemaUrl().isPresent()) {
        Partition partition = client.get().getPartition(this.dbName, this.tableName, this.hiveWorkUnit.getPartitionName().get());
        schemaAwareHivePartition =
            new SchemaAwareHivePartition(table, partition, AvroSchemaManager.getSchemaFromUrl(this.hiveWorkUnit.getPartitionSchemaUrl().get(), fs));
      }
      QueryBasedHiveConversionEntity entity =
          new QueryBasedHiveConversionEntity(convertibleHiveDataset, schemaAwareHiveTable, Optional.fromNullable(schemaAwareHivePartition));
      this.conversionEntities.add(entity);
    }
  }

  /**
   * Returns the avro schema of the table being converted, or a NULL schema for watermark-only
   * work units (which carry no entity).
   */
  @Override
  public Schema getSchema() throws IOException {
    if (this.conversionEntities.isEmpty()) {
      return Schema.create(Type.NULL);
    }
    QueryBasedHiveConversionEntity conversionEntity = this.conversionEntities.get(0);
    return conversionEntity.getHiveTable().getAvroSchema();
  }

  /**
   * There is only one record ({@link QueryBasedHiveConversionEntity}) to be read. This {@link QueryBasedHiveConversionEntity} is
   * removed from {@link #conversionEntities} list after it is read. So when gobblin runtime calls this method the second time, it returns a null
   */
  @Override
  public QueryBasedHiveConversionEntity readRecord(QueryBasedHiveConversionEntity reuse) throws DataRecordException, IOException {
    if (this.conversionEntities.isEmpty()) {
      return null;
    }
    return this.conversionEntities.remove(0);
  }
}
| 2,524 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/extractor/HiveConvertExtractorFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.extractor;
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.thrift.TException;
import org.apache.gobblin.configuration.WorkUnitState;
/**
* Factory for {@link HiveConvertExtractor}
*/
public class HiveConvertExtractorFactory implements HiveBaseExtractorFactory {
public HiveBaseExtractor createExtractor(WorkUnitState state, FileSystem sourceFs)
throws IOException, TException, HiveException {
return new HiveConvertExtractor(state, sourceFs);
}
}
| 2,525 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/writer/HiveQueryExecutionWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.writer;
import java.io.IOException;
import java.sql.SQLException;
import java.util.Arrays;
import java.util.List;
import org.apache.commons.lang3.StringUtils;
import com.google.common.base.Optional;
import lombok.AllArgsConstructor;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.data.management.conversion.hive.dataset.ConvertibleHiveDataset;
import org.apache.gobblin.data.management.conversion.hive.entities.QueryBasedHiveConversionEntity;
import org.apache.gobblin.data.management.conversion.hive.entities.SchemaAwareHivePartition;
import org.apache.gobblin.data.management.conversion.hive.events.EventWorkunitUtils;
import org.apache.gobblin.data.management.conversion.hive.publisher.HiveConvertPublisher;
import org.apache.gobblin.util.HiveJdbcConnector;
import org.apache.gobblin.writer.DataWriter;
import lombok.extern.slf4j.Slf4j;
/**
* The {@link HiveQueryExecutionWriter} is responsible for running the hive query available at
* {@link QueryBasedHiveConversionEntity#getConversionQuery()}
*/
@Slf4j
@AllArgsConstructor
public class HiveQueryExecutionWriter implements DataWriter<QueryBasedHiveConversionEntity> {
private final HiveJdbcConnector hiveJdbcConnector;
private final State workUnit;
private static final String AT_CHAR = "@";
  /**
   * Runs the conversion queries of the given entity against Hive over JDBC, recording
   * begin/end DDL-execution timestamps in the work unit for event reporting.
   *
   * @param hiveConversionEntity entity whose {@code getQueries()} statements are executed in order
   * @throws IOException wrapping any {@link SQLException}, with every attempted query in the message
   */
  @Override
  public void write(QueryBasedHiveConversionEntity hiveConversionEntity) throws IOException {
    List<String> conversionQueries = null;
    try {
      conversionQueries = hiveConversionEntity.getQueries();
      EventWorkunitUtils.setBeginConversionDDLExecuteTimeMetadata(this.workUnit, System.currentTimeMillis());
      this.hiveJdbcConnector.executeStatements(conversionQueries.toArray(new String[conversionQueries.size()]));
      // Adding properties for preserving partitionParams:
      addPropsForPublisher(hiveConversionEntity);
      EventWorkunitUtils.setEndConversionDDLExecuteTimeMetadata(this.workUnit, System.currentTimeMillis());
    } catch (SQLException e) {
      // Build a message that identifies the failed partition/table and lists every attempted
      // query, then surface it as an IOException with the SQLException preserved as the cause.
      StringBuilder sb = new StringBuilder();
      sb.append(String.format("Failed to execute queries for %s: ",
          hiveConversionEntity.getPartition().isPresent() ? hiveConversionEntity.getPartition().get().getCompleteName()
              : hiveConversionEntity.getTable().getCompleteName()));
      for (String conversionQuery : conversionQueries) {
        sb.append("\nConversion query attempted by Hive Query writer: ");
        sb.append(conversionQuery);
      }
      String message = sb.toString();
      log.warn(message);
      throw new IOException(message, e);
    }
  }
/**
* Method to add properties needed by publisher to preserve partition params
*/
private void addPropsForPublisher(QueryBasedHiveConversionEntity hiveConversionEntity) {
if (!hiveConversionEntity.getPartition().isPresent()) {
return;
}
ConvertibleHiveDataset convertibleHiveDataset = hiveConversionEntity.getConvertibleHiveDataset();
for (String format : convertibleHiveDataset.getDestFormats()) {
Optional<ConvertibleHiveDataset.ConversionConfig> conversionConfigForFormat =
convertibleHiveDataset.getConversionConfigForFormat(format);
if (!conversionConfigForFormat.isPresent()) {
continue;
}
SchemaAwareHivePartition sourcePartition = hiveConversionEntity.getHivePartition().get();
// Get complete source partition name dbName@tableName@partitionName
String completeSourcePartitionName = StringUtils.join(Arrays
.asList(sourcePartition.getTable().getDbName(), sourcePartition.getTable().getTableName(),
sourcePartition.getName()), AT_CHAR);
ConvertibleHiveDataset.ConversionConfig config = conversionConfigForFormat.get();
// Get complete destination partition name dbName@tableName@partitionName
String completeDestPartitionName = StringUtils.join(
Arrays.asList(config.getDestinationDbName(), config.getDestinationTableName(), sourcePartition.getName()),
AT_CHAR);
workUnit.setProp(HiveConvertPublisher.COMPLETE_SOURCE_PARTITION_NAME, completeSourcePartitionName);
workUnit.setProp(HiveConvertPublisher.COMPLETE_DEST_PARTITION_NAME, completeDestPartitionName);
}
}
@Override
public void commit() throws IOException {}
@Override
public void close() throws IOException {
this.hiveJdbcConnector.close();
}
@Override
public void cleanup() throws IOException {}
@Override
public long recordsWritten() {
return 0;
}
@Override
public long bytesWritten() throws IOException {
return 0;
}
}
| 2,526 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/writer/HiveQueryWriterBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.writer;
import org.apache.gobblin.data.management.conversion.hive.entities.QueryBasedHiveConversionEntity;
import java.io.IOException;
import java.sql.SQLException;
import org.apache.avro.Schema;
import org.apache.gobblin.util.HiveJdbcConnector;
import org.apache.gobblin.writer.DataWriter;
import org.apache.gobblin.writer.DataWriterBuilder;
/**
 * A {@link DataWriterBuilder} that builds {@link HiveQueryExecutionWriter}s, the writers that
 * execute the Hive conversion queries carried by a {@link QueryBasedHiveConversionEntity}.
 */
public class HiveQueryWriterBuilder extends DataWriterBuilder<Schema, QueryBasedHiveConversionEntity>{
  @Override
  public DataWriter<QueryBasedHiveConversionEntity> build() throws IOException {
    try {
      // The writer takes ownership of the JDBC connector created here and closes it in
      // HiveQueryExecutionWriter#close().
      return new HiveQueryExecutionWriter(HiveJdbcConnector.newConnectorWithProps(this.destination.getProperties().getProperties()),
          this.destination.getProperties());
    } catch (SQLException e) {
      // build() cannot declare SQLException; surface connector-creation failures as unchecked.
      throw new RuntimeException(e);
    }
  }
}
| 2,527 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/avro/SchemaNotFoundException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.avro;
/**
 * Thrown to indicate that an Avro schema could not be located for a Hive entity.
 */
public class SchemaNotFoundException extends RuntimeException {

  private static final long serialVersionUID = 1L;

  /** Creates an exception with neither detail message nor cause. */
  public SchemaNotFoundException() {
    super();
  }

  /** Creates an exception with the given detail message. */
  public SchemaNotFoundException(String message) {
    super(message);
  }

  /** Creates an exception with the given detail message and cause. */
  public SchemaNotFoundException(String message, Throwable cause) {
    super(message, cause);
  }

  /** Creates an exception with the given cause; the message is derived from the cause. */
  public SchemaNotFoundException(Throwable cause) {
    super(cause);
  }
}
| 2,528 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/avro/AvroSchemaManager.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.avro;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import lombok.extern.slf4j.Slf4j;
import org.apache.avro.Schema;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.serde2.avro.TypeInfoToSchema;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
import com.google.common.base.Preconditions;
import com.google.common.collect.Maps;
import com.google.common.hash.Hashing;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.data.management.conversion.hive.query.HiveAvroORCQueryGenerator;
import org.apache.gobblin.hive.avro.HiveAvroSerDeManager;
import org.apache.gobblin.util.AvroUtils;
import org.apache.gobblin.util.HadoopUtils;
/**
* Avro schema for a {@link Partition} or {@link Table} is available at multiple locations. This class is used to decide
* the schema to use. It also creates a temporary schema file on the {@link FileSystem}.
*
* <ul>
* 1. The {@link Schema} can be set as a literal in the serde info<br>
* 2. The {@link Schema} url can set as a property in the serde info<br>
* 3. The {@link Schema} can be inferred using the physical data location of the {@link Table} or {@link Partition}<br>
* </ul>
*
* Callers request for the schema url using {@link #getSchemaUrl(Partition)} or {@link #getSchemaUrl(Table)}.
*<ul>
* In case (1.), the literal is written as a {@link Schema} file under {@link #schemaDir}. The {@link Path} to this file
* is uses as the {@link Schema} url<br>
* In case (2.), the url itself is used as the {@link Schema} url<br>
* In case (3.), a {@link Schema} file is created under {@link #schemaDir} for {@link Schema} of latest data file.<br>
*</ul>
*
* In all three cases the mapping of {@link Schema} to temporary Schema file path is cached.
* If multiple {@link Partition}s have the same {@link Schema} a duplicate schema file in not created. Already existing
* {@link Schema} url for this {@link Schema} is used.
*/
@Slf4j
public class AvroSchemaManager {

  private static final String HIVE_SCHEMA_TEMP_DIR_PATH_KEY = "hive.schema.dir";
  private static final String DEFAULT_HIVE_SCHEMA_TEMP_DIR_PATH_KEY = "/tmp/gobblin_schemas";

  private final FileSystem fs;

  /**
   * A mapping of a {@link Schema}'s SHA-256 hash to the {@link Path} of its materialized
   * schema file on {@link FileSystem}.
   */
  private final Map<String, Path> schemaPaths;

  /**
   * A temporary directory to hold all Schema files. The path is job id specific.
   * Deleting it will not affect other job executions.
   */
  private final Path schemaDir;

  public AvroSchemaManager(FileSystem fs, State state) {
    this.fs = fs;
    this.schemaPaths = Maps.newHashMap();
    this.schemaDir = new Path(state.getProp(HIVE_SCHEMA_TEMP_DIR_PATH_KEY, DEFAULT_HIVE_SCHEMA_TEMP_DIR_PATH_KEY),
        state.getProp(ConfigurationKeys.JOB_ID_KEY));
  }

  /**
   * Get the url to <code>table</code>'s avro {@link Schema} file.
   *
   * @param table whose avro schema is to be returned
   * @return a {@link Path} to the table's avro {@link Schema} file.
   */
  public Path getSchemaUrl(Table table) throws IOException {
    return getSchemaUrl(table.getTTable().getSd());
  }

  /**
   * Get the url to <code>partition</code>'s avro {@link Schema} file.
   *
   * @param partition whose avro schema is to be returned
   * @return a {@link Path} to the partition's avro {@link Schema} file.
   */
  public Path getSchemaUrl(Partition partition) throws IOException {
    return getSchemaUrl(partition.getTPartition().getSd());
  }

  /**
   * Delete the temporary {@link #schemaDir}
   */
  public void cleanupTempSchemas() throws IOException {
    HadoopUtils.deleteIfExists(this.fs, this.schemaDir, true);
  }

  /** Parses the avro {@link Schema} stored in the file at <code>schemaUrl</code>. */
  public static Schema getSchemaFromUrl(Path schemaUrl, FileSystem fs) throws IOException {
    return AvroUtils.parseSchemaFromFile(schemaUrl, fs);
  }

  /**
   * Resolves the avro schema url for a {@link StorageDescriptor}, trying in order:
   * <ol>
   *   <li>the serde SCHEMA_URL — fetched over HTTP(S) when the url starts with "http",
   *       otherwise used directly as a filesystem path,</li>
   *   <li>the serde SCHEMA_LITERAL,</li>
   *   <li>conversion of the Hive column metadata to an avro schema,</li>
   *   <li>schema inference from the data files under the entity's location.</li>
   * </ol>
   *
   * @throws SchemaNotFoundException if no schema can be resolved by any strategy
   */
  private Path getSchemaUrl(StorageDescriptor sd) throws IOException {
    String schemaString = StringUtils.EMPTY;
    try {
      // Try to fetch from SCHEMA URL
      Map<String, String> serdeParameters = sd.getSerdeInfo().getParameters();
      if (serdeParameters != null && serdeParameters.containsKey(HiveAvroSerDeManager.SCHEMA_URL)) {
        String schemaUrl = serdeParameters.get(HiveAvroSerDeManager.SCHEMA_URL);
        if (schemaUrl.startsWith("http")) {
          // Fetch schema literal via HTTP GET if scheme is http(s)
          schemaString = IOUtils.toString(new URI(schemaUrl), StandardCharsets.UTF_8);
          log.debug("Schema string is: " + schemaString);
          Schema schema = HiveAvroORCQueryGenerator.readSchemaFromString(schemaString);
          return getOrGenerateSchemaFile(schema);
        } else {
          // .. else the url already points at a schema file on HDFS or the local filesystem
          return new Path(schemaUrl);
        }
      }
      // Try to fetch from SCHEMA LITERAL
      else if (serdeParameters != null && serdeParameters.containsKey(HiveAvroSerDeManager.SCHEMA_LITERAL)) {
        schemaString = serdeParameters.get(HiveAvroSerDeManager.SCHEMA_LITERAL);
        log.debug("Schema string is: " + schemaString);
        Schema schema = HiveAvroORCQueryGenerator.readSchemaFromString(schemaString);
        return getOrGenerateSchemaFile(schema);
      } else { // Generate schema from the Hive column metadata
        List<FieldSchema> fields = sd.getCols();
        // Lambda parameter renamed from 'fs' to 'field' to stop shadowing this class's FileSystem field.
        List<String> colNames = fields.stream().map(field -> field.getName()).collect(Collectors.toList());
        List<TypeInfo> typeInfos = fields.stream()
            .map(field -> TypeInfoUtils.getTypeInfoFromTypeString(field.getType()))
            .collect(Collectors.toList());
        List<String> comments = fields.stream().map(field -> field.getComment()).collect(Collectors.toList());
        Schema schema = new TypeInfoToSchema().convert(colNames, typeInfos, comments, null, null, null);
        return getOrGenerateSchemaFile(schema);
      }
    } catch (URISyntaxException e) {
      log.error(String.format("Failed to parse schema from schema string. Falling back to HDFS schema: %s",
          schemaString), e);
    }
    // Fall back: infer the schema from the data under the table/partition location
    Schema schema = AvroUtils.getDirectorySchema(new Path(sd.getLocation()), this.fs, true);
    if (schema == null) {
      throw new SchemaNotFoundException("Failed to get avro schema");
    }
    return getOrGenerateSchemaFile(schema);
  }

  /**
   * Returns the cached schema file path for <code>schema</code>, writing the file first when absent.
   *
   * The file is named after the schema's SHA-256 hash. Besides deduplicating identical schemas,
   * this fixes a collision in the previous System.currentTimeMillis()-based naming: two
   * *different* schemas written within the same millisecond would share a file name and the
   * second write (overwrite=true) clobbered the first schema's file while both cache entries
   * kept pointing at it.
   */
  private Path getOrGenerateSchemaFile(Schema schema) throws IOException {
    Preconditions.checkNotNull(schema, "Avro Schema should not be null");
    String hashedSchema = Hashing.sha256().hashString(schema.toString(), StandardCharsets.UTF_8).toString();
    if (!this.schemaPaths.containsKey(hashedSchema)) {
      Path schemaFilePath = new Path(this.schemaDir, hashedSchema + ".avsc");
      AvroUtils.writeSchemaToFile(schema, schemaFilePath, fs, true);
      this.schemaPaths.put(hashedSchema, schemaFilePath);
    }
    return this.schemaPaths.get(hashedSchema);
  }
}
| 2,529 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/task/QueryGenerator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.task;
import java.util.List;
import org.apache.gobblin.converter.DataConversionException;
import org.apache.gobblin.data.management.conversion.hive.entities.QueryBasedHivePublishEntity;
/**
 * An interface for generating Hive queries: the extract/convert/write queries that move data,
 * and the publish/cleanup queries that make the result visible.
 */
public interface QueryGenerator {
  /**
   * Generates queries to extract/convert/write data.
   *
   * @return list of Hive query strings to execute, in order
   */
  List<String> generateQueries();

  /**
   * Generates queries for publishing data.
   *
   * @return {@link QueryBasedHivePublishEntity} containing cleanup and publish queries
   * @throws DataConversionException if the publish queries cannot be generated
   */
  QueryBasedHivePublishEntity generatePublishQueries() throws DataConversionException;
}
| 2,530 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/task/HiveTask.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.task;
import java.sql.SQLException;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import com.google.common.base.Splitter;
import com.google.common.base.Throwables;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.data.management.conversion.hive.entities.QueryBasedHivePublishEntity;
import org.apache.gobblin.data.management.conversion.hive.source.HiveSource;
import org.apache.gobblin.data.management.conversion.hive.source.HiveWorkUnit;
import org.apache.gobblin.data.management.conversion.hive.watermarker.HiveSourceWatermarker;
import org.apache.gobblin.data.management.conversion.hive.watermarker.HiveSourceWatermarkerFactory;
import org.apache.gobblin.metrics.event.EventSubmitter;
import org.apache.gobblin.runtime.TaskContext;
import org.apache.gobblin.runtime.task.BaseAbstractTask;
import org.apache.gobblin.util.HadoopUtils;
import org.apache.gobblin.util.HiveJdbcConnector;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
import lombok.extern.slf4j.Slf4j;
@Slf4j
/**
* An abstract Task that runs a hive job.
* it runs hive queries.
* Implementation classes should implement abstract methods generateHiveQueries() and generatePublishQueries()
* which creates extract/write level queries and publish level queries respectively.
*/
public abstract class HiveTask extends BaseAbstractTask {
private static final String USE_WATERMARKER_KEY = "internal.hiveTask.useWatermarker";
private static final String ADD_FILES = "internal.hiveTask.addFiles";
private static final String ADD_JARS = "internal.hiveTask.addJars";
private static final String SETUP_QUERIES = "internal.hiveTask.setupQueries";
/**
* Disable Hive watermarker. This is necessary when there is no concrete source table where watermark can be inferred.
*/
public static void disableHiveWatermarker(State state) {
state.setProp(USE_WATERMARKER_KEY, Boolean.toString(false));
}
/**
* Add the input file to the Hive session before running the task.
*/
public static void addFile(State state, String file) {
state.setProp(ADD_FILES, state.getProp(ADD_FILES, "") + "," + file);
}
/**
* Add the input jar to the Hive session before running the task.
*/
public static void addJar(State state, String jar) {
state.setProp(ADD_JARS, state.getProp(ADD_JARS, "") + "," + jar);
}
/**
* Run the specified setup query on the Hive session before running the task.
*/
public static void addSetupQuery(State state, String query) {
state.setProp(SETUP_QUERIES, state.getProp(SETUP_QUERIES, "") + ";" + query);
}
protected final TaskContext taskContext;
protected final WorkUnitState workUnitState;
protected final HiveWorkUnit workUnit;
protected final EventSubmitter eventSubmitter;
protected final List<String> hiveExecutionQueries;
protected final QueryBasedHivePublishEntity publishEntity;
protected final HiveJdbcConnector hiveJdbcConnector;
private final List<String> addFiles;
private final List<String> addJars;
private final List<String> setupQueries;
public HiveTask(TaskContext taskContext) {
super(taskContext);
this.taskContext = taskContext;
this.workUnitState = taskContext.getTaskState();
this.workUnit = new HiveWorkUnit(this.workUnitState.getWorkunit());
this.eventSubmitter = new EventSubmitter.Builder(this.metricContext, "gobblin.HiveTask")
.build();
this.hiveExecutionQueries = Lists.newArrayList();
this.publishEntity = new QueryBasedHivePublishEntity();
try {
this.hiveJdbcConnector = HiveJdbcConnector.newConnectorWithProps(this.workUnitState.getProperties());
} catch (SQLException se) {
throw new RuntimeException("Error in creating JDBC Connector", se);
}
this.addFiles = this.workUnitState.getPropAsList(ADD_FILES, "");
this.addJars = this.workUnitState.getPropAsList(ADD_JARS, "");
this.setupQueries = Splitter.on(";").trimResults().omitEmptyStrings().splitToList(this.workUnitState.getProp(SETUP_QUERIES, ""));
}
/**
* Generate hive queries to extract data
* @return list of hive queries
* @throws Exception
*/
public abstract List<String> generateHiveQueries() throws Exception;
/**
* Generate publish and cleanup queries for hive datasets/partitions
* @return QueryBasedHivePublishEntity having cleanup and publish queries
* @throws Exception
*/
public abstract QueryBasedHivePublishEntity generatePublishQueries() throws Exception;
protected void executePublishQueries(QueryBasedHivePublishEntity publishEntity) {
Set<String> cleanUpQueries = Sets.newLinkedHashSet();
Set<String> publishQueries = Sets.newLinkedHashSet();
List<String> directoriesToDelete = Lists.newArrayList();
FileSystem fs = null;
try {
fs = HiveSource.getSourceFs(workUnitState);
if (publishEntity.getCleanupQueries() != null) {
cleanUpQueries.addAll(publishEntity.getCleanupQueries());
}
if (publishEntity.getCleanupDirectories() != null) {
directoriesToDelete.addAll(publishEntity.getCleanupDirectories());
}
if (publishEntity.getPublishDirectories() != null) {
// Publish snapshot / partition directories
Map<String, String> publishDirectories = publishEntity.getPublishDirectories();
try {
for (Map.Entry<String, String> publishDir : publishDirectories.entrySet()) {
HadoopUtils.renamePath(fs, new Path(publishDir.getKey()), new Path(publishDir.getValue()), true);
}
} catch (Throwable t) {
throw Throwables.propagate(t);
}
}
if (publishEntity.getPublishQueries() != null) {
publishQueries.addAll(publishEntity.getPublishQueries());
}
WorkUnitState wus = this.workUnitState;
this.hiveJdbcConnector.executeStatements(publishQueries.toArray(new String[publishQueries.size()]));
wus.setWorkingState(WorkUnitState.WorkingState.COMMITTED);
if (wus.getPropAsBoolean(USE_WATERMARKER_KEY, true)) {
HiveSourceWatermarker watermarker = GobblinConstructorUtils.invokeConstructor(HiveSourceWatermarkerFactory.class,
wus.getProp(HiveSource.HIVE_SOURCE_WATERMARKER_FACTORY_CLASS_KEY, HiveSource.DEFAULT_HIVE_SOURCE_WATERMARKER_FACTORY_CLASS)).createFromState(wus);
watermarker.setActualHighWatermark(wus);
}
} catch (RuntimeException re) {
throw re;
} catch (Exception e) {
log.error("Error in HiveMaterializer generate publish queries", e);
} finally {
try {
this.hiveJdbcConnector.executeStatements(cleanUpQueries.toArray(new String[cleanUpQueries.size()]));
HadoopUtils.deleteDirectories(fs, directoriesToDelete, true, true);
} catch(RuntimeException re) {
throw re;
} catch (Exception e) {
log.error("Failed to cleanup staging entities.", e);
}
}
}
@Override
public void run() {
try {
List<String> queries = generateHiveQueries();
this.hiveJdbcConnector.executeStatements(Lists.transform(this.addFiles, file -> "ADD FILE " + file).toArray(new String[]{}));
this.hiveJdbcConnector.executeStatements(Lists.transform(this.addJars, file -> "ADD JAR " + file).toArray(new String[]{}));
this.hiveJdbcConnector.executeStatements(this.setupQueries.toArray(new String[]{}));
this.hiveJdbcConnector.executeStatements(queries.toArray(new String[queries.size()]));
super.run();
} catch (Exception e) {
this.workingState = WorkUnitState.WorkingState.FAILED;
log.error("Exception in HiveTask generateHiveQueries ", e);
}
}
@Override
public void commit() {
try {
executePublishQueries(generatePublishQueries());
super.commit();
} catch (Exception e) {
this.workingState = WorkUnitState.WorkingState.FAILED;
log.error("Exception in HiveTask generate publish HiveQueries ", e);
}
}
}
| 2,531 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/task/HiveConverterUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.task;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Random;
import static java.util.stream.Collectors.joining;
import java.util.stream.Collectors;
import org.apache.avro.Schema;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.gobblin.data.management.conversion.hive.entities.HiveProcessingEntity;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.serde2.SerDeException;
import org.apache.hadoop.hive.serde2.avro.AvroObjectInspectorGenerator;
import org.apache.thrift.TException;
import com.google.common.base.Joiner;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Splitter;
import com.google.common.base.Strings;
import com.google.common.base.Throwables;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.DataConversionException;
import org.apache.gobblin.data.management.copy.hive.HiveDatasetFinder;
import org.apache.gobblin.data.management.copy.hive.HiveUtils;
import org.apache.gobblin.hive.HiveMetastoreClientPool;
import org.apache.gobblin.util.AutoReturnableObject;
import lombok.AllArgsConstructor;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
@Slf4j
/**
* A utility class for converting hive data from one dataset to another.
*/
public class HiveConverterUtils {
// Output storage formats supported for converted Hive tables. Each constant carries the
// identifier Hive expects in a "STORED AS <format>" clause (see generateStagingCTASStatement).
@AllArgsConstructor
@Getter
public static enum StorageFormat {
  TEXT_FILE("TEXTFILE"),
  SEQUENCE_FILE("SEQUENCEFILE"),
  ORC("ORC"),
  PARQUET("PARQUET"),
  AVRO("AVRO"),
  RC_FILE("RCFILE");
  // Hive's name for the format; exposed via Lombok @Getter as getHiveName().
  private final String hiveName;
}
/***
* Subdirectory within destination table directory to publish data
*/
private static final String PUBLISHED_TABLE_SUBDIRECTORY = "final";
/***
* Separators used by Hive
*/
private static final String HIVE_PARTITIONS_INFO = "/";
private static final String HIVE_PARTITIONS_TYPE = ":";
/**
* If the property is set to true then partition dir is overwritten,
* else a new time-stamped partition dir is created to avoid breaking in-flight queries
* Check org.apache.gobblin.data.management.retention.Avro2OrcStaleDatasetCleaner to clean stale directories
*/
public static final String HIVE_DATASET_PARTITION_OVERWRITE = "hive.dataset.partition.overwrite";
public static final boolean DEFAULT_HIVE_DATASET_PARTITION_OVERWRITE = true;
/**
* If the property is set to true then in the destination dir permissions, group won't be explicitly set.
*/
public static final String HIVE_DATASET_DESTINATION_SKIP_SETGROUP = "hive.dataset.destination.skip.setGroup";
public static final boolean DEFAULT_HIVE_DATASET_DESTINATION_SKIP_SETGROUP = false;
/**
 * Builds a staging table name by appending an underscore plus a qualifier made of the
 * current millisecond timestamp concatenated with a random number in [0, 100).
 */
public static String getStagingTableName(String stagingTableNamePrefix) {
  long timestamp = System.currentTimeMillis();
  int salt = new Random().nextInt(100);
  return String.format("%s_%s%s", stagingTableNamePrefix, timestamp, salt);
}
/***
 * Get the final table location of format: <final table location>/final
 * @param outputDataLocation base output table data location.
 * @return final table location (the base location with the 'final' subdirectory appended).
 */
public static String getOutputDataLocation(String outputDataLocation) {
  return outputDataLocation + Path.SEPARATOR + PUBLISHED_TABLE_SUBDIRECTORY;
}
/***
 * Get the staging table location of format: <final table location>/<staging table name>
 * @param outputDataLocation output table data location.
 * @param stagingTableName staging table name, appended as a subdirectory.
 * @return staging table location.
 */
public static String getStagingDataLocation(String outputDataLocation, String stagingTableName) {
  return outputDataLocation + Path.SEPARATOR + stagingTableName;
}
/***
 * Generate DDL query to create a duplicate Hive table.
 * @param inputDbName source DB name
 * @param inputTblName source table name
 * @param tblName new Hive table name; must be non-blank
 * @param tblLocation new Hive table location; must be non-blank
 * @param optionalDbName optional DB name; defaults to 'default' when absent
 * @return the CREATE EXTERNAL TABLE ... LIKE ... DDL string
 */
public static String generateCreateDuplicateTableDDL(
    String inputDbName,
    String inputTblName,
    String tblName,
    String tblLocation,
    Optional<String> optionalDbName) {
  Preconditions.checkArgument(StringUtils.isNotBlank(tblName));
  Preconditions.checkArgument(StringUtils.isNotBlank(tblLocation));
  String dbName = optionalDbName.isPresent() ? optionalDbName.get() : "default";
  // IF NOT EXISTS makes the DDL idempotent; EXTERNAL keeps Hive from owning/deleting the data.
  return String.format("CREATE EXTERNAL TABLE IF NOT EXISTS `%s`.`%s` LIKE `%s`.`%s` LOCATION %n '%s' %n",
      dbName, tblName, inputDbName, inputTblName, tblLocation);
}
/**
 * Generate a DML statement that sets the 'columns' and 'columns.types' table properties of
 * {@code tableName} from the given avro {@code schema}.
 *
 * @param tableName Hive table whose properties are altered; must be non-blank
 * @param optionalDbName optional DB name; defaults to 'default' when absent
 * @param schema avro schema the column names/types are derived from; must be non-null
 * @return the ALTER TABLE ... SET TBLPROPERTIES DML string
 */
public static String generateAlterTblPropsDML(
    String tableName,
    Optional<String> optionalDbName,
    Schema schema
) {
  Preconditions.checkArgument(StringUtils.isNotBlank(tableName));
  Preconditions.checkArgument(schema != null);
  String dbName = optionalDbName.isPresent() ? optionalDbName.get() : "default";
  try {
    Pair<String, String> orcSchemaProps = getORCSchemaPropsFromAvroSchema(schema);
    String dml = String.format("ALTER TABLE `%s`.`%s` SET TBLPROPERTIES ('columns'='%s', 'columns.types'='%s')", dbName, tableName,
        orcSchemaProps.getLeft(), orcSchemaProps.getRight());
    return dml;
  } catch (Exception e) {
    // Message fixed: this method builds ALTER TABLE properties DML, not add-partition DDL.
    log.error("Cannot generate alter table properties DML due to ", e);
    throw new RuntimeException(e);
  }
}
/**
 * Derives the ORC 'columns' / 'columns.types' table-property values from an avro schema.
 *
 * @param avroSchema schema to inspect
 * @return pair of (comma-joined column names, comma-joined column type names)
 * @throws SerDeException if the avro schema cannot be inspected
 */
public static Pair<String, String> getORCSchemaPropsFromAvroSchema(Schema avroSchema) throws SerDeException {
  AvroObjectInspectorGenerator inspectorGenerator = new AvroObjectInspectorGenerator(avroSchema);
  Joiner commaJoiner = Joiner.on(",");
  String columns = commaJoiner.join(inspectorGenerator.getColumnNames());
  List<String> typeNames = inspectorGenerator.getColumnTypes().stream()
      .map(typeInfo -> typeInfo.getTypeName())
      .collect(Collectors.toList());
  return new ImmutablePair<>(columns, commaJoiner.join(typeNames));
}
/**
 * Generates a CTAS statement to dump the contents of a table / partition into a new table.
 * @param outputDbAndTable output db and table where contents should be written.
 * @param sourceEntity source table / partition.
 * @param partitionDMLInfo map of partition values; when non-empty, a WHERE clause restricts
 *        the SELECT to that partition.
 * @param storageFormat format of output table.
 * @param outputTableLocation location where files of output table should be written.
 */
public static String generateStagingCTASStatementFromSelectStar(HiveDatasetFinder.DbAndTable outputDbAndTable,
    HiveDatasetFinder.DbAndTable sourceEntity, Map<String, String> partitionDMLInfo,
    StorageFormat storageFormat, String outputTableLocation) {
  String sourceQuery = String.format("SELECT * FROM `%s`.`%s`", sourceEntity.getDb(), sourceEntity.getTable());
  if (partitionDMLInfo != null && !partitionDMLInfo.isEmpty()) {
    String predicate = partitionDMLInfo.entrySet().stream()
        .map(e -> "`" + e.getKey() + "`='" + e.getValue() + "'")
        .collect(joining(" AND "));
    sourceQuery = sourceQuery + " WHERE " + predicate;
  }
  return generateStagingCTASStatement(outputDbAndTable, sourceQuery, storageFormat, outputTableLocation);
}
/**
 * Generates a CTAS statement that dumps the result set of an arbitrary query into a new
 * temporary table at the given location.
 *
 * @param outputDbAndTable output db and table where contents should be written.
 * @param sourceQuery query to materialize.
 * @param storageFormat format of output table.
 * @param outputTableLocation location where files of output table should be written.
 */
public static String generateStagingCTASStatement(HiveDatasetFinder.DbAndTable outputDbAndTable,
    String sourceQuery, StorageFormat storageFormat, String outputTableLocation) {
  boolean hasValidDbAndTable = !Strings.isNullOrEmpty(outputDbAndTable.getDb())
      && !Strings.isNullOrEmpty(outputDbAndTable.getTable());
  Preconditions.checkArgument(hasValidDbAndTable, "Invalid output db and table " + outputDbAndTable);
  return String.format("CREATE TEMPORARY TABLE `%s`.`%s` STORED AS %s LOCATION '%s' AS %s",
      outputDbAndTable.getDb(), outputDbAndTable.getTable(), storageFormat.getHiveName(),
      outputTableLocation, sourceQuery);
}
/**
 * Builds an INSERT OVERWRITE query that copies data from the input table into the output
 * table, optionally scoped to a single partition.
 *
 * @param inputTblName input hive table name
 * @param outputTblName output hive table name
 * @param inputDbName input hive database name
 * @param outputDbName output hive database name
 * @param optionalPartitionDMLInfo input hive table's partition's name and value
 * @return Hive query string
 */
public static String generateTableCopy(
    String inputTblName,
    String outputTblName,
    String inputDbName,
    String outputDbName,
    Optional<Map<String, String>> optionalPartitionDMLInfo) {
  Preconditions.checkArgument(StringUtils.isNotBlank(inputTblName));
  Preconditions.checkArgument(StringUtils.isNotBlank(outputTblName));
  Preconditions.checkArgument(StringUtils.isNotBlank(inputDbName));
  Preconditions.checkArgument(StringUtils.isNotBlank(outputDbName));
  boolean hasPartitions = optionalPartitionDMLInfo.isPresent() && !optionalPartitionDMLInfo.get().isEmpty();
  StringBuilder dmlQuery = new StringBuilder();
  // Target of the copy.
  dmlQuery.append(String.format("INSERT OVERWRITE TABLE `%s`.`%s` %n", outputDbName, outputTblName));
  if (hasPartitions) {
    // PARTITION (...) clause listing the partition keys.
    dmlQuery.append(partitionKeyValues(optionalPartitionDMLInfo));
  }
  dmlQuery.append(String.format("SELECT * FROM `%s`.`%s`", inputDbName, inputTblName));
  if (hasPartitions) {
    // Restrict the source scan to the same partition values.
    String predicate = optionalPartitionDMLInfo.get().entrySet().stream()
        .map(entry -> "`" + entry.getKey() + "`='" + entry.getValue() + "'")
        .collect(joining(" AND "));
    dmlQuery.append(" WHERE ").append(predicate);
  }
  return dmlQuery.toString();
}
/**
 * Builds a {@code PARTITION (`k1`, `k2`) \n} clause listing the partition keys, or an
 * empty builder when no partition info is present.
 */
protected static StringBuilder partitionKeyValues(Optional<Map<String, String>> optionalPartitionDMLInfo) {
  StringBuilder clause = new StringBuilder();
  if (optionalPartitionDMLInfo.isPresent()) {
    clause.append("PARTITION (");
    boolean first = true;
    for (String partitionKey : optionalPartitionDMLInfo.get().keySet()) {
      if (!first) {
        clause.append(", ");
      }
      first = false;
      clause.append("`").append(partitionKey).append("`");
    }
    clause.append(") \n");
  }
  return clause;
}
/**
 * Fills {@code partitionsDDLInfo} and {@code partitionsDMLInfo} with the partition
 * information of the given conversion entity, if it has a partition.
 *
 * @param conversionEntity conversion entity whose partition info is extracted
 * @param partitionsDDLInfo partition name to partition type, filled by this method
 * @param partitionsDMLInfo partition name to partition value, filled by this method
 * @throws IllegalArgumentException if only one of partition names / types is present,
 *         if their counts differ, or if a partition spec is not of the form name=value.
 */
public static void populatePartitionInfo(HiveProcessingEntity conversionEntity, Map<String, String> partitionsDDLInfo,
    Map<String, String> partitionsDMLInfo) {
  String partitionsInfoString = null;
  String partitionsTypeString = null;
  if (conversionEntity.getPartition().isPresent()) {
    partitionsInfoString = conversionEntity.getPartition().get().getName();
    partitionsTypeString = conversionEntity.getPartition().get().getSchema().getProperty("partition_columns.types");
  }
  if (StringUtils.isNotBlank(partitionsInfoString) || StringUtils.isNotBlank(partitionsTypeString)) {
    if (StringUtils.isBlank(partitionsInfoString) || StringUtils.isBlank(partitionsTypeString)) {
      // Fixed message: previously read "partitions info and partitions", dropping the word "type".
      throw new IllegalArgumentException("Both partitions info and partitions type must be present, if one is specified");
    }
    List<String> pInfo = Splitter.on(HIVE_PARTITIONS_INFO).omitEmptyStrings().trimResults().splitToList(partitionsInfoString);
    List<String> pType = Splitter.on(HIVE_PARTITIONS_TYPE).omitEmptyStrings().trimResults().splitToList(partitionsTypeString);
    log.debug("PartitionsInfoString: " + partitionsInfoString);
    log.debug("PartitionsTypeString: " + partitionsTypeString);
    if (pInfo.size() != pType.size()) {
      // Fixed grammar: was "should of same size".
      throw new IllegalArgumentException("Partitions info and partitions type lists should be of the same size");
    }
    for (int i = 0; i < pInfo.size(); i++) {
      // Each entry is expected to look like "partitionName=partitionValue".
      List<String> partitionInfoParts = Splitter.on("=").omitEmptyStrings().trimResults().splitToList(pInfo.get(i));
      String partitionType = pType.get(i);
      if (partitionInfoParts.size() != 2) {
        // Fixed spelling: was "Recieved".
        throw new IllegalArgumentException(
            String.format("Partition details should be of the format partitionName=partitionValue. Received: %s", pInfo.get(i)));
      }
      partitionsDDLInfo.put(partitionInfoParts.get(0), partitionType);
      partitionsDMLInfo.put(partitionInfoParts.get(0), partitionInfoParts.get(1));
    }
  }
}
/**
 * Creates a staging directory, copying the permissions (and group, unless disabled via
 * {@code HIVE_DATASET_DESTINATION_SKIP_SETGROUP}) from the source table's data location.
 * IOExceptions are rethrown as unchecked exceptions via {@link Throwables#propagate}.
 *
 * @param fs filesystem object
 * @param destination staging directory location
 * @param conversionEntity conversion entity used to get source directory permissions
 * @param workUnit workunit
 */
public static void createStagingDirectory(FileSystem fs, String destination, HiveProcessingEntity conversionEntity,
    WorkUnitState workUnit) {
  /*
   * Create staging data location with the same permissions as source data location
   *
   * Note that hive can also automatically create the non-existing directories but it does not
   * seem to create it with the desired permissions.
   * According to hive docs permissions for newly created directories/files can be controlled using uMask like,
   *
   * SET hive.warehouse.subdir.inherit.perms=false;
   * SET fs.permissions.umask-mode=022;
   * Upon testing, this did not work
   */
  Path destinationPath = new Path(destination);
  try {
    FsPermission permission;
    String group = null;
    if (conversionEntity.getTable().getDataLocation() != null) {
      // Mirror permissions and group from the source table's data directory.
      FileStatus sourceDataFileStatus = fs.getFileStatus(conversionEntity.getTable().getDataLocation());
      permission = sourceDataFileStatus.getPermission();
      group = sourceDataFileStatus.getGroup();
    } else {
      // No source location to copy from; fall back to the filesystem default permission.
      permission = FsPermission.getDefault();
    }
    if (!fs.mkdirs(destinationPath, permission)) {
      throw new RuntimeException(String.format("Failed to create path %s with permissions %s",
          destinationPath, permission));
    } else {
      // Re-apply the permission explicitly: mkdirs applies the process umask, so the
      // created directory may not carry the requested bits — presumably why this extra
      // call exists (NOTE(review): confirm against the target HDFS version).
      fs.setPermission(destinationPath, permission);
      // Set the same group as source
      if (group != null && !workUnit.getPropAsBoolean(HIVE_DATASET_DESTINATION_SKIP_SETGROUP, DEFAULT_HIVE_DATASET_DESTINATION_SKIP_SETGROUP)) {
        fs.setOwner(destinationPath, null, group);
      }
      log.info(String.format("Created %s with permissions %s and group %s", destinationPath, permission, group));
    }
  } catch (IOException e) {
    // Rethrows as RuntimeException; callers do not see a checked IOException.
    Throwables.propagate(e);
  }
}
/***
 * Get the partition directory name of the format: [hourly_][daily_]<partitionSpec1>[partitionSpec ..]
 * @param conversionEntity Conversion entity; when it has no partition, an empty string is returned.
 * @param sourceDataPathIdentifier Hints to look in source partition location to prefix the partition dir name
 * such as hourly or daily. Matching is case-insensitive; each matching hint is appended
 * (lower-cased, followed by "_") in list order.
 * @return Partition directory name, or {@link StringUtils#EMPTY} for unpartitioned entities.
 */
public static String getStagingDataPartitionDirName(HiveProcessingEntity conversionEntity,
    List<String> sourceDataPathIdentifier) {
  if (!conversionEntity.getPartition().isPresent()) {
    return StringUtils.EMPTY;
  }
  StringBuilder dirNamePrefix = new StringBuilder();
  String sourceHivePartitionLocation = conversionEntity.getPartition().get().getDataLocation().toString();
  if (null != sourceDataPathIdentifier && null != sourceHivePartitionLocation) {
    // Hoisted out of the loop: lower-case the location once instead of once per hint.
    String lowerCaseLocation = sourceHivePartitionLocation.toLowerCase();
    for (String hint : sourceDataPathIdentifier) {
      if (lowerCaseLocation.contains(hint.toLowerCase())) {
        dirNamePrefix.append(hint.toLowerCase()).append("_");
      }
    }
  }
  return dirNamePrefix + conversionEntity.getPartition().get().getName();
}
/**
 * Returns the data location of the named partition of the given destination table, by
 * querying the Hive metastore. Absent is returned when the table is absent or the
 * partition does not exist in the metastore.
 *
 * @param table Hive table; absent short-circuits to an absent result
 * @param state workunit state (provides job properties and the metastore URI)
 * @param partitionName partition name
 * @return partition data location, or absent if table/partition is missing
 * @throws DataConversionException on any metastore / IO failure other than a missing partition
 */
public static Optional<Path> getDestinationPartitionLocation(Optional<Table> table, WorkUnitState state,
    String partitionName)
    throws DataConversionException {
  Optional<org.apache.hadoop.hive.metastore.api.Partition> partitionOptional;
  if (!table.isPresent()) {
    return Optional.absent();
  }
  try {
    // Pooled metastore client keyed on the job's metastore URI (may be null for the default).
    HiveMetastoreClientPool pool = HiveMetastoreClientPool.get(state.getJobState().getProperties(),
        Optional.fromNullable(state.getJobState().getProp(HiveDatasetFinder.HIVE_METASTORE_URI_KEY)));
    try (AutoReturnableObject<IMetaStoreClient> client = pool.getClient()) {
      partitionOptional =
          Optional.of(client.get().getPartition(table.get().getDbName(), table.get().getTableName(), partitionName));
    } catch (NoSuchObjectException e) {
      // Missing partition is an expected case, not an error.
      return Optional.absent();
    }
    if (partitionOptional.isPresent()) {
      // Wrap the thrift metadata objects in ql-level ones to resolve the data location.
      org.apache.hadoop.hive.ql.metadata.Table qlTable = new org.apache.hadoop.hive.ql.metadata.Table(table.get());
      Partition qlPartition =
          new Partition(qlTable, partitionOptional.get());
      return Optional.of(qlPartition.getDataLocation());
    }
  } catch (IOException | TException | HiveException e) {
    throw new DataConversionException("Could not fetch destination table metadata", e);
  }
  return Optional.absent();
}
/**
 * If partition already exists then new partition location will be a separate time stamp dir
 * If partition location is /a/b/c/<oldTimeStamp> then new partition location is /a/b/c/<currentTimeStamp>
 * If partition location is /a/b/c/ then new partition location is /a/b/c/<currentTimeStamp>
 **/
public static String updatePartitionLocation(String outputDataPartitionLocation, WorkUnitState workUnitState,
    Optional<Path> destPartitionLocation)
    throws DataConversionException {
  boolean overwriteEnabled =
      workUnitState.getPropAsBoolean(HIVE_DATASET_PARTITION_OVERWRITE, DEFAULT_HIVE_DATASET_PARTITION_OVERWRITE);
  // Keep the location as-is when overwriting, or when the partition does not exist yet.
  if (overwriteEnabled || !destPartitionLocation.isPresent()) {
    return outputDataPartitionLocation;
  }
  // Existing partition: append a fresh timestamp directory so the old data is not clobbered.
  return outputDataPartitionLocation + "/" + System.currentTimeMillis();
}
/**
 * Returns a pair of Hive table and its partitions, looked up from the metastore.
 * If the table does not exist, both elements of the pair are absent.
 *
 * @param dbName db name
 * @param tableName table name
 * @param props properties (provides the metastore URI)
 * @return a pair of Hive table and its partitions (partitions absent for unpartitioned tables)
 * @throws RuntimeException on metastore / IO failures other than a missing table
 *         (note: this method does NOT throw DataConversionException)
 */
public static Pair<Optional<Table>, Optional<List<Partition>>> getDestinationTableMeta(String dbName,
    String tableName, Properties props) {
  Optional<Table> table = Optional.<Table>absent();
  Optional<List<Partition>> partitions = Optional.<List<Partition>>absent();
  try {
    // Pooled metastore client keyed on the configured metastore URI (may be null for default).
    HiveMetastoreClientPool pool = HiveMetastoreClientPool.get(props,
        Optional.fromNullable(props.getProperty(HiveDatasetFinder.HIVE_METASTORE_URI_KEY)));
    try (AutoReturnableObject<IMetaStoreClient> client = pool.getClient()) {
      table = Optional.of(client.get().getTable(dbName, tableName));
      if (table.isPresent()) {
        org.apache.hadoop.hive.ql.metadata.Table qlTable = new org.apache.hadoop.hive.ql.metadata.Table(table.get());
        // Only partitioned tables get a partition listing; unpartitioned stay absent.
        if (qlTable.isPartitioned()) {
          partitions = Optional.of(HiveUtils.getPartitions(client.get(), qlTable, Optional.<String>absent()));
        }
      }
    }
  } catch (NoSuchObjectException e) {
    // Missing table is an expected case: return the (absent, absent) pair.
    return ImmutablePair.of(table, partitions);
  } catch (IOException | TException e) {
    throw new RuntimeException("Could not fetch destination table metadata", e);
  }
  return ImmutablePair.of(table, partitions);
}
}
| 2,532 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/events/EventConstants.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.events;
import org.apache.gobblin.metrics.event.sla.SlaEventKeys;
/**
 * Event names and metadata names used by hive conversion.
 */
public class EventConstants {
  // Namespaces under which conversion / validation events are emitted.
  public static final String CONVERSION_NAMESPACE = "gobblin.hive.conversion";
  public static final String VALIDATION_NAMESPACE = "gobblin.hive.validation";
  public static final String CONVERSION_PREFIX = CONVERSION_NAMESPACE + ".";
  public static final String VALIDATION_PREFIX = VALIDATION_NAMESPACE + ".";
  // Conversion event names
  public static final String CONVERSION_SETUP_EVENT = CONVERSION_PREFIX + "Setup";
  public static final String CONVERSION_FIND_HIVE_TABLES_EVENT = CONVERSION_PREFIX + "FindHiveTables";
  public static final String CONVERSION_SUCCESSFUL_SLA_EVENT = CONVERSION_PREFIX + "ConversionSuccessful";
  public static final String CONVERSION_FAILED_EVENT = CONVERSION_PREFIX + "ConversionFailed";
  // Validation event names
  public static final String VALIDATION_SETUP_EVENT = VALIDATION_PREFIX + "Setup";
  public static final String VALIDATION_FIND_HIVE_TABLES_EVENT = VALIDATION_PREFIX + "FindHiveTables";
  public static final String VALIDATION_SUCCESSFUL_EVENT = VALIDATION_PREFIX + "ValidationSuccessful";
  public static final String VALIDATION_FAILED_EVENT = VALIDATION_PREFIX + "ValidationFailed";
  public static final String VALIDATION_NOOP_EVENT = VALIDATION_PREFIX + "ValidationNoop";
  // Event metadata keys
  // The final event metadata name will be SchemaEvolutionDDLNum as SlaEventSubmitter removes the prefix
  // SlaEventKeys.EVENT_ADDITIONAL_METADATA_PREFIX
  public static final String SCHEMA_EVOLUTION_DDLS_NUM = SlaEventKeys.EVENT_ADDITIONAL_METADATA_PREFIX + "schemaEvolutionDDLNum";
  public static final String BEGIN_DDL_BUILD_TIME = SlaEventKeys.EVENT_ADDITIONAL_METADATA_PREFIX + "beginDDLBuildTime";
  public static final String END_DDL_BUILD_TIME = SlaEventKeys.EVENT_ADDITIONAL_METADATA_PREFIX + "endDDLBuildTime";
  public static final String BEGIN_CONVERSION_DDL_EXECUTE_TIME = SlaEventKeys.EVENT_ADDITIONAL_METADATA_PREFIX + "beginConversionDDLExecuteTime";
  public static final String END_CONVERSION_DDL_EXECUTE_TIME = SlaEventKeys.EVENT_ADDITIONAL_METADATA_PREFIX + "endConversionDDLExecuteTime";
  public static final String BEGIN_PUBLISH_DDL_EXECUTE_TIME = SlaEventKeys.EVENT_ADDITIONAL_METADATA_PREFIX + "beginPublishDDLExecuteTime";
  public static final String END_PUBLISH_DDL_EXECUTE_TIME = SlaEventKeys.EVENT_ADDITIONAL_METADATA_PREFIX + "endPublishDDLExecuteTime";
  public static final String WORK_UNIT_CREATE_TIME = SlaEventKeys.EVENT_ADDITIONAL_METADATA_PREFIX + "workunitCreateTime";
  public static final String BEGIN_GET_WORKUNITS_TIME = SlaEventKeys.EVENT_ADDITIONAL_METADATA_PREFIX + "beginGetWorkunitsTime";
  public static final String SOURCE_DATA_LOCATION = SlaEventKeys.EVENT_ADDITIONAL_METADATA_PREFIX + "sourceDataLocation";
}
| 2,533 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/events/EventWorkunitUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.events;
import java.util.List;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.metrics.event.sla.SlaEventKeys;
import org.apache.gobblin.source.extractor.extract.LongWatermark;
import org.apache.gobblin.source.workunit.WorkUnit;
/**
 * Utilities to set event metadata into {@link WorkUnit}s.
 */
public class EventWorkunitUtils {
  public static final String IS_WATERMARK_WORKUNIT_KEY = "hive.source.watermark.isWatermarkWorkUnit";

  /**
   * Set SLA event metadata in the workunit for a table-level workunit.
   * The publisher will use this metadata to publish sla events.
   */
  public static void setTableSlaEventMetadata(WorkUnit state, Table table, long updateTime, long lowWatermark,
      long beginGetWorkunitsTime) {
    setCommonSlaEventMetadata(state, table.getCompleteName(), updateTime, lowWatermark, beginGetWorkunitsTime);
  }

  /**
   * Set SLA event metadata in the workunit for a partition-level workunit.
   * The publisher will use this metadata to publish sla events.
   */
  public static void setPartitionSlaEventMetadata(WorkUnit state, Table table, Partition partition, long updateTime,
      long lowWatermark, long beginGetWorkunitsTime) {
    setCommonSlaEventMetadata(state, partition.getName(), updateTime, lowWatermark, beginGetWorkunitsTime);
    state.setProp(EventConstants.SOURCE_DATA_LOCATION, partition.getDataLocation());
  }

  /** Sets the SLA event properties shared by table-level and partition-level workunits. */
  private static void setCommonSlaEventMetadata(WorkUnit state, String partitionKey, long updateTime,
      long lowWatermark, long beginGetWorkunitsTime) {
    state.setProp(SlaEventKeys.DATASET_URN_KEY, state.getProp(ConfigurationKeys.DATASET_URN_KEY));
    state.setProp(SlaEventKeys.PARTITION_KEY, partitionKey);
    state.setProp(SlaEventKeys.UPSTREAM_TS_IN_MILLI_SECS_KEY, String.valueOf(updateTime));
    // Time when the workunit was created
    state.setProp(SlaEventKeys.ORIGIN_TS_IN_MILLI_SECS_KEY, System.currentTimeMillis());
    state.setProp(EventConstants.WORK_UNIT_CREATE_TIME, state.getProp(SlaEventKeys.ORIGIN_TS_IN_MILLI_SECS_KEY));
    state.setProp(SlaEventKeys.PREVIOUS_PUBLISH_TS_IN_MILLI_SECS_KEY, lowWatermark);
    state.setProp(EventConstants.BEGIN_GET_WORKUNITS_TIME, beginGetWorkunitsTime);
  }

  /**
   * Set number of schema evolution DDLs as Sla event metadata.
   */
  public static void setEvolutionMetadata(State state, List<String> evolutionDDLs) {
    state.setProp(EventConstants.SCHEMA_EVOLUTION_DDLS_NUM, evolutionDDLs == null ? 0 : evolutionDDLs.size());
  }

  public static void setBeginDDLBuildTimeMetadata(State state, long time) {
    state.setProp(EventConstants.BEGIN_DDL_BUILD_TIME, Long.toString(time));
  }

  public static void setEndDDLBuildTimeMetadata(State state, long time) {
    state.setProp(EventConstants.END_DDL_BUILD_TIME, Long.toString(time));
  }

  public static void setBeginConversionDDLExecuteTimeMetadata(State state, long time) {
    state.setProp(EventConstants.BEGIN_CONVERSION_DDL_EXECUTE_TIME, Long.toString(time));
  }

  public static void setEndConversionDDLExecuteTimeMetadata(State state, long time) {
    state.setProp(EventConstants.END_CONVERSION_DDL_EXECUTE_TIME, Long.toString(time));
  }

  public static void setBeginPublishDDLExecuteTimeMetadata(State state, long time) {
    state.setProp(EventConstants.BEGIN_PUBLISH_DDL_EXECUTE_TIME, Long.toString(time));
  }

  public static void setEndPublishDDLExecuteTimeMetadata(State state, long time) {
    state.setProp(EventConstants.END_PUBLISH_DDL_EXECUTE_TIME, Long.toString(time));
  }

  /**
   * Sets metadata to indicate whether this is the first time this table or partition is being published.
   * @param wus to set if this is first publish for this table or partition
   */
  public static void setIsFirstPublishMetadata(WorkUnitState wus) {
    // Simplified: getPropAsBoolean already returns a primitive boolean; the former
    // Boolean.valueOf(...) round-trip was redundant.
    if (!wus.getPropAsBoolean(IS_WATERMARK_WORKUNIT_KEY)) {
      LongWatermark previousWatermark = wus.getWorkunit().getLowWatermark(LongWatermark.class);
      wus.setProp(SlaEventKeys.IS_FIRST_PUBLISH, (null == previousWatermark || previousWatermark.getValue() == 0));
    }
  }
}
| 2,534 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/query/HiveAvroORCQueryGenerator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.query;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.apache.avro.Schema;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.serde.serdeConstants;
import org.apache.hadoop.hive.serde2.SerDeException;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.typeinfo.ListTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.MapTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
import org.apache.hadoop.hive.serde2.typeinfo.UnionTypeInfo;
import com.google.common.base.Function;
import com.google.common.base.Joiner;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import lombok.ToString;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.data.management.conversion.hive.entities.QueryBasedHivePublishEntity;
import org.apache.gobblin.data.management.conversion.hive.task.HiveConverterUtils;
import org.apache.gobblin.util.HiveAvroTypeConstants;
import static org.apache.gobblin.data.management.conversion.hive.entities.StageableTableMetadata.SCHEMA_SOURCE_OF_TRUTH;
import static org.apache.gobblin.data.management.conversion.hive.utils.AvroHiveTypeUtils.generateAvroToHiveColumnMapping;
import static org.apache.gobblin.util.AvroUtils.sanitizeSchemaString;
/***
* Generate Hive queries
*/
@Slf4j
public class HiveAvroORCQueryGenerator {
// Workunit property under which serialized publish-table commands are stored.
private static final String SERIALIZED_PUBLISH_TABLE_COMMANDS = "serialized.publish.table.commands";
private static final Gson GSON = new GsonBuilder().setPrettyPrinting().create();
// Table properties keys
public static final String ORC_COMPRESSION_KEY = "orc.compress";
public static final String ORC_ROW_INDEX_STRIDE_KEY = "orc.row.index.stride";
// Default values for Hive DDL / DML query generation
private static final String DEFAULT_DB_NAME = "default";
private static final String DEFAULT_ROW_FORMAT_SERDE = "org.apache.hadoop.hive.ql.io.orc.OrcSerde";
private static final String DEFAULT_ORC_INPUT_FORMAT = "org.apache.hadoop.hive.ql.io.orc.OrcInputFormat";
private static final String DEFAULT_ORC_OUTPUT_FORMAT = "org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat";
private static final String DEFAULT_ORC_COMPRESSION = "ZLIB";
// 256 MB row-index stride (value is a string because it is stored in TBLPROPERTIES).
private static final String DEFAULT_ORC_ROW_INDEX_STRIDE = "268435456";
// Default TBLPROPERTIES applied to generated ORC tables; cloned before use so callers
// cannot mutate the shared defaults.
private static final Properties DEFAULT_TBL_PROPERTIES = new Properties();
static {
  DEFAULT_TBL_PROPERTIES.setProperty(ORC_COMPRESSION_KEY, DEFAULT_ORC_COMPRESSION);
  DEFAULT_TBL_PROPERTIES.setProperty(ORC_ROW_INDEX_STRIDE_KEY, DEFAULT_ORC_ROW_INDEX_STRIDE);
}
// Sort order for the SORTED BY clause of generated DDL.
// NOTE(review): lombok @ToString overrides Enum.toString, so formatting a constant with
// %s likely yields "COLUMN_SORT_ORDER(order=ASC)" rather than "ASC" — verify the
// generated SORTED BY DDL, which formats values of this enum via String.format.
@ToString
public static enum COLUMN_SORT_ORDER {
  ASC ("ASC"),
  DESC ("DESC");
  private final String order;
  COLUMN_SORT_ORDER(String s) {
    order = s;
  }
}
/***
 * Generate DDL query to create a different format (default: ORC) Hive table for a given Avro Schema
 * @param schema Avro schema to use to generate the DDL for new Hive table
 * @param tblName New Hive table name
 * @param tblLocation New hive table location
 * @param optionalDbName Optional DB name, if not specified it defaults to 'default'
 * @param optionalPartitionDDLInfo Optional partition info in form of map of partition key, partition type pair
 * If not specified, the table is assumed to be un-partitioned ie of type snapshot
 * @param optionalClusterInfo Optional cluster info
 * @param optionalSortOrderInfo Optional sort order
 * @param optionalNumOfBuckets Optional number of buckets
 * @param optionalRowFormatSerde Optional row format serde, default is ORC
 * @param optionalInputFormat Optional input format serde, default is ORC
 * @param optionalOutputFormat Optional output format serde, default is ORC
 * @param tableProperties Optional table properties; missing ORC defaults are filled in
 * @param isEvolutionEnabled If schema evolution is turned on
 * @param casePreserved If true, column name casing is preserved via 'columns'/'columns.types' TBLPROPERTIES
 * @param destinationTableMeta Optional destination table metadata
 * @param hiveColumns map filled with the Hive columns of the generated table (also used to
 *        validate CLUSTERED BY / SORTED BY columns)
 * @return Generated DDL query to create new Hive table
 */
public static String generateCreateTableDDL(Schema schema,
    String tblName,
    String tblLocation,
    Optional<String> optionalDbName,
    Optional<Map<String, String>> optionalPartitionDDLInfo,
    Optional<List<String>> optionalClusterInfo,
    Optional<Map<String, COLUMN_SORT_ORDER>> optionalSortOrderInfo,
    Optional<Integer> optionalNumOfBuckets,
    Optional<String> optionalRowFormatSerde,
    Optional<String> optionalInputFormat,
    Optional<String> optionalOutputFormat,
    Properties tableProperties,
    boolean isEvolutionEnabled,
    boolean casePreserved,
    Optional<Table> destinationTableMeta,
    Map<String, String> hiveColumns) {
  Preconditions.checkNotNull(schema);
  Preconditions.checkArgument(StringUtils.isNotBlank(tblName));
  Preconditions.checkArgument(StringUtils.isNotBlank(tblLocation));
  String dbName = optionalDbName.isPresent() ? optionalDbName.get() : DEFAULT_DB_NAME;
  String rowFormatSerde = optionalRowFormatSerde.isPresent() ? optionalRowFormatSerde.get() : DEFAULT_ROW_FORMAT_SERDE;
  String inputFormat = optionalInputFormat.isPresent() ? optionalInputFormat.get() : DEFAULT_ORC_INPUT_FORMAT;
  String outputFormat = optionalOutputFormat.isPresent() ? optionalOutputFormat.get() : DEFAULT_ORC_OUTPUT_FORMAT;
  tableProperties = getTableProperties(tableProperties);
  // Start building Hive DDL
  // Refer to Hive DDL manual for explanation of clauses:
  // https://cwiki.apache.org/confluence/display/Hive/LanguageManual+DDL#LanguageManualDDL-Create/Drop/TruncateTable
  StringBuilder ddl = new StringBuilder();
  // Create statement
  ddl.append(String.format("CREATE EXTERNAL TABLE IF NOT EXISTS `%s`.`%s` ", dbName, tblName));
  // .. open bracket for CREATE
  ddl.append("( \n");
  // 1. If evolution is enabled, and destination table does not exists
  // .. use columns from new schema
  // (evolution does not matter if its new destination table)
  // 2. If evolution is enabled, and destination table does exists
  // .. use columns from new schema
  // (alter table will be used before moving data from staging to final table)
  // 3. If evolution is disabled, and destination table does not exists
  // .. use columns from new schema
  // (evolution does not matter if its new destination table)
  // 4. If evolution is disabled, and destination table does exists
  // .. use columns from destination schema
  // Make sure the schema attribute will be updated in source-of-truth attribute.
  // Or fall back to default attribute-pair used in Hive for ORC format.
  if (tableProperties.containsKey(SCHEMA_SOURCE_OF_TRUTH)) {
    // Indirection is intentional: the VALUE of SCHEMA_SOURCE_OF_TRUTH names the property
    // under which the (sanitized) Avro schema string should be stored.
    tableProperties.setProperty(tableProperties.getProperty(SCHEMA_SOURCE_OF_TRUTH), sanitizeSchemaString(schema.toString()));
    tableProperties.remove(SCHEMA_SOURCE_OF_TRUTH);
  }
  if (isEvolutionEnabled || !destinationTableMeta.isPresent()) {
    log.info("Generating DDL using source schema");
    ddl.append(generateAvroToHiveColumnMapping(schema, Optional.of(hiveColumns), true, dbName + "." + tblName));
    if (casePreserved) {
      try {
        // Store case-preserving column names/types directly in TBLPROPERTIES, since Hive
        // DDL lower-cases identifiers.
        Pair<String, String> orcSchemaProps = HiveConverterUtils.getORCSchemaPropsFromAvroSchema(schema);
        tableProperties.setProperty("columns", orcSchemaProps.getLeft());
        tableProperties.setProperty("columns.types", orcSchemaProps.getRight());
      } catch (SerDeException e) {
        // NOTE(review): message says "add partition DDL" but this is create-table DDL
        // generation — likely a copy-paste; verify before relying on the log text.
        log.error("Cannot generate add partition DDL due to ", e);
        throw new RuntimeException(e);
      }
    }
  } else {
    log.info("Generating DDL using destination schema");
    ddl.append(generateDestinationToHiveColumnMapping(Optional.of(hiveColumns), destinationTableMeta.get()));
  }
  // .. close bracket for CREATE
  ddl.append(") \n");
  // Partition info
  if (optionalPartitionDDLInfo.isPresent() && optionalPartitionDDLInfo.get().size() > 0) {
    ddl.append("PARTITIONED BY ( ");
    boolean isFirst = true;
    Map<String, String> partitionInfoMap = optionalPartitionDDLInfo.get();
    for (Map.Entry<String, String> partitionInfo : partitionInfoMap.entrySet()) {
      if (isFirst) {
        isFirst = false;
      } else {
        ddl.append(", ");
      }
      ddl.append(String.format("`%s` %s", partitionInfo.getKey(), partitionInfo.getValue()));
    }
    ddl.append(" ) \n");
  }
  if (optionalClusterInfo.isPresent()) {
    // CLUSTERED BY requires a bucket count; fail fast if it is missing.
    if (!optionalNumOfBuckets.isPresent()) {
      throw new IllegalArgumentException(
          (String.format("CLUSTERED BY requested, but no NUM_BUCKETS specified for table %s.%s", dbName, tblName)));
    }
    ddl.append("CLUSTERED BY ( ");
    boolean isFirst = true;
    for (String clusterByCol : optionalClusterInfo.get()) {
      // Every cluster column must exist in the generated Hive schema.
      if (!hiveColumns.containsKey(clusterByCol)) {
        throw new IllegalArgumentException(String.format("Requested CLUSTERED BY column: %s "
            + "is not present in schema for table %s.%s", clusterByCol, dbName, tblName));
      }
      if (isFirst) {
        isFirst = false;
      } else {
        ddl.append(", ");
      }
      ddl.append(String.format("`%s`", clusterByCol));
    }
    ddl.append(" ) ");
    if (optionalSortOrderInfo.isPresent() && optionalSortOrderInfo.get().size() > 0) {
      Map<String, COLUMN_SORT_ORDER> sortOrderInfoMap = optionalSortOrderInfo.get();
      ddl.append("SORTED BY ( ");
      isFirst = true;
      for (Map.Entry<String, COLUMN_SORT_ORDER> sortOrderInfo : sortOrderInfoMap.entrySet()){
        if (!hiveColumns.containsKey(sortOrderInfo.getKey())) {
          throw new IllegalArgumentException(String.format(
              "Requested SORTED BY column: %s " + "is not present in schema for table %s.%s", sortOrderInfo.getKey(),
              dbName, tblName));
        }
        if (isFirst) {
          isFirst = false;
        } else {
          ddl.append(", ");
        }
        // NOTE(review): %s invokes the enum's lombok-generated toString here — confirm
        // the emitted token is the bare sort keyword (ASC/DESC).
        ddl.append(String.format("`%s` %s", sortOrderInfo.getKey(), sortOrderInfo.getValue()));
      }
      ddl.append(" ) ");
    }
    ddl.append(String.format(" INTO %s BUCKETS %n", optionalNumOfBuckets.get()));
  } else {
    // SORTED BY without CLUSTERED BY is invalid Hive DDL.
    if (optionalSortOrderInfo.isPresent()) {
      throw new IllegalArgumentException(
          String.format("SORTED BY requested, but no CLUSTERED BY specified for table %s.%s", dbName, tblName));
    }
  }
  // Field Terminal
  ddl.append("ROW FORMAT SERDE \n");
  ddl.append(String.format(" '%s' %n", rowFormatSerde));
  // Stored as ORC
  ddl.append("STORED AS INPUTFORMAT \n");
  ddl.append(String.format(" '%s' %n", inputFormat));
  ddl.append("OUTPUTFORMAT \n");
  ddl.append(String.format(" '%s' %n", outputFormat));
  // Location
  ddl.append("LOCATION \n");
  ddl.append(String.format(" '%s' %n", tblLocation));
  // Table properties
  if (null != tableProperties && tableProperties.size() > 0) {
    ddl.append("TBLPROPERTIES ( \n");
    boolean isFirst = true;
    for (String property : tableProperties.stringPropertyNames()) {
      if (isFirst) {
        isFirst = false;
      } else {
        ddl.append(", \n");
      }
      ddl.append(String.format(" '%s'='%s'", property, tableProperties.getProperty(property)));
    }
    ddl.append(") \n");
  }
  return ddl.toString();
}
/**
 * Merges the supplied table properties with {@code DEFAULT_TBL_PROPERTIES}: any default
 * key that the caller did not set is copied in. When the input is null or empty a clone
 * of the defaults is returned; otherwise the input instance itself is updated in place
 * and returned (callers see the mutation).
 */
private static Properties getTableProperties(Properties tableProperties) {
  boolean noUserProperties = (tableProperties == null) || tableProperties.isEmpty();
  if (noUserProperties) {
    // Clone so callers cannot mutate the shared default Properties instance.
    return (Properties) DEFAULT_TBL_PROPERTIES.clone();
  }
  for (String defaultKey : DEFAULT_TBL_PROPERTIES.stringPropertyNames()) {
    // Only fill in defaults the caller has not already provided.
    tableProperties.putIfAbsent(defaultKey, DEFAULT_TBL_PROPERTIES.get(defaultKey));
  }
  return tableProperties;
}
/***
 * Generate DDL query to create a Hive partition pointing at specific location.
 * @param dbName Hive database name.
 * @param tableName Hive table name.
 * @param partitionLocation Physical location of partition.
 * @param partitionsDMLInfo Partitions DML info - a map of partition name and partition value.
 * @param format Optional Hive partition file format; when present an
 *               {@code ALTER TABLE .. ADD .. FILEFORMAT ..} variant is emitted so the partition
 *               can use a different storage format than its table.
 * @return Commands to create a partition; empty list when no partition spec is supplied.
 */
public static List<String> generateCreatePartitionDDL(String dbName, String tableName, String partitionLocation,
    Map<String, String> partitionsDMLInfo, Optional<String> format) {
  // Nothing to add for an unpartitioned spec.
  if (null == partitionsDMLInfo || partitionsDMLInfo.size() == 0) {
    return Collections.emptyList();
  }
  // Partition details: rendered as PARTITION (`k1`='v1', `k2`='v2', ...)
  StringBuilder partitionSpecs = new StringBuilder();
  partitionSpecs.append("PARTITION (");
  boolean isFirstPartitionSpec = true;
  for (Map.Entry<String, String> partition : partitionsDMLInfo.entrySet()) {
    if (isFirstPartitionSpec) {
      isFirstPartitionSpec = false;
    } else {
      partitionSpecs.append(", ");
    }
    partitionSpecs.append(String.format("`%s`='%s'", partition.getKey(), partition.getValue()));
  }
  partitionSpecs.append(") \n");
  // Create statement
  List<String> ddls = Lists.newArrayList();
  // Note: Hive does not support fully qualified Hive table names such as db.table for ALTER TABLE in v0.13
  // .. hence specifying 'use dbName' as a precursor to rename
  // Refer: HIVE-2496
  ddls.add(String.format("USE %s%n", dbName));
  if (format.isPresent()) {
    ddls.add(String
        .format("ALTER TABLE `%s` ADD IF NOT EXISTS %s FILEFORMAT %s LOCATION '%s' ", tableName, partitionSpecs,
            format.get(), partitionLocation));
  } else {
    ddls.add(String.format("ALTER TABLE `%s` ADD IF NOT EXISTS %s LOCATION '%s' ", tableName, partitionSpecs,
        partitionLocation));
  }
  return ddls;
}
/***
 * Convenience overload of {@link #generateCreatePartitionDDL(String, String, String, Map, Optional)}
 * that does not override the partition file format (no FILEFORMAT clause is emitted).
 */
public static List<String> generateCreatePartitionDDL(String dbName, String tableName, String partitionLocation,
    Map<String, String> partitionsDMLInfo) {
  return generateCreatePartitionDDL(dbName, tableName, partitionLocation, partitionsDMLInfo,
      Optional.<String>absent());
}
/***
 * Generate DDL query to drop a Hive table. The statement is a no-op when the table
 * does not exist (IF EXISTS), and both identifiers are backtick-quoted.
 * @param dbName Hive database name.
 * @param tableName Hive table name.
 * @return Command to drop the table.
 */
public static String generateDropTableDDL(String dbName, String tableName) {
  return "DROP TABLE IF EXISTS `" + dbName + "`.`" + tableName + "`";
}
/***
 * Use destination table schema to generate column mapping.
 * @param hiveColumns Optional Map to populate with the generated hive columns (column name to
 *                    Hive type) for reference of caller; populated as a side effect while the
 *                    DDL fragment is built.
 * @param destinationTableMeta destination table metadata
 * @return Comma separated Hive column definitions (`name` type COMMENT '...') for the
 *         destination table's columns.
 */
private static String generateDestinationToHiveColumnMapping(
    Optional<Map<String, String>> hiveColumns,
    Table destinationTableMeta) {
  StringBuilder columns = new StringBuilder();
  boolean isFirst = true;
  List<FieldSchema> fieldList = destinationTableMeta.getSd().getCols();
  for (FieldSchema field : fieldList) {
    if (isFirst) {
      isFirst = false;
    } else {
      columns.append(", \n");
    }
    String name = field.getName();
    // Nested types need their inner field names backtick-escaped for Hive.
    String type = escapeHiveType(field.getType());
    String comment = field.getComment();
    if (hiveColumns.isPresent()) {
      hiveColumns.get().put(name, type);
    }
    // NOTE(review): field.getComment() can be null for columns without a comment; passing null to
    // escapeStringForHive would NPE — presumably upstream always sets comments here. TODO confirm.
    columns.append(String.format("  `%s` %s COMMENT '%s'", name, type, escapeStringForHive(comment)));
  }
  return columns.toString();
}
/***
 * Escape a Hive type string by backtick-quoting field names inside nested types.
 * Primitive types are returned unchanged; list/map/struct/union types are rebuilt
 * recursively with struct field names wrapped in backticks.
 * @param type Primitive or nested Hive type.
 * @return Escaped Hive type string.
 */
public static String escapeHiveType(String type) {
  TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString(type);
  ObjectInspector.Category category = typeInfo.getCategory();
  if (category == null) {
    throw new RuntimeException("Unknown type encountered: " + type);
  }
  switch (category) {
    case PRIMITIVE:
      // Nothing to escape in a primitive type.
      return type;
    case LIST: {
      ListTypeInfo listType = (ListTypeInfo) typeInfo;
      return org.apache.hadoop.hive.serde.serdeConstants.LIST_TYPE_NAME + "<"
          + escapeHiveType(listType.getListElementTypeInfo().getTypeName()) + ">";
    }
    case MAP: {
      MapTypeInfo mapType = (MapTypeInfo) typeInfo;
      return org.apache.hadoop.hive.serde.serdeConstants.MAP_TYPE_NAME + "<"
          + escapeHiveType(mapType.getMapKeyTypeInfo().getTypeName()) + ","
          + escapeHiveType(mapType.getMapValueTypeInfo().getTypeName()) + ">";
    }
    case STRUCT: {
      StructTypeInfo structType = (StructTypeInfo) typeInfo;
      List<String> fieldNames = structType.getAllStructFieldNames();
      List<TypeInfo> fieldTypes = structType.getAllStructFieldTypeInfos();
      StringBuilder escaped = new StringBuilder(serdeConstants.STRUCT_TYPE_NAME).append("<");
      for (int i = 0; i < fieldNames.size(); i++) {
        if (i > 0) {
          escaped.append(",");
        }
        // Backtick-quote each struct field name, then recurse into its type.
        escaped.append("`").append(fieldNames.get(i)).append("`:")
            .append(escapeHiveType(fieldTypes.get(i).getTypeName()));
      }
      return escaped.append(">").toString();
    }
    case UNION: {
      UnionTypeInfo unionType = (UnionTypeInfo) typeInfo;
      List<TypeInfo> memberTypes = unionType.getAllUnionObjectTypeInfos();
      StringBuilder escaped = new StringBuilder(serdeConstants.UNION_TYPE_NAME).append("<");
      for (int i = 0; i < memberTypes.size(); i++) {
        if (i > 0) {
          escaped.append(",");
        }
        escaped.append(escapeHiveType(memberTypes.get(i).getTypeName()));
      }
      return escaped.append(">").toString();
    }
    default:
      throw new RuntimeException("Unknown type encountered: " + type);
  }
}
/***
 * Generate DML mapping query to populate output schema table by selecting from input schema table
 * This method assumes that each output schema field has a corresponding source input table's field reference
 * .. in form of 'flatten_source' property
 * @param inputAvroSchema Input schema that was used to obtain output schema (next argument)
 * @param outputOrcSchema Output schema (flattened or nested) that was generated using input schema
 * .. and has lineage information compatible with input schema
 * @param inputTblName Input table name
 * @param outputTblName Output table name
 * @param optionalInputDbName Optional input DB name, if not specified it will default to 'default'
 * @param optionalOutputDbName Optional output DB name, if not specified it will default to 'default'
 * @param optionalPartitionDMLInfo Optional partition info in form of map of partition key, partition value pairs;
 * .. used both for the INSERT's PARTITION clause and the SELECT's WHERE clause
 * @param optionalOverwriteTable Optional overwrite table, if not specified it is set to true
 * @param optionalCreateIfNotExists Optional create if not exists, if not specified it is set to false
 * @param isEvolutionEnabled If schema evolution is turned on
 * @param destinationTableMeta Optional destination table metadata
 * @param rowLimit Optional row limit
 * @return DML query
 */
public static String generateTableMappingDML(Schema inputAvroSchema,
    Schema outputOrcSchema,
    String inputTblName,
    String outputTblName,
    Optional<String> optionalInputDbName,
    Optional<String> optionalOutputDbName,
    Optional<Map<String, String>> optionalPartitionDMLInfo,
    Optional<Boolean> optionalOverwriteTable,
    Optional<Boolean> optionalCreateIfNotExists,
    boolean isEvolutionEnabled,
    Optional<Table> destinationTableMeta,
    Optional<Integer> rowLimit) {
  Preconditions.checkNotNull(inputAvroSchema);
  Preconditions.checkNotNull(outputOrcSchema);
  Preconditions.checkArgument(StringUtils.isNotBlank(inputTblName));
  Preconditions.checkArgument(StringUtils.isNotBlank(outputTblName));
  // Resolve optional arguments to their documented defaults.
  String inputDbName = optionalInputDbName.isPresent() ? optionalInputDbName.get() : DEFAULT_DB_NAME;
  String outputDbName = optionalOutputDbName.isPresent() ? optionalOutputDbName.get() : DEFAULT_DB_NAME;
  boolean shouldOverwriteTable = optionalOverwriteTable.isPresent() ? optionalOverwriteTable.get() : true;
  boolean shouldCreateIfNotExists = optionalCreateIfNotExists.isPresent() ? optionalCreateIfNotExists.get() : false;
  log.debug("Input Schema: " + inputAvroSchema.toString());
  log.debug("Output Schema: " + outputOrcSchema.toString());
  // Start building Hive DML
  // Refer to Hive DDL manual for explanation of clauses:
  // https://cwiki.apache.org/confluence/display/Hive/LanguageManual+DML#LanguageManualDML-InsertingdataintoHiveTablesfromqueries
  StringBuilder dmlQuery = new StringBuilder();
  // Insert query
  if (shouldOverwriteTable) {
    dmlQuery.append(String.format("INSERT OVERWRITE TABLE `%s`.`%s` %n", outputDbName, outputTblName));
  } else {
    dmlQuery.append(String.format("INSERT INTO TABLE `%s`.`%s` %n", outputDbName, outputTblName));
  }
  // Partition details
  if (optionalPartitionDMLInfo.isPresent()) {
    if (optionalPartitionDMLInfo.get().size() > 0) {
      dmlQuery.append("PARTITION (");
      boolean isFirstPartitionSpec = true;
      for (Map.Entry<String, String> partition : optionalPartitionDMLInfo.get().entrySet()) {
        if (isFirstPartitionSpec) {
          isFirstPartitionSpec = false;
        } else {
          dmlQuery.append(", ");
        }
        dmlQuery.append(String.format("`%s`='%s'", partition.getKey(), partition.getValue()));
      }
      dmlQuery.append(") \n");
    }
  }
  // If not exists
  if (shouldCreateIfNotExists) {
    dmlQuery.append(" IF NOT EXISTS \n");
  }
  // Select query
  dmlQuery.append("SELECT \n");
  // 1. If evolution is enabled, and destination table does not exists
  // .. use columns from new schema
  // (evolution does not matter if its new destination table)
  // 2. If evolution is enabled, and destination table does exists
  // .. use columns from new schema
  // (alter table will be used before moving data from staging to final table)
  // 3. If evolution is disabled, and destination table does not exists
  // .. use columns from new schema
  // (evolution does not matter if its new destination table)
  // 4. If evolution is disabled, and destination table does exists
  // .. use columns from destination schema
  if (isEvolutionEnabled || !destinationTableMeta.isPresent()) {
    log.info("Generating DML using source schema");
    boolean isFirst = true;
    List<Schema.Field> fieldList = outputOrcSchema.getFields();
    for (Schema.Field field : fieldList) {
      // 'flatten_source' carries the lineage back to the (possibly nested) source column.
      String flattenSource = field.getProp("flatten_source");
      String colName;
      if (StringUtils.isNotBlank(flattenSource)) {
        colName = flattenSource;
      } else {
        colName = field.name();
      }
      // Escape the column name: a.b.c becomes a`.`b`.`c so the surrounding backticks quote each part.
      colName = colName.replaceAll("\\.", "`.`");
      if (isFirst) {
        isFirst = false;
      } else {
        dmlQuery.append(", \n");
      }
      dmlQuery.append(String.format("  `%s`", colName));
    }
  } else {
    log.info("Generating DML using destination schema");
    boolean isFirst = true;
    List<FieldSchema> fieldList = destinationTableMeta.get().getSd().getCols();
    for (FieldSchema field : fieldList) {
      String colName = StringUtils.EMPTY;
      if (field.isSetComment() && field.getComment().startsWith("from flatten_source ")) {
        // Retrieve from column (flatten_source) from comment
        colName = field.getComment().replaceAll("from flatten_source ", "").trim();
      } else {
        // Or else find field in flattened schema
        List<Schema.Field> evolvedFieldList = outputOrcSchema.getFields();
        for (Schema.Field evolvedField : evolvedFieldList) {
          if (evolvedField.name().equalsIgnoreCase(field.getName())) {
            String flattenSource = evolvedField.getProp("flatten_source");
            if (StringUtils.isNotBlank(flattenSource)) {
              colName = flattenSource;
            } else {
              colName = evolvedField.name();
            }
            break;
          }
        }
      }
      // Escape the column name
      colName = colName.replaceAll("\\.", "`.`");
      // colName can be blank if it is deleted in new evolved schema, so we shouldn't try to fetch it
      if (StringUtils.isNotBlank(colName)) {
        if (isFirst) {
          isFirst = false;
        } else {
          dmlQuery.append(", \n");
        }
        dmlQuery.append(String.format("  `%s`", colName));
      }
    }
  }
  dmlQuery.append(String.format(" %n FROM `%s`.`%s` ", inputDbName, inputTblName));
  // Partition details: the same partition spec is re-used to restrict the SELECT.
  if (optionalPartitionDMLInfo.isPresent()) {
    if (optionalPartitionDMLInfo.get().size() > 0) {
      dmlQuery.append("WHERE ");
      boolean isFirstPartitionSpec = true;
      for (Map.Entry<String, String> partition : optionalPartitionDMLInfo.get().entrySet()) {
        if (isFirstPartitionSpec) {
          isFirstPartitionSpec = false;
        } else {
          dmlQuery.append(" AND ");
        }
        dmlQuery.append(String.format("`%s`='%s'",
            partition.getKey(), partition.getValue()));
      }
      dmlQuery.append(" \n");
    }
  }
  // Limit clause
  if (rowLimit.isPresent()) {
    dmlQuery.append(String.format("LIMIT %s", rowLimit.get()));
  }
  return dmlQuery.toString();
}
/***
 * Parse an Avro {@link Schema} from its JSON string representation.
 * @param schemaStr Avro schema as a JSON string.
 * @return Parsed Avro schema.
 * @throws IOException declared for API compatibility; Avro parse failures surface as runtime exceptions.
 */
public static Schema readSchemaFromString(String schemaStr)
    throws IOException {
  return new Schema.Parser().parse(schemaStr);
}
/***
 * Generate DDLs to evolve the final destination table. The returned DDL contains not only schema
 * evolution statements but also table-property updates.
 * @param stagingTableName Staging table.
 * @param finalTableName Un-evolved final destination table.
 * @param optionalStagingDbName Optional staging database name, defaults to default.
 * @param optionalFinalDbName Optional final database name, defaults to default.
 * @param evolvedSchema Evolved Avro Schema.
 * @param isEvolutionEnabled Is schema evolution enabled.
 * @param evolvedColumns Evolved columns in Hive format (column name to Hive type).
 * @param destinationTableMeta Destination table metadata.
 * @param tableProperties Table properties to (re)apply on the final table via
 *                        ALTER TABLE ... SET TBLPROPERTIES.
 * @return DDLs to evolve final destination table; empty when evolution is disabled or the
 *         destination table does not exist yet.
 */
public static List<String> generateEvolutionDDL(String stagingTableName,
    String finalTableName,
    Optional<String> optionalStagingDbName,
    Optional<String> optionalFinalDbName,
    Schema evolvedSchema,
    boolean isEvolutionEnabled,
    Map<String, String> evolvedColumns,
    Optional<Table> destinationTableMeta,
    Properties tableProperties
) {
  // If schema evolution is disabled, then do nothing OR
  // If destination table does not exists, then do nothing
  if (!isEvolutionEnabled || !destinationTableMeta.isPresent()) {
    return Collections.emptyList();
  }
  String stagingDbName = optionalStagingDbName.isPresent() ? optionalStagingDbName.get() : DEFAULT_DB_NAME;
  String finalDbName = optionalFinalDbName.isPresent() ? optionalFinalDbName.get() : DEFAULT_DB_NAME;
  List<String> ddl = Lists.newArrayList();
  // Evolve schema
  Table destinationTable = destinationTableMeta.get();
  if (destinationTable.getSd().getCols().size() == 0) {
    // Fixed log grammar: "does not has" -> "does not have".
    log.warn("Destination Table: " + destinationTable + " does not have column details in StorageDescriptor. "
        + "It is probably of Avro type. Cannot evolve via traditional HQL, so skipping evolution checks.");
    return ddl;
  }
  for (Map.Entry<String, String> evolvedColumn : evolvedColumns.entrySet()) {
    // Find evolved column in destination table
    boolean found = false;
    for (FieldSchema destinationField : destinationTable.getSd().getCols()) {
      if (destinationField.getName().equalsIgnoreCase(evolvedColumn.getKey())) {
        // If evolved column is found, but type is evolved - evolve it
        // .. if incompatible, isTypeEvolved will throw an exception
        boolean typeEvolved;
        try {
          typeEvolved = isTypeEvolved(evolvedColumn.getValue(), destinationField.getType());
        } catch (Exception e) {
          // Wrap with table context so the failing table is identifiable in logs.
          throw new RuntimeException(
              String.format("Unable to evolve schema for table %s.%s", finalDbName, finalTableName), e);
        }
        if (typeEvolved) {
          ddl.add(String.format("USE %s%n", finalDbName));
          ddl.add(String.format("ALTER TABLE `%s` CHANGE COLUMN `%s` `%s` %s COMMENT '%s'",
              finalTableName, evolvedColumn.getKey(), evolvedColumn.getKey(), evolvedColumn.getValue(),
              escapeStringForHive(destinationField.getComment())));
        }
        found = true;
        break;
      }
    }
    if (!found) {
      // If evolved column is not found ie. its new, add this column
      String flattenSource = evolvedSchema.getField(evolvedColumn.getKey()).getProp("flatten_source");
      if (StringUtils.isBlank(flattenSource)) {
        flattenSource = evolvedSchema.getField(evolvedColumn.getKey()).name();
      }
      // Note: Hive does not support fully qualified Hive table names such as db.table for ALTER TABLE in v0.13
      // .. hence specifying 'use dbName' as a precursor to rename
      // Refer: HIVE-2496
      ddl.add(String.format("USE %s%n", finalDbName));
      // The lineage comment ("from flatten_source ...") is later parsed by the DML generator.
      ddl.add(String.format("ALTER TABLE `%s` ADD COLUMNS (`%s` %s COMMENT 'from flatten_source %s')",
          finalTableName, evolvedColumn.getKey(), evolvedColumn.getValue(), flattenSource));
    }
  }
  // Updating table properties.
  ddl.add(String.format("USE %s%n", finalDbName));
  for (String property : tableProperties.stringPropertyNames()) {
    ddl.add(String.format("ALTER TABLE `%s` SET TBLPROPERTIES ('%s'='%s')", finalTableName,
        property, tableProperties.getProperty(property)));
  }
  return ddl;
}
/**
 * Generate DDL for dropping a partition of a table.
 * <p>
 * ALTER TABLE finalTableName DROP IF EXISTS PARTITION partition_spec;
 * </p>
 * @param dbName Database of the table whose partition is dropped.
 * @param finalTableName Table name where partitions are dropped.
 * @param partitionsDMLInfo Partition spec (partition name to value) to be dropped.
 * @return DDL statements ("USE db" followed by ALTER TABLE ... DROP), or an empty list
 *         when no partition spec is supplied.
 */
public static List<String> generateDropPartitionsDDL(final String dbName, final String finalTableName,
    final Map<String, String> partitionsDMLInfo) {
  if (null == partitionsDMLInfo || partitionsDMLInfo.isEmpty()) {
    return Collections.emptyList();
  }
  // Build the PARTITION (`k1`='v1', `k2`='v2', ...) clause; the separator variable
  // is empty for the first entry and ", " thereafter.
  StringBuilder specBuilder = new StringBuilder("PARTITION (");
  String separator = "";
  for (Map.Entry<String, String> partition : partitionsDMLInfo.entrySet()) {
    specBuilder.append(separator)
        .append(String.format("`%s`='%s'", partition.getKey(), partition.getValue()));
    separator = ", ";
  }
  specBuilder.append(") ");
  List<String> ddls = Lists.newArrayList();
  // Note: Hive does not support fully qualified Hive table names such as db.table for ALTER TABLE in v0.13
  // .. hence specifying 'use dbName' as a precursor to rename
  // Refer: HIVE-2496
  ddls.add(String.format("USE %s%n", dbName));
  ddls.add(String.format("ALTER TABLE %s DROP IF EXISTS %s", finalTableName, specBuilder));
  return ddls;
}
/**
 * Generate DDL for dropping partitions of a table.
 * <p>
 * ALTER TABLE finalTableName DROP IF EXISTS PARTITION partition_spec, PARTITION partition_spec, ...;
 * </p>
 * @param dbName Database of the table whose partitions are dropped.
 * @param finalTableName Table name where partitions are dropped
 * @param partitionDMLInfos list of Partition to be dropped
 * @return DDL to drop partitions in <code>finalTableName</code>; empty list when there are
 *         no partitions to drop.
 */
public static List<String> generateDropPartitionsDDL(final String dbName, final String finalTableName,
    final List<Map<String, String>> partitionDMLInfos) {
  if (partitionDMLInfos.isEmpty()) {
    return Collections.emptyList();
  }
  List<String> ddls = Lists.newArrayList();
  ddls.add(String.format("USE %s %n", dbName));
  // Join the partition specs: each map is rendered via PARTITION_SPEC_GENERATOR into
  // " PARTITION (k='v',...)" and the specs are comma-joined into a single ALTER TABLE.
  ddls.add(String.format("ALTER TABLE %s DROP IF EXISTS %s", finalTableName,
      Joiner.on(",").join(Iterables.transform(partitionDMLInfos, PARTITION_SPEC_GENERATOR))));
  return ddls;
}
/***
 * Generate DDL for creating and optionally force-rebuilding a view over a table.
 *
 * Create view:
 * <p>
 * CREATE VIEW IF NOT EXISTS db.viewName AS SELECT * FROM db.tableName
 * </p>
 *
 * Update view:
 * <p>
 * ALTER VIEW db.viewName AS SELECT * FROM db.tableName
 * </p>
 *
 * @param tableDbName Database for the table over which view has to be created; defaults when blank.
 * @param tableName Table over which view has to be created.
 * @param viewDbName Database for the view to be created; defaults when blank.
 * @param viewName View to be created.
 * @param shouldUpdateView If view should be forced re-built.
 * @return DDLs to create and / or update view over a table.
 */
public static List<String> generateCreateOrUpdateViewDDL(final String tableDbName, final String tableName,
    final String viewDbName, final String viewName, final boolean shouldUpdateView) {
  Preconditions.checkArgument(StringUtils.isNotBlank(tableName), "Table name should not be empty");
  Preconditions.checkArgument(StringUtils.isNotBlank(viewName), "View name should not be empty");
  // Fall back to the default database when a database name is blank.
  final String srcDb = StringUtils.isBlank(tableDbName) ? DEFAULT_DB_NAME : tableDbName;
  final String viewDb = StringUtils.isBlank(viewDbName) ? DEFAULT_DB_NAME : viewDbName;
  final List<String> ddls = Lists.newArrayList();
  // Creation is a no-op if the view already exists.
  ddls.add(String.format("CREATE VIEW IF NOT EXISTS `%s`.`%s` AS SELECT * FROM `%s`.`%s`",
      viewDb, viewName,
      srcDb, tableName));
  if (shouldUpdateView) {
    // ALTER VIEW forces the view definition to be rebuilt even when it already existed.
    ddls.add(String.format("ALTER VIEW `%s`.`%s` AS SELECT * FROM `%s`.`%s`",
        viewDb, viewName,
        srcDb, tableName));
  }
  return ddls;
}
/***
 * Generate DDL for updating file format of table or partition.
 * If partition spec is absent, DDL query to change storage format of Table is generated.
 *
 * Query syntax:
 * <p>
 * ALTER TABLE tableName [PARTITION partition_spec] SET FILEFORMAT fileFormat
 * </p>
 *
 * @param dbName Database for the table for which storage format needs to be changed; defaults when blank.
 * @param tableName Table for which storage format needs to be changed.
 * @param partitionsDMLInfo Optional partition spec for which storage format needs to be changed.
 * @param format Storage format.
 * @return DDL to change storage format for Table or Partition.
 */
public static List<String> generateAlterTableOrPartitionStorageFormatDDL(final String dbName,
    final String tableName,
    final Optional<Map<String, String>> partitionsDMLInfo,
    String format) {
  Preconditions.checkArgument(StringUtils.isNotBlank(tableName), "Table name should not be empty");
  Preconditions.checkArgument(StringUtils.isNotBlank(format), "Format should not be empty");
  // Resolve defaults
  String resolvedDbName = (StringUtils.isBlank(dbName)) ? DEFAULT_DB_NAME : dbName;
  // Partition details; left empty (no PARTITION clause) when no spec is supplied,
  // in which case the ALTER applies to the whole table.
  StringBuilder partitionSpecs = new StringBuilder();
  if (partitionsDMLInfo.isPresent()) {
    partitionSpecs.append("PARTITION (");
    boolean isFirstPartitionSpec = true;
    for (Map.Entry<String, String> partition : partitionsDMLInfo.get().entrySet()) {
      if (isFirstPartitionSpec) {
        isFirstPartitionSpec = false;
      } else {
        partitionSpecs.append(", ");
      }
      partitionSpecs.append(String.format("`%s`='%s'", partition.getKey(), partition.getValue()));
    }
    partitionSpecs.append(") ");
  }
  List<String> ddls = Lists.newArrayList();
  // Note: Hive does not support fully qualified Hive table names such as db.table for ALTER TABLE in v0.13
  // .. hence specifying 'use dbName' as a precursor to rename
  // Refer: HIVE-2496
  ddls.add(String.format("USE %s%n", resolvedDbName));
  ddls.add(String.format("ALTER TABLE %s %s SET FILEFORMAT %s", tableName, partitionSpecs, format));
  return ddls;
}
/***
 * Serialize a {@link QueryBasedHivePublishEntity} into a {@link State} at {@link #SERIALIZED_PUBLISH_TABLE_COMMANDS}.
 * The entity is stored as a JSON string so it can be carried through job state to the publisher;
 * counterpart of {@link #deserializePublishCommands(State)}.
 * @param state {@link State} to serialize entity into.
 * @param queryBasedHivePublishEntity to carry to publisher.
 */
public static void serializePublishCommands(State state, QueryBasedHivePublishEntity queryBasedHivePublishEntity) {
  state.setProp(HiveAvroORCQueryGenerator.SERIALIZED_PUBLISH_TABLE_COMMANDS,
      GSON.toJson(queryBasedHivePublishEntity));
}
/***
 * Deserialize the publish entity from a {@link State} at {@link #SERIALIZED_PUBLISH_TABLE_COMMANDS}.
 * @param state {@link State} to look into for serialized entity.
 * @return Publish table entity; never null — a fresh empty {@link QueryBasedHivePublishEntity}
 *         is returned when the property is missing or deserializes to null.
 */
public static QueryBasedHivePublishEntity deserializePublishCommands(State state) {
  QueryBasedHivePublishEntity queryBasedHivePublishEntity =
      GSON.fromJson(state.getProp(HiveAvroORCQueryGenerator.SERIALIZED_PUBLISH_TABLE_COMMANDS),
          QueryBasedHivePublishEntity.class);
  return queryBasedHivePublishEntity == null ? new QueryBasedHivePublishEntity() : queryBasedHivePublishEntity;
}
/**
 * Decide whether a column's Hive type has evolved relative to the destination table.
 * Returns false for identical types (case-insensitive), true for a compatible evolution,
 * and throws when the evolution is known to be incompatible.
 * @param evolvedType New (evolved) Hive type.
 * @param destinationType Current type in the destination table.
 * @return true iff the type changed in a compatible way.
 */
public static boolean isTypeEvolved(String evolvedType, String destinationType) {
  if (evolvedType.equalsIgnoreCase(destinationType)) {
    // Same type, not evolved
    return false;
  }
  if (!HiveAvroTypeConstants.HIVE_COMPATIBLE_TYPES.containsKey(destinationType)) {
    // We assume all complex types are compatible
    // TODO: Add compatibility check when ORC evolution supports complex types
    return true;
  }
  // Destination is a known primitive: the evolved type must be in its compatibility set.
  if (HiveAvroTypeConstants.HIVE_COMPATIBLE_TYPES.get(destinationType).contains(evolvedType)) {
    return true;
  }
  throw new RuntimeException(String.format("Incompatible type evolution from: %s to: %s",
      destinationType, evolvedType));
}
/**
 * Generate partition spec in Hive standard syntax. (partition_column=partition_col_value, partition_column=partition_col_value, ...)
 * Returns an empty string for a null input; values are single-quoted via {@code QUOTE_PARTITION_VALUES}.
 */
private static final Function<Map<String, String>, String> PARTITION_SPEC_GENERATOR = new Function<Map<String, String>, String>() {
  @Override
  public String apply(Map<String, String> partitionDMLInfo) {
    if (partitionDMLInfo == null) {
      return StringUtils.EMPTY;
    }
    // Note the leading space: specs are comma-joined directly after "DROP IF EXISTS".
    return String.format(" PARTITION (%s)", Joiner.on(",").withKeyValueSeparator("=").join(Maps.transformValues(partitionDMLInfo, QUOTE_PARTITION_VALUES)));
  }
};
// Wraps a partition value in single quotes for use inside a PARTITION (...) spec.
private static final Function<String, String> QUOTE_PARTITION_VALUES = new Function<String, String>() {
  @Override
  public String apply(String value) {
    return String.format("'%s'", value);
  }
};
/**
 * Escape single quotes and semicolons so the string can be embedded inside a
 * single-quoted Hive literal (e.g. a column COMMENT): each occurrence is prefixed
 * with a backslash. Pre-existing backslashes are intentionally left untouched,
 * matching the original behavior. Input must be non-null.
 */
private static String escapeStringForHive(String st) {
  return st.replace("'", "\\'").replace(";", "\\;");
}
}
| 2,535 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/query/HiveValidationQueryGenerator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.query;
import java.util.List;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.ql.metadata.Partition;
import com.google.common.base.Optional;
import com.google.common.base.Splitter;
import com.google.common.collect.Lists;
import org.apache.gobblin.data.management.conversion.hive.dataset.ConvertibleHiveDataset;
import org.apache.gobblin.data.management.copy.hive.HiveDataset;
/***
 * Generate Hive queries for validation of converted Hive tables.
 *
 * @author Abhishek Tiwari
 */
public class HiveValidationQueryGenerator {
  /***
   * Generate Hive queries for validating converted Hive table. Emits a pair of count(*)
   * queries — first against the source table, then against the destination — optionally
   * restricted to a single partition; callers compare the two results.
   * @param hiveDataset Source {@link HiveDataset}.
   * @param sourcePartition Source {@link Partition} if any.
   * @param conversionConfig {@link ConvertibleHiveDataset.ConversionConfig} for conversion.
   * @return Validation Hive queries (source count query followed by destination count query).
   */
  public static List<String> generateCountValidationQueries(HiveDataset hiveDataset,
      Optional<Partition> sourcePartition, ConvertibleHiveDataset.ConversionConfig conversionConfig) {
    // Source and converted destination details
    String sourceDatabase = hiveDataset.getDbAndTable().getDb();
    String sourceTable = hiveDataset.getDbAndTable().getTable();
    String destinationDatabase = conversionConfig.getDestinationDbName();
    String destinationTable = conversionConfig.getDestinationTableName();
    // Build query.
    List<String> queries = Lists.newArrayList();
    if (sourcePartition.isPresent()) {
      // Partition name has the form "k1=v1,k2=v2,..."; turn it into a WHERE clause.
      StringBuilder partitionClause = new StringBuilder();
      boolean isFirst = true;
      String partitionInfo = sourcePartition.get().getName();
      List<String> pInfo = Splitter.on(",").omitEmptyStrings().trimResults().splitToList(partitionInfo);
      for (String aPInfo : pInfo) {
        List<String> pInfoParts = Splitter.on("=").omitEmptyStrings().trimResults().splitToList(aPInfo);
        if (pInfoParts.size() != 2) {
          // Fixed typo in error message: "Recieved" -> "Received".
          throw new IllegalArgumentException(String
              .format("Partition details should be of the format " + "partitionName=partitionValue. Received: %s",
                  aPInfo));
        }
        if (isFirst) {
          isFirst = false;
        } else {
          partitionClause.append(" and ");
        }
        partitionClause.append("`").append(pInfoParts.get(0)).append("`='").append(pInfoParts.get(1)).append("'");
      }
      queries.add(String
          .format("SELECT count(*) FROM `%s`.`%s` WHERE %s ", sourceDatabase, sourceTable, partitionClause));
      queries.add(String.format("SELECT count(*) FROM `%s`.`%s` WHERE %s ", destinationDatabase, destinationTable,
          partitionClause));
    } else {
      queries.add(String.format("SELECT count(*) FROM `%s`.`%s` ", sourceDatabase, sourceTable));
      queries.add(String.format("SELECT count(*) FROM `%s`.`%s` ", destinationDatabase, destinationTable));
    }
    return queries;
  }

  /***
   * Generates Hive SQL that can be used to validate the quality between two {@link Table}s or optionally
   * {@link Partition}. The query returned is a basic join query that returns the number of records matched
   * between the two {@link Table}s.
   * The responsibility of actually comparing this value with the expected module should be implemented by
   * the user.
   *
   * @param sourceTable Source Hive {@link Table} name.
   * @param sourceDb Source Hive database name.
   * @param targetTable Target Hive {@link Table} name.
   * @param optionalPartition Optional {@link Partition} to limit the comparison.
   * @param isNestedORC Whether the target is nested ORC (join on same-named columns) or flattened
   *                    (source column resolved from the target field's lineage comment).
   * @return Query to find number of rows common between two tables.
   */
  public static String generateDataValidationQuery(String sourceTable, String sourceDb, Table targetTable,
      Optional<Partition> optionalPartition, boolean isNestedORC) {
    StringBuilder sb = new StringBuilder();
    // Query head
    sb.append("SELECT count(*) FROM `")
        .append(sourceDb).append("`.`").append(sourceTable).append("` s JOIN `")
        .append(targetTable.getDbName()).append("`.`").append(targetTable.getTableName()).append("` t ON \n");
    // Columns equality: join on null-safe equality (<=>) of every comparable column.
    boolean isFirst = true;
    List<FieldSchema> fieldList = targetTable.getSd().getCols();
    for (FieldSchema field : fieldList) {
      // Do not add maps in the join clause. Hive does not support map joins LIHADOOP-21956
      if (StringUtils.startsWithIgnoreCase(field.getType(), "map")) {
        continue;
      }
      if (StringUtils.containsIgnoreCase(field.getType(), ":map")) {
        continue;
      }
      if (isFirst) {
        isFirst = false;
      } else {
        sb.append(" AND \n");
      }
      if (isNestedORC) {
        sb.append("\ts.`").append(field.getName()).append("`<=>");
      } else {
        // The source column lineage information is available in field's comment. Remove the description prefix "from flatten_source"
        // NOTE(review): assumes every flattened-target field carries a non-null comment — confirm upstream.
        String colName = field.getComment().replaceAll("from flatten_source ", "").trim();
        sb.append("\ts.`").append(colName.replaceAll("\\.", "`.`")).append("`<=>");
      }
      sb.append("t.`").append(field.getName()).append("` ");
    }
    sb.append("\n");
    // Partition projection: restrict both sides to the same partition values.
    if (optionalPartition.isPresent()) {
      Partition partition = optionalPartition.get();
      String partitionsInfoString = partition.getName();
      List<String> pInfo = Splitter.on(",").omitEmptyStrings().trimResults().splitToList(partitionsInfoString);
      for (int i = 0; i < pInfo.size(); i++) {
        List<String> partitionInfoParts = Splitter.on("=").omitEmptyStrings().trimResults().splitToList(pInfo.get(i));
        if (partitionInfoParts.size() != 2) {
          // Fixed typo in error message: "Recieved" -> "Received".
          throw new IllegalArgumentException(
              String.format("Partition details should be of the format partitionName=partitionValue. Received: %s", pInfo.get(i)));
        }
        if (i==0) {
          // add where clause
          sb.append(" WHERE \n");
        } else {
          sb.append(" AND ");
        }
        // add project for source and destination partition
        sb.append(String.format("s.`%s`='%s' ", partitionInfoParts.get(0), partitionInfoParts.get(1)));
        sb.append(" AND ");
        sb.append(String.format("t.`%s`='%s' ", partitionInfoParts.get(0), partitionInfoParts.get(1)));
      }
    }
    return sb.toString();
  }
}
| 2,536 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/entities/SchemaAwareHiveTable.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.entities;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import org.apache.avro.Schema;
import org.apache.hadoop.hive.ql.metadata.Table;
/**
* An extension to the {@link Table} that also knows the {@link Schema} of this {@link Table}
*/
@EqualsAndHashCode(callSuper=true)
public class SchemaAwareHiveTable extends Table {
// Avro schema describing this table's data, carried alongside the Hive metadata.
@Getter
private final Schema avroSchema;
private static final long serialVersionUID = 1856720117875056735L;
/**
 * Wraps a metastore table together with its Avro schema.
 *
 * @param table the underlying Hive metastore table
 * @param schema the Avro {@link Schema} associated with the table
 */
public SchemaAwareHiveTable(org.apache.hadoop.hive.metastore.api.Table table, Schema schema) {
super(table);
this.avroSchema = schema;
}
}
| 2,537 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/entities/QueryBasedHivePublishEntity.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.entities;
import java.util.List;
import java.util.Map;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.ToString;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
/**
* Entity to carry Hive queries to publish table and partitions from writer to publisher.
* This entity also holds references to directories to be moved or deleted while publishing.
*/
@ToString
@EqualsAndHashCode
@Getter
public class QueryBasedHivePublishEntity {

  /** Hive queries to execute to publish the table and/or partitions. */
  private List<String> publishQueries = Lists.newArrayList();

  /** Directories to move while publishing: key is source path, value is destination path. */
  private Map<String, String> publishDirectories = Maps.newHashMap();

  /** Hive queries to run for cleanup after the publish step. */
  private List<String> cleanupQueries = Lists.newArrayList();

  /** Directories to delete after the publish step. */
  private List<String> cleanupDirectories = Lists.newArrayList();

  /** All collections start out empty (initialized at field declaration). */
  public QueryBasedHivePublishEntity() {
  }
}
| 2,538 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/entities/TableLikeStageableTableMetadata.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.entities;
import java.util.ArrayList;
import java.util.Properties;
import org.apache.gobblin.data.management.copy.hive.HiveDataset;
import org.apache.hadoop.hive.ql.metadata.Table;
import com.google.common.base.Optional;
import com.typesafe.config.Config;
/**
* A {@link StageableTableMetadata} that copies most metadata from a reference table.
*/
public class TableLikeStageableTableMetadata extends StageableTableMetadata {

  /**
   * Builds metadata with explicit destination db, table name and data path, copying table
   * properties and bucket count from {@code referenceTable}.
   */
  public TableLikeStageableTableMetadata(Table referenceTable, String destinationDB, String destinationTableName, String targetDataPath) {
    super(destinationTableName, destinationTableName + "_STAGING", destinationDB, targetDataPath, true,
        getTableProperties(referenceTable), new ArrayList<>(), Optional.of(referenceTable.getNumBuckets()),
        new Properties(), false, false, Optional.absent(), new ArrayList<>());
  }

  /**
   * Builds metadata whose destination db, table and data path come from templated config values
   * resolved against {@code referenceTable}.
   */
  public TableLikeStageableTableMetadata(Table referenceTable, Config config) {
    super(resolve(config, StageableTableMetadata.DESTINATION_TABLE_KEY, referenceTable),
        resolve(config, StageableTableMetadata.DESTINATION_TABLE_KEY, referenceTable) + "_STAGING",
        resolve(config, StageableTableMetadata.DESTINATION_DB_KEY, referenceTable),
        resolve(config, DESTINATION_DATA_PATH_KEY, referenceTable),
        shouldAddSubDir(config, referenceTable),
        getTableProperties(referenceTable), new ArrayList<>(), Optional.of(referenceTable.getNumBuckets()),
        new Properties(), false, false, Optional.absent(), new ArrayList<>());
  }

  /** Resolves the templated config value at {@code key} against the reference table. */
  private static String resolve(Config config, String key, Table referenceTable) {
    return HiveDataset.resolveTemplate(config.getString(key), referenceTable);
  }

  /** Defaults to true when {@code DESTINATION_DATA_PATH_ADD_SUBDIR} is absent from the config. */
  private static boolean shouldAddSubDir(Config config, Table referenceTable) {
    return !config.hasPath(StageableTableMetadata.DESTINATION_DATA_PATH_ADD_SUBDIR)
        || Boolean.parseBoolean(resolve(config, StageableTableMetadata.DESTINATION_DATA_PATH_ADD_SUBDIR, referenceTable));
  }

  /** Copies all parameters of {@code table} into a fresh {@link Properties} instance. */
  private static Properties getTableProperties(Table table) {
    Properties properties = new Properties();
    properties.putAll(table.getParameters());
    return properties;
  }
}
| 2,539 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/entities/StageableTableMetadata.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.entities;
import java.util.List;
import java.util.Properties;
import org.apache.gobblin.data.management.copy.hive.HiveDataset;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.hadoop.hive.ql.metadata.Table;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.typesafe.config.Config;
import javax.annotation.Nullable;
import lombok.AllArgsConstructor;
import lombok.Data;
/**
* Contains metadata associated with a stageable table.
*
* This class contains information about two Hive tables: a final destination table and a staging table. The staging
* table is used as temporary storage during job run to aid with consistency of the final destination table.
*/
@Data
@AllArgsConstructor
public class StageableTableMetadata {
public static final String DESTINATION_TABLE_KEY = "destination.tableName";
public static final String DESTINATION_DB_KEY = "destination.dbName";
public static final String DESTINATION_DATA_PATH_KEY = "destination.dataPath";
public static final String DESTINATION_DATA_PATH_ADD_SUBDIR = "destination.dataPathAddSubDir";
public static final String DESTINATION_TABLE_PROPERTIES_LIST_KEY = "destination.tableProperties";
public static final String CLUSTER_BY_KEY = "clusterByList";
public static final String NUM_BUCKETS_KEY = "numBuckets";
public static final String EVOLUTION_ENABLED = "evolution.enabled";
public static final String ROW_LIMIT_KEY = "rowLimit";
public static final String HIVE_VERSION_KEY = "hiveVersion";
public static final String HIVE_RUNTIME_PROPERTIES_LIST_KEY = "hiveRuntimeProperties";
/**
 * The output of Hive-based conversion can preserve casing in ORC-writer if "columns" and "columns.types" are being set
 * in table-level properties. Turning this off by-default as Hive engine itself doesn't preserve the case.
 */
public static final String OUTPUT_FILE_CASE_PRESERVED = "casePreserved";
/***
 * Comma separated list of string that should be used as a prefix for destination partition directory name
 * ... (if present in the location path string of source partition)
 *
 * This is helpful in roll-up / compaction scenarios, where you don't want queries in flight to fail.
 *
 * Scenario without this property:
 * - Source partition: datepartition=2016-01-01-00 with path /foo/bar/hourly/2016/01/01/00 is available for
 *   processing
 * - Source partition is processed and published to destination table as: /foo/bar_orc/datepartition=2016-01-01-00
 *
 * - Source partition: datepartition=2016-01-01-00 with path /foo/bar/daily/2016/01/01/00 is available again for
 *   processing (due to roll-up / compaction of hourly data for 2016-01-01 into same partition)
 * - Source partition is processed and published to destination table as: /foo/bar_orc/datepartition=2016-01-01-00
 *   (previous data is overwritten and any queries in flight fail)
 *
 * Same scenario with this property set to "hourly,daily":
 * - Source partition: datepartition=2016-01-01-00 with path /foo/bar/hourly/2016/01/01/00 is available for
 *   processing
 * - Source partition is processed and published to destination table as: /foo/bar_orc/hourly_datepartition=2016-01-01-00
 *   (Note: "hourly_" is prefixed to destination partition directory name because source partition path contains
 *   "hourly" substring)
 *
 * - Source partition: datepartition=2016-01-01-00 with path /foo/bar/daily/2016/01/01/00 is available again for
 *   processing (due to roll-up / compaction of hourly data for 2016-01-01 into same partition)
 * - Source partition is processed and published to destination table as: /foo/bar_orc/daily_datepartition=2016-01-01-00
 *   (Note: "daily_" is prefixed to destination partition directory name, because source partition path contains
 *   "daily" substring)
 * - Any running queries are not impacted since data is not overwritten and hourly_datepartition=2016-01-01-00
 *   directory continues to exist
 *
 * Notes:
 * - This however leaves the responsibility of cleanup of previous destination partition directory on retention or
 *   other such independent module, since in the above case hourly_datepartition=2016-01-01-00 dir will not be deleted
 * - Directories can still be overwritten if they resolve to same destination partition directory name, such as
 *   re-processing / backfill of daily partition will overwrite daily_datepartition=2016-01-01-00 directory
 */
public static final String SOURCE_DATA_PATH_IDENTIFIER_KEY = "source.dataPathIdentifier";
/**
 * Attributes like "avro.schema.literal" are usually used in offline system as the source-of-truth of schema.
 * This configuration's value should be the key name that users expects to preserve to schema string if necessary.
 */
public static final String SCHEMA_SOURCE_OF_TRUTH = "schema.original";
/** Table name of the destination table. */
private final String destinationTableName;
/** Table name of the staging table. */
private final String destinationStagingTableName;
/** Name of db for destination name. */
private final String destinationDbName;
/** Path where files of the destination table should be located. */
private final String destinationDataPath;
/** Flag whether adding a subdirectory after destinationDataPath */
private final Boolean dataDstPathUseSubdir;
/** Table properties of destination table. */
private final Properties destinationTableProperties;
/** List of columns to cluster by. */
private final List<String> clusterBy;
/** Number of buckets in destination table. */
private final Optional<Integer> numBuckets;
/** Hive session properties to set while running conversion queries. */
private final Properties hiveRuntimeProperties;
/** Whether schema evolution is allowed on the destination table. */
private final boolean evolutionEnabled;
/** Whether column-name casing should be preserved in the output files (see OUTPUT_FILE_CASE_PRESERVED). */
private final boolean casePreserved;
/** Optional cap on the number of rows to process. */
private final Optional<Integer> rowLimit;
/** Substrings of the source data path used to prefix destination partition dirs (see SOURCE_DATA_PATH_IDENTIFIER_KEY). */
private final List<String> sourceDataPathIdentifier;
/**
 * Builds metadata from {@code config}, resolving templated values (table, db, data path) against
 * {@code referenceTable} when one is provided.
 *
 * @param config must contain {@link #DESTINATION_TABLE_KEY}, {@link #DESTINATION_DB_KEY} and
 *               {@link #DESTINATION_DATA_PATH_KEY}; all other keys are optional
 * @param referenceTable Hive table used to resolve templates; may be null, in which case config
 *                       values are used verbatim
 * @throws IllegalArgumentException if any required key is missing or a key/value list is malformed
 */
public StageableTableMetadata(Config config, @Nullable Table referenceTable) {
Preconditions.checkArgument(config.hasPath(DESTINATION_TABLE_KEY), String.format("Key %s is not specified", DESTINATION_TABLE_KEY));
Preconditions.checkArgument(config.hasPath(DESTINATION_DB_KEY), String.format("Key %s is not specified", DESTINATION_DB_KEY));
Preconditions.checkArgument(config.hasPath(DESTINATION_DATA_PATH_KEY),
String.format("Key %s is not specified", DESTINATION_DATA_PATH_KEY));
// Required
this.destinationTableName = referenceTable == null ? config.getString(DESTINATION_TABLE_KEY)
: HiveDataset.resolveTemplate(config.getString(DESTINATION_TABLE_KEY), referenceTable);
this.destinationStagingTableName = String.format("%s_%s", this.destinationTableName, "staging"); // Fixed and non-configurable
this.destinationDbName = referenceTable == null ? config.getString(DESTINATION_DB_KEY)
: HiveDataset.resolveTemplate(config.getString(DESTINATION_DB_KEY), referenceTable);
this.destinationDataPath = referenceTable == null ? config.getString(DESTINATION_DATA_PATH_KEY)
: HiveDataset.resolveTemplate(config.getString(DESTINATION_DATA_PATH_KEY), referenceTable);
// By default, this value is true and subDir "/final" is being added into orc output location.
this.dataDstPathUseSubdir = !config.hasPath(DESTINATION_DATA_PATH_ADD_SUBDIR) || config.getBoolean(DESTINATION_DATA_PATH_ADD_SUBDIR);
// Optional
this.destinationTableProperties =
convertKeyValueListToProperties(ConfigUtils.getStringList(config, DESTINATION_TABLE_PROPERTIES_LIST_KEY));
this.clusterBy = ConfigUtils.getStringList(config, CLUSTER_BY_KEY);
this.numBuckets = Optional.fromNullable(ConfigUtils.getInt(config, NUM_BUCKETS_KEY, null));
this.hiveRuntimeProperties =
convertKeyValueListToProperties(ConfigUtils.getStringList(config, HIVE_RUNTIME_PROPERTIES_LIST_KEY));
this.evolutionEnabled = ConfigUtils.getBoolean(config, EVOLUTION_ENABLED, false);
this.casePreserved = ConfigUtils.getBoolean(config, OUTPUT_FILE_CASE_PRESERVED, false);
this.rowLimit = Optional.fromNullable(ConfigUtils.getInt(config, ROW_LIMIT_KEY, null));
this.sourceDataPathIdentifier = ConfigUtils.getStringList(config, SOURCE_DATA_PATH_IDENTIFIER_KEY);
}
/**
 * Converts a flat [key1, value1, key2, value2, ...] list into a {@link Properties} object.
 *
 * @throws IllegalArgumentException if the list has an odd number of elements
 */
private Properties convertKeyValueListToProperties(List<String> keyValueList) {
Preconditions.checkArgument(keyValueList.size() % 2 == 0, String.format(
"The list %s does not have equal number of keys and values. Size %s", keyValueList, keyValueList.size()));
Properties props = new Properties();
// Consume the list pairwise: even index = key, odd index = value
for (int i = 0; i < keyValueList.size(); i += 2) {
String key = keyValueList.get(i);
String value = keyValueList.get(i + 1);
props.put(key, value);
}
return props;
}
}
| 2,540 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/entities/ReplaceTableStageableTableMetadata.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.entities;
import org.apache.hadoop.hive.ql.metadata.Table;
/**
* A {@link StageableTableMetadata} intended where the target table is the same as the reference table. Intended to
* replace the original table.
*/
public class ReplaceTableStageableTableMetadata extends TableLikeStageableTableMetadata {
/**
 * @param referenceTable table whose own db, name and data location become the destination,
 *                       i.e. publishing replaces the table in place
 */
public ReplaceTableStageableTableMetadata(Table referenceTable) {
super(referenceTable, referenceTable.getDbName(), referenceTable.getTableName(), referenceTable.getDataLocation().toString());
}
}
| 2,541 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/entities/SchemaAwareHivePartition.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.entities;
import lombok.Getter;
import org.apache.avro.Schema;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;
/**
* An extension to the {@link Partition} class that also knows the {@link Schema} of this {@link Partition}
*/
public class SchemaAwareHivePartition extends Partition {
// Avro schema describing this partition's data, carried alongside the Hive metadata.
@Getter
private final Schema avroSchema;
private static final long serialVersionUID = -6420854225641474362L;
/**
 * Wraps a metastore partition together with its Avro schema.
 *
 * @param table metastore table the partition belongs to
 * @param partition metastore partition to wrap
 * @param schema Avro {@link Schema} of the partition's data
 * @throws HiveException if the ql-layer {@link Partition} cannot be constructed from the metastore objects
 */
public SchemaAwareHivePartition(org.apache.hadoop.hive.metastore.api.Table table, org.apache.hadoop.hive.metastore.api.Partition partition, Schema schema)
throws HiveException {
super(new Table(table), partition);
this.avroSchema = schema;
}
}
| 2,542 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/entities/QueryBasedHiveConversionEntity.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.entities;
import java.util.List;
import com.google.common.base.Optional;
import com.google.common.collect.Lists;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.ToString;
import org.apache.gobblin.converter.Converter;
import org.apache.gobblin.data.management.conversion.hive.converter.AbstractAvroToOrcConverter;
import org.apache.gobblin.data.management.conversion.hive.dataset.ConvertibleHiveDataset;
import org.apache.gobblin.data.management.conversion.hive.extractor.HiveConvertExtractor;
import org.apache.gobblin.data.management.conversion.hive.writer.HiveQueryExecutionWriter;
import org.apache.gobblin.hive.HivePartition;
import org.apache.gobblin.hive.HiveRegistrationUnit;
import org.apache.gobblin.hive.HiveTable;
import org.apache.gobblin.source.extractor.Extractor;
/**
* Represents a gobblin Record in the Hive avro to orc conversion flow.
* The {@link HiveConvertExtractor} extracts exactly one {@link QueryBasedHiveConversionEntity}.
* This class is a container for all metadata about a {@link HiveTable} or a {@link HivePartition} needed to build
* the hive conversion query.The gobblin task constructs can mutate this object as it get passed from
* {@link Extractor} to {@link Converter}s.
*
* <ul>
* <li> The {@link HiveConvertExtractor} creates {@link QueryBasedHiveConversionEntity} using the {@link HiveRegistrationUnit}
* in the workunit
* <li> The {@link AbstractAvroToOrcConverter} builds the {@link QueryBasedHiveConversionEntity#query} using
* {@link QueryBasedHiveConversionEntity#hiveUnitSchema}.
* <li> The {@link HiveQueryExecutionWriter} executes the hive query at {@link QueryBasedHiveConversionEntity#getConversionQuery()}
* </ul>
*/
@ToString
@EqualsAndHashCode
@Getter
public class QueryBasedHiveConversionEntity extends HiveProcessingEntity {
private final ConvertibleHiveDataset convertibleHiveDataset;
private final SchemaAwareHiveTable hiveTable;
private final Optional<SchemaAwareHivePartition> hivePartition;
/**
 * The hive conversion queries, accumulated as the entity flows through the converters.
 * (Note: despite the original comment, this is a {@link List} of query strings, not a StringBuilder.)
 */
private final List<String> queries;
/** Table-level entity (no partition). */
public QueryBasedHiveConversionEntity(ConvertibleHiveDataset convertibleHiveDataset, SchemaAwareHiveTable hiveTable) {
this(convertibleHiveDataset, hiveTable, Optional.<SchemaAwareHivePartition> absent());
}
/**
 * @param convertibleHiveDataset dataset this entity belongs to
 * @param hiveTable schema-aware table being converted
 * @param hivePartition optional schema-aware partition to limit the conversion
 */
public QueryBasedHiveConversionEntity(ConvertibleHiveDataset convertibleHiveDataset, SchemaAwareHiveTable hiveTable,
Optional<SchemaAwareHivePartition> hivePartition) {
// fromNullable(orNull()) widens Optional<SchemaAwareHivePartition> to the Optional<Partition> the super class expects
super(convertibleHiveDataset, hiveTable, Optional.fromNullable(hivePartition.orNull()));
this.convertibleHiveDataset = convertibleHiveDataset;
this.hiveTable = hiveTable;
this.hivePartition = hivePartition;
this.queries = Lists.newArrayList();
}
}
| 2,543 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/entities/HiveProcessingEntity.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.entities;
import org.apache.gobblin.data.management.copy.hive.HiveDataset;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;
import com.google.common.base.Optional;
import lombok.Getter;
/**
* Represents a Hive table and optionally partition.
*/
@Getter
public class HiveProcessingEntity {

  private final HiveDataset hiveDataset;
  private final Table table;
  private final Optional<Partition> partition;

  /** Table-level entity with no partition. */
  public HiveProcessingEntity(HiveDataset hiveDataset, Table table) {
    this(hiveDataset, table, Optional.<Partition>absent());
  }

  /**
   * @param hiveDataset dataset the table belongs to
   * @param table Hive table being processed
   * @param partition optional partition to narrow the processing scope
   */
  public HiveProcessingEntity(HiveDataset hiveDataset, Table table, Optional<Partition> partition) {
    this.hiveDataset = hiveDataset;
    this.table = table;
    this.partition = partition;
  }
}
| 2,544 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/hive/HiveConfigClientUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.hive;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.gobblin.config.client.ConfigClient;
import org.apache.gobblin.data.management.copy.hive.HiveDataset;
/**
* Utility methods for a {@link HiveDataset} to communicate with {@link ConfigClient}
*/
public class HiveConfigClientUtils {

  private static final String HIVE_DATASETS_CONFIG_PREFIX = "hive" + Path.SEPARATOR;

  /** Utility class: static methods only, not meant to be instantiated. */
  private HiveConfigClientUtils() {
  }

  /**
   * Get the dataset uri for a hive db and table. The uri is relative to the store uri.
   *
   * @param table the hive table for which a config client uri needs to be built
   * @return a uri of the form {@code hive/<dbName>/<tableName>}
   */
  public static String getDatasetUri(Table table) {
    return HIVE_DATASETS_CONFIG_PREFIX + table.getDbName() + Path.SEPARATOR + table.getTableName();
  }
}
| 2,545 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/hive/HiveRegistrableDataset.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.hive;
import java.io.IOException;
import java.util.List;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.dataset.Dataset;
import org.apache.gobblin.hive.spec.HiveSpec;
/**
* A {@link Dataset} that can be registered in Hive.
*/
@Alpha
public interface HiveRegistrableDataset extends Dataset {

  /**
   * Returns the {@link HiveSpec}s describing how this dataset should be registered in Hive;
   * consumed by {@link org.apache.gobblin.hive.HiveRegister}.
   *
   * @throws IOException if the specs cannot be computed
   */
  List<HiveSpec> getHiveSpecs() throws IOException;
}
| 2,546 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/UnixTimestampCopyableDatasetFinder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy;
import java.io.IOException;
import java.util.Properties;
import org.apache.gobblin.data.management.retention.profile.ConfigurableGlobDatasetFinder;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
/**
 * {@link org.apache.gobblin.data.management.retention.profile.ConfigurableGlobDatasetFinder} that returns datasets of type
 * {@link org.apache.gobblin.data.management.copy.UnixTimestampRecursiveCopyableDataset}.
 */
public class UnixTimestampCopyableDatasetFinder extends ConfigurableGlobDatasetFinder<CopyableDataset> {
/**
 * @param fs filesystem the datasets live on
 * @param props job properties configuring the glob-based finder
 */
public UnixTimestampCopyableDatasetFinder(FileSystem fs, Properties props) {
super(fs, props);
}
/** Creates a {@link UnixTimestampRecursiveCopyableDataset} rooted at {@code path}. */
@Override
public CopyableDataset datasetAtPath(Path path) throws IOException {
return new UnixTimestampRecursiveCopyableDataset(this.fs, path, this.props, this.datasetPattern);
}
} | 2,547 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/ManifestBasedDatasetFinder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy;
import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Optional;
import java.util.Properties;
import java.util.stream.Collectors;
import lombok.extern.slf4j.Slf4j;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import com.google.common.base.Preconditions;
import com.google.common.base.Splitter;
import org.apache.gobblin.dataset.IterableDatasetFinder;
@Slf4j
public class ManifestBasedDatasetFinder implements IterableDatasetFinder<ManifestBasedDataset> {

  public static final String CONFIG_PREFIX = CopyConfiguration.COPY_PREFIX + ".manifestBased";
  /** Required: comma-separated list of manifest file paths. */
  public static final String MANIFEST_LOCATION = CONFIG_PREFIX + ".manifest.location";
  /** Optional: URI of a file system to read manifests from; defaults to the source file system. */
  public static final String MANIFEST_READ_FS_URI = CONFIG_PREFIX + ".read.fs.uri";

  private final FileSystem srcFs;
  private final FileSystem manifestReadFs;
  private final List<Path> manifestLocations;
  private final Properties properties;

  /**
   * @param srcFs file system containing the files to copy; also the fallback for reading manifests
   * @param properties job properties; must contain {@link #MANIFEST_LOCATION}
   */
  public ManifestBasedDatasetFinder(final FileSystem srcFs, Properties properties) {
    Preconditions.checkArgument(properties.containsKey(MANIFEST_LOCATION),
        "Manifest location key required in config. Please set " + MANIFEST_LOCATION);
    this.srcFs = srcFs;
    this.properties = properties;
    final Optional<String> optManifestReadFsUriStr = Optional.ofNullable(properties.getProperty(MANIFEST_READ_FS_URI));
    try {
      // config may specify a `FileSystem` other than `srcFs` solely for reading manifests; fallback is to use `srcFs`
      this.manifestReadFs = optManifestReadFsUriStr.isPresent()
          ? FileSystem.get(URI.create(optManifestReadFsUriStr.get()), new Configuration())
          : srcFs;
      log.info("using file system to read manifest files: '{}'", this.manifestReadFs.getUri());
    } catch (final IOException | IllegalArgumentException e) {
      // unwrap the Optional for the message so we don't print "Optional[...]"
      throw new RuntimeException(
          "unable to create manifest-loading FS at URI '" + optManifestReadFsUriStr.orElse("<unset>") + "'", e);
    }
    this.manifestLocations = new ArrayList<>();
    Splitter.on(',').trimResults().split(properties.getProperty(MANIFEST_LOCATION))
        .forEach(s -> manifestLocations.add(new Path(s)));
  }

  /** Builds the {@link ManifestBasedDataset} backed by a single manifest file. */
  private ManifestBasedDataset datasetForManifest(Path manifestPath) {
    return new ManifestBasedDataset(srcFs, manifestReadFs, manifestPath, properties);
  }

  /** @return one {@link ManifestBasedDataset} per configured manifest location */
  @Override
  public List<ManifestBasedDataset> findDatasets() throws IOException {
    return manifestLocations.stream().map(this::datasetForManifest).collect(Collectors.toList());
  }

  @Override
  public Path commonDatasetRoot() {
    return new Path("/");
  }

  @Override
  public Iterator<ManifestBasedDataset> getDatasetsIterator() throws IOException {
    return manifestLocations.stream().map(this::datasetForManifest).iterator();
  }
}
| 2,548 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/CopySource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy;
import java.io.IOException;
import java.net.URI;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import com.google.common.base.Function;
import com.google.common.base.Optional;
import com.google.common.base.Predicates;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Iterators;
import com.google.common.collect.Lists;
import com.google.common.collect.Multimaps;
import com.google.common.collect.SetMultimap;
import javax.annotation.Nullable;
import lombok.AllArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.data.management.copy.extractor.EmptyExtractor;
import org.apache.gobblin.data.management.copy.extractor.FileAwareInputStreamExtractor;
import org.apache.gobblin.data.management.copy.prioritization.FileSetComparator;
import org.apache.gobblin.data.management.copy.publisher.CopyEventSubmitterHelper;
import org.apache.gobblin.data.management.copy.replication.ConfigBasedDataset;
import org.apache.gobblin.data.management.copy.splitter.DistcpFileSplitter;
import org.apache.gobblin.data.management.copy.watermark.CopyableFileWatermarkGenerator;
import org.apache.gobblin.data.management.copy.watermark.CopyableFileWatermarkHelper;
import org.apache.gobblin.data.management.dataset.DatasetUtils;
import org.apache.gobblin.data.management.partition.CopyableDatasetRequestor;
import org.apache.gobblin.data.management.partition.FileSet;
import org.apache.gobblin.data.management.partition.FileSetResourceEstimator;
import org.apache.gobblin.dataset.Dataset;
import org.apache.gobblin.dataset.DatasetsFinder;
import org.apache.gobblin.dataset.IterableDatasetFinder;
import org.apache.gobblin.dataset.IterableDatasetFinderImpl;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.metrics.GobblinMetrics;
import org.apache.gobblin.metrics.GobblinTrackingEvent;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.Tag;
import org.apache.gobblin.metrics.event.EventSubmitter;
import org.apache.gobblin.metrics.event.lineage.LineageInfo;
import org.apache.gobblin.metrics.event.sla.SlaEventKeys;
import org.apache.gobblin.source.extractor.Extractor;
import org.apache.gobblin.source.extractor.WatermarkInterval;
import org.apache.gobblin.source.extractor.extract.AbstractSource;
import org.apache.gobblin.source.workunit.Extract;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.source.workunit.WorkUnitWeighter;
import org.apache.gobblin.util.ClassAliasResolver;
import org.apache.gobblin.util.ExecutorsUtils;
import org.apache.gobblin.util.HadoopUtils;
import org.apache.gobblin.util.WriterUtils;
import org.apache.gobblin.util.binpacking.FieldWeighter;
import org.apache.gobblin.util.binpacking.WorstFitDecreasingBinPacking;
import org.apache.gobblin.util.deprecation.DeprecationUtils;
import org.apache.gobblin.util.executors.IteratorExecutor;
import org.apache.gobblin.util.guid.Guid;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
import org.apache.gobblin.util.request_allocation.GreedyAllocator;
import org.apache.gobblin.util.request_allocation.HierarchicalAllocator;
import org.apache.gobblin.util.request_allocation.HierarchicalPrioritizer;
import org.apache.gobblin.util.request_allocation.PriorityIterableBasedRequestAllocator;
import org.apache.gobblin.util.request_allocation.RequestAllocator;
import org.apache.gobblin.util.request_allocation.RequestAllocatorConfig;
import org.apache.gobblin.util.request_allocation.RequestAllocatorUtils;
import org.apache.gobblin.service.ServiceConfigKeys;
/**
* {@link org.apache.gobblin.source.Source} that generates work units from {@link org.apache.gobblin.data.management.copy.CopyableDataset}s.
*
*/
@Slf4j
public class CopySource extends AbstractSource<String, FileAwareInputStream> {

  /** Dataset finder used when none is configured explicitly. */
  public static final String DEFAULT_DATASET_PROFILE_CLASS_KEY = CopyableGlobDatasetFinder.class.getCanonicalName();
  /** Work-unit key holding the serialized {@link CopyEntity}. */
  public static final String SERIALIZED_COPYABLE_FILE = CopyConfiguration.COPY_PREFIX + ".serialized.copyable.file";
  /** Work-unit key holding the concrete class name of the serialized {@link CopyEntity}. */
  public static final String COPY_ENTITY_CLASS = CopyConfiguration.COPY_PREFIX + ".copy.entity.class";
  /** Work-unit key holding the serialized {@link CopyableDatasetMetadata}. */
  public static final String SERIALIZED_COPYABLE_DATASET =
      CopyConfiguration.COPY_PREFIX + ".serialized.copyable.datasets";
  public static final String WORK_UNIT_GUID = CopyConfiguration.COPY_PREFIX + ".work.unit.guid";
  /** Maximum number of threads used to list datasets concurrently in {@link #getWorkunits}. */
  public static final String MAX_CONCURRENT_LISTING_SERVICES =
      CopyConfiguration.COPY_PREFIX + ".max.concurrent.listing.services";
  public static final int DEFAULT_MAX_CONCURRENT_LISTING_SERVICES = 20;
  /** Deprecated key, renamed to the resource-pool entities key in {@link #getWorkunits}. */
  public static final String MAX_FILES_COPIED_KEY = CopyConfiguration.COPY_PREFIX + ".max.files.copied";
  /** When true, only log what would be copied; no work units are produced. */
  public static final String SIMULATE = CopyConfiguration.COPY_PREFIX + ".simulate";
  public static final String MAX_SIZE_MULTI_WORKUNITS = CopyConfiguration.COPY_PREFIX + ".binPacking.maxSizePerBin";
  public static final String MAX_WORK_UNITS_PER_BIN = CopyConfiguration.COPY_PREFIX + ".binPacking.maxWorkUnitsPerBin";
  // Names of tracking events emitted for requests the allocator could not fulfill.
  public static final String REQUESTS_EXCEEDING_AVAILABLE_RESOURCE_POOL_EVENT_NAME =
      "RequestsExceedingAvailableResourcePoolEvent";
  public static final String REQUESTS_DROPPED_EVENT_NAME = "RequestsDroppedEvent";
  public static final String REQUESTS_REJECTED_DUE_TO_INSUFFICIENT_EVICTION_EVENT_NAME =
      "RequestsRejectedDueToInsufficientEvictionEvent";
  public static final String REQUESTS_REJECTED_WITH_LOW_PRIORITY_EVENT_NAME = "RequestsRejectedWithLowPriorityEvent";
  // Metadata keys attached to the unfulfilled-request events above.
  public static final String FILESET_NAME = "fileset.name";
  public static final String FILESET_TOTAL_ENTITIES = "fileset.total.entities";
  public static final String FILESET_TOTAL_SIZE_IN_BYTES = "fileset.total.size";
  // NOTE(review): key value is misspelled ("shcema") — presumably kept as-is for backward
  // compatibility with existing job configs; confirm before correcting.
  public static final String SCHEMA_CHECK_ENABLED = "shcema.check.enabled";
  public final static boolean DEFAULT_SCHEMA_CHECK_ENABLED = false;
  /** Per-work-unit weight used by the bin packer below. */
  private static final String WORK_UNIT_WEIGHT = CopyConfiguration.COPY_PREFIX + ".workUnitWeight";
  private final WorkUnitWeighter weighter = new FieldWeighter(WORK_UNIT_WEIGHT);
  public MetricContext metricContext;
  public EventSubmitter eventSubmitter;
  protected Optional<LineageInfo> lineageInfo;
/**
* <ul>
* Does the following:
* <li>Instantiate a {@link DatasetsFinder}.
* <li>Find all {@link Dataset} using {@link DatasetsFinder}.
* <li>For each {@link CopyableDataset} get all {@link CopyEntity}s.
* <li>Create a {@link WorkUnit} per {@link CopyEntity}.
* </ul>
*
* <p>
* In this implementation, one workunit is created for every {@link CopyEntity} found. But the extractor/converters
* and writers are built to support multiple {@link CopyEntity}s per workunit
* </p>
*
* @param state see {@link org.apache.gobblin.configuration.SourceState}
* @return Work units for copying files.
*/
  @Override
  public List<WorkUnit> getWorkunits(final SourceState state) {
    this.metricContext = Instrumented.getMetricContext(state, CopySource.class);
    this.lineageInfo = LineageInfo.getLineageInfo(state.getBroker());
    try {
      // Migrate the deprecated "max files copied" key onto the current resource-pool entities key.
      DeprecationUtils
          .renameDeprecatedKeys(state, CopyConfiguration.MAX_COPY_PREFIX + "." + CopyResourcePool.ENTITIES_KEY,
              Lists.newArrayList(MAX_FILES_COPIED_KEY));
      final FileSystem sourceFs = HadoopUtils.getSourceFileSystem(state);
      final FileSystem targetFs = HadoopUtils.getWriterFileSystem(state, 1, 0);
      state.setProp(SlaEventKeys.SOURCE_URI, sourceFs.getUri());
      state.setProp(SlaEventKeys.DESTINATION_URI, targetFs.getUri());
      log.info("Identified source file system at {} and target file system at {}.", sourceFs.getUri(),
          targetFs.getUri());
      // Bin-packing knobs: the per-work-unit weight is floored so that a bin of maxSizePerBin
      // cannot contain more than maxWorkUnitsPerMultiWorkUnit work units.
      long maxSizePerBin = state.getPropAsLong(MAX_SIZE_MULTI_WORKUNITS, 0);
      long maxWorkUnitsPerMultiWorkUnit = state.getPropAsLong(MAX_WORK_UNITS_PER_BIN, 50);
      final long minWorkUnitWeight = Math.max(1, maxSizePerBin / maxWorkUnitsPerMultiWorkUnit);
      final Optional<CopyableFileWatermarkGenerator> watermarkGenerator =
          CopyableFileWatermarkHelper.getCopyableFileWatermarkGenerator(state);
      int maxThreads = state.getPropAsInt(MAX_CONCURRENT_LISTING_SERVICES, DEFAULT_MAX_CONCURRENT_LISTING_SERVICES);
      final CopyConfiguration copyConfiguration = CopyConfiguration.builder(targetFs, state.getProperties()).build();
      this.eventSubmitter = new EventSubmitter.Builder(this.metricContext, CopyConfiguration.COPY_PREFIX).build();
      // Discover datasets and wrap each one in a requestor that produces its copy listing lazily;
      // null requestors (datasets that produced nothing) are filtered out.
      DatasetsFinder<CopyableDatasetBase> datasetFinder = DatasetUtils
          .instantiateDatasetFinder(state.getProperties(), sourceFs, DEFAULT_DATASET_PROFILE_CLASS_KEY,
              this.eventSubmitter, state);
      IterableDatasetFinder<CopyableDatasetBase> iterableDatasetFinder =
          datasetFinder instanceof IterableDatasetFinder ? (IterableDatasetFinder<CopyableDatasetBase>) datasetFinder
              : new IterableDatasetFinderImpl<>(datasetFinder);
      Iterator<CopyableDatasetRequestor> requestorIteratorWithNulls = Iterators
          .transform(iterableDatasetFinder.getDatasetsIterator(),
              new CopyableDatasetRequestor.Factory(targetFs, copyConfiguration, log));
      Iterator<CopyableDatasetRequestor> requestorIterator =
          Iterators.filter(requestorIteratorWithNulls, Predicates.<CopyableDatasetRequestor>notNull());
      // Synchronized multimap: the file-set work-unit generators below run concurrently.
      final SetMultimap<FileSet<CopyEntity>, WorkUnit> workUnitsMap =
          Multimaps.<FileSet<CopyEntity>, WorkUnit>synchronizedSetMultimap(
              HashMultimap.<FileSet<CopyEntity>, WorkUnit>create());
      // Allocate (and possibly prioritize/cap) the requested file sets against the resource pool.
      RequestAllocator<FileSet<CopyEntity>> allocator = createRequestAllocator(copyConfiguration, maxThreads);
      Iterator<FileSet<CopyEntity>> prioritizedFileSets =
          allocator.allocateRequests(requestorIterator, copyConfiguration.getMaxToCopy());
      //Submit alertable events for unfulfilled requests and fail if all of the allocated requests were rejected due to size
      submitUnfulfilledRequestEvents(allocator);
      failJobIfAllRequestsRejected(allocator, prioritizedFileSets);
      String filesetWuGeneratorAlias = state.getProp(ConfigurationKeys.COPY_SOURCE_FILESET_WU_GENERATOR_CLASS, FileSetWorkUnitGenerator.class.getName());
      boolean shouldWuGeneratorFailureBeFatal = state.getPropAsBoolean(ConfigurationKeys.WORK_UNIT_GENERATOR_FAILURE_IS_FATAL, ConfigurationKeys.DEFAULT_WORK_UNIT_FAST_FAIL_ENABLED);
      // One callable per file set; each callable fills workUnitsMap with that file set's work units.
      Iterator<Callable<Void>> callableIterator =
          Iterators.transform(prioritizedFileSets, new Function<FileSet<CopyEntity>, Callable<Void>>() {
            @Nullable
            @Override
            public Callable<Void> apply(FileSet<CopyEntity> input) {
              try {
                return GobblinConstructorUtils.<FileSetWorkUnitGenerator>invokeLongestConstructor(
                    new ClassAliasResolver(FileSetWorkUnitGenerator.class).resolveClass(filesetWuGeneratorAlias),
                    input.getDataset(), input, state, targetFs, workUnitsMap, watermarkGenerator, minWorkUnitWeight, lineageInfo);
              } catch (Exception e) {
                throw new RuntimeException("Cannot create workunits generator", e);
              }
            }
          });
      try {
        // Run the generators on a bounded daemon-thread pool and surface per-dataset failures.
        List<Future<Void>> futures = new IteratorExecutor<>(callableIterator, maxThreads,
            ExecutorsUtils.newDaemonThreadFactory(Optional.of(log), Optional.of("Copy-file-listing-pool-%d")))
            .execute();
        for (Future<Void> future : futures) {
          try {
            future.get();
          } catch (ExecutionException exc) {
            log.error("Failed to get work units for dataset.", exc.getCause());
            if (shouldWuGeneratorFailureBeFatal) {
              throw new RuntimeException("Failed to get work units for dataset.", exc.getCause());
            }
          }
        }
      } catch (InterruptedException ie) {
        log.error("Retrieval of work units was interrupted. Aborting.");
        return Lists.newArrayList();
      }
      log.info(String.format("Created %s workunits ", workUnitsMap.size()));
      copyConfiguration.getCopyContext().logCacheStatistics();
      // Simulate mode: explain every copy entity and emit no work units.
      if (state.contains(SIMULATE) && state.getPropAsBoolean(SIMULATE)) {
        log.info("Simulate mode enabled. Will not execute the copy.");
        for (Map.Entry<FileSet<CopyEntity>, Collection<WorkUnit>> entry : workUnitsMap.asMap().entrySet()) {
          log.info(String.format("Actions for dataset %s file set %s.", entry.getKey().getDataset().datasetURN(),
              entry.getKey().getName()));
          for (WorkUnit workUnit : entry.getValue()) {
            try {
              CopyEntity copyEntity = deserializeCopyEntity(workUnit);
              log.info(copyEntity.explain());
            } catch (Exception e) {
              log.info("Cannot deserialize CopyEntity from wu : {}", workUnit.toString());
            }
          }
        }
        return Lists.newArrayList();
      }
      // Pack the generated work units into multi-work-unit bins by weight.
      List<? extends WorkUnit> workUnits = new WorstFitDecreasingBinPacking(maxSizePerBin)
          .pack(Lists.newArrayList(workUnitsMap.values()), this.weighter);
      log.info(String.format(
          "Bin packed work units. Initial work units: %d, packed work units: %d, max weight per bin: %d, "
              + "max work units per bin: %d.", workUnitsMap.size(), workUnits.size(), maxSizePerBin,
          maxWorkUnitsPerMultiWorkUnit));
      return ImmutableList.copyOf(workUnits);
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }
private void submitUnfulfilledRequestEventsHelper(List<FileSet<CopyEntity>> fileSetList, String eventName) {
for (FileSet<CopyEntity> fileSet : fileSetList) {
GobblinTrackingEvent event =
GobblinTrackingEvent.newBuilder().setName(eventName).setNamespace(CopySource.class.getName()).setMetadata(
ImmutableMap.<String, String>builder()
.put(ConfigurationKeys.DATASET_URN_KEY, fileSet.getDataset().getUrn())
.put(FILESET_TOTAL_ENTITIES, Integer.toString(fileSet.getTotalEntities()))
.put(FILESET_TOTAL_SIZE_IN_BYTES, Long.toString(fileSet.getTotalSizeInBytes()))
.put(FILESET_NAME, fileSet.getName()).build()).build();
this.metricContext.submitEvent(event);
}
}
void failJobIfAllRequestsRejected(RequestAllocator<FileSet<CopyEntity>> allocator,
Iterator<FileSet<CopyEntity>> allocatedRequests) throws IOException {
// TODO: we should set job as partial success if there is a mix of allocated requests and rejections
if (PriorityIterableBasedRequestAllocator.class.isAssignableFrom(allocator.getClass())) {
PriorityIterableBasedRequestAllocator<FileSet<CopyEntity>> priorityIterableBasedRequestAllocator =
(PriorityIterableBasedRequestAllocator<FileSet<CopyEntity>>) allocator;
// If there are no allocated items and are there items exceeding the available resources, then we can infer all items exceed resources
if (!allocatedRequests.hasNext() && priorityIterableBasedRequestAllocator.getRequestsExceedingAvailableResourcePool().size() > 0) {
throw new IOException(String.format("Requested copy datasets are all larger than the available resource pool. Try increasing %s and/or %s",
CopyConfiguration.MAX_COPY_PREFIX + "." + CopyResourcePool.ENTITIES_KEY, CopyConfiguration.MAX_COPY_PREFIX + ".size"));
}
}
}
private void submitUnfulfilledRequestEvents(RequestAllocator<FileSet<CopyEntity>> allocator) {
if (PriorityIterableBasedRequestAllocator.class.isAssignableFrom(allocator.getClass())) {
PriorityIterableBasedRequestAllocator<FileSet<CopyEntity>> priorityIterableBasedRequestAllocator =
(PriorityIterableBasedRequestAllocator<FileSet<CopyEntity>>) allocator;
submitUnfulfilledRequestEventsHelper(
priorityIterableBasedRequestAllocator.getRequestsExceedingAvailableResourcePool(),
REQUESTS_EXCEEDING_AVAILABLE_RESOURCE_POOL_EVENT_NAME);
submitUnfulfilledRequestEventsHelper(
priorityIterableBasedRequestAllocator.getRequestsRejectedDueToInsufficientEviction(),
REQUESTS_REJECTED_DUE_TO_INSUFFICIENT_EVICTION_EVENT_NAME);
submitUnfulfilledRequestEventsHelper(priorityIterableBasedRequestAllocator.getRequestsRejectedWithLowPriority(),
REQUESTS_REJECTED_WITH_LOW_PRIORITY_EVENT_NAME);
submitUnfulfilledRequestEventsHelper(priorityIterableBasedRequestAllocator.getRequestsDropped(),
REQUESTS_DROPPED_EVENT_NAME);
}
}
private RequestAllocator<FileSet<CopyEntity>> createRequestAllocator(CopyConfiguration copyConfiguration,
int maxThreads) {
Optional<FileSetComparator> prioritizer = copyConfiguration.getPrioritizer();
RequestAllocatorConfig.Builder<FileSet<CopyEntity>> configBuilder =
RequestAllocatorConfig.builder(new FileSetResourceEstimator()).allowParallelization(maxThreads)
.storeRejectedRequests(copyConfiguration.getStoreRejectedRequestsSetting())
.withLimitedScopeConfig(copyConfiguration.getPrioritizationConfig());
if (!prioritizer.isPresent()) {
return new GreedyAllocator<>(configBuilder.build());
} else {
configBuilder.withPrioritizer(prioritizer.get());
}
if (prioritizer.get() instanceof HierarchicalPrioritizer) {
return new HierarchicalAllocator.Factory().createRequestAllocator(configBuilder.build());
} else {
return RequestAllocatorUtils.inferFromConfig(configBuilder.build());
}
}
  /**
   * {@link Runnable} to generate copy listing for one {@link CopyableDataset}.
   */
  @Alias("FileSetWorkUnitGenerator")
  @AllArgsConstructor
  public static class FileSetWorkUnitGenerator implements Callable<Void> {

    protected final CopyableDatasetBase copyableDataset;
    protected final FileSet<CopyEntity> fileSet;
    protected final State state;
    protected final FileSystem targetFs;
    // Shared across concurrently running generators; expected to be a synchronized multimap.
    protected final SetMultimap<FileSet<CopyEntity>, WorkUnit> workUnitList;
    protected final Optional<CopyableFileWatermarkGenerator> watermarkGenerator;
    protected final long minWorkUnitWeight;
    protected final Optional<LineageInfo> lineageInfo;

    /**
     * Creates one {@link WorkUnit} per {@link CopyEntity} in the file set (large files may be
     * split into several work units) and records them all in {@link #workUnitList}.
     */
    @Override
    public Void call() {
      try {
        // Sanitize the file set name for use as an extract id.
        String extractId = fileSet.getName().replace(':', '_');
        Extract extract = new Extract(Extract.TableType.SNAPSHOT_ONLY, CopyConfiguration.COPY_PREFIX, extractId);
        List<WorkUnit> workUnitsForPartition = Lists.newArrayList();
        long fileSize;
        for (CopyEntity copyEntity : fileSet.getFiles()) {
          CopyableDatasetMetadata metadata = new CopyableDatasetMetadata(this.copyableDataset);
          CopyEntity.DatasetAndPartition datasetAndPartition = copyEntity.getDatasetAndPartition(metadata);
          WorkUnit workUnit = new WorkUnit(extract);
          workUnit.addAll(this.state);
          // Propagate schema-check settings when the dataset opts in.
          if(this.copyableDataset instanceof ConfigBasedDataset && ((ConfigBasedDataset)this.copyableDataset).schemaCheckEnabled()) {
            workUnit.setProp(SCHEMA_CHECK_ENABLED, true);
            if (((ConfigBasedDataset) this.copyableDataset).getExpectedSchema() != null) {
              workUnit.setProp(ConfigurationKeys.COPY_EXPECTED_SCHEMA, ((ConfigBasedDataset) this.copyableDataset).getExpectedSchema());
            }
          }
          // Ensure that the writer temporary directories are contained within the dataset shard
          String datasetPath = this.copyableDataset.getDatasetPath();
          workUnit.setProp(ConfigurationKeys.DATASET_DESTINATION_PATH, datasetPath);
          serializeCopyEntity(workUnit, copyEntity);
          serializeCopyableDataset(workUnit, metadata);
          GobblinMetrics.addCustomTagToState(workUnit,
              new Tag<>(CopyEventSubmitterHelper.DATASET_ROOT_METADATA_NAME, this.copyableDataset.datasetURN()));
          workUnit.setProp(ConfigurationKeys.DATASET_URN_KEY, datasetAndPartition.toString());
          workUnit.setProp(SlaEventKeys.DATASET_URN_KEY, this.copyableDataset.datasetURN());
          workUnit.setProp(SlaEventKeys.PARTITION_KEY, copyEntity.getFileSet());
          setWorkUnitWeight(workUnit, copyEntity, minWorkUnitWeight);
          setWorkUnitWatermark(workUnit, watermarkGenerator, copyEntity);
          computeAndSetWorkUnitGuid(workUnit);
          addLineageInfo(copyEntity, workUnit);
          if (copyEntity instanceof CopyableFile) {
            CopyableFile castedCopyEntity = (CopyableFile) copyEntity;
            fileSize = castedCopyEntity.getFileStatus().getLen();
            workUnit.setProp(ServiceConfigKeys.WORK_UNIT_SIZE, fileSize);
            if (DistcpFileSplitter.allowSplit(this.state, this.targetFs)) {
              workUnitsForPartition.addAll(DistcpFileSplitter.splitFile((CopyableFile) copyEntity, workUnit, this.targetFs));
            } else {
              workUnitsForPartition.add(workUnit);
            }
          } else {
            // Occurs when a job has local state files or post publishing steps that are not associated with a byte size
            workUnit.setProp(ServiceConfigKeys.WORK_UNIT_SIZE, 0);
            workUnitsForPartition.add(workUnit);
          }
        }
        this.workUnitList.putAll(this.fileSet, workUnitsForPartition);
        return null;
      } catch (IOException ioe) {
        throw new RuntimeException("Failed to generate work units for dataset " + this.copyableDataset.datasetURN(),
            ioe);
      }
    }

    /**
     * Attaches the watermark interval produced by {@code watermarkGenerator} to the work unit;
     * only {@link CopyableFile} entities carry watermarks.
     */
    private void setWorkUnitWatermark(WorkUnit workUnit, Optional<CopyableFileWatermarkGenerator> watermarkGenerator,
        CopyEntity copyEntity)
        throws IOException {
      if (copyEntity instanceof CopyableFile) {
        Optional<WatermarkInterval> watermarkIntervalOptional =
            CopyableFileWatermarkHelper.getCopyableFileWatermark((CopyableFile) copyEntity, watermarkGenerator);
        if (watermarkIntervalOptional.isPresent()) {
          workUnit.setWatermarkInterval(watermarkIntervalOptional.get());
        }
      }
    }

    /** Records the copy's lineage source on the work unit when lineage tracking is available. */
    private void addLineageInfo(CopyEntity copyEntity, WorkUnit workUnit) {
      if (copyEntity instanceof CopyableFile) {
        CopyableFile copyableFile = (CopyableFile) copyEntity;
        /*
         * In Gobblin Distcp, the source and target path info of a CopyableFile are determined by its dataset found by
         * a DatasetFinder. Consequently, the source and destination dataset for the CopyableFile lineage are expected
         * to be set by the same logic
         */
        // NOTE(review): only the lineage *source* is set here even though the condition also
        // requires destination data — presumably the destination is recorded elsewhere; confirm.
        if (lineageInfo.isPresent() && copyableFile.getSourceData() != null
            && copyableFile.getDestinationData() != null) {
          lineageInfo.get().setSource(copyableFile.getSourceData(), workUnit);
        }
      }
    }
  }
/**
* @param state a {@link org.apache.gobblin.configuration.WorkUnitState} carrying properties needed by the returned
* {@link Extractor}
* @return a {@link FileAwareInputStreamExtractor}.
* @throws IOException
*/
@Override
public Extractor<String, FileAwareInputStream> getExtractor(WorkUnitState state)
throws IOException {
Class<?> copyEntityClass = getCopyEntityClass(state);
if (CopyableFile.class.isAssignableFrom(copyEntityClass)) {
CopyableFile copyEntity = (CopyableFile) deserializeCopyEntity(state);
return extractorForCopyableFile(HadoopUtils.getSourceFileSystem(state), copyEntity, state);
}
return new EmptyExtractor<>("empty");
}
  /**
   * Builds the extractor for a single {@link CopyableFile}; protected so subclasses can override.
   *
   * @param fs the source {@link FileSystem} the file will be read from
   * @param cf the file to copy
   * @param state the work unit state
   */
  protected Extractor<String, FileAwareInputStream> extractorForCopyableFile(FileSystem fs, CopyableFile cf,
      WorkUnitState state)
      throws IOException {
    return new FileAwareInputStreamExtractor(fs, cf, state);
  }
  /** No-op: this source performs no cleanup on shutdown. */
  @Override
  public void shutdown(SourceState state) {
  }
  /**
   * Builds the (optionally throttled) source {@link FileSystem} from the configured source FS URI.
   *
   * @deprecated use {@link HadoopUtils#getSourceFileSystem(State)}.
   */
  @Deprecated
  protected FileSystem getSourceFileSystem(State state)
      throws IOException {
    Configuration conf =
        HadoopUtils.getConfFromState(state, Optional.of(ConfigurationKeys.SOURCE_FILEBASED_ENCRYPTED_CONFIG_PATH));
    // Falls back to the local file system when no source FS URI is configured.
    String uri = state.getProp(ConfigurationKeys.SOURCE_FILEBASED_FS_URI, ConfigurationKeys.LOCAL_FS_URI);
    return HadoopUtils.getOptionallyThrottledFileSystem(FileSystem.get(URI.create(uri), conf), state);
  }
  /**
   * Builds the (optionally throttled) destination {@link FileSystem} for branch 0 of 1.
   *
   * @deprecated use {@link HadoopUtils#getWriterFileSystem(State, int, int)}.
   */
  @Deprecated
  private static FileSystem getTargetFileSystem(State state)
      throws IOException {
    return HadoopUtils.getOptionallyThrottledFileSystem(WriterUtils.getWriterFS(state, 1, 0), state);
  }
private static void setWorkUnitWeight(WorkUnit workUnit, CopyEntity copyEntity, long minWeight) {
long weight = 0;
if (copyEntity instanceof CopyableFile) {
weight = ((CopyableFile) copyEntity).getOrigin().getLen();
}
weight = Math.max(weight, minWeight);
workUnit.setProp(WORK_UNIT_WEIGHT, Long.toString(weight));
}
  /**
   * Computes a replicable guid for the work unit from its converter classes (empty string when
   * none are configured) appended with its serialized {@link CopyEntity}, then stores it via
   * {@link #setWorkUnitGuid}.
   */
  private static void computeAndSetWorkUnitGuid(WorkUnit workUnit)
      throws IOException {
    Guid guid = Guid.fromStrings(workUnit.contains(ConfigurationKeys.CONVERTER_CLASSES_KEY) ? workUnit
        .getProp(ConfigurationKeys.CONVERTER_CLASSES_KEY) : "");
    setWorkUnitGuid(workUnit, guid.append(deserializeCopyEntity(workUnit)));
  }
  /**
   * Set a unique, replicable guid for this work unit. Used for recovering partially successful work units.
   *
   * @param state {@link State} where guid should be written.
   * @param guid A byte array guid.
   */
  public static void setWorkUnitGuid(State state, Guid guid) {
    state.setProp(WORK_UNIT_GUID, guid.toString());
  }
/**
* Get guid in this state if available. This is the reverse operation of {@link #setWorkUnitGuid}.
* @param state State from which guid should be extracted.
* @return A byte array guid.
* @throws IOException
*/
public static Optional<Guid> getWorkUnitGuid(State state)
throws IOException {
if (state.contains(WORK_UNIT_GUID)) {
return Optional.of(Guid.deserialize(state.getProp(WORK_UNIT_GUID)));
}
return Optional.absent();
}
  /**
   * Serialize a single {@link CopyEntity} into a {@link State} at {@link #SERIALIZED_COPYABLE_FILE},
   * also recording its concrete class at {@link #COPY_ENTITY_CLASS} for later deserialization.
   */
  public static void serializeCopyEntity(State state, CopyEntity copyEntity) {
    state.setProp(SERIALIZED_COPYABLE_FILE, CopyEntity.serialize(copyEntity));
    state.setProp(COPY_ENTITY_CLASS, copyEntity.getClass().getName());
  }
  /**
   * Resolves the concrete {@link CopyEntity} class recorded in the state at {@link #COPY_ENTITY_CLASS}.
   *
   * @throws IOException if the recorded class name cannot be loaded
   */
  public static Class<?> getCopyEntityClass(State state)
      throws IOException {
    try {
      return Class.forName(state.getProp(COPY_ENTITY_CLASS));
    } catch (ClassNotFoundException cnfe) {
      // wrap so callers only have to handle IOException
      throw new IOException(cnfe);
    }
  }
  /**
   * Deserialize a single {@link CopyEntity} from a {@link State} at {@link #SERIALIZED_COPYABLE_FILE}.
   * This is the reverse of {@link #serializeCopyEntity}.
   */
  public static CopyEntity deserializeCopyEntity(State state) {
    return CopyEntity.deserialize(state.getProp(SERIALIZED_COPYABLE_FILE));
  }
  /**
   * Serialize a {@link CopyableDataset}'s metadata into a {@link State} at {@link #SERIALIZED_COPYABLE_DATASET}.
   */
  public static void serializeCopyableDataset(State state, CopyableDatasetMetadata copyableDataset) {
    state.setProp(SERIALIZED_COPYABLE_DATASET, copyableDataset.serialize());
  }
  /**
   * Deserialize {@link CopyableDatasetMetadata} from a {@link State} at {@link #SERIALIZED_COPYABLE_DATASET}.
   * This is the reverse of {@link #serializeCopyableDataset}.
   */
  public static CopyableDatasetMetadata deserializeCopyableDataset(State state) {
    return CopyableDatasetMetadata.deserialize(state.getProp(SERIALIZED_COPYABLE_DATASET));
  }
}
| 2,549 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/AllEqualComparator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy;
import java.io.Serializable;
import java.util.Comparator;
/**
 * A trivial {@link java.util.Comparator} that reports every pair of elements as equal
 * (always returns 0). Implements {@link java.io.Serializable} so it can be serialized
 * together with any state that holds it.
 */
public class AllEqualComparator<T> implements Comparator<T>, Serializable {

  private static final long serialVersionUID = 5144295901248792907L;

  @Override
  public int compare(T first, T second) {
    return 0;
  }
}
| 2,550 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/CopyEntity.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy;
import org.apache.gobblin.util.guid.Guid;
import org.apache.gobblin.util.guid.HasGuid;
import org.apache.gobblin.util.io.GsonInterfaceAdapter;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import lombok.AccessLevel;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.NoArgsConstructor;
import lombok.Setter;
import lombok.Singular;
import org.apache.commons.codec.binary.Hex;
import org.apache.commons.codec.digest.DigestUtils;
import com.google.gson.Gson;
import com.google.gson.reflect.TypeToken;
/**
 * Abstraction for a work unit for distcp. Serialized to/from JSON via {@link #GSON}; the JSON field
 * names therefore follow this class's field names and must remain stable.
 */
@Getter
@Setter
@AllArgsConstructor(access = AccessLevel.PROTECTED)
@NoArgsConstructor(access = AccessLevel.PROTECTED)
@EqualsAndHashCode
@lombok.extern.slf4j.Slf4j
public class CopyEntity implements HasGuid {

  /** Shared {@link Gson} instance capable of round-tripping interface-typed fields. */
  public static final Gson GSON = GsonInterfaceAdapter.getGson(Object.class);

  /**
   * File set this file belongs to. {@link CopyEntity}s in the same fileSet and originating from the same
   * {@link CopyableDataset} will be treated as a unit: they will be published nearly atomically, and a notification
   * will be emitted for each fileSet when it is published.
   */
  private String fileSet;

  /** Contains arbitrary metadata usable by converters and/or publisher. */
  @Singular(value = "metadata")
  private Map<String, String> additionalMetadata;

  @Override
  public Guid guid() throws IOException {
    // The GUID is derived from the full JSON serialization (see toString()), so two entities with
    // identical state produce the same GUID.
    return Guid.fromStrings(toString());
  }

  /**
   * Serialize an instance of {@link CopyEntity} into a {@link String}.
   *
   * @param copyEntity to be serialized
   * @return serialized string
   */
  public static String serialize(CopyEntity copyEntity) {
    return GSON.toJson(copyEntity);
  }

  /**
   * Serialize a {@link List} of {@link CopyEntity}s into a {@link String}.
   *
   * @param copyEntities to be serialized
   * @return serialized string
   */
  public static String serializeList(List<CopyEntity> copyEntities) {
    return GSON.toJson(copyEntities, new TypeToken<List<CopyEntity>>() {}.getType());
  }

  /**
   * Deserializes the serialized {@link CopyEntity} string.
   *
   * @param serialized string
   * @return a new instance of {@link CopyEntity}
   */
  public static CopyEntity deserialize(String serialized) {
    return GSON.fromJson(getSerializedWithNewPackage(serialized), CopyEntity.class);
  }

  /**
   * Deserializes the serialized {@link List} of {@link CopyEntity} string.
   * Used together with {@link #serializeList(List)}
   *
   * @param serialized string
   * @return a new {@link List} of {@link CopyEntity}s
   */
  public static List<CopyEntity> deserializeList(String serialized) {
    return GSON.fromJson(getSerializedWithNewPackage(serialized), new TypeToken<List<CopyEntity>>() {}.getType());
  }

  /**
   * Converts package name in serialized string to new name.
   * This is temporary change and should get removed after all the states are switched from old to new package name.
   * @param serialized serialized string possibly having old package names
   * @return the input with legacy "gobblin.data.management." class references rewritten to the
   *         "org.apache.gobblin.data.management." namespace
   */
  public static String getSerializedWithNewPackage(String serialized) {
    serialized = serialized.replace("\"gobblin.data.management.", "\"org.apache.gobblin.data.management.");
    log.debug("Serialized updated copy entity: " + serialized);
    return serialized;
  }

  @Override
  public String toString() {
    return serialize(this);
  }

  /**
   * Get a {@link DatasetAndPartition} instance for the dataset and fileSet this {@link CopyEntity} belongs to.
   * @param metadata {@link CopyableDatasetMetadata} for the dataset this {@link CopyEntity} belongs to.
   * @return an instance of {@link DatasetAndPartition}
   */
  public DatasetAndPartition getDatasetAndPartition(CopyableDatasetMetadata metadata) {
    return new DatasetAndPartition(metadata, getFileSet());
  }

  /**
   * Used for simulate runs. Should explain what this copy entity will do.
   */
  public String explain() {
    return toString();
  }

  /**
   * Uniquely identifies a fileSet by also including the dataset metadata.
   */
  @Data
  @EqualsAndHashCode
  public static class DatasetAndPartition {
    private final CopyableDatasetMetadata dataset;
    private final String partition;

    /**
     * @return a unique string identifier for this {@link DatasetAndPartition}.
     */
    public String identifier() {
      // SHA-1 of dataset metadata + partition name, hex-encoded. DigestUtils.sha1 replaces the
      // deprecated DigestUtils.sha and produces identical bytes, so existing identifiers are stable.
      return Hex.encodeHexString(DigestUtils.sha1(this.dataset.toString() + this.partition));
    }
  }
}
| 2,551 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/CopyableFileFilter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy;
import java.util.Collection;
import org.apache.hadoop.fs.FileSystem;
/**
 * A filter that is applied on all the {@link CopyableFile}s found by
 * {@link CopyableDataset#getCopyableFiles(FileSystem, CopyConfiguration)}.
 *
 * <p>Single-abstract-method interface, so implementations may be supplied as lambdas or method
 * references.</p>
 */
@FunctionalInterface
public interface CopyableFileFilter {
  /**
   * Returns a filtered {@link Collection} of {@link CopyableFile}s. The filtering logic is implemented by the
   * concrete filter.
   *
   * @param sourceFs {@link FileSystem} the copyable files originate from
   * @param targetFs {@link FileSystem} the copyable files are destined for
   * @param copyableFiles candidates to be filtered
   * @return a filtered collection of copyableFiles
   */
  Collection<CopyableFile> filter(FileSystem sourceFs, FileSystem targetFs,
      Collection<CopyableFile> copyableFiles);
}
| 2,552 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/CopyableGlobDatasetFinder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy;
import org.apache.gobblin.data.management.retention.profile.ConfigurableGlobDatasetFinder;
import java.io.IOException;
import java.util.Properties;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
/**
 * {@link org.apache.gobblin.data.management.retention.profile.ConfigurableGlobDatasetFinder} that returns datasets of type
 * {@link org.apache.gobblin.data.management.copy.RecursiveCopyableDataset}.
 */
public class CopyableGlobDatasetFinder extends ConfigurableGlobDatasetFinder<CopyableDataset> {

  public CopyableGlobDatasetFinder(FileSystem fs, Properties props) throws IOException {
    super(fs, props);
  }

  /**
   * Wraps the given path as a {@link RecursiveCopyableDataset} rooted at {@code path}, reusing this
   * finder's file system, properties, and glob pattern.
   */
  @Override
  public CopyableDataset datasetAtPath(Path path) throws IOException {
    return new RecursiveCopyableDataset(this.fs, path, this.props, this.datasetPattern);
  }
}
| 2,553 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/ConcurrentBoundedWorkUnitList.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy;
import java.util.Map;
import lombok.Builder;
import lombok.Getter;
import java.util.Comparator;
import java.util.List;
import java.util.Set;
import java.util.TreeMap;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Sets;
import org.apache.gobblin.data.management.partition.FileSet;
import org.apache.gobblin.source.workunit.WorkUnit;
import lombok.extern.slf4j.Slf4j;
/**
* A {@link WorkUnit} container that is bounded, supports concurrent all-or-nothing addAll, and supports priority of
* file sets, ie. attempting to add a file set with higher priority will automatically evict
* lower priority {@link org.apache.gobblin.data.management.partition.FileSet}s if necessary.
*
* <p>
* File sets in {@link CopySource} are handled as {@link org.apache.gobblin.data.management.partition.FileSet}, so this class uses a {@link org.apache.gobblin.data.management.partition.FileSet} comparator
* for priority. If fileSetA < fileSetB, then fileSetA has higher priority than fileSetB
* (similar to {@link java.util.PriorityQueue}).
* </p>
*/
@Slf4j
class ConcurrentBoundedWorkUnitList {

  /** Work units keyed by file set; TreeMap order is priority order (smallest key = highest priority). */
  private final TreeMap<FileSet<CopyEntity>, List<WorkUnit>> workUnitsMap;
  @Getter
  private final Comparator<FileSet<CopyEntity>> comparator;
  /** Soft capacity: {@link #isFull()} reports true once this many work units are held. */
  private final int maxSize;
  /** Hard capacity: additions beyond this size are rejected or trigger evictions. */
  private final int strictMaxSize;
  /** Number of work units currently held; guarded by synchronization on this instance. */
  private int currentSize;
  /** Set to true the first time a file set is rejected (i.e. doesn't fit in the container) */
  private boolean rejectedFileSet;

  /**
   * Wraps the user-provided comparator, breaking ties by dataset URN and then file set name. This
   * keeps distinct file sets with equal user-assigned priority from colliding as {@link TreeMap} keys.
   */
  private static class AugmentedComparator implements Comparator<FileSet<CopyEntity>> {
    private final Comparator<FileSet<CopyEntity>> userProvidedComparator;

    public AugmentedComparator(Comparator<FileSet<CopyEntity>> userProvidedComparator) {
      this.userProvidedComparator = userProvidedComparator;
    }

    @Override
    public int compare(FileSet<CopyEntity> p1, FileSet<CopyEntity> p2) {
      int userProvidedCompare = this.userProvidedComparator.compare(p1, p2);
      if (userProvidedCompare == 0) {
        int datasetCompare = p1.getDataset().datasetURN().compareTo(p2.getDataset().datasetURN());
        if (datasetCompare == 0) {
          return p1.getName().compareTo(p2.getName());
        }
        return datasetCompare;
      }
      return userProvidedCompare;
    }
  }

  /**
   * Creates a new {@link ConcurrentBoundedWorkUnitList}.
   * @param maxSize Maximum number of {@link WorkUnit}s to contain.
   * @param comparator {@link Comparator} for {@link FileSet}s to use for {@link FileSet} priority.
   * @param strictLimitMultiplier the list will only start rejecting {@link WorkUnit}s if its capacity exceeds
   *        maxSize * strictLimitMultiplier. If this parameter is < 1, it will be auto-set to 1.
   */
  @Builder
  public ConcurrentBoundedWorkUnitList(int maxSize, final Comparator<FileSet<CopyEntity>> comparator,
      double strictLimitMultiplier) {
    this.currentSize = 0;
    this.maxSize = maxSize;
    // Clamp the multiplier to [1.0, Integer.MAX_VALUE / maxSize] so strictMaxSize cannot overflow int.
    double actualStrictLimitMultiplier =
        Math.min((Integer.MAX_VALUE / (double) this.maxSize), Math.max(1.0, strictLimitMultiplier));
    this.strictMaxSize = (int) (this.maxSize * actualStrictLimitMultiplier);
    this.comparator = comparator == null ? new AllEqualComparator<FileSet<CopyEntity>>() : comparator;
    this.workUnitsMap = new TreeMap<>(new AugmentedComparator(this.comparator));
    this.rejectedFileSet = false;
  }

  /**
   * Add a file set to the container.
   * @param fileSet File set, expressed as a {@link FileSet} of {@link CopyEntity}s.
   * @param workUnits List of {@link WorkUnit}s corresponding to this file set.
   * @return true if the file set was added to the container, false otherwise (i.e. has reached max size).
   */
  public boolean addFileSet(FileSet<CopyEntity> fileSet, List<WorkUnit> workUnits) {
    boolean addedWorkunits = addFileSetImpl(fileSet, workUnits);
    if (!addedWorkunits) {
      this.rejectedFileSet = true;
    }
    return addedWorkunits;
  }

  private synchronized boolean addFileSetImpl(FileSet<CopyEntity> fileSet, List<WorkUnit> workUnits) {
    if (this.currentSize + workUnits.size() > this.strictMaxSize) {
      // Bug fix: a file set larger than the hard limit can never fit, even after evicting every
      // existing entry, so reject it outright. Previously such a file set fell through to
      // workUnitsMap.lastKey(), which throws NoSuchElementException when the map is empty, and
      // after a full eviction the oversized file set could still be added, breaching strictMaxSize.
      if (workUnits.size() > this.strictMaxSize || this.workUnitsMap.isEmpty()) {
        return false;
      }
      // Reject if the incoming file set does not outrank even the lowest-priority entry.
      if (this.comparator.compare(this.workUnitsMap.lastKey(), fileSet) <= 0) {
        return false;
      }
      // Walk entries from lowest to highest priority, tentatively evicting until the new set fits.
      int tmpSize = this.currentSize;
      Set<FileSet<CopyEntity>> partitionsToDelete = Sets.newHashSet();
      for (FileSet<CopyEntity> existingFileSet : this.workUnitsMap.descendingKeySet()) {
        if (this.comparator.compare(existingFileSet, fileSet) <= 0) {
          // Remaining entries all have priority >= the new file set: cannot free enough space.
          return false;
        }
        tmpSize -= this.workUnitsMap.get(existingFileSet).size();
        partitionsToDelete.add(existingFileSet);
        if (tmpSize + workUnits.size() <= this.strictMaxSize) {
          break;
        }
      }
      // Commit the evictions chosen above.
      for (FileSet<CopyEntity> fileSetToRemove : partitionsToDelete) {
        List<WorkUnit> workUnitsRemoved = this.workUnitsMap.remove(fileSetToRemove);
        this.currentSize -= workUnitsRemoved.size();
      }
    }
    // TreeMap determines key equality using provided comparator. If multiple fileSets have same
    // priority, we need to concat their work units, otherwise only the last one will survive.
    // Obviously, the comparator must be transitive, but it need not be consistent with equals.
    if (!this.workUnitsMap.containsKey(fileSet)) {
      this.workUnitsMap.put(fileSet, workUnits);
    } else {
      this.workUnitsMap.get(fileSet).addAll(workUnits);
    }
    this.currentSize += workUnits.size();
    log.info(String.format("Added %d work units to bounded list. Total size: %d, soft limit: %d, hard limit: %d.",
        workUnits.size(), this.currentSize, this.maxSize, this.strictMaxSize));
    return true;
  }

  /**
   * @return Whether any calls to {@link #addFileSet} have returned false, i.e. some file set has been rejected due
   * to strict capacity issues.
   */
  public boolean hasRejectedFileSet() {
    return this.rejectedFileSet;
  }

  /**
   * @return Whether the list has reached its max (soft) size.
   */
  public synchronized boolean isFull() {
    return this.currentSize >= this.maxSize;
  }

  /**
   * Get the {@link List} of {@link WorkUnit}s in this container, in priority order.
   */
  public List<WorkUnit> getWorkUnits() {
    ImmutableList.Builder<WorkUnit> allWorkUnits = ImmutableList.builder();
    for (List<WorkUnit> workUnits : this.workUnitsMap.values()) {
      allWorkUnits.addAll(workUnits);
    }
    return allWorkUnits.build();
  }

  /**
   * Get the raw map backing this object. Note this is the live map, not a copy.
   */
  public Map<FileSet<CopyEntity>, List<WorkUnit>> getRawWorkUnitMap() {
    return this.workUnitsMap;
  }
}
| 2,554 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/TimeAwareRecursiveCopyableDataset.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy;
import java.io.IOException;
import java.nio.file.FileSystems;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.Properties;
import lombok.extern.slf4j.Slf4j;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.joda.time.DateTimeZone;
import org.joda.time.LocalDateTime;
import org.joda.time.Period;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import org.joda.time.format.PeriodFormatter;
import org.joda.time.format.PeriodFormatterBuilder;
import com.google.common.collect.Lists;
import org.apache.gobblin.configuration.ConfigurationKeys;
@Slf4j
public class TimeAwareRecursiveCopyableDataset extends RecursiveCopyableDataset {

  // All configuration keys for this dataset live under "<copy prefix>.recursive".
  private static final String CONFIG_PREFIX = CopyConfiguration.COPY_PREFIX + ".recursive";
  // Joda-time pattern describing the date-partitioned folder layout, e.g. "yyyy/MM/dd/HH".
  public static final String DATE_PATTERN_KEY = CONFIG_PREFIX + ".date.pattern";
  // Lookback window string such as "3d", "12h" or "30m"; granularity must match the date pattern.
  public static final String LOOKBACK_TIME_KEY = CONFIG_PREFIX + ".lookback.time";
  public static final String DEFAULT_DATE_PATTERN_TIMEZONE = ConfigurationKeys.PST_TIMEZONE_NAME;
  public static final String DATE_PATTERN_TIMEZONE_KEY = CONFIG_PREFIX + ".datetime.timezone";

  private final String lookbackTime;
  private final String datePattern;
  private final Period lookbackPeriod;
  private final DateTimeZone dateTimeZone;
  // Snapshot of "now" in the configured timezone, taken at construction; serves as the upper bound
  // of the copy window.
  private final LocalDateTime currentTime;

  public TimeAwareRecursiveCopyableDataset(FileSystem fs, Path rootPath, Properties properties, Path glob) {
    super(fs, rootPath, properties, glob);
    this.lookbackTime = properties.getProperty(LOOKBACK_TIME_KEY);
    // Accepts lookback strings of the form "<days>d<hours>h<minutes>m", each part optional.
    PeriodFormatter periodFormatter = new PeriodFormatterBuilder().appendDays()
        .appendSuffix("d")
        .appendHours()
        .appendSuffix("h")
        .appendMinutes()
        .appendSuffix("m")
        .toFormatter();
    this.lookbackPeriod = periodFormatter.parsePeriod(lookbackTime);
    this.datePattern = properties.getProperty(DATE_PATTERN_KEY);
    this.dateTimeZone = DateTimeZone.forID(properties
        .getProperty(DATE_PATTERN_TIMEZONE_KEY, DEFAULT_DATE_PATTERN_TIMEZONE));
    this.currentTime = LocalDateTime.now(this.dateTimeZone);
    this.validateLookbackWithDatePatternFormat(this.datePattern, this.lookbackTime);
  }

  /**
   * Validates that the configured lookback granularity is no finer than the date pattern supports
   * (e.g. a minute-level lookback is rejected for a day-only pattern).
   *
   * <p>Works by rendering a reference timestamp through the pattern, zeroing one time field at a
   * time, and checking which fields actually affect the rendered string: if zeroing a field changes
   * the output, the pattern encodes that field, and the lookback may be that granular.</p>
   *
   * @param datePattern the configured Joda-time date pattern
   * @param lookbackTime the configured lookback string
   * @throws IllegalArgumentException if the lookback string uses a finer unit than the pattern allows
   */
  void validateLookbackWithDatePatternFormat(String datePattern, String lookbackTime) {
    DateTimeFormatter formatter = DateTimeFormat.forPattern(datePattern);
    // Reference instant chosen so that every field (month, day, hour, minute, second) is non-zero.
    LocalDateTime refDateTime = new LocalDateTime(2017, 01, 31, 10, 59, 59);
    String refDateTimeString = refDateTime.toString(formatter);
    PeriodFormatterBuilder formatterBuilder;
    // Validate that the lookback is supported for the time format
    if (!refDateTime.withSecondOfMinute(0).toString(formatter).equals(refDateTimeString)) {
      // Pattern is second-granular: lookback may use d/h/m/s units.
      formatterBuilder = new PeriodFormatterBuilder().appendDays()
          .appendSuffix("d")
          .appendHours()
          .appendSuffix("h")
          .appendMinutes()
          .appendSuffix("m")
          .appendSeconds()
          .appendSuffix("s");
      if (!lookbackTimeMatchesFormat(formatterBuilder, lookbackTime)) {
        throw new IllegalArgumentException(String.format("Expected lookback time to be in daily or hourly or minutely or secondly format, check %s",
            LOOKBACK_TIME_KEY));
      }
    } else if (!refDateTime.withMinuteOfHour(0).toString(formatter).equals(refDateTimeString)) {
      // Pattern is minute-granular: lookback may use d/h/m units.
      formatterBuilder = new PeriodFormatterBuilder().appendDays()
          .appendSuffix("d")
          .appendHours()
          .appendSuffix("h")
          .appendMinutes()
          .appendSuffix("m");
      if (!lookbackTimeMatchesFormat(formatterBuilder, lookbackTime)) {
        throw new IllegalArgumentException(String.format("Expected lookback time to be in daily or hourly or minutely format, check %s",
            LOOKBACK_TIME_KEY));
      }
    } else if (!refDateTime.withHourOfDay(0).toString(formatter).equals(refDateTimeString)) {
      // Pattern is hour-granular: lookback may use d/h units.
      formatterBuilder = new PeriodFormatterBuilder().appendDays().appendSuffix("d").appendHours().appendSuffix("h");
      if (!lookbackTimeMatchesFormat(formatterBuilder, lookbackTime)) {
        throw new IllegalArgumentException(String.format("Expected lookback time to be in daily or hourly format, check %s", LOOKBACK_TIME_KEY));
      }
    } else if (!refDateTime.withDayOfMonth(1).toString(formatter).equals(refDateTimeString)) {
      // Pattern is day-granular: lookback may only use d units.
      formatterBuilder = new PeriodFormatterBuilder().appendDays().appendSuffix("d");
      if (!lookbackTimeMatchesFormat(formatterBuilder, lookbackTime)) {
        throw new IllegalArgumentException(String.format("Expected lookback time to be in daily format, check %s", LOOKBACK_TIME_KEY));
      }
    }
  }

  // Returns true iff the lookback string parses under the given period format.
  private boolean lookbackTimeMatchesFormat(PeriodFormatterBuilder formatterBuilder, String lookbackTime) {
    try {
      formatterBuilder.toFormatter().parsePeriod(lookbackTime);
    } catch (IllegalArgumentException e) {
      return false;
    }
    return true;
  }

  /**
   * Lists all files under {@code path} whose date-partitioned sub-path falls within
   * [now - lookback, now], recursing one date-pattern component per directory level.
   */
  @Override
  protected List<FileStatus> getFilesAtPath(FileSystem fs, Path path, PathFilter fileFilter) throws IOException {
    LocalDateTime endDate = currentTime;
    DateTimeFormatter formatter = DateTimeFormat.forPattern(this.datePattern);
    // Round-trip the start date through the pattern so it is truncated to the pattern's granularity.
    LocalDateTime startDate = formatter.parseLocalDateTime(endDate.minus(this.lookbackPeriod).toString(this.datePattern));
    return recursivelyGetFilesAtDatePath(fs, path, "", fileFilter, 1, startDate, endDate, formatter);
  }

  /**
   * Checks if the datePath provided is in the range of the start and end dates.
   * Rounds startDate and endDate to the same granularity as datePath prior to comparing.
   * @param startDate lower bound of the copy window
   * @param endDate upper bound of the copy window
   * @param datePath the partially-traversed date path, e.g. "2017/01" while walking "yyyy/MM/dd"
   * @param datePathFormat (This is the user set desired format)
   * @param level 1-based depth of the next directory level; datePath has level - 1 components
   * @return true if the datePath provided is in the range of start and end dates, inclusive.
   */
  public static Boolean checkPathDateTimeValidity(LocalDateTime startDate, LocalDateTime endDate, String datePath,
      String datePathFormat, int level) {
    String [] datePathFormatArray = datePathFormat.split("/");
    // Keep only the pattern components already traversed (level - 1 of them).
    String datePathPattern = String.join(FileSystems.getDefault().getSeparator(), Arrays.asList(datePathFormatArray).subList(0, level - 1));
    try {
      DateTimeFormatter formatGranularity = DateTimeFormat.forPattern(datePathPattern);
      LocalDateTime traversedDatePathRound = formatGranularity.parseLocalDateTime(datePath);
      // Round both bounds to the traversed granularity so partial paths compare correctly.
      LocalDateTime startDateRound = formatGranularity.parseLocalDateTime(startDate.toString(datePathPattern));
      LocalDateTime endDateRound = formatGranularity.parseLocalDateTime(endDate.toString(datePathPattern));
      return !traversedDatePathRound.isBefore(startDateRound) && !traversedDatePathRound.isAfter(endDateRound);
    } catch (IllegalArgumentException e) {
      log.error(String.format("Cannot parse path provided %s, expected in format of %s", datePath, datePathFormat));
      return false;
    }
  }

  /**
   * Depth-first walk of the date-partitioned directory tree, pruning subtrees whose partial date
   * path falls outside [startDate, endDate]. At the deepest pattern level the full date is parsed
   * and the files of in-range folders are collected via the superclass listing.
   */
  private List<FileStatus> recursivelyGetFilesAtDatePath(FileSystem fs, Path path, String traversedDatePath, PathFilter fileFilter,
      int level, LocalDateTime startDate, LocalDateTime endDate, DateTimeFormatter formatter) throws IOException {
    List<FileStatus> fileStatuses = Lists.newArrayList();
    if (!traversedDatePath.isEmpty()) {
      // Prune this subtree early if the partial date path is already out of range.
      if (!checkPathDateTimeValidity(startDate, endDate, traversedDatePath, this.datePattern, level)) {
        return fileStatuses;
      }
    }
    Iterator<FileStatus> folderIterator;
    try {
      if (!fs.exists(path)) {
        return fileStatuses;
      }
      folderIterator = Arrays.asList(fs.listStatus(path)).iterator();
    } catch (Exception e) {
      // Best-effort: a listing failure skips this subtree rather than failing the whole dataset.
      log.warn(String.format("Error while listing paths at %s due to ", path), e);
      return fileStatuses;
    }
    // Check if at the lowest level/granularity of the date folder
    if (this.datePattern.split(FileSystems.getDefault().getSeparator()).length == level) {
      // Truncate the start date to the most granular unit of time in the datepattern
      while (folderIterator.hasNext()) {
        Path folderPath = folderIterator.next().getPath();
        String datePath = traversedDatePath.isEmpty() ? folderPath.getName() : new Path(traversedDatePath, folderPath.getName()).toString();
        try {
          LocalDateTime folderDate = formatter.parseLocalDateTime(datePath);
          // Inclusive range check against the full-granularity folder date.
          if (!folderDate.isBefore(startDate) && !folderDate.isAfter(endDate)) {
            fileStatuses.addAll(super.getFilesAtPath(fs, folderPath, fileFilter));
          }
        } catch (IllegalArgumentException e) {
          log.warn(String.format(
              "Folder at path %s is not convertible to format %s. Please confirm that argument %s is valid", datePath,
              this.datePattern, DATE_PATTERN_KEY));
        }
      }
    } else {
      // folder has a format such as yyyy/mm/dd/hh, so recursively find date paths
      while (folderIterator.hasNext()) {
        // Start building the date from top-down
        String nextDate = folderIterator.next().getPath().getName();
        String datePath = traversedDatePath.isEmpty() ? nextDate : new Path(traversedDatePath, nextDate).toString();
        fileStatuses.addAll(recursivelyGetFilesAtDatePath(fs, new Path(path, nextDate), datePath, fileFilter, level + 1, startDate, endDate, formatter));
      }
    }
    return fileStatuses;
  }
}
| 2,555 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/CopyManifest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy;
import com.google.common.reflect.TypeToken;
import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import com.google.gson.stream.JsonReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.lang.reflect.Type;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Iterator;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
/**
* Copy Manifest schema and serDe for manifest based copy
* https://iwww.corp.linkedin.com/wiki/cf/display/ENGS/Manifest+based+distcp+runbook
*/
public class CopyManifest {
  private static final String MISSING_FN_MESSAGE = "fileName cannot be null";
  private static final Gson GSON = new GsonBuilder().setPrettyPrinting().create();
  /** Reflective type for {@code ArrayList<CopyableUnit>}, used for both reading and writing. */
  private static final Type CopyableUnitListType = new TypeToken<ArrayList<CopyableUnit>>(){}.getType();

  /** The manifest entries, in file order. Public field name is part of the existing API. */
  public final ArrayList<CopyableUnit> _copyableUnits;

  public CopyManifest() {
    _copyableUnits = new ArrayList<>();
  }

  public CopyManifest(ArrayList<CopyableUnit> copyableUnits) {
    _copyableUnits = copyableUnits;
  }

  /**
   * Add a new copyable unit to a copy manifest. Used for building a manifest.
   * @param copyableUnit the entry to append
   */
  public void add(CopyManifest.CopyableUnit copyableUnit) {
    _copyableUnits.add(copyableUnit);
  }

  /**
   * One item in a copy manifest.
   * Only fileName is required; the remaining fields may be null.
   */
  public static class CopyableUnit {
    public final String fileName;
    public final String fileGroup;
    public final Long fileSizeInBytes;
    public final Long fileModificationTime;

    public CopyableUnit(String fileName, String fileGroup, Long fileSizeInBytes, Long fileModificationTime) {
      this.fileName = fileName;
      this.fileGroup = fileGroup;
      this.fileSizeInBytes = fileSizeInBytes;
      this.fileModificationTime = fileModificationTime;
      if (this.fileName == null) {
        throw new IllegalArgumentException(MISSING_FN_MESSAGE);
      }
    }
  }

  /**
   * Note: naive read does not do validation of schema. For schema validation use CopyableUnitIterator
   * @param fs filesystem object used for accessing the filesystem
   * @param path manifest file location
   * @return a copy manifest object from the json representation at path
   * @throws IOException on any read failure
   */
  public static CopyManifest read(FileSystem fs, Path path) throws IOException {
    // try-with-resources closes the underlying stream (the original version leaked it), and
    // StandardCharsets.UTF_8 avoids the checked exception of the charset-name overload.
    try (JsonReader jsonReader = new JsonReader(new InputStreamReader(fs.open(path), StandardCharsets.UTF_8))) {
      return new CopyManifest(GSON.fromJson(jsonReader, CopyableUnitListType));
    }
  }

  /**
   * Writes this manifest as pretty-printed JSON, overwriting any existing file at {@code path}.
   * @param fs filesystem object used for accessing the filesystem
   * @param path manifest file location
   * @throws IOException on any write failure
   */
  public void write(FileSystem fs, Path path) throws IOException {
    String outputJson = GSON.toJson(this._copyableUnits, CopyableUnitListType);
    try (FSDataOutputStream out = fs.create(path, true)) {
      out.write(outputJson.getBytes(StandardCharsets.UTF_8));
    }
  }

  public static CopyableUnitIterator getReadIterator(FileSystem fs, Path path) throws IOException {
    return new CopyableUnitIterator(fs, path);
  }

  /**
   * A streaming iterator over a manifest file for more efficient reading; also validates that each
   * entry carries the required fileName. Callers should invoke {@link #close()} when done.
   */
  public static class CopyableUnitIterator implements Iterator<CopyManifest.CopyableUnit> {
    JsonReader reader;

    public CopyableUnitIterator(FileSystem fs, Path path) throws IOException {
      reader = new JsonReader(new InputStreamReader(fs.open(path), StandardCharsets.UTF_8));
      reader.beginArray();
    }

    @Override
    public boolean hasNext() {
      try {
        return reader.hasNext();
      } catch (IOException e) {
        // Preserves the original best-effort behavior: report the failure, treat as end-of-stream.
        e.printStackTrace();
      }
      return false;
    }

    @Override
    public CopyManifest.CopyableUnit next() {
      CopyManifest.CopyableUnit copyableUnit = GSON.fromJson(reader, CopyManifest.CopyableUnit.class);
      if (copyableUnit.fileName == null) {
        throw new IllegalArgumentException(MISSING_FN_MESSAGE);
      }
      return copyableUnit;
    }

    public void close() throws IOException {
      if (reader != null) {
        reader.endArray();
        reader.close();
      }
    }
  }
}
| 2,556 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/ManifestBasedDataset.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy;
import com.google.common.base.Optional;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.gson.JsonIOException;
import com.google.gson.JsonSyntaxException;
import java.io.IOException;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.TreeMap;
import java.util.concurrent.TimeUnit;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.commit.CommitStep;
import org.apache.gobblin.data.management.copy.entities.PostPublishStep;
import org.apache.gobblin.data.management.copy.entities.PrePublishStep;
import org.apache.gobblin.data.management.partition.FileSet;
import org.apache.gobblin.util.commit.DeleteFileCommitStep;
import org.apache.gobblin.util.commit.SetPermissionCommitStep;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
/**
 * A dataset that is based on a Manifest. We expect the Manifest to contain the list of all the files for this dataset.
 * At the first phase, we only support copying across different clusters to the same location. (We can add more features
 * to support rename in the future.)
 * We will delete a file on target if it's listed in the manifest and does not exist on source when
 * {@link ManifestBasedDataset#DELETE_FILE_NOT_EXIST_ON_SOURCE} is set to true.
 */
@Slf4j
public class ManifestBasedDataset implements IterableCopyableDataset {
  private static final String DELETE_FILE_NOT_EXIST_ON_SOURCE = ManifestBasedDatasetFinder.CONFIG_PREFIX + ".deleteFileNotExistOnSource";
  private static final String COMMON_FILES_PARENT = ManifestBasedDatasetFinder.CONFIG_PREFIX + ".commonFilesParent";
  private static final String PERMISSION_CACHE_TTL_SECONDS = ManifestBasedDatasetFinder.CONFIG_PREFIX + ".permission.cache.ttl.seconds";
  private static final String DEFAULT_PERMISSION_CACHE_TTL_SECONDS = "30";
  private static final String DEFAULT_COMMON_FILES_PARENT = "/";
  private final FileSystem srcFs;
  private final FileSystem manifestReadFs;
  private final Path manifestPath;
  private final Properties properties;
  // When true, a file listed in the manifest that is missing on source is deleted on target.
  private final boolean deleteFileThatNotExistOnSource;
  // Ancestor directory up to which permissions are replicated recursively.
  private final String commonFilesParent;
  private final int permissionCacheTTLSeconds;

  public ManifestBasedDataset(final FileSystem srcFs, final FileSystem manifestReadFs, final Path manifestPath, final Properties properties) {
    this.srcFs = srcFs;
    this.manifestReadFs = manifestReadFs;
    this.manifestPath = manifestPath;
    this.properties = properties;
    this.deleteFileThatNotExistOnSource = Boolean.parseBoolean(properties.getProperty(DELETE_FILE_NOT_EXIST_ON_SOURCE, "false"));
    this.commonFilesParent = properties.getProperty(COMMON_FILES_PARENT, DEFAULT_COMMON_FILES_PARENT);
    this.permissionCacheTTLSeconds = Integer.parseInt(properties.getProperty(PERMISSION_CACHE_TTL_SECONDS, DEFAULT_PERMISSION_CACHE_TTL_SECONDS));
  }

  @Override
  public String datasetURN() {
    return this.manifestPath.toString();
  }

  /**
   * Streams the manifest and builds one {@link FileSet} of {@link CopyEntity}s covering:
   * copyable files that are new or changed on target, a post-publish step to set ancestor
   * directory permissions, and (optionally) a pre-publish delete step for files that are
   * stale on target.
   *
   * @param targetFs destination filesystem
   * @param configuration copy configuration used to resolve replicated owner/permission
   * @throws IOException if the manifest is missing, is a directory, or cannot be parsed
   */
  @Override
  public Iterator<FileSet<CopyEntity>> getFileSetIterator(FileSystem targetFs, CopyConfiguration configuration)
      throws IOException {
    if (!manifestReadFs.exists(manifestPath)) {
      throw new IOException(String.format("Manifest path %s does not exist on filesystem %s, skipping this manifest"
          + ", probably due to wrong configuration of %s or %s", manifestPath.toString(), manifestReadFs.getUri().toString(),
          ManifestBasedDatasetFinder.MANIFEST_LOCATION, ManifestBasedDatasetFinder.MANIFEST_READ_FS_URI));
    } else if (manifestReadFs.getFileStatus(manifestPath).isDirectory()) {
      // Message fixed: the format string previously read "in%s" (missing separator) and ended
      // with the garbled "split by '',".
      throw new IOException(String.format("Manifest path %s on filesystem %s is a directory, which is not supported. Please set the manifest file locations in "
          + "%s, you can specify multiple locations separated by ','", manifestPath.toString(), manifestReadFs.getUri().toString(),
          ManifestBasedDatasetFinder.MANIFEST_LOCATION));
    }
    CopyManifest.CopyableUnitIterator manifests = null;
    List<CopyEntity> copyEntities = Lists.newArrayList();
    List<FileStatus> toDelete = Lists.newArrayList();
    // map of paths and permissions sorted by depth of path, so that permissions can be set in order
    Map<String, OwnerAndPermission> ancestorOwnerAndPermissions = new TreeMap<>(
        (o1, o2) -> Long.compare(o2.chars().filter(ch -> ch == '/').count(), o1.chars().filter(ch -> ch == '/').count()));
    try {
      long startTime = System.currentTimeMillis();
      manifests = CopyManifest.getReadIterator(this.manifestReadFs, this.manifestPath);
      // Short-lived cache so files sharing ancestors don't re-resolve the same permissions.
      Cache<String, OwnerAndPermission> permissionMap = CacheBuilder.newBuilder().expireAfterAccess(permissionCacheTTLSeconds, TimeUnit.SECONDS).build();
      int numFiles = 0;
      while (manifests.hasNext()) {
        numFiles++;
        CopyManifest.CopyableUnit file = manifests.next();
        //todo: We can use fileSet to partition the data in case of some softbound issue
        //todo: After partition, change this to directly return iterator so that we can save time if we meet resources limitation
        Path fileToCopy = new Path(file.fileName);
        if (srcFs.exists(fileToCopy)) {
          boolean existOnTarget = targetFs.exists(fileToCopy);
          FileStatus srcFile = srcFs.getFileStatus(fileToCopy);
          OwnerAndPermission replicatedPermission = CopyableFile.resolveReplicatedOwnerAndPermission(srcFs, srcFile, configuration);
          if (!existOnTarget || shouldCopy(targetFs, srcFile, targetFs.getFileStatus(fileToCopy), replicatedPermission)) {
            CopyableFile.Builder copyableFileBuilder =
                CopyableFile.fromOriginAndDestination(srcFs, srcFile, fileToCopy, configuration)
                    .fileSet(datasetURN())
                    .datasetOutputPath(fileToCopy.toString())
                    .ancestorsOwnerAndPermission(
                        CopyableFile.resolveReplicatedOwnerAndPermissionsRecursivelyWithCache(srcFs, fileToCopy.getParent(),
                            new Path(commonFilesParent), configuration, permissionMap))
                    .destinationOwnerAndPermission(replicatedPermission);
            CopyableFile copyableFile = copyableFileBuilder.build();
            copyableFile.setFsDatasets(srcFs, targetFs);
            copyEntities.add(copyableFile);
            Path fromPath = srcFs.getFileStatus(fileToCopy).isDirectory() ? fileToCopy : fileToCopy.getParent();
            ancestorOwnerAndPermissions.putAll(
                CopyableFile.resolveReplicatedAncestorOwnerAndPermissionsRecursively(srcFs, fromPath,
                    new Path(commonFilesParent), configuration));
            if (existOnTarget && srcFile.isFile()) {
              // this is to match the existing publishing behavior where we won't rewrite the target when it's already existed
              // todo: Change the publish behavior to support overwrite destination file during rename, instead of relying on this delete step which is needed if we want to support task level publish
              toDelete.add(targetFs.getFileStatus(fileToCopy));
            }
          }
        } else if (deleteFileThatNotExistOnSource && targetFs.exists(fileToCopy)) {
          toDelete.add(targetFs.getFileStatus(fileToCopy));
        }
      }
      Properties props = new Properties();
      props.setProperty(SetPermissionCommitStep.STOP_ON_ERROR_KEY, "true");
      CommitStep setPermissionCommitStep = new SetPermissionCommitStep(targetFs, ancestorOwnerAndPermissions, props);
      copyEntities.add(new PostPublishStep(datasetURN(), Maps.newHashMap(), setPermissionCommitStep, 1));
      if (!toDelete.isEmpty()) {
        //todo: add support sync for empty dir
        CommitStep step = new DeleteFileCommitStep(targetFs, toDelete, this.properties, Optional.<Path>absent());
        copyEntities.add(new PrePublishStep(datasetURN(), Maps.newHashMap(), step, 1));
      }
      log.info(String.format("Workunits calculation took %s milliseconds to process %s files", System.currentTimeMillis() - startTime, numFiles));
    } catch (JsonIOException | JsonSyntaxException e) {
      //todo: update error message to point to a sample json file instead of schema which is hard to understand
      log.warn(String.format("Failed to read Manifest path %s on filesystem %s, please make sure it's in correct json format with schema"
              + " {type:array, items:{type: object, properties:{id:{type:String}, fileName:{type:String}, fileGroup:{type:String}, fileSizeInBytes: {type:Long}}}}",
          manifestPath.toString(), manifestReadFs.getUri().toString()), e);
      throw new IOException(e);
    } catch (Exception e) {
      log.warn(String.format("Failed to process Manifest path %s on filesystem %s, due to", manifestPath.toString(), manifestReadFs.getUri().toString()), e);
      throw new IOException(e);
    } finally {
      if (manifests != null) {
        try {
          manifests.close();
        } catch (Exception e) {
          // Don't let a close failure mask the primary exception: endArray() throws when the
          // manifest was not fully consumed (e.g. iteration aborted by a parse error above).
          log.warn(String.format("Failed to close manifest reader for %s", manifestPath), e);
        }
      }
    }
    return Collections.singleton(new FileSet.Builder<>(datasetURN(), this).add(copyEntities).build()).iterator();
  }

  /**
   * Decides whether a file present on both clusters still needs to be synced.
   * Directories and same-modtime files are re-synced only when owner/permission differ;
   * otherwise a newer source modification time triggers the copy.
   */
  private static boolean shouldCopy(FileSystem targetFs, FileStatus fileInSource, FileStatus fileInTarget, OwnerAndPermission replicatedPermission)
      throws IOException {
    if (fileInSource.isDirectory() || fileInSource.getModificationTime() == fileInTarget.getModificationTime()) {
      // if source is dir or source and dst has same version, we compare the permission to determine whether it needs another sync
      return !replicatedPermission.hasSameOwnerAndPermission(targetFs, fileInTarget);
    }
    return fileInSource.getModificationTime() > fileInTarget.getModificationTime();
  }
}
| 2,557 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/RecursivePathFinder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy;
import java.io.IOException;
import java.util.List;
import java.util.Properties;
import java.util.Set;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import com.google.common.collect.Sets;
import org.apache.gobblin.data.management.dataset.DatasetUtils;
import org.apache.gobblin.util.FileListUtils;
import org.apache.gobblin.util.PathUtils;
import org.apache.gobblin.util.filters.AndPathFilter;
import org.apache.gobblin.util.filters.HiddenFilter;
/**
 * Used to find the path recursively from the root based on the {@link PathFilter} created in the {@link Properties}
 * @author mitu
 *
 */
public class RecursivePathFinder {
  private final Path rootPath;
  private final FileSystem fs;
  private final PathFilter pathFilter;
  private final boolean includeEmptyDirectories;
  private final boolean applyFilterToDirectories;

  public RecursivePathFinder(final FileSystem fs, Path rootPath, Properties properties) {
    this.fs = fs;
    // Strip scheme/authority so the stored root is comparable across filesystems.
    this.rootPath = PathUtils.getPathWithoutSchemeAndAuthority(rootPath);
    this.pathFilter = DatasetUtils.instantiatePathFilter(properties);
    this.includeEmptyDirectories =
        Boolean.parseBoolean(properties.getProperty(CopyConfiguration.INCLUDE_EMPTY_DIRECTORIES));
    this.applyFilterToDirectories =
        Boolean.parseBoolean(properties.getProperty(CopyConfiguration.APPLY_FILTER_TO_DIRECTORIES));
  }

  /**
   * Recursively collects the file statuses under the root path that pass the configured filter.
   *
   * @param skipHiddenPaths when true, hidden files and directories are additionally excluded
   * @return the matching file statuses; empty when the root path does not exist
   */
  public Set<FileStatus> getPaths(boolean skipHiddenPaths)
      throws IOException {
    if (!this.fs.exists(this.rootPath)) {
      return Sets.newHashSet();
    }
    PathFilter effectiveFilter = this.pathFilter;
    if (skipHiddenPaths) {
      effectiveFilter = new AndPathFilter(new HiddenFilter(), this.pathFilter);
    }
    return Sets.newHashSet(
        FileListUtils.listFilesToCopyAtPath(this.fs, this.rootPath, effectiveFilter,
            this.applyFilterToDirectories, this.includeEmptyDirectories));
  }
}
| 2,558 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/CopyableDatasetBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy;
import org.apache.gobblin.dataset.Dataset;
/**
 * A common superinterface for {@link Dataset}s that can be operated on by distcp.
 * Concrete classes must implement a subinterface of this interface ({@link CopyableDataset} or {@link IterableCopyableDataset}).
 */
public interface CopyableDatasetBase extends Dataset {
  /** Path associated with this dataset; defaults to the empty string (implementations may override). */
  default String getDatasetPath() { return ""; }
}
| 2,559 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/OwnerAndPermission.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy;
import lombok.AccessLevel;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import com.google.api.client.util.Lists;
/**
 * Wrapper for owner, group, permission including sticky bit and ACL entries of a path.
 *
 * <p>Note on serialization: the {@link Writable} wire format covers only owner, group and
 * {@link FsPermission}; ACL entries are NOT serialized and come back as an empty list.
 */
@Data
@AllArgsConstructor
@NoArgsConstructor(access = AccessLevel.PRIVATE)
public class OwnerAndPermission implements Writable {
  private String owner;
  private String group;
  private FsPermission fsPermission;
  private List<AclEntry> aclEntries;

  public OwnerAndPermission (String owner, String group, FsPermission fsPermission) {
    this(owner, group, fsPermission, Lists.newArrayList());
  }

  /** Writes owner, group and permission. ACL entries are intentionally not part of the wire format. */
  @Override
  public void write(DataOutput dataOutput) throws IOException {
    Text.writeString(dataOutput, this.owner);
    Text.writeString(dataOutput, this.group);
    this.fsPermission.write(dataOutput);
  }

  @Override
  public void readFields(DataInput dataInput) throws IOException {
    this.owner = Text.readString(dataInput);
    this.group = Text.readString(dataInput);
    this.fsPermission = FsPermission.read(dataInput);
    // ACLs are not serialized (see write()). Initialize to an empty list so a deserialized
    // instance never carries a null aclEntries, which would NPE in hasSameAcls().
    this.aclEntries = Lists.newArrayList();
  }

  /**
   * Read a {@link org.apache.gobblin.data.management.copy.OwnerAndPermission} from a {@link java.io.DataInput}.
   * @throws IOException
   */
  public static OwnerAndPermission read(DataInput input) throws IOException {
    OwnerAndPermission oap = new OwnerAndPermission();
    oap.readFields(input);
    return oap;
  }

  /**
   * given a file, return whether the metadata for the file match the current owner and permission
   * note: if field is null, we always think it's match as no update needed.
   * @param file the file status that need to be evaluated
   * @return true if the metadata for the file match the current owner and permission
   */
  public boolean hasSameOwnerAndPermission(FileSystem fs, FileStatus file) throws IOException {
    return this.hasSameFSPermission(file) && this.hasSameGroup(file) && this.hasSameOwner(file) && this.hasSameAcls(fs.getAclStatus(file.getPath()).getEntries());
  }

  private boolean hasSameGroup(FileStatus file) {
    return this.group == null || file.getGroup().equals(this.group);
  }

  private boolean hasSameOwner(FileStatus file) {
    return this.owner == null || file.getOwner().equals(this.owner);
  }

  private boolean hasSameFSPermission(FileStatus file) {
    return this.fsPermission == null || file.getPermission().equals(this.fsPermission);
  }

  // Empty list means "no ACL requirement", mirroring the null handling of the other fields.
  private boolean hasSameAcls(List<AclEntry> acls) {
    return this.aclEntries.isEmpty() || acls.equals(aclEntries);
  }
}
| 2,560 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/CopyResourcePool.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy;
import java.util.Map;
import com.google.common.collect.ImmutableMap;
import com.typesafe.config.Config;
import org.apache.gobblin.util.StringParsingUtils;
import org.apache.gobblin.util.request_allocation.ResourcePool;
import org.apache.gobblin.util.request_allocation.ResourceRequirement;
import lombok.Singular;
/**
 * A {@link ResourcePool} for resources used in distcp: total bytes to copy, total number of {@link CopyEntity}s.
 */
public class CopyResourcePool extends ResourcePool {
  public static final String SIZE_KEY = "size";
  public static final String DEFAULT_MAX_SIZE = "10TB";
  public static final String ENTITIES_KEY = "copyEntities";
  public static final int DEFAULT_MAX_ENTITIES = 20000;
  public static final String TOLERANCE_KEY = "boundTolerance";
  public static final double DEFAULT_TOLERANCE = 2.;
  private static final String ENTITIES_DIMENSION = "entities";
  private static final String BYTES_DIMENSION = "bytesCopied";

  /**
   * Parse a {@link CopyResourcePool} from an input {@link Config}.
   */
  public static CopyResourcePool fromConfig(Config limitedScopeConfig) {
    // Each dimension falls back to its default when the key is absent.
    String rawSize = DEFAULT_MAX_SIZE;
    if (limitedScopeConfig.hasPath(SIZE_KEY)) {
      rawSize = limitedScopeConfig.getString(SIZE_KEY);
    }
    int entityCap = DEFAULT_MAX_ENTITIES;
    if (limitedScopeConfig.hasPath(ENTITIES_KEY)) {
      entityCap = limitedScopeConfig.getInt(ENTITIES_KEY);
    }
    double tolerance = DEFAULT_TOLERANCE;
    if (limitedScopeConfig.hasPath(TOLERANCE_KEY)) {
      tolerance = limitedScopeConfig.getDouble(TOLERANCE_KEY);
    }
    long byteCap;
    try {
      byteCap = StringParsingUtils.humanReadableToByteCount(rawSize);
    } catch (StringParsingUtils.FormatException fe) {
      throw new RuntimeException(fe);
    }
    return new CopyResourcePool(
        ImmutableMap.of(ENTITIES_DIMENSION, (double) entityCap, BYTES_DIMENSION, (double) byteCap),
        ImmutableMap.of(ENTITIES_DIMENSION, tolerance, BYTES_DIMENSION, tolerance),
        ImmutableMap.<String, Double>of());
  }

  private CopyResourcePool(@Singular Map<String, Double> maxResources, @Singular Map<String, Double> tolerances,
      @Singular Map<String, Double> defaults) {
    super(maxResources, tolerances, defaults);
  }

  private CopyResourcePool(ResourcePool other) {
    super(other);
  }

  @Override
  public ResourceRequirement.Builder getResourceRequirementBuilder() {
    return getCopyResourceRequirementBuilder();
  }

  public CopyResourceRequirementBuilder getCopyResourceRequirementBuilder() {
    return new CopyResourceRequirementBuilder(this);
  }

  @Override
  protected ResourcePool contractPool(ResourceRequirement requirement) {
    // Re-wrap the contracted pool so callers keep getting the copy-specific builder.
    return new CopyResourcePool(super.contractPool(requirement));
  }

  public class CopyResourceRequirementBuilder extends ResourceRequirement.Builder {
    private CopyResourceRequirementBuilder(CopyResourcePool pool) {
      super(pool);
    }

    /**
     * Set number of {@link CopyEntity}s in {@link org.apache.gobblin.data.management.partition.FileSet}.
     */
    public CopyResourceRequirementBuilder setEntities(int numberOfEntities) {
      setRequirement(ENTITIES_DIMENSION, (double) numberOfEntities);
      return this;
    }

    /**
     * Set total bytes to copy in {@link org.apache.gobblin.data.management.partition.FileSet}.
     */
    public CopyResourceRequirementBuilder setBytes(long totalBytes) {
      setRequirement(BYTES_DIMENSION, (double) totalBytes);
      return this;
    }
  }
}
| 2,561 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/CopyableDataset.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy;
import org.apache.gobblin.dataset.Dataset;
import java.io.IOException;
import java.util.Collection;
import org.apache.hadoop.fs.FileSystem;
/**
 * {@link Dataset} that supports finding {@link CopyEntity}s.
 */
public interface CopyableDataset extends CopyableDatasetBase {
  /**
   * Find all {@link CopyEntity}s in this dataset.
   *
   * <p>
   * This method should return a collection of {@link CopyEntity}, each describing one work unit for distcp.
   * The most common {@link CopyEntity} is the {@link org.apache.gobblin.data.management.copy.CopyableFile}, describing a file
   * that should be copied
   * to the target.
   * See {@link CopyableFile} for explanation of the information contained in the {@link CopyableFile}s.
   * </p>
   *
   * @param targetFs target {@link org.apache.hadoop.fs.FileSystem} where copied files will be placed.
   * @param configuration {@link org.apache.gobblin.data.management.copy.CopyConfiguration} for this job. See {@link org.apache.gobblin.data.management.copy.CopyConfiguration}.
   * @return List of {@link CopyEntity}s in this dataset.
   * @throws IOException
   */
  public Collection<? extends CopyEntity> getCopyableFiles(FileSystem targetFs, CopyConfiguration configuration) throws
      IOException;
}
| 2,562 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/SubsetFilesCopyableDatasetFinder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import com.google.common.base.Optional;
import com.google.common.collect.Lists;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.data.management.retention.profile.ConfigurableGlobDatasetFinder;
import org.apache.gobblin.metrics.event.EventSubmitter;
import org.apache.gobblin.util.PathUtils;
import org.apache.gobblin.util.filters.RegexPathFilter;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
import lombok.Getter;
import lombok.Setter;
import lombok.extern.slf4j.Slf4j;
/**
 * {@link SubsetFilesCopyableDataset} finder that extends {@link ConfigurableGlobDatasetFinder}.
 *
 * It lists files on the root path, and uses a user defined pattern to identify {@link SubsetFilesCopyableDataset}s and
 * their corresponding subset of files.
 */
@Slf4j
public class SubsetFilesCopyableDatasetFinder extends ConfigurableGlobDatasetFinder<CopyableDataset> {
  public static final String IDENTIFIER_PATTERN = CopyConfiguration.COPY_PREFIX + ".subsetFilesDatasetIdentifer";
  public static final String DEFAULT_IDENTIFIER_PATTERN = "(.*)";
  public static final String SUBSETFILES_REGEX_FILTER = CopyConfiguration.COPY_PREFIX + ".subsetFilesRegexFilter";
  public static final String DEFAULT_SUBSETFILES_REGEX_FILTER = ".*";
  protected final Path rootPath;
  // First capture group of this pattern, matched against file names, is the dataset identifier.
  protected Pattern identifierPattern;
  @Getter
  @Setter
  protected PathFilter pathFilter;
  // Subset files grouped by extracted identifier.
  // NOTE(review): this map accumulates across all dataset root dirs visited by findDatasets(), so
  // identifiers discovered under one root are re-emitted for subsequent roots — confirm intended.
  protected final Map<String, List<FileStatus>> idToFileStatuses;
  private Optional<EventSubmitter> eventSubmitter;
  private SourceState state;

  public SubsetFilesCopyableDatasetFinder(FileSystem fs, Properties props)
      throws IOException {
    super(fs, props);
    this.identifierPattern = Pattern.compile(props.getProperty(IDENTIFIER_PATTERN, DEFAULT_IDENTIFIER_PATTERN));
    this.pathFilter =
        new RegexPathFilter(props.getProperty(SUBSETFILES_REGEX_FILTER, DEFAULT_SUBSETFILES_REGEX_FILTER));
    this.rootPath = PathUtils.deepestNonGlobPath(this.datasetPattern);
    this.idToFileStatuses = new HashMap<>();
    // Default to absent so the field is never null when this constructor is used directly
    // (datasetAndPathWithIdentifier passes it to invokeLongestConstructor).
    this.eventSubmitter = Optional.absent();
  }

  public SubsetFilesCopyableDatasetFinder(FileSystem fs, Properties props, EventSubmitter eventSubmitter)
      throws IOException {
    this(fs, props);
    this.eventSubmitter = Optional.of(eventSubmitter);
  }

  public SubsetFilesCopyableDatasetFinder(FileSystem fs, Properties props, EventSubmitter eventSubmitter,
      SourceState state)
      throws IOException {
    this(fs, props, eventSubmitter);
    this.state = state;
  }

  /** Finds datasets by scanning each glob-matched root directory for identifier groups. */
  @Override
  public List<CopyableDataset> findDatasets()
      throws IOException {
    List<CopyableDataset> datasets = Lists.newArrayList();
    FileStatus[] fileStatuss = this.getDatasetDirs();
    for (FileStatus datasetRootDir : fileStatuss) {
      datasets.addAll(this.generateDatasetsByIdentifier(datasetRootDir.getPath()));
    }
    return datasets;
  }

  /**
   * Groups the files directly under {@code datasetRootDirPath} (that pass the path filter) by the
   * identifier extracted via {@link #identifierPattern}, then builds one dataset per identifier.
   */
  public List<CopyableDataset> generateDatasetsByIdentifier(Path datasetRootDirPath)
      throws IOException {
    List<CopyableDataset> datasets = Lists.newArrayList();
    FileStatus[] fileStatuses = fs.listStatus(datasetRootDirPath, this.getPathFilter());
    for (FileStatus fileStatus : fileStatuses) {
      Matcher result = this.identifierPattern.matcher(fileStatus.getPath().getName());
      if (result.find()) {
        String id = result.group(1);
        if (idToFileStatuses.containsKey(id)) {
          log.debug("Adding " + fileStatus.getPath() + " to " + id);
          idToFileStatuses.get(id).add(fileStatus);
        } else {
          List<FileStatus> entry = new ArrayList<>();
          entry.add(fileStatus);
          log.debug("Adding " + fileStatus.getPath() + " to " + id);
          idToFileStatuses.put(id, entry);
        }
      }
    }
    for (String id : idToFileStatuses.keySet()) {
      datasets.add(this.datasetAndPathWithIdentifier(datasetRootDirPath, id));
    }
    return datasets;
  }

  /** Instantiates a {@link SubsetFilesCopyableDataset} for the given root path and identifier. */
  public CopyableDataset datasetAndPathWithIdentifier(Path path, String identifier)
      throws IOException {
    try {
      return GobblinConstructorUtils
          .invokeLongestConstructor(SubsetFilesCopyableDataset.class, fs, path, props, identifier,
              idToFileStatuses.get(identifier), eventSubmitter, state);
    } catch (ReflectiveOperationException e) {
      throw new IOException(e);
    }
  }

  /** Not supported: datasets are keyed by (path, identifier), not by path alone. */
  @Override
  public CopyableDataset datasetAtPath(Path path)
      throws IOException {
    throw new IOException("Not supported in " + this.getClass().getSimpleName());
  }
}
| 2,563 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/IterableCopyableDataset.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy;
import java.io.IOException;
import java.util.Iterator;
import org.apache.hadoop.fs.FileSystem;
import org.apache.gobblin.data.management.partition.FileSet;
/**
 * A {@link CopyableDatasetBase} that returns {@link CopyEntity}s as an iterator. It allows for scanning for files to
 * copy only when necessary. Reduces unnecessary work when the queue of {@link CopyEntity}s is full.
 */
public interface IterableCopyableDataset extends CopyableDatasetBase {
  /**
   * Get an iterator of {@link FileSet}s of {@link CopyEntity}, each one representing a group of files to copy and
   * associated actions.
   * @param targetFs target {@link org.apache.hadoop.fs.FileSystem} where copied files will be placed.
   * @param configuration {@link org.apache.gobblin.data.management.copy.CopyConfiguration} for this job. See {@link org.apache.gobblin.data.management.copy.CopyConfiguration}.
   * @return iterator of {@link FileSet}s; implementations may compute each {@link FileSet} lazily.
   * @throws IOException
   */
  public Iterator<FileSet<CopyEntity>> getFileSetIterator(FileSystem targetFs, CopyConfiguration configuration)
      throws IOException;
}
| 2,564 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/CloseableFsCopySource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.data.management.copy.extractor.CloseableFsFileAwareInputStreamExtractor;
import org.apache.gobblin.source.extractor.Extractor;
import org.apache.gobblin.source.extractor.extract.sftp.SftpLightWeightFileSystem;
import org.apache.gobblin.util.HadoopUtils;
import java.io.IOException;
import lombok.extern.slf4j.Slf4j;
import org.apache.hadoop.fs.FileSystem;
import com.google.common.io.Closer;
/**
 * A {@link CopySource} variant for {@link FileSystem}s that must be closed after use, e.g.
 * {@link SftpLightWeightFileSystem}.
 * <p>
 * Most {@link FileSystem} implementations must NOT be closed: Hadoop's
 * {@link FileSystem#get(org.apache.hadoop.conf.Configuration)} returns a cached instance by default,
 * and the same instance may be in use elsewhere in the JVM; closing it can cause {@link IOException}s
 * in unrelated code.
 * </p>
 * <p>
 * {@link SftpLightWeightFileSystem} is the exception: a fresh instance is returned on every
 * {@link FileSystem#get(org.apache.hadoop.conf.Configuration)} call, and it holds a session with the
 * remote server that must be released.
 *
 * @see HadoopUtils#newConfiguration()
 * @see SftpLightWeightFileSystem
 * </p>
 */
@Slf4j
public class CloseableFsCopySource extends CopySource {

  /** Tracks every file system handed out so they can all be closed at shutdown. */
  private final Closer closer = Closer.create();

  @Override
  protected FileSystem getSourceFileSystem(State state) throws IOException {
    FileSystem sourceFs = HadoopUtils.getSourceFileSystem(state);
    // Register for closing when the source shuts down.
    return this.closer.register(sourceFs);
  }

  @Override
  public void shutdown(SourceState state) {
    try {
      this.closer.close();
    } catch (IOException ioe) {
      log.warn("Failed to close all closeables", ioe);
    }
  }

  @Override
  protected Extractor<String, FileAwareInputStream> extractorForCopyableFile(FileSystem fs, CopyableFile cf,
      WorkUnitState state) throws IOException {
    // Extractor variant that closes the underlying file system when done.
    return new CloseableFsFileAwareInputStreamExtractor(fs, cf, state);
  }
}
| 2,565 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/RecursiveCopyableDataset.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.TreeMap;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.commit.CommitStep;
import org.apache.gobblin.data.management.copy.entities.PostPublishStep;
import org.apache.gobblin.data.management.copy.entities.PrePublishStep;
import org.apache.gobblin.data.management.dataset.DatasetUtils;
import org.apache.gobblin.dataset.FileSystemDataset;
import org.apache.gobblin.util.FileListUtils;
import org.apache.gobblin.util.PathUtils;
import org.apache.gobblin.util.commit.SetPermissionCommitStep;
import org.apache.gobblin.util.commit.DeleteFileCommitStep;
/**
 * Implementation of {@link CopyableDataset} that creates a {@link CopyableFile} for every file that is a descendant of
 * the root directory.
 */
@Slf4j
public class RecursiveCopyableDataset implements CopyableDataset, FileSystemDataset {

  private static final String CONFIG_PREFIX = CopyConfiguration.COPY_PREFIX + ".recursive";

  /** Like -update in distcp. Will update files that are different between source and target, and skip files already in target. */
  public static final String UPDATE_KEY = CONFIG_PREFIX + ".update";
  /** Like -delete in distcp. Will delete files in target that don't exist in source. */
  public static final String DELETE_KEY = CONFIG_PREFIX + ".delete";
  /** If true, will delete newly empty directories up to the dataset root. */
  public static final String DELETE_EMPTY_DIRECTORIES_KEY = CONFIG_PREFIX + ".deleteEmptyDirectories";
  /** If true, will use our new logic to preserve permissions, owner, and group of ancestors. */
  public static final String USE_NEW_PRESERVE_LOGIC_KEY = CONFIG_PREFIX + ".useNewPreserveLogic";

  private final Path rootPath;
  private final FileSystem fs;
  private final PathFilter pathFilter;
  // Glob used to find this dataset
  private final Path glob;
  private final CopyableFileFilter copyableFileFilter;
  private final boolean update;
  private final boolean delete;
  // Include empty directories in the source for copy
  private final boolean includeEmptyDirectories;
  // Delete empty directories in the destination
  private final boolean deleteEmptyDirectories;
  // Apply filter to directories
  private final boolean applyFilterToDirectories;
  // Use new preserve logic which recurses down and walks the parent links up for preservation of permissions, user, and group.
  private final boolean useNewPreserveLogic;
  private final Properties properties;

  /**
   * @param fs source {@link FileSystem} the dataset lives on.
   * @param rootPath dataset root; scheme and authority are stripped.
   * @param properties job properties; see the {@code *_KEY} constants of this class.
   * @param glob the glob used to discover this dataset.
   */
  public RecursiveCopyableDataset(final FileSystem fs, Path rootPath, Properties properties, Path glob) {
    this.rootPath = PathUtils.getPathWithoutSchemeAndAuthority(rootPath);
    this.fs = fs;
    this.pathFilter = DatasetUtils.instantiatePathFilter(properties);
    this.copyableFileFilter = DatasetUtils.instantiateCopyableFileFilter(properties);
    this.glob = glob;
    this.update = Boolean.parseBoolean(properties.getProperty(UPDATE_KEY));
    this.delete = Boolean.parseBoolean(properties.getProperty(DELETE_KEY));
    this.deleteEmptyDirectories = Boolean.parseBoolean(properties.getProperty(DELETE_EMPTY_DIRECTORIES_KEY));
    this.includeEmptyDirectories =
        Boolean.parseBoolean(properties.getProperty(CopyConfiguration.INCLUDE_EMPTY_DIRECTORIES));
    this.applyFilterToDirectories =
        Boolean.parseBoolean(properties.getProperty(CopyConfiguration.APPLY_FILTER_TO_DIRECTORIES, "false"));
    this.useNewPreserveLogic = Boolean.parseBoolean(properties.getProperty(USE_NEW_PRESERVE_LOGIC_KEY));
    this.properties = properties;
  }

  /**
   * Computes the {@link CopyEntity}s needed to make the target consistent with the source.
   *
   * @param configuration copy configuration for this job.
   * @param filesInSource map of path (relative to {@code replacedPrefix}) to {@link FileStatus} in the source.
   * @param filesInTarget map of path (relative to the target dir) to {@link FileStatus} in the target;
   *        entries found in the source are removed from this map as a side effect.
   * @param targetFs target {@link FileSystem}.
   * @param replacedPrefix source prefix being replaced in file paths.
   * @param replacingPrefix target prefix that replaces {@code replacedPrefix}.
   * @param deleteEmptyDirectoriesUpTo highest ancestor up to which newly-empty directories may be deleted.
   * @throws IOException if files require updating but update mode is off, or on file system errors.
   */
  protected Collection<? extends CopyEntity> getCopyableFilesImpl(CopyConfiguration configuration,
      Map<Path, FileStatus> filesInSource,
      Map<Path, FileStatus> filesInTarget,
      FileSystem targetFs,
      Path replacedPrefix,
      Path replacingPrefix,
      Path deleteEmptyDirectoriesUpTo) throws IOException {
    List<Path> toCopy = Lists.newArrayList();
    Map<Path, FileStatus> toDelete = Maps.newHashMap();
    // True if some file exists in both source and target but with different content.
    boolean requiresUpdate = false;

    for (Map.Entry<Path, FileStatus> entry : filesInSource.entrySet()) {
      FileStatus statusInTarget = filesInTarget.remove(entry.getKey());
      if (statusInTarget != null) {
        // in both
        if (!sameFile(entry.getValue(), statusInTarget)) {
          toCopy.add(entry.getKey());
          toDelete.put(entry.getKey(), statusInTarget);
          requiresUpdate = true;
        }
      } else {
        toCopy.add(entry.getKey());
      }
    }

    if (!this.update && requiresUpdate) {
      throw new IOException("Some files need to be copied but they already exist in the destination. "
          + "Aborting because not running in update mode.");
    }

    if (this.delete) {
      // Everything remaining in filesInTarget exists only in the target.
      toDelete.putAll(filesInTarget);
    }

    List<CopyEntity> copyEntities = Lists.newArrayList();
    List<CopyableFile> copyableFiles = Lists.newArrayList();

    // Map of ancestor path -> owner/permission, ordered deepest-first so permissions can be set in order.
    // The comparator must tie-break on the path itself: comparing only by depth (slash count) makes distinct
    // paths of equal depth compare as equal, so the TreeMap would silently keep only one entry per depth.
    Map<String, OwnerAndPermission> ancestorOwnerAndPermissions = new TreeMap<>((o1, o2) -> {
      int byDepthDescending = Long.compare(o2.chars().filter(ch -> ch == '/').count(),
          o1.chars().filter(ch -> ch == '/').count());
      return byDepthDescending != 0 ? byDepthDescending : o1.compareTo(o2);
    });

    for (Path path : toCopy) {
      FileStatus file = filesInSource.get(path);
      Path filePathRelativeToSearchPath = PathUtils.relativizePath(file.getPath(), replacedPrefix);
      Path thisTargetPath = new Path(replacingPrefix, filePathRelativeToSearchPath);

      if (this.useNewPreserveLogic) {
        ancestorOwnerAndPermissions.putAll(CopyableFile
            .resolveReplicatedAncestorOwnerAndPermissionsRecursively(this.fs, file.getPath().getParent(),
                replacedPrefix, configuration));
      }

      CopyableFile copyableFile =
          CopyableFile.fromOriginAndDestination(this.fs, file, thisTargetPath, configuration)
              .fileSet(datasetURN())
              .datasetOutputPath(thisTargetPath.toString())
              .ancestorsOwnerAndPermission(CopyableFile
                  .resolveReplicatedOwnerAndPermissionsRecursively(this.fs, file.getPath().getParent(),
                      replacedPrefix, configuration))
              .build();
      copyableFile.setFsDatasets(this.fs, targetFs);
      copyableFiles.add(copyableFile);
    }
    copyEntities.addAll(this.copyableFileFilter.filter(this.fs, targetFs, copyableFiles));

    if (!toDelete.isEmpty()) {
      // Delete stale target files before publishing the new copies.
      CommitStep step = new DeleteFileCommitStep(targetFs, toDelete.values(), this.properties,
          this.deleteEmptyDirectories ? Optional.of(deleteEmptyDirectoriesUpTo) : Optional.<Path>absent());
      copyEntities.add(new PrePublishStep(datasetURN(), Maps.newHashMap(), step, 1));
    }

    if (this.useNewPreserveLogic) {
      Properties props = new Properties();
      props.setProperty(SetPermissionCommitStep.STOP_ON_ERROR_KEY, "true");
      CommitStep step = new SetPermissionCommitStep(targetFs, ancestorOwnerAndPermissions, props);
      copyEntities.add(new PostPublishStep(datasetURN(), Maps.newHashMap(), step, 1));
    }

    return copyEntities;
  }

  @Override
  public Collection<? extends CopyEntity> getCopyableFiles(FileSystem targetFs, CopyConfiguration configuration)
      throws IOException {
    Path nonGlobSearchPath = PathUtils.deepestNonGlobPath(this.glob);
    Path targetPath =
        new Path(configuration.getPublishDir(), PathUtils.relativizePath(this.rootPath, nonGlobSearchPath));

    Map<Path, FileStatus> filesInSource =
        createPathMap(getFilesAtPath(this.fs, this.rootPath, this.pathFilter), this.rootPath);
    Map<Path, FileStatus> filesInTarget =
        createPathMap(getFilesAtPath(targetFs, targetPath, this.pathFilter), targetPath);

    return getCopyableFilesImpl(configuration, filesInSource, filesInTarget, targetFs,
        nonGlobSearchPath, configuration.getPublishDir(), targetPath);
  }

  /**
   * Lists the files to copy under {@code path}, applying {@code fileFilter}. Returns an empty list when the
   * path does not exist, so a missing target directory is not an error.
   */
  @VisibleForTesting
  protected List<FileStatus> getFilesAtPath(FileSystem fs, Path path, PathFilter fileFilter)
      throws IOException {
    try {
      return FileListUtils
          .listFilesToCopyAtPath(fs, path, fileFilter, applyFilterToDirectories, includeEmptyDirectories);
    } catch (FileNotFoundException fnfe) {
      log.warn(String.format("Could not find any files on fs %s path %s due to the following exception. Returning an empty list of files.", fs.getUri(), path), fnfe);
      return Lists.newArrayList();
    }
  }

  @Override
  public Path datasetRoot() {
    return this.rootPath;
  }

  @Override
  public String datasetURN() {
    return datasetRoot().toString();
  }

  /** Maps each file's path relative to {@code prefix} to its {@link FileStatus}. */
  private Map<Path, FileStatus> createPathMap(List<FileStatus> files, Path prefix) {
    Map<Path, FileStatus> map = Maps.newHashMap();
    for (FileStatus status : files) {
      map.put(PathUtils.relativizePath(status.getPath(), prefix), status);
    }
    return map;
  }

  /**
   * Two files are considered the same when lengths match and the target is at least as new as the source
   * (modification times are the only content proxy available without checksumming).
   */
  private static boolean sameFile(FileStatus fileInSource, FileStatus fileInTarget) {
    return fileInTarget.getLen() == fileInSource.getLen() && fileInSource.getModificationTime() <= fileInTarget
        .getModificationTime();
  }

  @Override
  public String getDatasetPath() {
    return Path.getPathWithoutSchemeAndAuthority(this.rootPath).toString();
  }
}
| 2,566 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/UnixTimestampRecursiveCopyableDataset.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.TreeMap;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.joda.time.DateTimeZone;
import org.joda.time.LocalDate;
import org.joda.time.LocalDateTime;
import org.joda.time.Period;
import org.joda.time.format.PeriodFormatter;
import org.joda.time.format.PeriodFormatterBuilder;
import com.google.common.collect.Lists;
import org.apache.gobblin.util.PathUtils;
import org.apache.gobblin.util.filters.AndPathFilter;
/**
 * This dataset filters file paths based on a {@link #timestampPattern} and {@link #versionSelectionPolicy}.
 *
 * The default regex will match the first occurrence of a directory matching the pattern after the dataset root.
 */
public class UnixTimestampRecursiveCopyableDataset extends RecursiveCopyableDataset {

  private static final String CONFIG_PREFIX = CopyConfiguration.COPY_PREFIX + ".recursive";
  public static final String VERSION_SELECTION_POLICY = CONFIG_PREFIX + ".version.selection.policy";
  /** Configuration key for the regex whose first capture group is the unix timestamp (millis) in a path. */
  public static final String TIMESTAMP_REGEX = CONFIG_PREFIX + ".timestamp.pattern";
  /** @deprecated misspelled alias of {@link #TIMESTAMP_REGEX}; kept for backwards compatibility. */
  @Deprecated
  public static final String TIMESTAMP_REGEEX = TIMESTAMP_REGEX;
  public static final String DEFAULT_TIMESTAMP_REGEX = ".*/([0-9]{13}).*/.*";

  private final String lookbackTime;
  private final Period lookbackPeriod;
  private final LocalDateTime currentTime;
  private final VersionSelectionPolicy versionSelectionPolicy;
  private final DateTimeZone dateTimeZone;
  private final Pattern timestampPattern;

  public UnixTimestampRecursiveCopyableDataset(FileSystem fs, Path rootPath, Properties properties, Path glob) {
    super(fs, rootPath, properties, glob);
    this.lookbackTime = properties.getProperty(TimeAwareRecursiveCopyableDataset.LOOKBACK_TIME_KEY);
    this.versionSelectionPolicy =
        VersionSelectionPolicy.valueOf(properties.getProperty(VERSION_SELECTION_POLICY).toUpperCase());
    // Lookback is expressed in days only, e.g. "7d".
    PeriodFormatter periodFormatter = new PeriodFormatterBuilder().appendDays().appendSuffix("d").toFormatter();
    this.lookbackPeriod = periodFormatter.parsePeriod(lookbackTime);
    String timestampRegex = properties.getProperty(TIMESTAMP_REGEX, DEFAULT_TIMESTAMP_REGEX);
    this.timestampPattern = Pattern.compile(timestampRegex);
    this.dateTimeZone = DateTimeZone.forID(properties
        .getProperty(TimeAwareRecursiveCopyableDataset.DATE_PATTERN_TIMEZONE_KEY,
            TimeAwareRecursiveCopyableDataset.DEFAULT_DATE_PATTERN_TIMEZONE));
    this.currentTime = LocalDateTime.now(this.dateTimeZone);
  }

  private enum VersionSelectionPolicy {
    EARLIEST, LATEST, ALL
  }

  /**
   * Given a lookback period, this filter extracts the timestamp from the path
   * based on {@link #timestampPattern} and filters out the paths that are out of the date range.
   *
   * NOTE(review): the accepted window is (startDate, endDate] — a timestamp landing exactly on the start
   * date is excluded, which differs from the "fileNames >= startDate" wording at the call site; confirm
   * which boundary behavior is intended.
   */
  class TimestampPathFilter implements PathFilter {

    @Override
    public boolean accept(Path path) {
      LocalDate endDate = currentTime.toLocalDate();
      LocalDate startDate = endDate.minus(lookbackPeriod);
      Path relativePath = PathUtils.relativizePath(PathUtils.getPathWithoutSchemeAndAuthority(path), datasetRoot());
      Matcher matcher = timestampPattern.matcher(relativePath.toString());
      if (!matcher.matches()) {
        return false;
      }
      long timestamp = Long.parseLong(matcher.group(1));
      LocalDate dateOfTimestamp = new LocalDateTime(timestamp, dateTimeZone).toLocalDate();
      // Accept dates strictly after startDate and no later than endDate (dateOfTimestamp is never null,
      // so the original null check was dead code).
      return dateOfTimestamp.isAfter(startDate) && !dateOfTimestamp.isAfter(endDate);
    }
  }

  /**
   * Lists files within the lookback window and, unless the policy is {@code ALL}, keeps only the earliest or
   * latest timestamped version per (path prefix, calendar day) group.
   */
  @Override
  protected List<FileStatus> getFilesAtPath(FileSystem fs, Path path, PathFilter fileFilter)
      throws IOException {
    // Filter files by lookback period first.
    PathFilter andPathFilter = new AndPathFilter(fileFilter, new TimestampPathFilter());
    List<FileStatus> files = super.getFilesAtPath(fs, path, andPathFilter);

    if (VersionSelectionPolicy.ALL == versionSelectionPolicy) {
      return files;
    }

    // Group by (prefix before the timestamp, day of the timestamp); inside each group bucket by exact
    // timestamp in a sorted map so first/last entry is the earliest/latest version of that day.
    Map<Pair<String, LocalDate>, TreeMap<Long, List<FileStatus>>> pathTimestampFilesMap = new HashMap<>();
    for (FileStatus fileStatus : files) {
      String relativePath =
          PathUtils.relativizePath(PathUtils.getPathWithoutSchemeAndAuthority(fileStatus.getPath()), datasetRoot())
              .toString();
      Matcher matcher = timestampPattern.matcher(relativePath);
      if (!matcher.matches()) {
        continue;
      }
      String timestampStr = matcher.group(1);
      String rootPath = relativePath.substring(0, relativePath.indexOf(timestampStr));
      Long unixTimestamp = Long.parseLong(timestampStr);
      LocalDate localDate = new LocalDateTime(unixTimestamp, dateTimeZone).toLocalDate();
      Pair<String, LocalDate> key = new ImmutablePair<>(rootPath, localDate);
      pathTimestampFilesMap.computeIfAbsent(key, k -> new TreeMap<Long, List<FileStatus>>())
          .computeIfAbsent(unixTimestamp, t -> Lists.<FileStatus>newArrayList())
          .add(fileStatus);
    }

    // Select version(s) per group according to the policy.
    List<FileStatus> result = new ArrayList<>();
    for (TreeMap<Long, List<FileStatus>> timestampFileStatus : pathTimestampFilesMap.values()) {
      if (timestampFileStatus.isEmpty()) {
        continue;
      }
      switch (versionSelectionPolicy) {
        case EARLIEST:
          result.addAll(timestampFileStatus.firstEntry().getValue());
          break;
        case LATEST:
          result.addAll(timestampFileStatus.lastEntry().getValue());
          break;
        default:
          throw new RuntimeException("Unsupported version selection policy");
      }
    }
    return result;
  }
}
| 2,567 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/IterableCopyableDatasetImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy;
import com.google.common.base.Function;
import com.google.common.collect.Iterators;
import com.google.common.collect.Maps;
import org.apache.gobblin.data.management.partition.FileSet;
import org.apache.gobblin.dataset.Dataset;
import java.io.IOException;
import java.util.Collection;
import java.util.Iterator;
import java.util.Map;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import lombok.AllArgsConstructor;
import org.apache.hadoop.fs.FileSystem;
/**
 * Wraps a {@link CopyableDataset} to produce an {@link IterableCopyableDataset}: the wrapped dataset's
 * copyable files are computed eagerly and then partitioned into {@link FileSet}s keyed by file set name.
 */
@AllArgsConstructor
public class IterableCopyableDatasetImpl implements IterableCopyableDataset {

  private final CopyableDataset dataset;

  @Override
  public Iterator<FileSet<CopyEntity>> getFileSetIterator(FileSystem targetFs, CopyConfiguration configuration)
      throws IOException {
    return partitionCopyableFiles(this.dataset, this.dataset.getCopyableFiles(targetFs, configuration));
  }

  @Override
  public String datasetURN() {
    return this.dataset.datasetURN();
  }

  /**
   * Groups {@code files} into one {@link FileSet} per distinct {@link CopyEntity#getFileSet()} name.
   */
  private static Iterator<FileSet<CopyEntity>> partitionCopyableFiles(Dataset dataset,
      Collection<? extends CopyEntity> files) {
    Map<String, FileSet.Builder<CopyEntity>> partitionBuildersMaps = Maps.newHashMap();
    for (CopyEntity file : files) {
      // Lazily create one builder per file set name; avoids the containsKey/put two-step.
      partitionBuildersMaps
          .computeIfAbsent(file.getFileSet(), fileSetName -> new FileSet.Builder<>(fileSetName, dataset))
          .add(file);
    }
    return Iterators.transform(partitionBuildersMaps.values().iterator(), FileSet.Builder::build);
  }
}
| 2,568 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/SubsetFilesCopyableDataset.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy;
import java.io.IOException;
import java.util.Collection;
import java.util.List;
import java.util.Properties;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import com.google.common.collect.Lists;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.data.management.dataset.DatasetUtils;
import org.apache.gobblin.dataset.FileSystemDataset;
import org.apache.gobblin.metrics.event.EventSubmitter;
import org.apache.gobblin.util.PathUtils;
import lombok.extern.slf4j.Slf4j;
/**
 * Implementation of {@link CopyableDataset} and {@link FileSystemDataset} that has an identifier in a root directory,
 * and only contains a subset of files in the root directory.
 */
@Slf4j
public class SubsetFilesCopyableDataset implements CopyableDataset, FileSystemDataset {

  private final Path rootPath;
  private final FileSystem fs;
  // The subset of files under rootPath that belong to this dataset.
  private final List<FileStatus> files;
  // Identifier that distinguishes this subset within the root directory.
  private final String identifier;
  private final Properties props;
  private EventSubmitter eventSubmitter;
  private SourceState state;

  public SubsetFilesCopyableDataset(final FileSystem fs, Path rootPath, Properties properties, String identifier,
      List<FileStatus> subFiles) {
    this.rootPath = PathUtils.getPathWithoutSchemeAndAuthority(rootPath);
    this.fs = fs;
    this.files = subFiles;
    this.identifier = identifier;
    this.props = properties;
  }

  public SubsetFilesCopyableDataset(final FileSystem fs, Path rootPath, Properties properties, String identifier,
      List<FileStatus> subFiles, EventSubmitter eventSubmitter) {
    this(fs, rootPath, properties, identifier, subFiles);
    this.eventSubmitter = eventSubmitter;
  }

  public SubsetFilesCopyableDataset(final FileSystem fs, Path rootPath, Properties properties, String identifier,
      List<FileStatus> subFiles, EventSubmitter eventSubmitter, SourceState state) {
    this(fs, rootPath, properties, identifier, subFiles, eventSubmitter);
    this.state = state;
  }

  @Override
  public String datasetURN() {
    // Root path concatenated with the identifier, with '/' flattened so the URN is a single token.
    return (rootPath + this.identifier).replace('/', '_');
  }

  @Override
  public Collection<? extends CopyEntity> getCopyableFiles(FileSystem targetFs, CopyConfiguration configuration)
      throws IOException {
    List<CopyableFile> copyableFiles = Lists.newArrayList();
    for (FileStatus fileStatus : this.files) {
      if (this.shouldAddToCopyableFiles(fileStatus)) {
        log.debug("Adding copyable file " + fileStatus.getPath() + "for " + identifier + " in " + this.rootPath);
        Path targetPath = this.getTargetPath(configuration.getPublishDir(), fileStatus.getPath(), this.identifier);
        copyableFiles.add(CopyableFile.fromOriginAndDestination(this.fs, fileStatus, targetPath, configuration)
            .destinationOwnerAndPermission(this.getDestinationOwnerAndPermission()).fileSet(this.fileSet(fileStatus))
            .build());
      }
    }
    // Apply the configured file filter before handing the files back.
    return DatasetUtils.instantiateCopyableFileFilter(this.props, this.state, this)
        .filter(this.fs, targetFs, copyableFiles);
  }

  /** Maps {@code originPath} under {@code publishDir}, preserving its path relative to the dataset root. */
  public Path getTargetPath(Path publishDir, Path originPath, String identifier) {
    Path filePathRelativeToSearchPath = PathUtils.relativizePath(originPath, this.rootPath);
    return new Path(publishDir, filePathRelativeToSearchPath);
  }

  /** Override to force specific destination ownership/permissions; default {@code null} keeps the defaults. */
  public OwnerAndPermission getDestinationOwnerAndPermission() {
    return null;
  }

  /** Override to exclude individual files from the copy; accepts everything by default. */
  public boolean shouldAddToCopyableFiles(FileStatus fileStatus) {
    return true;
  }

  /** File set name for a file; by default all files share the dataset identifier. */
  public String fileSet(FileStatus fileStatus) {
    return this.identifier;
  }

  @Override
  public Path datasetRoot() {
    return rootPath;
  }
}
| 2,569 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/CopyContext.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import com.google.common.base.Optional;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import lombok.extern.slf4j.Slf4j;
/**
 * Context holding global objects shared across a single copy job.
 */
@Slf4j
public class CopyContext {

  /**
   * Cache of {@link FileStatus} per path, used to cut down on {@link org.apache.hadoop.fs.FileSystem#getFileStatus}
   * calls while replicating attributes. Keys are fully qualified paths so that multiple
   * {@link org.apache.hadoop.fs.FileSystem}s can share the cache safely.
   */
  private final Cache<Path, Optional<FileStatus>> fileStatusCache;

  public CopyContext() {
    this.fileStatusCache = CacheBuilder.newBuilder().recordStats().maximumSize(10000).build();
  }

  /**
   * Looks up the {@link FileStatus} for {@code path} on {@code fs}, serving from cache when possible.
   * A missing file is cached as {@link Optional#absent()} rather than as an error.
   */
  public Optional<FileStatus> getFileStatus(final FileSystem fs, final Path path) throws IOException {
    Path qualifiedPath = fs.makeQualified(path);
    try {
      return this.fileStatusCache.get(qualifiedPath, () -> {
        try {
          return Optional.of(fs.getFileStatus(path));
        } catch (FileNotFoundException fnfe) {
          return Optional.absent();
        }
      });
    } catch (ExecutionException ee) {
      // Unwrap the loader failure so callers see the real cause.
      throw new IOException(ee.getCause());
    }
  }

  /** Logs hit/miss statistics of the file status cache. */
  public void logCacheStatistics() {
    log.info(this.fileStatusCache.stats().toString());
  }
}
| 2,570 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/ReadyCopyableFileFilter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy;
import java.io.IOException;
import java.util.Collection;
import java.util.Iterator;
import lombok.extern.slf4j.Slf4j;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import com.google.common.collect.ImmutableList;
import org.apache.gobblin.util.PathUtils;
/**
 * A {@link CopyableFileFilter} that drops a {@link CopyableFile} if another file with "filename.ready" is not found on
 * the <code>sourceFs</code>.
 */
@Slf4j
public class ReadyCopyableFileFilter implements CopyableFileFilter {

  public static final String READY_EXTENSION = ".ready";

  /**
   * For every {@link CopyableFile} in <code>copyableFiles</code> checks if a {@link CopyableFile#getOrigin()#getPath()}
   * + .ready file is present on <code>sourceFs</code>. Files whose .ready marker is missing or unreadable are
   * dropped. {@inheritDoc}
   *
   * @see org.apache.gobblin.data.management.copy.CopyableFileFilter#filter(org.apache.hadoop.fs.FileSystem,
   *      org.apache.hadoop.fs.FileSystem, java.util.Collection)
   */
  @Override
  public Collection<CopyableFile> filter(FileSystem sourceFs, FileSystem targetFs,
      Collection<CopyableFile> copyableFiles) {
    ImmutableList.Builder<CopyableFile> filtered = ImmutableList.builder();
    // Enhanced for-loop: the explicit Iterator in the previous version was never used for removal.
    for (CopyableFile cf : copyableFiles) {
      Path readyFilePath = PathUtils.addExtension(cf.getOrigin().getPath(), READY_EXTENSION);
      try {
        if (sourceFs.exists(readyFilePath)) {
          filtered.add(cf);
        } else {
          log.info(String.format("Removing %s as the .ready file is not found", cf.getOrigin().getPath()));
        }
      } catch (IOException e) {
        // Include the throwable so the stack trace is logged, not just the message.
        log.warn(String.format("Removing %s as the .ready file can not be read. Exception %s",
            cf.getOrigin().getPath(), e.getMessage()), e);
      }
    }
    return filtered.build();
  }
}
| 2,571 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/TimeAwareCopyableGlobDatasetFinder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy;
import java.io.IOException;
import java.util.Properties;
import org.apache.gobblin.data.management.retention.profile.ConfigurableGlobDatasetFinder;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
/**
* {@link org.apache.gobblin.data.management.retention.profile.ConfigurableGlobDatasetFinder} that returns datasets of type
 * {@link org.apache.gobblin.data.management.copy.TimeAwareRecursiveCopyableDataset}.
*/
public class TimeAwareCopyableGlobDatasetFinder extends ConfigurableGlobDatasetFinder<CopyableDataset> {
  /**
   * @param fs {@link FileSystem} the configured glob pattern is resolved against
   * @param props job properties, forwarded to {@link ConfigurableGlobDatasetFinder} and to each created dataset
   */
  public TimeAwareCopyableGlobDatasetFinder(FileSystem fs, Properties props) {
    super(fs, props);
  }
  /**
   * Creates a {@link TimeAwareRecursiveCopyableDataset} rooted at {@code path}, using this finder's
   * file system, properties, and dataset glob pattern.
   */
  @Override
  public CopyableDataset datasetAtPath(Path path) throws IOException {
    return new TimeAwareRecursiveCopyableDataset(this.fs, path, this.props, this.datasetPattern);
  }
}
| 2,572 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/CopyableFile.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy;
import java.io.IOException;
import java.io.StringWriter;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutionException;
import lombok.extern.slf4j.Slf4j;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.cache.Cache;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.gson.stream.JsonWriter;
import lombok.AccessLevel;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.NoArgsConstructor;
import lombok.Setter;
import org.apache.gobblin.data.management.copy.PreserveAttributes.Option;
import org.apache.gobblin.data.management.partition.File;
import org.apache.gobblin.dataset.DatasetConstants;
import org.apache.gobblin.dataset.DatasetDescriptor;
import org.apache.gobblin.dataset.Descriptor;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.PathUtils;
import org.apache.gobblin.util.guid.Guid;
/**
* Abstraction for a file to copy from {@link #origin} to {@link #destination}. {@link CopyableFile}s should be
* created using a {@link CopyableFile.Builder} obtained with the method {@link CopyableFile#builder}.
*/
@Getter
@Setter
@NoArgsConstructor(access = AccessLevel.PROTECTED)
@EqualsAndHashCode(callSuper = true)
@Slf4j
public class CopyableFile extends CopyEntity implements File {
  // Sentinel used when checksum computation is skipped or unavailable (e.g. for directories).
  private static final byte[] EMPTY_CHECKSUM = new byte[0];
  /**
   * The source data the file belongs to. For now, since it's only used before copying, set it to be
   * transient so that it won't be serialized, avoid unnecessary data transfer
   */
  private transient Descriptor sourceData;
  /** {@link FileStatus} of the existing origin file. */
  private FileStatus origin;
  /** The destination data the file will be copied to */
  private Descriptor destinationData;
  /** Complete destination {@link Path} of the file. */
  private Path destination;
  /** Common path for dataset to which this CopyableFile belongs. */
  public String datasetOutputPath;
  /** Desired {@link OwnerAndPermission} of the destination path. */
  private OwnerAndPermission destinationOwnerAndPermission;
  /**
   * Desired {@link OwnerAndPermission} of the ancestor directories of the destination path. The list is ordered from
   * deepest to highest directory.
   *
   * <p>
   * For example, if {@link #destination} is /a/b/c/file, then the first element of this list is the desired owner and
   * permission for directory /a/b/c, the second is the desired owner and permission for directory /a/b, and so on.
   * </p>
   *
   * <p>
   * If there are fewer elements in the list than ancestor directories in {@link #destination}, it is understood that
   * extra directories are allowed to have any owner and permission.
   * </p>
   */
  private List<OwnerAndPermission> ancestorsOwnerAndPermission;
  /** Checksum of the origin file. */
  private byte[] checksum;
  /** Attributes to be preserved. */
  private PreserveAttributes preserve;
  /** Timestamp of file at its origin source. */
  private long originTimestamp;
  /** Timestamp of file as in upstream. */
  private long upstreamTimestamp;
  // Identifier of the data-file version strategy; stored and carried through from the builder
  // (semantics are defined by whoever supplies it — not interpreted in this class).
  private String dataFileVersionStrategy;
  /**
   * Full constructor. Normally invoked indirectly via the lombok-generated {@link Builder}
   * (obtained through {@link #builder} or {@link #fromOriginAndDestination}) rather than directly.
   */
  @lombok.Builder(builderClassName = "Builder", builderMethodName = "_hiddenBuilder")
  public CopyableFile(FileStatus origin, Path destination, OwnerAndPermission destinationOwnerAndPermission,
      List<OwnerAndPermission> ancestorsOwnerAndPermission, byte[] checksum, PreserveAttributes preserve,
      String fileSet, long originTimestamp, long upstreamTimestamp, Map<String, String> additionalMetadata,
      String datasetOutputPath,
      String dataFileVersionStrategy) {
    super(fileSet, additionalMetadata);
    this.origin = origin;
    this.destination = destination;
    this.destinationOwnerAndPermission = destinationOwnerAndPermission;
    this.ancestorsOwnerAndPermission = ancestorsOwnerAndPermission;
    this.checksum = checksum;
    this.preserve = preserve;
    this.dataFileVersionStrategy = dataFileVersionStrategy;
    this.originTimestamp = originTimestamp;
    this.upstreamTimestamp = upstreamTimestamp;
    this.datasetOutputPath = datasetOutputPath;
  }
  /** @return pretty-printed JSON, including all metadata */
  public String toJsonString() {
    return toJsonString(true);
  }
  /** @return pretty-printed JSON, optionally including metadata */
  public String toJsonString(boolean includeMetadata) {
    StringWriter stringWriter = new StringWriter();
    try (JsonWriter jsonWriter = new JsonWriter(stringWriter)) {
      jsonWriter.setIndent("\t");
      this.toJson(jsonWriter, includeMetadata);
    } catch (IOException ioe) {
      // Ignored — StringWriter-backed JsonWriter does not perform real I/O.
    }
    return stringWriter.toString();
  }
  /**
   * Serializes selected fields of this {@link CopyableFile} onto the given {@link JsonWriter};
   * the caller owns the writer's lifecycle.
   */
  public void toJson(JsonWriter jsonWriter, boolean includeMetadata) throws IOException {
    jsonWriter.beginObject();
    jsonWriter
        .name("file set").value(this.getFileSet())
        .name("origin").value(this.getOrigin().toString())
        .name("destination").value(this.getDestination().toString())
        .name("destinationOwnerAndPermission").value(this.getDestinationOwnerAndPermission().toString())
        // TODO:
        // this.ancestorsOwnerAndPermission
        // this.checksum
        // this.preserve
        // this.dataFileVersionStrategy
        // this.originTimestamp
        // this.upstreamTimestamp
        // NOTE(review): datasetOutputPath is already a String so .toString() is redundant, and this line
        // (like the getters above) NPEs if the field is null — confirm callers always populate these.
        .name("datasetOutputPath").value(this.getDatasetOutputPath().toString());
    if (includeMetadata && this.getAdditionalMetadata() != null) {
      jsonWriter.name("metadata");
      jsonWriter.beginObject();
      for (Map.Entry<String, String> entry : this.getAdditionalMetadata().entrySet()) {
        jsonWriter.name(entry.getKey()).value(entry.getValue());
      }
      jsonWriter.endObject();
    }
    jsonWriter.endObject();
  }
  /**
   * Set file system based source and destination dataset for this {@link CopyableFile}
   *
   * @param originFs {@link FileSystem} where this {@link CopyableFile} origins
   * @param targetFs {@link FileSystem} where this {@link CopyableFile} is copied to
   */
  public void setFsDatasets(FileSystem originFs, FileSystem targetFs) {
    /*
     * By default, the raw Gobblin dataset for CopyableFile lineage is its parent folder
     * if itself is not a folder
     */
    boolean isDir = origin.isDirectory();
    Path fullSourcePath = Path.getPathWithoutSchemeAndAuthority(origin.getPath());
    String sourceDatasetName = isDir ? fullSourcePath.toString() : fullSourcePath.getParent().toString();
    DatasetDescriptor sourceDataset = new DatasetDescriptor(originFs.getScheme(), originFs.getUri(), sourceDatasetName);
    sourceDataset.addMetadata(DatasetConstants.FS_URI, originFs.getUri().toString());
    sourceData = sourceDataset;
    Path fullDestinationPath = Path.getPathWithoutSchemeAndAuthority(destination);
    String destinationDatasetName = isDir ? fullDestinationPath.toString() : fullDestinationPath.getParent().toString();
    DatasetDescriptor destinationDataset = new DatasetDescriptor(targetFs.getScheme(), targetFs.getUri(),
        destinationDatasetName);
    destinationDataset.addMetadata(DatasetConstants.FS_URI, targetFs.getUri().toString());
    destinationData = destinationDataset;
  }
  /**
   * Get a {@link CopyableFile.Builder}.
   *
   * @param originFs {@link FileSystem} where original file exists.
   * @param origin {@link FileStatus} of the original file.
   * @param datasetRoot Value of {@link CopyableDataset#datasetRoot} of the dataset creating this {@link CopyableFile}.
   * @param copyConfiguration {@link CopyConfiguration} for the copy job.
   * @return a {@link CopyableFile.Builder}.
   * @deprecated use {@link #fromOriginAndDestination}. This method was changed to remove reliance on dataset root
   *             which is not standard of all datasets. The old functionality on inferring destinations cannot be
   *             achieved without dataset root and common dataset root, so this is an approximation. Copyable datasets
   *             should compute file destinations themselves.
   */
  @Deprecated
  public static Builder builder(FileSystem originFs, FileStatus origin, Path datasetRoot,
      CopyConfiguration copyConfiguration) {
    // Destination is approximated as <publishDir>/<datasetRootName>/<path relative to datasetRoot>.
    Path relativePath = PathUtils.relativizePath(origin.getPath(), datasetRoot);
    Path targetRoot = new Path(copyConfiguration.getPublishDir(), datasetRoot.getName());
    Path targetPath = new Path(targetRoot, relativePath);
    return _hiddenBuilder().originFS(originFs).origin(origin).destination(targetPath)
        .preserve(copyConfiguration.getPreserve()).configuration(copyConfiguration);
  }
  /** Get a {@link Builder} for copying {@code origin} to an explicitly provided {@code destination}. */
  public static Builder fromOriginAndDestination(FileSystem originFs, FileStatus origin, Path destination,
      CopyConfiguration copyConfiguration) {
    return _hiddenBuilder().originFS(originFs).origin(origin).destination(destination).configuration(copyConfiguration)
        .preserve(copyConfiguration.getPreserve());
  }
  /**
   * Builder for creating {@link CopyableFile}s.
   *
   * Allows the {@link CopyableDataset} to set any field of the {@link CopyableFile}, but infers any unset fields
   * to facilitate creation of custom {@link CopyableDataset}s. See javadoc for {@link CopyableFile.Builder#build} for
   * inference information.
   */
  public static class Builder {
    // Copy-job configuration; supplies the target FS, target group, and the shared copy-context FileStatus cache.
    private CopyConfiguration configuration;
    // FileSystem the origin file lives on; used during build() for checksum and ACL lookups.
    private FileSystem originFs;
    private Map<String, String> additionalMetadata;
    private String datasetOutputPath;
    private Builder originFS(FileSystem originFs) {
      this.originFs = originFs;
      return this;
    }
    private Builder configuration(CopyConfiguration configuration) {
      this.configuration = configuration;
      return this;
    }
    /**
     * Builds a {@link CopyableFile} using fields set by the {@link CopyableDataset} and inferring unset fields.
     * If the {@link Builder} was obtained through {@link CopyableFile#builder}, it is safe to call this method
     * even without setting any other fields (they will all be inferred).
     *
     * <p>
     * The inferred fields are as follows:
     * * {@link CopyableFile#destinationOwnerAndPermission}: Copy attributes from origin {@link FileStatus} depending
     *     on the {@link PreserveAttributes} flags {@link #preserve}. Non-preserved attributes are left null,
     *     allowing Gobblin distcp to use defaults for the target {@link FileSystem}.
     * * {@link CopyableFile#ancestorsOwnerAndPermission}: Copy attributes from ancestors of origin path whose name
     *     exactly matches the corresponding name in the target path and which don't exist on th target. The actual
     *     owner and permission depend on the {@link PreserveAttributes} flags {@link #preserve}.
     *     Non-preserved attributes are left null, allowing Gobblin distcp to use defaults for the target {@link FileSystem}.
     * * {@link CopyableFile#checksum}: the checksum of the origin {@link FileStatus} obtained using the origin
     *     {@link FileSystem}.
     * * {@link CopyableFile#fileSet}: empty string. Used as default file set per dataset.
     * </p>
     *
     * @return A {@link CopyableFile}.
     * @throws IOException
     */
    public CopyableFile build() throws IOException {
      if (!this.destination.isAbsolute()) {
        throw new IOException("Destination must be absolute: " + this.destination);
      }
      if (this.destinationOwnerAndPermission == null) {
        String owner = this.preserve.preserve(Option.OWNER) ? this.origin.getOwner() : null;
        // Group preference: preserved origin group wins over the configured target group.
        String group = null;
        if (this.preserve.preserve(Option.GROUP)) {
          group = this.origin.getGroup();
        } else if (this.configuration.getTargetGroup().isPresent()) {
          group = this.configuration.getTargetGroup().get();
        }
        FsPermission permission = this.preserve.preserve(Option.PERMISSION) ? this.origin.getPermission() : null;
        List<AclEntry> aclEntries = this.preserve.preserve(Option.ACLS) ? getAclEntries(this.originFs, this.origin.getPath()) : Lists.newArrayList();
        this.destinationOwnerAndPermission = new OwnerAndPermission(owner, group, permission, aclEntries);
      }
      if (this.ancestorsOwnerAndPermission == null) {
        this.ancestorsOwnerAndPermission = replicateAncestorsOwnerAndPermission(this.originFs, this.origin.getPath(),
            this.configuration.getTargetFs(), this.destination);
      }
      if (this.checksum == null) {
        // Checksum computation is skipped by default (copy.skipChecksum defaults to true).
        if (ConfigUtils.getBoolean(this.configuration.getConfig(), "copy.skipChecksum", true)) {
          this.checksum = EMPTY_CHECKSUM;
        } else {
          FileChecksum checksumTmp = this.origin.isDirectory() ? null : this.originFs.getFileChecksum(this.origin.getPath());
          this.checksum = checksumTmp == null ? EMPTY_CHECKSUM : checksumTmp.getBytes();
        }
      }
      if (this.fileSet == null) {
        // Default file set per dataset
        this.fileSet = "";
      }
      // Both timestamps default to the origin file's modification time when unset.
      if (this.originTimestamp == 0) {
        this.originTimestamp = this.origin.getModificationTime();
      }
      if (this.upstreamTimestamp == 0) {
        this.upstreamTimestamp = this.origin.getModificationTime();
      }
      return new CopyableFile(this.origin, this.destination, this.destinationOwnerAndPermission,
          this.ancestorsOwnerAndPermission, this.checksum, this.preserve, this.fileSet, this.originTimestamp,
          this.upstreamTimestamp, this.additionalMetadata, this.datasetOutputPath, this.dataFileVersionStrategy);
    }
    /**
     * Walks matching-named ancestors of {@code originPath} and {@code destinationPath} in lockstep (deepest first),
     * recording the origin's replicated owner/permission for each destination ancestor. Stops early as soon as a
     * destination ancestor already exists on {@code targetFs} (its attributes are then left untouched).
     */
    private List<OwnerAndPermission> replicateAncestorsOwnerAndPermission(FileSystem originFs, Path originPath,
        FileSystem targetFs, Path destinationPath) throws IOException {
      List<OwnerAndPermission> ancestorOwnerAndPermissions = Lists.newArrayList();
      Path currentOriginPath = originPath.getParent();
      Path currentTargetPath = destinationPath.getParent();
      while (currentOriginPath != null && currentTargetPath != null
          && currentOriginPath.getName().equals(currentTargetPath.getName())) {
        Optional<FileStatus> targetFileStatus =
            this.configuration.getCopyContext().getFileStatus(targetFs, currentTargetPath);
        if (targetFileStatus.isPresent()) {
          return ancestorOwnerAndPermissions;
        }
        ancestorOwnerAndPermissions
            .add(resolveReplicatedOwnerAndPermission(originFs, currentOriginPath, this.configuration));
        currentOriginPath = currentOriginPath.getParent();
        currentTargetPath = currentTargetPath.getParent();
      }
      return ancestorOwnerAndPermissions;
    }
  }
  /**
   * Computes the correct {@link OwnerAndPermission} obtained from replicating source owner and permissions and applying
   * the {@link PreserveAttributes} rules in copyConfiguration.
   * @throws IOException
   */
  public static OwnerAndPermission resolveReplicatedOwnerAndPermission(FileSystem fs, Path path,
      CopyConfiguration copyConfiguration) throws IOException {
    Optional<FileStatus> originFileStatus = copyConfiguration.getCopyContext().getFileStatus(fs, path);
    if (!originFileStatus.isPresent()) {
      throw new IOException(String.format("Origin path %s does not exist.", path));
    }
    return resolveReplicatedOwnerAndPermission(fs, originFileStatus.get(), copyConfiguration);
  }
  /**
   * Computes the correct {@link OwnerAndPermission} obtained from replicating source owner and permissions and applying
   * the {@link PreserveAttributes} rules in copyConfiguration.
   * @throws IOException
   */
  public static OwnerAndPermission resolveReplicatedOwnerAndPermission(FileSystem fs, FileStatus originFileStatus,
      CopyConfiguration copyConfiguration)
      throws IOException {
    PreserveAttributes preserve = copyConfiguration.getPreserve();
    // NOTE: here the configured target group takes precedence over the preserved origin group —
    // the opposite priority of Builder.build() above.
    String group = null;
    if (copyConfiguration.getTargetGroup().isPresent()) {
      group = copyConfiguration.getTargetGroup().get();
    } else if (preserve.preserve(Option.GROUP)) {
      group = originFileStatus.getGroup();
    }
    return new OwnerAndPermission(preserve.preserve(Option.OWNER) ? originFileStatus.getOwner() : null, group,
        preserve.preserve(Option.PERMISSION) ? originFileStatus.getPermission() : null,
        preserve.preserve(Option.ACLS) ? getAclEntries(fs, originFileStatus.getPath()) : Lists.newArrayList());
  }
  /**
   * Compute the correct {@link OwnerAndPermission} obtained from replicating source owner and permissions and applying
   * the {@link PreserveAttributes} rules for fromPath and every ancestor up to but excluding toPath.
   *
   * @return A list of the computed {@link OwnerAndPermission}s starting from fromPath, up to but excluding toPath.
   * @throws IOException if toPath is not an ancestor of fromPath.
   */
  public static List<OwnerAndPermission> resolveReplicatedOwnerAndPermissionsRecursively(FileSystem sourceFs, Path fromPath,
      Path toPath, CopyConfiguration copyConfiguration) throws IOException {
    if (!PathUtils.isAncestor(toPath, fromPath)) {
      throw new IOException(String.format("toPath %s must be an ancestor of fromPath %s.", toPath, fromPath));
    }
    List<OwnerAndPermission> ownerAndPermissions = Lists.newArrayList();
    Path currentPath = fromPath;
    while (currentPath.getParent() != null && PathUtils.isAncestor(toPath, currentPath.getParent())) {
      ownerAndPermissions.add(resolveReplicatedOwnerAndPermission(sourceFs, currentPath, copyConfiguration));
      currentPath = currentPath.getParent();
    }
    return ownerAndPermissions;
  }
  /**
   * Compute the correct {@link OwnerAndPermission} obtained from replicating source owner and permissions and applying
   * the {@link PreserveAttributes} rules for fromPath and every ancestor up to but excluding toPath.
   * Unlike the resolveReplicatedOwnerAndPermissionsRecursively() method, this method utilizes permissionMap as a cache to minimize the number of calls to HDFS.
   * It is recommended to use this method when recursively calculating permissions for numerous files that share the same ancestor.
   *
   * @return A list of the computed {@link OwnerAndPermission}s starting from fromPath, up to but excluding toPath.
   * @throws IOException if toPath is not an ancestor of fromPath.
   */
  public static List<OwnerAndPermission> resolveReplicatedOwnerAndPermissionsRecursivelyWithCache(FileSystem sourceFs, Path fromPath,
      Path toPath, CopyConfiguration copyConfiguration, Cache<String, OwnerAndPermission> permissionMap)
      throws IOException, ExecutionException {
    if (!PathUtils.isAncestor(toPath, fromPath)) {
      throw new IOException(String.format("toPath %s must be an ancestor of fromPath %s.", toPath, fromPath));
    }
    List<OwnerAndPermission> ownerAndPermissions = Lists.newArrayList();
    Path currentPath = fromPath;
    while (currentPath.getParent() != null && PathUtils.isAncestor(toPath, currentPath.getParent())) {
      // Effectively-final copy required for capture by the cache-loader lambda.
      Path finalCurrentPath = currentPath;
      ownerAndPermissions.add(permissionMap.get(finalCurrentPath.toString(), () -> resolveReplicatedOwnerAndPermission(sourceFs,
          finalCurrentPath, copyConfiguration)));
      currentPath = currentPath.getParent();
    }
    return ownerAndPermissions;
  }
  /**
   * Resolves replicated {@link OwnerAndPermission}s for {@code fromPath} (which must be a directory) and all of its
   * ancestors up to {@code toPath}, keyed by the scheme/authority-stripped path string.
   * @throws IOException if {@code toPath} is not an ancestor of {@code fromPath}.
   */
  public static Map<String, OwnerAndPermission> resolveReplicatedAncestorOwnerAndPermissionsRecursively(FileSystem sourceFs, Path fromPath,
      Path toPath, CopyConfiguration copyConfiguration) throws IOException {
    Preconditions.checkArgument(sourceFs.getFileStatus(fromPath).isDirectory(), "Source path must be a directory.");
    Map<String, OwnerAndPermission> ownerAndPermissions = Maps.newHashMap();
    // We only pass directories to this method anyways. Those directories themselves need permissions set.
    Path currentOriginPath = fromPath;
    Path currentTargetPath = toPath;
    if (!PathUtils.isAncestor(currentTargetPath, currentOriginPath)) {
      throw new IOException(String.format("currentTargetPath %s must be an ancestor of currentOriginPath %s.", currentTargetPath, currentOriginPath));
    }
    while (PathUtils.isAncestor(currentTargetPath, currentOriginPath.getParent())) {
      ownerAndPermissions.put(PathUtils.getPathWithoutSchemeAndAuthority(currentOriginPath).toString(), resolveReplicatedOwnerAndPermission(sourceFs, currentOriginPath, copyConfiguration));
      currentOriginPath = currentOriginPath.getParent();
    }
    // Walk through the parents and preserve the permissions from Origin -> Target as we go in lockstep.
    while (currentOriginPath != null && currentTargetPath != null
        && currentOriginPath.getName().equals(currentTargetPath.getName())) {
      ownerAndPermissions.put(PathUtils.getPathWithoutSchemeAndAuthority(currentOriginPath).toString(), resolveReplicatedOwnerAndPermission(sourceFs, currentOriginPath, copyConfiguration));
      currentOriginPath = currentOriginPath.getParent();
      currentTargetPath = currentTargetPath.getParent();
    }
    return ownerAndPermissions;
  }
  /** Reads the ACL entries of {@code path} from {@code srcFs}. */
  private static List<AclEntry> getAclEntries(FileSystem srcFs, Path path) throws IOException {
    AclStatus aclStatus = srcFs.getAclStatus(path);
    return aclStatus.getEntries();
  }
  /** @return the origin {@link FileStatus} of this file. */
  @Override
  public FileStatus getFileStatus() {
    return this.origin;
  }
  /**
   * @return desired block size for destination file.
   */
  public long getBlockSize(FileSystem targetFs) {
    return getPreserve().preserve(PreserveAttributes.Option.BLOCK_SIZE) ?
        getOrigin().getBlockSize() : targetFs.getDefaultBlockSize(this.destination);
  }
  /**
   * @return desired replication for destination file.
   */
  public short getReplication(FileSystem targetFs) {
    return getPreserve().preserve(PreserveAttributes.Option.REPLICATION) ?
        getOrigin().getReplication() : targetFs.getDefaultReplication(this.destination);
  }
  /**
   * Generates a replicable guid to uniquely identify the origin of this {@link CopyableFile}.
   * @return a guid uniquely identifying the origin file.
   */
  @Override
  public Guid guid() throws IOException {
    // Guid derives from modification time + length + path, so it changes whenever the origin file changes.
    StringBuilder uniqueString = new StringBuilder();
    uniqueString.append(getFileStatus().getModificationTime());
    uniqueString.append(getFileStatus().getLen());
    uniqueString.append(getFileStatus().getPath());
    return Guid.fromStrings(uniqueString.toString());
  }
  /** Human-readable description of the copy this entity represents; "preserve" marks attributes kept from origin. */
  @Override
  public String explain() {
    String owner = this.destinationOwnerAndPermission != null && this.destinationOwnerAndPermission.getOwner() != null
        ? this.destinationOwnerAndPermission.getOwner() : "preserve";
    String group = this.destinationOwnerAndPermission != null && this.destinationOwnerAndPermission.getGroup() != null
        ? this.destinationOwnerAndPermission.getGroup() : "preserve";
    String permissions =
        this.destinationOwnerAndPermission != null && this.destinationOwnerAndPermission.getFsPermission() != null
            ? this.destinationOwnerAndPermission.getFsPermission().toString() : "preserve";
    return String.format("Copy file %s to %s with owner %s, group %s, permission %s.", this.origin.getPath(),
        this.destination, owner, group, permissions);
  }
}
| 2,573 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/TimestampBasedCopyableDataset.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy;
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.util.Collection;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.joda.time.DateTime;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.collect.Lists;
import lombok.AllArgsConstructor;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.data.management.policy.SelectAfterTimeBasedPolicy;
import org.apache.gobblin.data.management.policy.VersionSelectionPolicy;
import org.apache.gobblin.data.management.version.TimestampedDatasetVersion;
import org.apache.gobblin.data.management.version.finder.DateTimeDatasetVersionFinder;
import org.apache.gobblin.data.management.version.finder.VersionFinder;
import org.apache.gobblin.dataset.FileSystemDataset;
import org.apache.gobblin.util.ExecutorsUtils;
import org.apache.gobblin.util.PathUtils;
import org.apache.gobblin.util.executors.ScalingThreadPoolExecutor;
import org.apache.gobblin.util.filters.HiddenFilter;
/**
 * Implementation of {@link CopyableDataset}. It depends on {@link #datasetVersionFinder} to find dataset versions and
 * {@link #versionSelectionPolicy} to select the dataset versions for copying. Both are pluggable: the finder must
 * implement {@link VersionFinder}&lt;{@link TimestampedDatasetVersion}&gt; and the policy must implement
 * {@link VersionSelectionPolicy}&lt;{@link TimestampedDatasetVersion}&gt;.
*
* The default logic for determining if a file is {@link CopyableFile} is based on the file existence and modified_timestamp at source and target {@link
* FileSystem}s.
*/
@Slf4j
@Getter
@SuppressWarnings("unchecked")
public class TimestampBasedCopyableDataset implements CopyableDataset, FileSystemDataset {
private final Path datasetRoot;
private final VersionFinder<TimestampedDatasetVersion> datasetVersionFinder;
private final VersionSelectionPolicy<TimestampedDatasetVersion> versionSelectionPolicy;
private final ExecutorService executor;
private final FileSystem srcFs;
public static final String DATASET_VERSION_FINDER = "timestamp.based.copyable.dataset.version.finder";
public static final String DEFAULT_DATASET_VERSION_FINDER = DateTimeDatasetVersionFinder.class.getName();
public static final String COPY_POLICY = "timestamp.based.copyable.dataset.copy.policy";
public static final String DEFAULT_COPY_POLICY = SelectAfterTimeBasedPolicy.class.getName();
public static final String THREADPOOL_SIZE_TO_GET_COPYABLE_FILES = "threadpool.size.to.get.copyable.files";
public static final String DEFAULT_THREADPOOL_SIZE_TO_GET_COPYABLE_FILES = "20";
public TimestampBasedCopyableDataset(FileSystem fs, Properties props, Path datasetRoot) {
this.srcFs = fs;
this.datasetRoot = datasetRoot;
try {
Class<?> copyPolicyClass = Class.forName(props.getProperty(COPY_POLICY, DEFAULT_COPY_POLICY));
this.versionSelectionPolicy =
(VersionSelectionPolicy<TimestampedDatasetVersion>) copyPolicyClass.getConstructor(Properties.class)
.newInstance(props);
Class<?> timestampedDatasetVersionFinderClass =
Class.forName(props.getProperty(DATASET_VERSION_FINDER, DEFAULT_DATASET_VERSION_FINDER));
this.datasetVersionFinder =
(VersionFinder<TimestampedDatasetVersion>) timestampedDatasetVersionFinderClass.getConstructor(
FileSystem.class, Properties.class).newInstance(this.srcFs, props);
} catch (ClassNotFoundException | NoSuchMethodException | InstantiationException | IllegalAccessException
| InvocationTargetException exception) {
throw new RuntimeException(exception);
}
this.executor =
ScalingThreadPoolExecutor.newScalingThreadPool(0, Integer.parseInt(props.getProperty(
THREADPOOL_SIZE_TO_GET_COPYABLE_FILES, DEFAULT_THREADPOOL_SIZE_TO_GET_COPYABLE_FILES)), 100, ExecutorsUtils
.newThreadFactory(Optional.of(log), Optional.of(getClass().getSimpleName())));
}
@Override
public Collection<CopyableFile> getCopyableFiles(FileSystem targetFs, CopyConfiguration configuration)
throws IOException {
log.info(String.format("Getting copyable files at root path: %s", this.datasetRoot));
List<TimestampedDatasetVersion> versions = Lists.newArrayList(this.datasetVersionFinder.findDatasetVersions(this));
if (versions.isEmpty()) {
log.warn("No dataset version can be found. Ignoring.");
return Lists.newArrayList();
}
Collection<TimestampedDatasetVersion> copyableVersions = this.versionSelectionPolicy.listSelectedVersions(versions);
ConcurrentLinkedQueue<CopyableFile> copyableFileList = new ConcurrentLinkedQueue<>();
List<Future<?>> futures = Lists.newArrayList();
for (TimestampedDatasetVersion copyableVersion : copyableVersions) {
futures.add(this.executor.submit(this.getCopyableFileGenetator(targetFs, configuration, copyableVersion,
copyableFileList)));
}
try {
for (Future<?> future : futures) {
future.get();
}
} catch (ExecutionException | InterruptedException e) {
throw new IOException("Failed to generate copyable files.", e);
} finally {
ExecutorsUtils.shutdownExecutorService(executor, Optional.of(log));
}
return copyableFileList;
}
@VisibleForTesting
protected CopyableFileGenerator getCopyableFileGenetator(FileSystem targetFs, CopyConfiguration configuration,
TimestampedDatasetVersion copyableVersion, ConcurrentLinkedQueue<CopyableFile> copyableFileList) {
return new CopyableFileGenerator(this.srcFs, targetFs, configuration, this.datasetRoot,
this.getTargetRoot(configuration.getPublishDir()), copyableVersion.getDateTime(), copyableVersion.getPaths(),
copyableFileList, this.copyableFileFilter());
}
/**
* @return {@link PathFilter} to find {@link CopyableFile}.
* Can be overridden.
*/
protected PathFilter copyableFileFilter() {
return new HiddenFilter();
}
/**
* @return the default targetRoot {@link Path}.
*/
protected Path getTargetRoot(Path publishDir) {
return new Path(publishDir, datasetRoot.getName());
}
/**
 * {@link Runnable} that scans a set of source locations and enqueues every file that still needs
 * copying (missing at the target, or modified more recently at the source) as a
 * {@link CopyableFile}. Results are published via the shared thread-safe {@code copyableFileList}.
 */
@AllArgsConstructor
protected static class CopyableFileGenerator implements Runnable {
  private final FileSystem srcFs;
  private final FileSystem targetFs;
  private final CopyConfiguration configuration;
  private final Path datasetRoot;
  private final Path targetRoot;
  // Version timestamp stamped onto every generated CopyableFile as origin/upstream time.
  private final DateTime versionDatetime;
  private final Collection<Path> locationsToCopy;
  // Shared output queue; concurrent because multiple generators may run in parallel.
  private final ConcurrentLinkedQueue<CopyableFile> copyableFileList;
  private final PathFilter filter;

  @Override
  public void run() {
    for (Path locationToCopy : locationsToCopy) {
      long timestampFromPath = this.versionDatetime.getMillis();
      try {
        for (FileStatus singleFile : this.srcFs.listStatus(locationToCopy, this.filter)) {
          Path singleFilePath = singleFile.getPath();
          log.debug("Checking if it is a copyable file: " + singleFilePath);
          // Rebase the file's path from the dataset root onto the target root.
          Path relativePath = PathUtils.relativizePath(singleFilePath, datasetRoot);
          Path targetPath = new Path(targetRoot, relativePath);
          if (this.isCopyableFile(singleFile, targetPath)) {
            log.debug("Will create workunit for: " + singleFilePath);
            copyableFileList
                .add(this.generateCopyableFile(singleFile, targetPath, timestampFromPath, locationToCopy));
          }
        }
      } catch (IOException e) {
        // Fix: removed the redundant e.printStackTrace(), which wrote directly to stderr,
        // bypassing the logging framework; the cause attached below already carries the trace.
        throw new RuntimeException("Failed to get copyable files for " + locationToCopy, e);
      }
    }
  }

  /**
   * Builds the {@link CopyableFile} for a single source file, stamping it with the version
   * timestamp and grouping it into a file set named after its (scheme-less) source location.
   */
  @VisibleForTesting
  protected CopyableFile generateCopyableFile(FileStatus singleFile, Path targetPath, long timestampFromPath,
      Path locationToCopy) throws IOException {
    return CopyableFile.fromOriginAndDestination(srcFs, singleFile, targetPath, configuration)
        .originTimestamp(timestampFromPath).upstreamTimestamp(timestampFromPath)
        .fileSet(PathUtils.getPathWithoutSchemeAndAuthority(locationToCopy).toString()).build();
  }

  /***
   * Given a {@link FileStatus} at src FileSystem, decide if it is a {@link CopyableFile}.
   *
   * @return true if the {@link Path} of the given {@link FileStatus} does not exist on target
   * {@link FileSystem}, or it has a newer modification time stamp on source {@link FileSystem}
   * than target {@link FileSystem}.
   */
  private boolean isCopyableFile(FileStatus srcFileStatus, Path targetPath) throws IOException {
    if (!this.targetFs.exists(targetPath)) {
      return true;
    }
    // Copy again only if the source was modified after the existing target copy.
    return srcFileStatus.getModificationTime() > this.targetFs.getFileStatus(targetPath).getModificationTime();
  }
}
/**
 * @return the URN identifying this dataset: the string form of {@link #datasetRoot()}.
 */
@Override
public String datasetURN() {
  return this.datasetRoot().toString();
}
/**
 * @return the root {@link Path} of this dataset.
 */
@Override
public Path datasetRoot() {
  return this.datasetRoot;
}
}
| 2,574 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/CopyableDatasetMetadata.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.ToString;
import com.google.gson.Gson;
import lombok.extern.slf4j.Slf4j;
/**
 * A {@link CopyableDataset} that is used to serialize into state objects. The class exists because custom
 * implementations of {@link CopyableDataset} may contain additional fields that should not be serialized.
 * The class is a data object and does not carry any functionality.
 *
 * NOTE(review): this doc comment was moved above the annotations; a doc comment placed between
 * an annotation and the class declaration is ignored by the javadoc tool.
 */
@Slf4j
@EqualsAndHashCode(callSuper = false)
@ToString
public class CopyableDatasetMetadata {

  /** Captures only the serializable identity (the URN) of the given dataset. */
  public CopyableDatasetMetadata(CopyableDatasetBase copyableDataset) {
    this.datasetURN = copyableDataset.datasetURN();
  }

  // The only serialized attribute: the dataset's unique resource name.
  @Getter
  private final String datasetURN;

  private static final Gson GSON = new Gson();

  /**
   * Serialize an instance of {@link CopyableDatasetMetadata} into a {@link String}.
   *
   * @return serialized string
   */
  public String serialize() {
    return GSON.toJson(this);
  }

  /**
   * Deserializes the serialized {@link CopyableDatasetMetadata} string.
   *
   * @param serialized string
   * @return a new instance of {@link CopyableDatasetMetadata}
   */
  public static CopyableDatasetMetadata deserialize(String serialized) {
    return GSON.fromJson(getSerializedWithNewPackage(serialized), CopyableDatasetMetadata.class);
  }

  /**
   * Converts package name in serialized string to new name.
   * This is temporary change and should get removed after all the states are switched from old to new package name.
   *
   * @param serialized serialized string possibly having old package names
   * @return the input with JSON-quoted "gobblin.data.management." prefixes rewritten to
   *         "org.apache.gobblin.data.management."
   */
  private static String getSerializedWithNewPackage(String serialized) {
    serialized = serialized.replace("\"gobblin.data.management.", "\"org.apache.gobblin.data.management.");
    log.debug("Serialized updated copy entity: " + serialized);
    return serialized;
  }
}
| 2,575 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/CopyConfiguration.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy;
import java.util.Properties;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import org.apache.gobblin.util.PropertiesUtils;
import org.apache.gobblin.util.request_allocation.RequestAllocatorConfig;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.typesafe.config.Config;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.data.management.copy.prioritization.FileSetComparator;
import org.apache.gobblin.util.ClassAliasResolver;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
import org.apache.gobblin.util.request_allocation.ResourcePool;
/**
 * Configuration for Gobblin distcp jobs.
 */
@Data
@AllArgsConstructor
@Builder
public class CopyConfiguration {

  public static final String COPY_PREFIX = "gobblin.copy";
  public static final String PRESERVE_ATTRIBUTES_KEY = COPY_PREFIX + ".preserved.attributes";
  public static final String PRESERVE_MODTIME_FOR_DIR = COPY_PREFIX + ".preserve.mod.time.for.dir";
  //Setting this to be false means we will not overwrite the owner and permission if dir already exist on dst
  //This will only take effect when we use manifest based copy
  public static final String RESYNC_DIR_OWNER_AND_PERMISSION_FOR_MANIFEST_COPY = COPY_PREFIX + ".manifest.resyncDirOwnerAndPermission";
  public static final String DESTINATION_GROUP_KEY = COPY_PREFIX + ".dataset.destination.group";
  public static final String PRIORITIZATION_PREFIX = COPY_PREFIX + ".prioritization";

  /**
   * Include empty directories in the source for copy
   */
  public static final String INCLUDE_EMPTY_DIRECTORIES = COPY_PREFIX + ".includeEmptyDirectories";
  public static final String APPLY_FILTER_TO_DIRECTORIES = COPY_PREFIX + ".applyFilterToDirectories";
  // NOTE(review): unlike every other key here, the '.' separator after COPY_PREFIX is missing,
  // so this key resolves to "gobblin.copyenforce.fileLength.match". Possibly unintentional, but
  // deployed configs may rely on the exact string — confirm before changing the literal.
  public static final String ENFORCE_FILE_LENGTH_MATCH = COPY_PREFIX + "enforce.fileLength.match";
  public static final String DEFAULT_ENFORCE_FILE_LENGTH_MATCH = "true";
  public static final String PRIORITIZER_ALIAS_KEY = PRIORITIZATION_PREFIX + ".prioritizerAlias";
  public static final String MAX_COPY_PREFIX = PRIORITIZATION_PREFIX + ".maxCopy";
  public static final String BINPACKING_MAX_PER_BUCKET_PREFIX = COPY_PREFIX + ".binPacking.maxPerBucket";
  public static final String BUFFER_SIZE = COPY_PREFIX + ".bufferSize";
  public static final String ABORT_ON_SINGLE_DATASET_FAILURE = COPY_PREFIX + ".abortOnSingleDatasetFailure";

  /*
   * Config to store different classes of rejected requests. Possible values are "all","none", or "min" (default).
   */
  public static final String STORE_REJECTED_REQUESTS_KEY = COPY_PREFIX + ".store.rejected.requests";
  public static final String DEFAULT_STORE_REJECTED_REQUESTS =
      RequestAllocatorConfig.StoreRejectedRequestsConfig.MIN.name();

  /**
   * User supplied directory where files should be published. This value is identical for all datasets in the distcp job.
   */
  private final Path publishDir;

  /**
   * Preserve options passed by the user.
   */
  private final PreserveAttributes preserve;

  /**
   * {@link CopyContext} for this job.
   */
  private final CopyContext copyContext;

  // Group to assign at the destination, when DESTINATION_GROUP_KEY is configured.
  private final Optional<String> targetGroup;
  private final FileSystem targetFs;
  // Optional comparator used to order file sets when prioritization is enabled.
  private final Optional<FileSetComparator> prioritizer;
  // Resource pool bounding how much a single run may copy (built from MAX_COPY_PREFIX config).
  private final ResourcePool maxToCopy;
  // One of "all", "none", or "min"; see STORE_REJECTED_REQUESTS_KEY above.
  private final String storeRejectedRequestsSetting;
  private final Config config;
  // Whether failure of a single dataset aborts the whole distcp job.
  private final boolean abortOnSingleDatasetFailure;
  // Whether source and destination file lengths are required to match.
  private final boolean enforceFileLengthMatch;

  /**
   * Hand-written extension of the lombok-generated builder that seeds all builder fields from
   * job {@link Properties}.
   */
  public static class CopyConfigurationBuilder {
    private PreserveAttributes preserve;
    private Optional<String> targetGroup;
    private CopyContext copyContext;
    private Path publishDir;

    /**
     * @param targetFs target {@link FileSystem} the job publishes to
     * @param properties job properties; must contain {@link ConfigurationKeys#DATA_PUBLISHER_FINAL_DIR}
     * @throws IllegalArgumentException if the publish dir property is missing
     * @throws RuntimeException if a configured prioritizer cannot be instantiated
     */
    public CopyConfigurationBuilder(FileSystem targetFs, Properties properties) {
      Preconditions.checkArgument(properties.containsKey(ConfigurationKeys.DATA_PUBLISHER_FINAL_DIR),
          "Missing property " + ConfigurationKeys.DATA_PUBLISHER_FINAL_DIR);
      this.config = ConfigUtils.propertiesToConfig(properties);
      this.targetGroup =
          properties.containsKey(DESTINATION_GROUP_KEY) ? Optional.of(properties.getProperty(DESTINATION_GROUP_KEY))
              : Optional.<String>absent();
      this.preserve = PreserveAttributes.fromMnemonicString(properties.getProperty(PRESERVE_ATTRIBUTES_KEY));
      // Resolve a relative publish dir against the target FS working directory.
      Path publishDirTmp = new Path(properties.getProperty(ConfigurationKeys.DATA_PUBLISHER_FINAL_DIR));
      if (!publishDirTmp.isAbsolute()) {
        publishDirTmp = new Path(targetFs.getWorkingDirectory(), publishDirTmp);
      }
      this.publishDir = publishDirTmp;
      this.copyContext = new CopyContext();
      this.targetFs = targetFs;
      if (properties.containsKey(PRIORITIZER_ALIAS_KEY)) {
        try {
          // Instantiate the prioritizer by alias via reflection, passing the job properties.
          this.prioritizer = Optional.of(GobblinConstructorUtils.<FileSetComparator>invokeLongestConstructor(
              new ClassAliasResolver(FileSetComparator.class)
                  .resolveClass(properties.getProperty(PRIORITIZER_ALIAS_KEY)), properties));
        } catch (ReflectiveOperationException roe) {
          throw new RuntimeException("Could not build prioritizer.", roe);
        }
      } else {
        this.prioritizer = Optional.absent();
      }
      this.maxToCopy = CopyResourcePool.fromConfig(ConfigUtils.getConfigOrEmpty(this.config, MAX_COPY_PREFIX));
      this.enforceFileLengthMatch = PropertiesUtils.getPropAsBoolean(properties, ENFORCE_FILE_LENGTH_MATCH, DEFAULT_ENFORCE_FILE_LENGTH_MATCH);
      this.storeRejectedRequestsSetting =
          properties.getProperty(CopyConfiguration.STORE_REJECTED_REQUESTS_KEY, DEFAULT_STORE_REJECTED_REQUESTS);
      // Default to continuing past per-dataset failures unless explicitly configured otherwise.
      this.abortOnSingleDatasetFailure = false;
      if (this.config.hasPath(ABORT_ON_SINGLE_DATASET_FAILURE)) {
        this.abortOnSingleDatasetFailure = this.config.getBoolean(ABORT_ON_SINGLE_DATASET_FAILURE);
      }
    }
  }

  /**
   * @return a builder pre-populated from the given target file system and job properties.
   */
  public static CopyConfigurationBuilder builder(FileSystem targetFs, Properties properties) {
    return new CopyConfigurationBuilder(targetFs, properties);
  }

  /**
   * @return the sub-{@link Config} scoped under {@link #PRIORITIZATION_PREFIX}, possibly empty.
   */
  public Config getPrioritizationConfig() {
    return ConfigUtils.getConfigOrEmpty(this.config, PRIORITIZATION_PREFIX);
  }
}
| 2,576 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/FileAwareInputStream.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy;
import java.io.InputStream;
import lombok.Builder;
import lombok.NonNull;
import lombok.Getter;
import com.google.common.base.Optional;
import org.apache.gobblin.data.management.copy.splitter.DistcpFileSplitter;
/**
 * Couples an {@link InputStream} with the {@link CopyableFile} it belongs to. The
 * {@link CopyableFile} carries the file metadata (permissions, destination path, etc.) that
 * writers and converters need, while the optional {@link DistcpFileSplitter.Split} marks this
 * stream as one block of the file; when present, the stream is expected to already be positioned
 * at the start of that split.
 */
@Getter
public class FileAwareInputStream {

  private CopyableFile file;
  private InputStream inputStream;
  private Optional<DistcpFileSplitter.Split> split = Optional.absent();

  @Builder(toBuilder = true)
  public FileAwareInputStream(@NonNull CopyableFile file, @NonNull InputStream inputStream,
      Optional<DistcpFileSplitter.Split> split) {
    this.file = file;
    this.inputStream = inputStream;
    // Normalize a null split (e.g. left unset by the builder) to Optional.absent().
    if (split != null) {
      this.split = split;
    } else {
      this.split = Optional.<DistcpFileSplitter.Split>absent();
    }
  }

  @Override
  public String toString() {
    return this.file.toString();
  }
}
| 2,577 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/PreserveAttributes.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy;
import lombok.AllArgsConstructor;
import lombok.EqualsAndHashCode;
import java.util.regex.Pattern;
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
/**
 * Configuration for preserving attributes in Gobblin distcp jobs.
 */
@AllArgsConstructor
@EqualsAndHashCode
public class PreserveAttributes {

  public static final Pattern ATTRIBUTES_REGEXP = getAllowedRegexp();

  /**
   * Attributes that can be preserved. Each option is identified by a one-character mnemonic token.
   */
  public static enum Option {
    REPLICATION('r'),
    BLOCK_SIZE('b'),
    OWNER('u'),
    GROUP('g'),
    PERMISSION('p'),
    VERSION('v'),
    ACLS('a'),
    MOD_TIME('t');

    private final char token;

    Option(char token) {
      this.token = token;
    }
  }

  // Bit set of preserved options, indexed by Option.ordinal().
  private int options;

  /**
   * @return true if attribute should be preserved.
   */
  public boolean preserve(Option option) {
    return (this.options & (1 << option.ordinal())) != 0;
  }

  /**
   * Converts this instance into a mnemonic string understood by {@link #fromMnemonicString},
   * containing one token character per preserved option.
   *
   * @return a String that can be converted back to an equivalent {@link PreserveAttributes}
   *         using {@link PreserveAttributes#fromMnemonicString}
   */
  public String toMnemonicString() {
    StringBuilder mnemonic = new StringBuilder();
    for (Option option : Option.values()) {
      if (preserve(option)) {
        mnemonic.append(option.token);
      }
    }
    return mnemonic.toString();
  }

  /**
   * Parse {@link PreserveAttributes} from a string of token characters:
   * r -> replication, b -> block size, u -> owner, g -> group, p -> permissions,
   * v -> version, a -> ACLs, t -> modification time.
   *
   * @param s mnemonic string; null or empty means preserve nothing
   * @return Parsed {@link PreserveAttributes}
   * @throws IllegalArgumentException if {@code s} contains characters outside the token set
   */
  public static PreserveAttributes fromMnemonicString(String s) {
    if (Strings.isNullOrEmpty(s)) {
      return new PreserveAttributes(0);
    }
    s = s.toLowerCase();
    Preconditions.checkArgument(ATTRIBUTES_REGEXP.matcher(s).matches(), "Invalid %s string %s, must be of the form %s.",
        PreserveAttributes.class.getSimpleName(), s, ATTRIBUTES_REGEXP.pattern());
    int bits = 0;
    for (Option option : Option.values()) {
      if (s.indexOf(option.token) >= 0) {
        bits |= 1 << option.ordinal();
      }
    }
    return new PreserveAttributes(bits);
  }

  /** Builds the regexp character class accepting exactly the defined option tokens. */
  private static Pattern getAllowedRegexp() {
    StringBuilder characterClass = new StringBuilder("[");
    for (Option option : Option.values()) {
      characterClass.append(option.token);
    }
    characterClass.append("]*");
    return Pattern.compile(characterClass.toString());
  }
}
| 2,578 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/SchemaCheckedCopySource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy;
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.data.management.copy.extractor.FileAwareInputStreamExtractorWithCheckSchema;
import org.apache.gobblin.source.extractor.Extractor;
/**
 * Used instead of {@link CopySource} for {@link FileSystem}s that need to check the schema
 * during the process of data deployment.
 */
public class SchemaCheckedCopySource extends CopySource {

  /**
   * @return an extractor that additionally validates the file's schema
   *         ({@link FileAwareInputStreamExtractorWithCheckSchema}) instead of the plain
   *         stream extractor used by {@link CopySource}.
   */
  @Override
  protected Extractor<String, FileAwareInputStream> extractorForCopyableFile(FileSystem fs, CopyableFile cf,
      WorkUnitState state)
      throws IOException {
    return new FileAwareInputStreamExtractorWithCheckSchema(fs, cf, state);
  }
}
| 2,579 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/predicates/RootDirectoryModtimeSkipPredicate.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.predicates;
import java.io.IOException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import com.google.common.base.Optional;
import com.google.common.base.Predicate;
import org.apache.gobblin.data.management.copy.hive.HiveCopyEntityHelper;
import org.apache.gobblin.data.management.copy.hive.HivePartitionFileSet;
import org.apache.gobblin.util.PathUtils;
import javax.annotation.Nullable;
import lombok.AllArgsConstructor;
import lombok.extern.slf4j.Slf4j;
/**
 * Use with {@link HiveCopyEntityHelper#FAST_PARTITION_SKIP_PREDICATE}.
 * Skips partitions whose data location exists in the target, and such that the target location has a newer mod time
 * than the source location.
 */
@AllArgsConstructor
@Slf4j
public class RootDirectoryModtimeSkipPredicate implements Predicate<HivePartitionFileSet> {

  // Helper providing source/target file systems, path mapping, and the shared copy context cache.
  private final HiveCopyEntityHelper helper;

  /**
   * @return true if copying {@code input} should be skipped (target copy already newer than the
   *         source), false if the copy should proceed.
   */
  @Override
  public boolean apply(@Nullable HivePartitionFileSet input) {
    if (input == null) {
      // Nothing to copy; treat as skippable.
      return true;
    }
    if (!input.getExistingTargetPartition().isPresent()) {
      // No partition registered at the target yet, so the copy must happen.
      return false;
    }
    try {
      if (PathUtils.isGlob(input.getPartition().getDataLocation())) {
        // A globbed location maps to many paths; a single mod-time comparison is meaningless.
        log.error(String.format("%s cannot be applied to globbed location %s. Will not skip.",
            this.getClass().getSimpleName(), input.getPartition().getDataLocation()));
        return false;
      }
      // NOTE(review): the qualified path is built via getTargetFileSystem() but probed via
      // getTargetFs(); confirm both refer to the same file system.
      Path targetPath = this.helper.getTargetFileSystem().makeQualified(this.helper.getTargetPathHelper().getTargetPath(
          input.getPartition().getDataLocation(), this.helper.getTargetFs(), Optional.of(input.getPartition()), false));
      Optional<FileStatus> targetFileStatus =
          this.helper.getConfiguration().getCopyContext().getFileStatus(this.helper.getTargetFs(), targetPath);
      if (!targetFileStatus.isPresent()) {
        // Target data directory missing: cannot skip.
        return false;
      }
      Optional<FileStatus> sourceFileStatus = this.helper.getConfiguration().getCopyContext()
          .getFileStatus(this.helper.getDataset().getFs(), input.getPartition().getDataLocation());
      if (!sourceFileStatus.isPresent()) {
        throw new RuntimeException(
            String.format("Source path %s does not exist!", input.getPartition().getDataLocation()));
      }
      // Skip only when the target copy is strictly newer than the source.
      return targetFileStatus.get().getModificationTime() > sourceFileStatus.get().getModificationTime();
    } catch (IOException ioe) {
      throw new RuntimeException(ioe);
    }
  }
}
| 2,580 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/predicates/AlwaysTrue.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.predicates;
import com.google.common.base.Predicate;
import javax.annotation.Nullable;
/**
 * Predicate that is always true.
 *
 * @param <T> type of the (ignored) input
 */
public class AlwaysTrue<T> implements Predicate<T> {
  @Override
  public boolean apply(@Nullable T input) {
    // Accept every input, including null.
    return true;
  }
}
| 2,581 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/predicates/ExistingPartitionSkipPredicate.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.predicates;
import com.google.common.base.Predicate;
import javax.annotation.Nullable;
import org.apache.gobblin.data.management.copy.hive.HivePartitionFileSet;
/**
 * Skip predicate that treats a partition as already done when it is registered in the
 * destination Hive table.
 */
public class ExistingPartitionSkipPredicate implements Predicate<HivePartitionFileSet> {

  /**
   * @return true (skip) when {@code input} is null or a partition already exists at the target;
   *         false (copy) otherwise.
   */
  @Override
  public boolean apply(@Nullable HivePartitionFileSet input) {
    return input == null || input.getExistingTargetPartition().isPresent();
  }
}
| 2,582 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/predicates/RegistrationTimeSkipPredicate.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.predicates;
import java.io.IOException;
import org.apache.hadoop.fs.FileStatus;
import com.google.common.base.Optional;
import com.google.common.base.Predicate;
import org.apache.gobblin.data.management.copy.hive.HiveCopyEntityHelper;
import org.apache.gobblin.data.management.copy.hive.HiveDataset;
import org.apache.gobblin.data.management.copy.hive.HivePartitionFileSet;
import org.apache.gobblin.util.PathUtils;
import javax.annotation.Nullable;
import lombok.AllArgsConstructor;
import lombok.extern.slf4j.Slf4j;
/**
 * A fast partition skip predicate that reads the parameter {@link HiveDataset#REGISTRATION_GENERATION_TIME_MILLIS} from
 * the Hive partition and skips the copy if it is newer than the modification time of the location of the source
 * partition.
 */
@AllArgsConstructor
@Slf4j
public class RegistrationTimeSkipPredicate implements Predicate<HivePartitionFileSet> {

  // Helper providing the source file system and the shared copy context cache.
  private final HiveCopyEntityHelper helper;

  /**
   * @return true if copying {@code input} should be skipped, false if it should proceed.
   */
  @Override
  public boolean apply(@Nullable HivePartitionFileSet input) {
    if (input == null) {
      // Nothing to copy; treat as skippable.
      return true;
    }
    if (input.getExistingTargetPartition() == null) {
      // The Optional field itself must be populated before this predicate runs; a null here
      // means the pipeline invoked us out of order.
      throw new RuntimeException("Existing target partition has not been computed! This is an error in the code.");
    }
    if (PathUtils.isGlob(input.getPartition().getDataLocation())) {
      // A globbed location maps to many paths; a single comparison is meaningless.
      log.error(String.format("%s cannot be applied to globbed location %s. Will not skip.", this.getClass().getSimpleName(),
          input.getPartition().getDataLocation()));
      return false;
    }
    if (!input.getExistingTargetPartition().isPresent()) {
      // Target partition doesn't exist, don't skip
      return false;
    }
    if (!input.getExistingTargetPartition().get().getParameters().containsKey(HiveDataset.REGISTRATION_GENERATION_TIME_MILLIS)) {
      // Target partition is not annotated with registration time, don't skip
      return false;
    }
    try {
      long oldRegistrationTime = Long.parseLong(
          input.getExistingTargetPartition().get().getParameters().get(HiveDataset.REGISTRATION_GENERATION_TIME_MILLIS));
      Optional<FileStatus> sourceFileStatus = this.helper.getConfiguration().getCopyContext().
          getFileStatus(this.helper.getDataset().getFs(), input.getPartition().getDataLocation());
      if (!sourceFileStatus.isPresent()) {
        throw new RuntimeException(String.format("Source path %s does not exist!", input.getPartition().getDataLocation()));
      }
      // If the registration time of the partition is higher than the modification time, skip
      return oldRegistrationTime > sourceFileStatus.get().getModificationTime();
    } catch (NumberFormatException nfe) {
      // Cannot parse registration generation time, don't skip
      log.warn(String.format("Cannot parse %s in partition %s. Will not skip.",
          HiveDataset.REGISTRATION_GENERATION_TIME_MILLIS, input.getPartition().getCompleteName()));
      return false;
    } catch (IOException ioe) {
      throw new RuntimeException(ioe);
    }
  }
}
| 2,583 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/predicates/NonPartitionTableRegistrationTimeSkipPredicate.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.predicates;
import java.io.IOException;
import org.apache.hadoop.fs.FileStatus;
import com.google.common.base.Optional;
import com.google.common.base.Predicate;
import org.apache.gobblin.data.management.copy.hive.HiveCopyEntityHelper;
import org.apache.gobblin.data.management.copy.hive.HiveDataset;
import org.apache.gobblin.data.management.copy.hive.HiveLocationDescriptor;
import javax.annotation.Nullable;
import lombok.AllArgsConstructor;
import lombok.extern.slf4j.Slf4j;
/**
 * A fast table skip predicate that reads the parameter {@link HiveDataset#REGISTRATION_GENERATION_TIME_MILLIS} from
 * the Hive non partition table and skips the copy if it is newer than the modification time of the location of the source
 * table.
 */
// NOTE(review): @AllArgsConstructor has no fields to populate here; the generated constructor
// is equivalent to the implicit no-arg one. Harmless, but consider removing.
@AllArgsConstructor
@Slf4j
public class NonPartitionTableRegistrationTimeSkipPredicate implements Predicate<HiveCopyEntityHelper> {

  /**
   * @return true if the table copy driven by {@code helper} should be skipped, false otherwise.
   */
  @Override
  public boolean apply(@Nullable HiveCopyEntityHelper helper) {
    if (helper == null) {
      // Nothing to copy; treat as skippable.
      return true;
    }
    if (!helper.getExistingTargetTable().isPresent()) {
      // Target table doesn't exist, don't skip
      return false;
    }
    if (!helper.getExistingTargetTable().get().getParameters().containsKey(HiveDataset.REGISTRATION_GENERATION_TIME_MILLIS)) {
      // Target table is not annotated with registration time, don't skip
      return false;
    }
    try {
      long oldRegistrationTime = Long.parseLong(
          helper.getExistingTargetTable().get().getParameters().get(HiveDataset.REGISTRATION_GENERATION_TIME_MILLIS));
      HiveLocationDescriptor sourceHiveDescriptor = HiveLocationDescriptor.forTable(helper.getDataset().getTable(),
          helper.getDataset().getFs(), helper.getDataset().getProperties());
      Optional<FileStatus> sourceFileStatus = helper.getConfiguration().getCopyContext().
          getFileStatus(helper.getDataset().getFs(), sourceHiveDescriptor.getLocation());
      if (!sourceFileStatus.isPresent()) {
        throw new RuntimeException(String.format("Source path %s does not exist!", sourceHiveDescriptor.getLocation()));
      }
      // If the registration time of the table is higher than the modification time, skip
      return oldRegistrationTime > sourceFileStatus.get().getModificationTime();
    } catch (NumberFormatException nfe) {
      // Cannot parse registration generation time, don't skip
      log.warn(String.format("Cannot parse %s in table %s. Will not skip.",
          HiveDataset.REGISTRATION_GENERATION_TIME_MILLIS, helper.getDataset().getTable().getDbName()+"."+helper.getDataset().getTable().getTableName()));
      return false;
    } catch (IOException ioe) {
      throw new RuntimeException(ioe);
    }
  }
}
| 2,584 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/predicates/TableTypeFilter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.predicates;
import java.util.Properties;
import org.apache.hadoop.hive.metastore.api.Table;
import com.google.common.base.Predicate;
import javax.annotation.Nullable;
/**
 * A predicate to check if a hive {@link Table} is of a certain type in {@link TABLE_TYPE}
 *
 * <p> Example usage: {@link org.apache.gobblin.data.management.copy.hive.HiveDatasetFinder#TABLE_FILTER}
 */
public class TableTypeFilter implements Predicate<Table> {

  /** Property key selecting which table type this filter accepts. */
  public static final String FILTER_TYPE = "tableTypeFilter.type";

  private enum TABLE_TYPE {
    SNAPSHOT,
    PARTITIONED
  }

  private final TABLE_TYPE tableType;

  /**
   * @param props job properties; {@value #FILTER_TYPE} selects the type (defaults to SNAPSHOT).
   */
  public TableTypeFilter(Properties props) {
    String configuredType = props.getProperty(FILTER_TYPE, TABLE_TYPE.SNAPSHOT.name());
    this.tableType = TABLE_TYPE.valueOf(configuredType.toUpperCase());
  }

  /**
   * @return true when the (non-null) table's partitioning matches the configured type.
   */
  @Override
  public boolean apply(@Nullable Table input) {
    if (input == null) {
      return false;
    }
    boolean hasPartitionKeys = input.getPartitionKeys() != null && !input.getPartitionKeys().isEmpty();
    switch (tableType) {
      case SNAPSHOT:
        return !hasPartitionKeys;
      case PARTITIONED:
        return hasPartitionKeys;
      default:
        throw new UnsupportedOperationException("Invalid type: " + tableType);
    }
  }
}
| 2,585 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/prioritization/FileSetComparator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.prioritization;
import java.util.Comparator;
import com.typesafe.config.Config;
import org.apache.gobblin.data.management.copy.CopyEntity;
import org.apache.gobblin.data.management.partition.FileSet;
/**
 * An alias for a {@link Comparator} of {@link FileSet} for type safety.
 *
 * <p>Implementations impose an ordering on {@link FileSet}s of {@link CopyEntity}s,
 * presumably to prioritize which file sets are copied first — confirm against callers.
 */
public interface FileSetComparator extends Comparator<FileSet<CopyEntity>> {
/** Factory that builds a {@link FileSetComparator} from a {@link Config}. */
public interface Factory {
/**
 * @param config configuration describing the comparator to build.
 * @return a new {@link FileSetComparator}.
 */
FileSetComparator create(Config config);
}
}
| 2,586 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/prioritization/PrioritizedCopyableDataset.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.prioritization;
import java.io.IOException;
import java.util.Comparator;
import java.util.Iterator;
import org.apache.hadoop.fs.FileSystem;
import org.apache.gobblin.data.management.copy.CopyConfiguration;
import org.apache.gobblin.data.management.copy.CopyEntity;
import org.apache.gobblin.data.management.copy.IterableCopyableDataset;
import org.apache.gobblin.data.management.partition.FileSet;
import org.apache.gobblin.util.request_allocation.PushDownRequestor;
/**
 * An {@link IterableCopyableDataset} where {@link org.apache.gobblin.data.management.partition.FileSet}s generated by
 * {@link #getFileSetIterator(FileSystem, CopyConfiguration)} are guaranteed to be ordered by the prioritizer at
 * {@link CopyConfiguration#getPrioritizer()}.
 */
public interface PrioritizedCopyableDataset extends IterableCopyableDataset {
/**
 * Get an iterator of {@link FileSet}s of {@link CopyEntity}, each one representing a group of files to copy and
 * associated actions, sorted by the input {@link Comparator},
 * and with the provided {@link org.apache.gobblin.util.request_allocation.Requestor} injected (this is important for pushdown).
 *
 * @param targetFs target {@link org.apache.hadoop.fs.FileSystem} where copied files will be placed.
 * @param configuration {@link org.apache.gobblin.data.management.copy.CopyConfiguration} for this job. See {@link org.apache.gobblin.data.management.copy.CopyConfiguration}.
 * @param prioritizer output {@link FileSet}s must be sorted by this {@link Comparator}.
 * @param requestor the {@link org.apache.gobblin.util.request_allocation.Requestor} object that all {@link FileSet}s should have.
 * @return an {@link Iterator} of {@link FileSet}s sorted by {@code prioritizer}.
 * @throws IOException
 */
public Iterator<FileSet<CopyEntity>> getFileSetIterator(FileSystem targetFs, CopyConfiguration configuration,
Comparator<FileSet<CopyEntity>> prioritizer, PushDownRequestor<FileSet<CopyEntity>> requestor) throws IOException;
}
| 2,587 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/converter/DecryptConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.converter;
import java.io.IOException;
import java.io.InputStream;
import java.util.List;
import java.util.Map;
import com.google.common.base.Function;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import javax.annotation.Nullable;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.codec.StreamCodec;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.Converter;
import org.apache.gobblin.crypto.EncryptionConfigParser;
import org.apache.gobblin.crypto.EncryptionFactory;
import org.apache.gobblin.data.management.copy.FileAwareInputStream;
import org.apache.gobblin.password.PasswordManager;
/**
 * {@link Converter} that decrypts an {@link InputStream}.
 *
 * The encryption algorithm will be selected by looking at the {@code converter.encrypt.algorithm} configuration key
 * in the job. See {@link EncryptionConfigParser} for more details.
 *
 * If no algorithm is specified then the converter will default to gpg for backwards compatibility
 * reasons.
 */
@Slf4j
public class DecryptConverter extends DistcpConverter {

  // Algorithm used when no encryption config is present (backwards compatibility).
  private static final String DEFAULT_ALGORITHM = "gpg";
  private static final String DECRYPTION_PASSPHRASE_KEY = "converter.decrypt.passphrase";

  private StreamCodec decryptor;

  /**
   * Builds the {@link StreamCodec} used for decryption, falling back to GPG with the
   * passphrase from {@value #DECRYPTION_PASSPHRASE_KEY} when no standard config exists.
   */
  @Override
  public Converter<String, String, FileAwareInputStream, FileAwareInputStream> init(WorkUnitState workUnit) {
    Map<String, Object> encryptionConfig =
        EncryptionConfigParser.getConfigForBranch(EncryptionConfigParser.EntityType.CONVERTER_ENCRYPT, workUnit);
    if (encryptionConfig == null) {
      // Legacy path: no standard encryption config, so assume GPG and read the passphrase
      // from the dedicated key.
      log.info("Assuming GPG decryption since no other config parameters are set");
      Preconditions.checkArgument(workUnit.contains(DECRYPTION_PASSPHRASE_KEY),
          "Passphrase is required while using DecryptConverter. Please specify " + DECRYPTION_PASSPHRASE_KEY);
      String passphrase =
          PasswordManager.getInstance(workUnit).readPassword(workUnit.getProp(DECRYPTION_PASSPHRASE_KEY));
      encryptionConfig = Maps.newHashMap();
      encryptionConfig.put(EncryptionConfigParser.ENCRYPTION_ALGORITHM_KEY, DEFAULT_ALGORITHM);
      encryptionConfig.put(EncryptionConfigParser.ENCRYPTION_KEYSTORE_PASSWORD_KEY, passphrase);
    }
    this.decryptor = EncryptionFactory.buildStreamCryptoProvider(encryptionConfig);
    return super.init(workUnit);
  }

  /** Wraps the input in the decryptor's decoding stream; IO failures become unchecked. */
  @Override
  public Function<InputStream, InputStream> inputStreamTransformation() {
    return input -> {
      try {
        return this.decryptor.decodeInputStream(input);
      } catch (IOException exception) {
        throw new RuntimeException(exception);
      }
    };
  }

  /** Strips the decryptor's tag (e.g. ".gpg") from the destination file name. */
  @Override
  public List<String> extensionsToRemove() {
    return Lists.newArrayList("." + this.decryptor.getTag());
  }
}
| 2,588 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/converter/UnGzipConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.converter;
import java.io.IOException;
import java.io.InputStream;
import java.util.List;
import java.util.zip.GZIPInputStream;
import com.google.common.base.Function;
import com.google.common.collect.Lists;
import javax.annotation.Nullable;
import org.apache.gobblin.converter.Converter;
import org.apache.gobblin.util.io.StreamUtils;
/**
 * A {@link Converter} that converts an archived {@link InputStream} to a tar {@link InputStream}. Wraps the given
 * archived (.tar.gz or .tgz) {@link InputStream} with {@link GZIPInputStream} Use this converter if the
 * {@link InputStream} from source is compressed.
 * It also converts the destination file name by removing tar and gz extensions.
 */
public class UnGzipConverter extends DistcpConverter {

  private static final String TAR_EXTENSION = ".tar";
  private static final String GZIP_EXTENSION = ".gzip";
  private static final String GZ_EXTENSION = ".gz";
  private static final String TGZ_EXTENSION = ".tgz";

  /** Decompresses the stream on the fly by wrapping it in a {@link GZIPInputStream}. */
  @Override
  public Function<InputStream, InputStream> inputStreamTransformation() {
    return input -> {
      try {
        return StreamUtils.convertStream(new GZIPInputStream(input));
      } catch (IOException ioe) {
        throw new RuntimeException(ioe);
      }
    };
  }

  /** Extensions stripped from the destination file name, applied in order. */
  @Override
  public List<String> extensionsToRemove() {
    return Lists.newArrayList(TAR_EXTENSION, GZIP_EXTENSION, GZ_EXTENSION, TGZ_EXTENSION);
  }
}
| 2,589 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/converter/DistcpConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.converter;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;
import com.google.common.base.Function;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.Converter;
import org.apache.gobblin.converter.DataConversionException;
import org.apache.gobblin.converter.SchemaConversionException;
import org.apache.gobblin.converter.SingleRecordIterable;
import org.apache.gobblin.data.management.copy.CopyableFile;
import org.apache.gobblin.data.management.copy.FileAwareInputStream;
import org.apache.gobblin.util.PathUtils;
/**
 * Abstract class for distcp {@link Converter}. Simply transforms the {@link InputStream} in the
 * {@link FileAwareInputStream}, and possibly modifies extensions of the output file.
 */
public abstract class DistcpConverter extends Converter<String, String, FileAwareInputStream, FileAwareInputStream> {

  @Override
  public Converter<String, String, FileAwareInputStream, FileAwareInputStream> init(WorkUnitState workUnit) {
    return super.init(workUnit);
  }

  /**
   * @return A {@link Function} that transforms the {@link InputStream} in the {@link FileAwareInputStream}.
   */
  public abstract Function<InputStream, InputStream> inputStreamTransformation();

  /**
   * @return A list of extensions that should be removed from the output file name, which will be applied in order.
   *         For example, if this method returns ["gz", "tar", "tgz"] then "file.tar.gz" becomes "file".
   */
  public List<String> extensionsToRemove() {
    return new ArrayList<>();
  }

  /**
   * TODO: actually use this method and add the extensions.
   * @return A list of extensions that should be added to the output file name, to be applied in order.
   *         For example, if this method returns ["tar", "gz"] then "file" becomes "file.tar.gz".
   */
  public List<String> extensionsToAdd() {
    return new ArrayList<>();
  }

  /**
   * Identity schema converter.
   */
  @Override
  public String convertSchema(String inputSchema, WorkUnitState workUnit) throws SchemaConversionException {
    return inputSchema;
  }

  /**
   * Applies the transformation in {@link #inputStreamTransformation} to the {@link InputStream} in the
   * {@link FileAwareInputStream}.
   *
   * @throws DataConversionException if the stream transformation throws a {@link RuntimeException}.
   */
  @Override
  public Iterable<FileAwareInputStream> convertRecord(String outputSchema, FileAwareInputStream fileAwareInputStream,
      WorkUnitState workUnit) throws DataConversionException {
    modifyExtensionAtDestination(fileAwareInputStream.getFile());
    try {
      InputStream newInputStream = inputStreamTransformation().apply(fileAwareInputStream.getInputStream());
      return new SingleRecordIterable<>(fileAwareInputStream.toBuilder().inputStream(newInputStream).build());
    } catch (RuntimeException re) {
      throw new DataConversionException(re);
    }
  }

  /** Strips the extensions in {@link #extensionsToRemove()} from the destination file name, in order. */
  private void modifyExtensionAtDestination(CopyableFile file) {
    // Call extensionsToRemove() only once: subclasses typically allocate a fresh list per call.
    List<String> extensions = extensionsToRemove();
    if (!extensions.isEmpty()) {
      file.setDestination(PathUtils.removeExtension(file.getDestination(), extensions.toArray(new String[0])));
    }
  }
}
| 2,590 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/publisher/DeletingCopyDataPublisher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.publisher;
import java.io.IOException;
import java.net.URI;
import java.util.Collection;
import lombok.extern.slf4j.Slf4j;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.configuration.WorkUnitState.WorkingState;
import org.apache.gobblin.data.management.copy.CopySource;
import org.apache.gobblin.data.management.copy.CopyEntity;
import org.apache.gobblin.data.management.copy.CopyableFile;
import org.apache.gobblin.data.management.copy.ReadyCopyableFileFilter;
import org.apache.gobblin.util.HadoopUtils;
import org.apache.gobblin.util.PathUtils;
/**
 * A {@link CopyDataPublisher} that deletes files on the source fileSystem for all the {@link WorkUnitState}s that are
 * successfully committed/published
 */
@Slf4j
public class DeletingCopyDataPublisher extends CopyDataPublisher {

  private final FileSystem sourceFs;

  public DeletingCopyDataPublisher(State state) throws IOException {
    super(state);
    Configuration conf = HadoopUtils.getConfFromState(state);
    // Fall back to the local file system when no source FS URI is configured.
    String uri = state.getProp(ConfigurationKeys.SOURCE_FILEBASED_FS_URI, ConfigurationKeys.LOCAL_FS_URI);
    this.sourceFs = FileSystem.get(URI.create(uri), conf);
  }

  /**
   * Publishes as {@link CopyDataPublisher} does, then deletes the source files of every work unit
   * that reached {@link WorkingState#COMMITTED}. Deletion failures are logged but do not fail the publish.
   */
  @Override
  public void publishData(Collection<? extends WorkUnitState> states) throws IOException {
    super.publishData(states);
    for (WorkUnitState state : states) {
      if (state.getWorkingState() == WorkingState.COMMITTED) {
        try {
          deleteFilesOnSource(state);
        } catch (Throwable t) {
          // Best effort: a failed source deletion should not fail an already-published dataset.
          // Parameterized logging; the trailing Throwable is logged with its stack trace.
          log.warn("Failed to delete one or more files on source in {}",
              state.getProp(CopySource.SERIALIZED_COPYABLE_FILE), t);
        }
      } else {
        log.info("Not deleting files {} on source fileSystem as the workunit state is {}.",
            state.getProp(CopySource.SERIALIZED_COPYABLE_FILE), state.getWorkingState());
      }
    }
  }

  /** Deletes the origin path of the work unit's {@link CopyableFile} plus its ".ready" marker file. */
  private void deleteFilesOnSource(WorkUnitState state) throws IOException {
    CopyEntity copyEntity = CopySource.deserializeCopyEntity(state);
    if (copyEntity instanceof CopyableFile) {
      HadoopUtils.deletePath(this.sourceFs, ((CopyableFile) copyEntity).getOrigin().getPath(), true);
      HadoopUtils.deletePath(this.sourceFs, PathUtils.addExtension(((CopyableFile) copyEntity).getOrigin().getPath(),
          ReadyCopyableFileFilter.READY_EXTENSION), true);
    }
  }
}
| 2,591 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/publisher/CopyEventSubmitterHelper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.publisher;
import com.google.common.collect.ImmutableMap;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.data.management.copy.CopyEntity;
import org.apache.gobblin.data.management.copy.CopyableFile;
import org.apache.gobblin.metrics.event.EventSubmitter;
import org.apache.gobblin.metrics.event.sla.SlaEventKeys;
import org.apache.gobblin.metrics.event.sla.SlaEventSubmitter;
import java.util.Map;
/**
 * Helper class to submit events for copy job
 */
public class CopyEventSubmitterHelper {

  private static final String DATASET_PUBLISHED_EVENT_NAME = "DatasetPublished";
  private static final String DATASET_PUBLISHED_FAILED_EVENT_NAME = "DatasetPublishFailed";
  private static final String FILE_PUBLISHED_EVENT_NAME = "FilePublished";
  public static final String DATASET_ROOT_METADATA_NAME = "datasetUrn";
  public static final String DATASET_TARGET_ROOT_METADATA_NAME = "datasetTargetRoot";
  public static final String TARGET_PATH = "TargetPath";
  public static final String SOURCE_PATH = "SourcePath";
  public static final String SIZE_IN_BYTES = "SizeInBytes";

  /** Utility class with only static members: not meant to be instantiated. */
  private CopyEventSubmitterHelper() {
  }

  /** Submits an SLA event recording that a dataset (and partition) was successfully published. */
  static void submitSuccessfulDatasetPublish(EventSubmitter eventSubmitter,
      CopyEntity.DatasetAndPartition datasetAndPartition, String originTimestamp, String upstreamTimestamp,
      Map<String, String> additionalMetadata) {
    SlaEventSubmitter.builder().eventSubmitter(eventSubmitter).eventName(DATASET_PUBLISHED_EVENT_NAME)
        .datasetUrn(datasetAndPartition.getDataset().getDatasetURN()).partition(datasetAndPartition.getPartition())
        .originTimestamp(originTimestamp).upstreamTimestamp(upstreamTimestamp).additionalMetadata(additionalMetadata)
        .build().submit();
  }

  /** Submits an event recording that publishing a dataset (and partition) failed. */
  static void submitFailedDatasetPublish(EventSubmitter eventSubmitter,
      CopyEntity.DatasetAndPartition datasetAndPartition) {
    eventSubmitter.submit(DATASET_PUBLISHED_FAILED_EVENT_NAME,
        ImmutableMap.of(DATASET_ROOT_METADATA_NAME, datasetAndPartition.getDataset().getDatasetURN()));
  }

  /**
   * Submit an sla event when a {@link org.apache.gobblin.data.management.copy.CopyableFile} is published. The <code>workUnitState</code> passed should have the
   * required {@link SlaEventKeys} set.
   *
   * @see SlaEventSubmitter#submit()
   *
   * @param eventSubmitter
   * @param workUnitState
   */
  static void submitSuccessfulFilePublish(EventSubmitter eventSubmitter, CopyableFile cf, WorkUnitState workUnitState) {
    String datasetUrn = workUnitState.getProp(SlaEventKeys.DATASET_URN_KEY);
    String partition = workUnitState.getProp(SlaEventKeys.PARTITION_KEY);
    String completenessPercentage = workUnitState.getProp(SlaEventKeys.COMPLETENESS_PERCENTAGE_KEY);
    String recordCount = workUnitState.getProp(SlaEventKeys.RECORD_COUNT_KEY);
    String previousPublishTimestamp = workUnitState.getProp(SlaEventKeys.PREVIOUS_PUBLISH_TS_IN_MILLI_SECS_KEY);
    String dedupeStatus = workUnitState.getProp(SlaEventKeys.DEDUPE_STATUS_KEY);
    SlaEventSubmitter.builder().eventSubmitter(eventSubmitter).eventName(FILE_PUBLISHED_EVENT_NAME)
        .datasetUrn(datasetUrn).partition(partition).originTimestamp(Long.toString(cf.getOriginTimestamp()))
        .upstreamTimestamp(Long.toString(cf.getUpstreamTimestamp())).completenessPercentage(completenessPercentage)
        .recordCount(recordCount).previousPublishTimestamp(previousPublishTimestamp).dedupeStatus(dedupeStatus)
        .additionalMetadata(TARGET_PATH, cf.getDestination().toString())
        .additionalMetadata(SOURCE_PATH, cf.getOrigin().getPath().toString())
        .additionalMetadata(SIZE_IN_BYTES, Long.toString(cf.getOrigin().getLen())).build().submit();
  }
}
| 2,592 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/publisher/CopyDataPublisher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.publisher;
import java.io.IOException;
import java.net.URI;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.data.management.copy.CopyConfiguration;
import org.apache.gobblin.data.management.copy.PreserveAttributes;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.filesystem.DataFileVersionStrategy;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Multimap;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.commit.CommitStep;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.configuration.WorkUnitState.WorkingState;
import org.apache.gobblin.data.management.copy.CopyEntity;
import org.apache.gobblin.data.management.copy.CopySource;
import org.apache.gobblin.data.management.copy.CopyableDataset;
import org.apache.gobblin.data.management.copy.CopyableDatasetMetadata;
import org.apache.gobblin.data.management.copy.CopyableFile;
import org.apache.gobblin.data.management.copy.entities.CommitStepCopyEntity;
import org.apache.gobblin.data.management.copy.entities.PostPublishStep;
import org.apache.gobblin.data.management.copy.entities.PrePublishStep;
import org.apache.gobblin.data.management.copy.recovery.RecoveryHelper;
import org.apache.gobblin.data.management.copy.splitter.DistcpFileSplitter;
import org.apache.gobblin.data.management.copy.writer.FileAwareInputStreamDataWriter;
import org.apache.gobblin.data.management.copy.writer.FileAwareInputStreamDataWriterBuilder;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.metrics.GobblinMetrics;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.event.EventSubmitter;
import org.apache.gobblin.metrics.event.lineage.LineageInfo;
import org.apache.gobblin.metrics.event.sla.SlaEventKeys;
import org.apache.gobblin.publisher.DataPublisher;
import org.apache.gobblin.publisher.UnpublishedHandling;
import org.apache.gobblin.util.HadoopUtils;
import org.apache.gobblin.util.WriterUtils;
/**
* A {@link DataPublisher} to {@link org.apache.gobblin.data.management.copy.CopyEntity}s from task output to final destination.
*/
@Slf4j
public class CopyDataPublisher extends DataPublisher implements UnpublishedHandling {
@Override
public boolean isThreadSafe() {
// Thread safety is only claimed for this exact class; subclasses may add
// non-thread-safe state, so they are conservatively reported as not thread safe.
return this.getClass() == CopyDataPublisher.class;
}
// Source file system the copied data originated from (from HadoopUtils.getSourceFileSystem).
private final FileSystem srcFs;
// Destination/writer file system (from ConfigurationKeys.WRITER_FILE_SYSTEM_URI).
private final FileSystem fs;
// Emits dataset/file publish success and failure events.
protected final EventSubmitter eventSubmitter;
// Persists unpublished work-unit output so later executions can recover it.
protected final RecoveryHelper recoveryHelper;
// Lineage info extracted from the state's broker, if one was available.
protected final Optional<LineageInfo> lineageInfo;
// Data file version strategies for source and destination file systems —
// presumably used to carry file versions across the publish; confirm against publishFileSet.
protected final DataFileVersionStrategy srcDataFileVersionStrategy;
protected final DataFileVersionStrategy dstDataFileVersionStrategy;
// Whether to preserve directory modification times (defaults to true).
protected final boolean preserveDirModTime;
// Whether to re-sync directory owner/permissions for manifest-based copies (defaults to false).
protected final boolean resyncDirOwnerAndPermission;
/**
 * Build a new {@link CopyDataPublisher} from {@link State}. The constructor expects the following to be set in the
 * {@link State},
 * <ul>
 * <li>{@link ConfigurationKeys#WRITER_OUTPUT_DIR}
 * <li>{@link ConfigurationKeys#WRITER_FILE_SYSTEM_URI}
 * </ul>
 *
 * @throws IOException if the destination or source {@link FileSystem} cannot be obtained.
 */
public CopyDataPublisher(State state) throws IOException {
super(state);
// Extract LineageInfo from state; the broker source depends on the concrete State type.
if (state instanceof SourceState) {
lineageInfo = LineageInfo.getLineageInfo(((SourceState) state).getBroker());
} else if (state instanceof WorkUnitState) {
lineageInfo = LineageInfo.getLineageInfo(((WorkUnitState) state).getTaskBrokerNullable());
} else {
// No broker available for other State types; lineage reporting is disabled.
lineageInfo = Optional.absent();
}
// Destination FS defaults to the local file system when no writer FS URI is configured.
String uri = this.state.getProp(ConfigurationKeys.WRITER_FILE_SYSTEM_URI, ConfigurationKeys.LOCAL_FS_URI);
this.fs = FileSystem.get(URI.create(uri), WriterUtils.getFsConfiguration(state));
MetricContext metricContext =
Instrumented.getMetricContext(state, CopyDataPublisher.class, GobblinMetrics.getCustomTagsFromState(state));
this.eventSubmitter = new EventSubmitter.Builder(metricContext, "org.apache.gobblin.copy.CopyDataPublisher").build();
this.recoveryHelper = new RecoveryHelper(this.fs, state);
// Clean up stale persisted recovery files from previous runs.
this.recoveryHelper.purgeOldPersistedFile();
Config config = ConfigUtils.propertiesToConfig(state.getProperties());
this.srcFs = HadoopUtils.getSourceFileSystem(state);
this.srcDataFileVersionStrategy = DataFileVersionStrategy.instantiateDataFileVersionStrategy(this.srcFs, config);
this.dstDataFileVersionStrategy = DataFileVersionStrategy.instantiateDataFileVersionStrategy(this.fs, config);
// Default to be true to preserve the original behavior
this.preserveDirModTime = state.getPropAsBoolean(CopyConfiguration.PRESERVE_MODTIME_FOR_DIR, true);
this.resyncDirOwnerAndPermission = state.getPropAsBoolean(CopyConfiguration.RESYNC_DIR_OWNER_AND_PERMISSION_FOR_MANIFEST_COPY, false);
}
/**
 * Publishes every file set (dataset + partition) found in {@code states}. A failure to publish one
 * file set is recorded and the remaining file sets are still attempted; an {@link IOException} is
 * thrown at the end if any file set failed.
 */
@Override
public void publishData(Collection<? extends WorkUnitState> states) throws IOException {
  // Group the work unit states by the file set they belong to, so each file set can be
  // published (and its states marked COMMITTED) as a unit.
  Multimap<CopyEntity.DatasetAndPartition, WorkUnitState> statesByFileSet = groupByFileSet(states);

  boolean publishedAll = true;
  for (CopyEntity.DatasetAndPartition fileSet : statesByFileSet.keySet()) {
    try {
      publishFileSet(fileSet, statesByFileSet.get(fileSet));
    } catch (Throwable e) {
      // Record the failure event and keep publishing the remaining file sets.
      CopyEventSubmitterHelper.submitFailedDatasetPublish(this.eventSubmitter, fileSet);
      log.error("Failed to publish " + fileSet.getDataset().getDatasetURN(), e);
      publishedAll = false;
    }
  }

  if (!publishedAll) {
    throw new IOException("Not all datasets published successfully");
  }
}
@Override
public void handleUnpublishedWorkUnits(Collection<? extends WorkUnitState> states) throws IOException {
  // Move successfully-copied-but-unpublished outputs into the persist dir so later runs can reuse them.
  int persistedCount = persistFailedFileSet(states);
  log.info(String.format("Successfully persisted %d work units.", persistedCount));
}
/**
 * Groups the given {@link WorkUnitState}s by the {@link CopyableDataset} (and partition) they belong
 * to. The grouping lets the publisher mark every {@link WorkUnitState} of a dataset
 * {@link WorkUnitState.WorkingState#COMMITTED} once that dataset publishes successfully.
 */
private static Multimap<CopyEntity.DatasetAndPartition, WorkUnitState> groupByFileSet(
    Collection<? extends WorkUnitState> states) {
  Multimap<CopyEntity.DatasetAndPartition, WorkUnitState> grouped = ArrayListMultimap.create();
  for (WorkUnitState wus : states) {
    CopyEntity entity = CopySource.deserializeCopyEntity(wus);
    CopyableDatasetMetadata datasetMetadata =
        CopyableDatasetMetadata.deserialize(wus.getProp(CopySource.SERIALIZED_COPYABLE_DATASET));
    grouped.put(entity.getDatasetAndPartition(datasetMetadata), wus);
  }
  return grouped;
}
/**
 * Unlike attributes preserved in the writer (ownership, group, etc.), some attributes must be
 * (re-)applied at publish time — modification time and the data-file version (usually tied to
 * mod time) — because the publish (rename) step is liable to change them.
 */
private void preserveFileAttrInPublisher(CopyableFile copyableFile) throws IOException {
  if (copyableFile.getFileStatus().isDirectory() && this.resyncDirOwnerAndPermission) {
    FileStatus destStatus = this.fs.getFileStatus(copyableFile.getDestination());
    // The user specifically asked to copy dir metadata, so group and permissions are re-applied
    // on the destination even when the directory already existed there.
    FileAwareInputStreamDataWriter.safeSetPathPermission(this.fs, destStatus,
        copyableFile.getDestinationOwnerAndPermission());
  }

  // Mod time / version preservation applies to regular files always, and to directories only when
  // directory mod-time preservation is enabled.
  if (!preserveDirModTime && !copyableFile.getFileStatus().isFile()) {
    return;
  }

  if (copyableFile.getPreserve().preserve(PreserveAttributes.Option.MOD_TIME)) {
    // -1 access time leaves it untouched; only the modification time is restored.
    fs.setTimes(copyableFile.getDestination(), copyableFile.getOriginTimestamp(), -1);
  }

  DataFileVersionStrategy sourceStrategy = this.srcDataFileVersionStrategy;
  DataFileVersionStrategy destStrategy = this.dstDataFileVersionStrategy;
  // A version strategy carried by the copyableFile itself takes precedence over the job-level one.
  if (copyableFile.getDataFileVersionStrategy() != null) {
    Config strategyConfig = ConfigFactory.parseMap(ImmutableMap.of(
        DataFileVersionStrategy.DATA_FILE_VERSION_STRATEGY_KEY, copyableFile.getDataFileVersionStrategy()));
    sourceStrategy = DataFileVersionStrategy.instantiateDataFileVersionStrategy(this.srcFs, strategyConfig);
    destStrategy = DataFileVersionStrategy.instantiateDataFileVersionStrategy(this.fs, strategyConfig);
  }
  if (copyableFile.getPreserve().preserve(PreserveAttributes.Option.VERSION)
      && destStrategy.hasCharacteristic(DataFileVersionStrategy.Characteristic.SETTABLE)) {
    destStrategy.setVersion(copyableFile.getDestination(),
        sourceStrategy.getVersion(copyableFile.getOrigin().getPath()));
  }
}
/**
 * Publish data for a {@link CopyableDataset}.
 *
 * Sequence (order matters): merge split work units, run pre-publish commit steps, move the
 * fileset's writer output to its final location, re-apply publish-time file attributes, run
 * post-publish commit steps, then emit an SLA event for the dataset.
 */
private void publishFileSet(CopyEntity.DatasetAndPartition datasetAndPartition,
Collection<WorkUnitState> datasetWorkUnitStates) throws IOException {
Map<String, String> additionalMetadata = Maps.newHashMap();
Preconditions.checkArgument(!datasetWorkUnitStates.isEmpty(),
"publishFileSet received an empty collection work units. This is an error in code.");
// Any work unit of the fileset carries the serialized dataset metadata; sample the first one.
WorkUnitState sampledWorkUnitState = datasetWorkUnitStates.iterator().next();
CopyableDatasetMetadata metadata = CopyableDatasetMetadata.deserialize(
sampledWorkUnitState.getProp(CopySource.SERIALIZED_COPYABLE_DATASET));
// If not already done, ensure that the writer outputs have the job ID appended to avoid corruption from previous runs
FileAwareInputStreamDataWriterBuilder.setJobSpecificOutputPaths(sampledWorkUnitState);
Path writerOutputDir = new Path(sampledWorkUnitState.getProp(ConfigurationKeys.WRITER_OUTPUT_DIR));
Path datasetWriterOutputPath = new Path(writerOutputDir, datasetAndPartition.identifier());
log.info("Merging all split work units.");
// Re-assemble files that were split into block-aligned parts before publishing them.
DistcpFileSplitter.mergeAllSplitWorkUnits(this.fs, datasetWorkUnitStates);
log.info(String.format("[%s] Publishing fileSet from %s for dataset %s", datasetAndPartition.identifier(),
datasetWriterOutputPath, metadata.getDatasetURN()));
List<CommitStep> prePublish = getCommitSequence(datasetWorkUnitStates, PrePublishStep.class);
List<CommitStep> postPublish = getCommitSequence(datasetWorkUnitStates, PostPublishStep.class);
log.info(String.format("[%s] Found %d prePublish steps and %d postPublish steps.", datasetAndPartition.identifier(),
prePublish.size(), postPublish.size()));
executeCommitSequence(prePublish);
if (hasCopyableFiles(datasetWorkUnitStates)) {
// Targets are always absolute, so we start moving from root (will skip any existing directories).
HadoopUtils.renameRecursively(this.fs, datasetWriterOutputPath, new Path("/"));
} else {
log.info(String.format("[%s] No copyable files in dataset. Proceeding to postpublish steps.", datasetAndPartition.identifier()));
}
// The staging dir is no longer needed once everything has been renamed into place.
this.fs.delete(datasetWriterOutputPath, true);
// Track the earliest origin/upstream timestamps across the fileset for the SLA event below.
long datasetOriginTimestamp = Long.MAX_VALUE;
long datasetUpstreamTimestamp = Long.MAX_VALUE;
Optional<String> fileSetRoot = Optional.absent();
for (WorkUnitState wus : datasetWorkUnitStates) {
if (wus.getWorkingState() == WorkingState.SUCCESSFUL) {
wus.setWorkingState(WorkUnitState.WorkingState.COMMITTED);
}
CopyEntity copyEntity = CopySource.deserializeCopyEntity(wus);
if (copyEntity instanceof CopyableFile) {
CopyableFile copyableFile = (CopyableFile) copyEntity;
// Mod time / version must be re-applied after the publish rename may have clobbered them.
preserveFileAttrInPublisher(copyableFile);
if (wus.getWorkingState() == WorkingState.COMMITTED) {
CopyEventSubmitterHelper.submitSuccessfulFilePublish(this.eventSubmitter, copyableFile, wus);
// Dataset Output path is injected in each copyableFile.
// This can be optimized by having a dataset level equivalent class for copyable entities
// and storing dataset related information, e.g. dataset output path, there.
// Currently datasetOutputPath is only present for hive datasets.
if (!fileSetRoot.isPresent() && copyableFile.getDatasetOutputPath() != null) {
fileSetRoot = Optional.of(copyableFile.getDatasetOutputPath());
}
if (lineageInfo.isPresent()) {
lineageInfo.get().putDestination(copyableFile.getDestinationData(), 0, wus);
}
}
if (datasetOriginTimestamp > copyableFile.getOriginTimestamp()) {
datasetOriginTimestamp = copyableFile.getOriginTimestamp();
}
if (datasetUpstreamTimestamp > copyableFile.getUpstreamTimestamp()) {
datasetUpstreamTimestamp = copyableFile.getUpstreamTimestamp();
}
}
}
// execute post publish commit steps after preserving file attributes, because some post publish step,
// e.g. SetPermissionCommitStep needs to set permissions
executeCommitSequence(postPublish);
// if there are no valid values for datasetOriginTimestamp and datasetUpstreamTimestamp, use
// something more readable
if (Long.MAX_VALUE == datasetOriginTimestamp) {
datasetOriginTimestamp = 0;
}
if (Long.MAX_VALUE == datasetUpstreamTimestamp) {
datasetUpstreamTimestamp = 0;
}
additionalMetadata.put(SlaEventKeys.SOURCE_URI, this.state.getProp(SlaEventKeys.SOURCE_URI));
additionalMetadata.put(SlaEventKeys.DESTINATION_URI, this.state.getProp(SlaEventKeys.DESTINATION_URI));
additionalMetadata.put(SlaEventKeys.DATASET_OUTPUT_PATH, fileSetRoot.or("Unknown"));
CopyEventSubmitterHelper.submitSuccessfulDatasetPublish(this.eventSubmitter, datasetAndPartition,
Long.toString(datasetOriginTimestamp), Long.toString(datasetUpstreamTimestamp), additionalMetadata);
}
/** @return true iff at least one work unit in the collection carries a {@link CopyableFile} entity. */
private static boolean hasCopyableFiles(Collection<WorkUnitState> workUnits) throws IOException {
  for (WorkUnitState workUnitState : workUnits) {
    Class<?> entityClass = CopySource.getCopyEntityClass(workUnitState);
    if (CopyableFile.class.isAssignableFrom(entityClass)) {
      return true;
    }
  }
  return false;
}
/**
 * Extracts from the given work units all {@link CommitStepCopyEntity}s whose entity class is
 * assignable to {@code baseClass} (e.g. {@code PrePublishStep} or {@code PostPublishStep}) and
 * returns their {@link CommitStep}s ordered by ascending priority.
 *
 * @param workUnits work units possibly containing serialized commit-step entities
 * @param baseClass the commit-step entity class to select
 * @return the selected {@link CommitStep}s, lowest priority first
 * @throws IOException if a copy entity cannot be deserialized
 */
private static List<CommitStep> getCommitSequence(Collection<WorkUnitState> workUnits, Class<?> baseClass)
    throws IOException {
  List<CommitStepCopyEntity> steps = Lists.newArrayList();
  for (WorkUnitState wus : workUnits) {
    if (baseClass.isAssignableFrom(CopySource.getCopyEntityClass(wus))) {
      steps.add((CommitStepCopyEntity) CopySource.deserializeCopyEntity(wus));
    }
  }
  // Lower priority values run first. comparingInt replaces the original verbose anonymous
  // Comparator; semantics are identical to Integer.compare on getPriority().
  steps.sort(Comparator.comparingInt(CommitStepCopyEntity::getPriority));
  List<CommitStep> sequence = Lists.newArrayList();
  for (CommitStepCopyEntity entity : steps) {
    sequence.add(entity.getStep());
  }
  return sequence;
}
/** Runs each {@link CommitStep} in list order; the first failing step aborts the sequence. */
private static void executeCommitSequence(List<CommitStep> steps) throws IOException {
  for (int i = 0; i < steps.size(); i++) {
    steps.get(i).execute();
  }
}
/**
 * Persists the output of every SUCCESSFUL-but-unpublished {@link CopyableFile} work unit through
 * the {@link RecoveryHelper}, so a future run can reuse the already-copied data.
 *
 * @return the number of files successfully persisted
 */
private int persistFailedFileSet(Collection<? extends WorkUnitState> workUnitStates) throws IOException {
  int persisted = 0;
  for (WorkUnitState workUnitState : workUnitStates) {
    if (workUnitState.getWorkingState() != WorkingState.SUCCESSFUL) {
      continue;
    }
    CopyEntity copyEntity = CopySource.deserializeCopyEntity(workUnitState);
    if (!(copyEntity instanceof CopyableFile)) {
      continue;
    }
    CopyableFile copyableFile = (CopyableFile) copyEntity;
    Path outputDir = FileAwareInputStreamDataWriter.getOutputDir(workUnitState);
    CopyableDatasetMetadata datasetMetadata = CopySource.deserializeCopyableDataset(workUnitState);
    Path outputPath = FileAwareInputStreamDataWriter.getOutputFilePath(copyableFile, outputDir,
        copyableFile.getDatasetAndPartition(datasetMetadata));
    if (this.recoveryHelper.persistFile(workUnitState, copyableFile, outputPath)) {
      persisted++;
    }
  }
  return persisted;
}
// Metadata publication is a no-op for this publisher; all publication happens in publishData().
@Override
public void publishMetadata(Collection<? extends WorkUnitState> states) throws IOException {}
// No publisher-owned resources to release here.
@Override
public void close() throws IOException {}
// Nothing to initialize; all setup is done in the constructor.
@Override
public void initialize() throws IOException {}
}
| 2,593 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/recovery/RecoveryHelper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.recovery;
import lombok.extern.slf4j.Slf4j;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.UserGroupInformation;
import com.google.common.base.Optional;
import com.google.common.base.Predicate;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.data.management.copy.CopySource;
import org.apache.gobblin.data.management.copy.CopyEntity;
import org.apache.gobblin.data.management.copy.CopyableFile;
import org.apache.gobblin.util.guid.Guid;
/**
 * Helper class for distcp work unit recovery.
 *
 * <p>Copied-but-unpublished files are moved ("persisted") into a per-user directory, keyed by a
 * guid derived from the work unit, so that a later run can locate and reuse them instead of
 * re-copying.</p>
 */
@Slf4j
public class RecoveryHelper {

  public static final String PERSIST_DIR_KEY = "distcp.persist.dir";
  public static final String PERSIST_RETENTION_KEY = "distcp.persist.retention.hours";
  public static final int DEFAULT_PERSIST_RETENTION = 24;

  private final FileSystem fs;
  private final Optional<Path> persistDir;
  private final int retentionHours;

  public RecoveryHelper(FileSystem fs, State state) throws IOException {
    this.fs = fs;
    this.persistDir = getPersistDir(state);
    this.retentionHours = state.getPropAsInt(PERSIST_RETENTION_KEY, DEFAULT_PERSIST_RETENTION);
  }

  /**
   * Get the persist directory for this job.
   * @param state {@link State} containing job information.
   * @return A {@link Path} used as persist directory for this job. Note this path is user-specific for security reasons.
   * @throws java.io.IOException
   */
  public static Optional<Path> getPersistDir(State state) throws IOException {
    if (!state.contains(PERSIST_DIR_KEY)) {
      return Optional.absent();
    }
    String shortUserName = UserGroupInformation.getCurrentUser().getShortUserName();
    return Optional.of(new Path(state.getProp(PERSIST_DIR_KEY), shortUserName));
  }

  /**
   * Moves a copied path into a persistent location managed by gobblin-distcp. This method is used when an already
   * copied file cannot be successfully published. In future runs, instead of re-copying the file, distcp will use the
   * persisted file.
   *
   * @param state {@link State} containing job information.
   * @param file {@link org.apache.gobblin.data.management.copy.CopyEntity} from which input {@link Path} originated.
   * @param path {@link Path} to persist.
   * @return true if persist was successful.
   * @throws IOException
   */
  public boolean persistFile(State state, CopyableFile file, Path path) throws IOException {
    if (!this.persistDir.isPresent()) {
      return false;
    }
    String guid = computeGuid(state, file);
    Path guidPath = new Path(this.persistDir.get(), guid);
    if (!this.fs.exists(guidPath)) {
      // Owner: full access; group: read-only; others: none.
      this.fs.mkdirs(guidPath, new FsPermission(FsAction.ALL, FsAction.READ, FsAction.NONE));
    }
    // Budget 250 bytes for the full name, leaving room for the guid prefix.
    String shortName = shortenPathName(file.getOrigin().getPath(), 250 - guid.length());
    Path targetPath = new Path(guidPath, shortName);
    log.info(String.format("Persisting file %s with guid %s to location %s.", path, guid, targetPath));
    if (!this.fs.rename(path, targetPath)) {
      return false;
    }
    // Refresh mod time so retention cleanup counts from persist time rather than copy time.
    this.fs.setTimes(targetPath, System.currentTimeMillis(), -1);
    return true;
  }

  /**
   * Searches the persist directory to find {@link Path}s matching the input {@link org.apache.gobblin.data.management.copy.CopyEntity}.
   * @param state {@link State} containing job information.
   * @param file {@link org.apache.gobblin.data.management.copy.CopyEntity} for which persisted {@link Path}s should be found.
   * @param filter {@link com.google.common.base.Predicate} used to filter found paths.
   * @return Optionally, a {@link Path} in the {@link FileSystem} that is the desired copy of the {@link org.apache.gobblin.data.management.copy.CopyEntity}.
   * @throws IOException
   */
  public Optional<FileStatus> findPersistedFile(State state, CopyEntity file, Predicate<FileStatus> filter)
      throws IOException {
    if (!this.persistDir.isPresent() || !this.fs.exists(this.persistDir.get())) {
      return Optional.absent();
    }
    Path guidPath = new Path(this.persistDir.get(), computeGuid(state, file));
    FileStatus[] candidates;
    try {
      candidates = this.fs.listStatus(guidPath);
    } catch (FileNotFoundException fnfe) {
      // No directory for this guid means nothing was persisted for it.
      return Optional.absent();
    }
    for (FileStatus candidate : candidates) {
      if (filter.apply(candidate)) {
        return Optional.of(candidate);
      }
    }
    return Optional.absent();
  }

  /**
   * Delete all persisted files older than the number of hours set by {@link #PERSIST_RETENTION_KEY}.
   * @throws IOException
   */
  public void purgeOldPersistedFile() throws IOException {
    if (!this.persistDir.isPresent() || !this.fs.exists(this.persistDir.get())) {
      log.info("No persist directory to clean.");
      return;
    }
    // Anything last modified strictly before this cutoff is expired.
    long cutoffMillis = System.currentTimeMillis() - TimeUnit.HOURS.toMillis(this.retentionHours);
    for (FileStatus fileStatus : this.fs.listStatus(this.persistDir.get())) {
      if (fileStatus.getModificationTime() < cutoffMillis) {
        if (!this.fs.delete(fileStatus.getPath(), true)) {
          log.warn("Failed to delete path " + fileStatus.getPath());
        }
      }
    }
  }

  /**
   * Shorten an absolute path into a sanitized String of length at most bytes. This is useful for including a summary
   * of an absolute path in a file name.
   *
   * <p>
   * For example: shortenPathName("/user/gobblin/foo/bar/myFile.txt", 25) will be shortened to "_user_gobbl..._myFile.txt".
   * </p>
   *
   * @param path absolute {@link Path} to shorten.
   * @param bytes max number of UTF8 bytes that output string can use (note that,
   *              for now, it is assumed that each character uses exactly one byte).
   * @return a shortened, sanitized String of length at most bytes.
   */
  static String shortenPathName(Path path, int bytes) {
    String sanitized = path.toUri().getPath().replace("/", "_");
    if (sanitized.length() <= bytes) {
      return sanitized;
    }
    // Keep equally-sized head and tail, joined by "..." (3 chars).
    int half = (bytes - 3) / 2;
    String head = sanitized.substring(0, half);
    String tail = sanitized.substring(sanitized.length() - half);
    return head + "..." + tail;
  }

  // Combines the file's own guid with the work unit guid stored in the state; both are required.
  private static String computeGuid(State state, CopyEntity file) throws IOException {
    Optional<Guid> workUnitGuid = CopySource.getWorkUnitGuid(state);
    if (!workUnitGuid.isPresent()) {
      throw new IOException("State does not contain a guid.");
    }
    return Guid.combine(file.guid(), workUnitGuid.get()).toString();
  }
}
| 2,594 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/splitter/DistcpFileSplitter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.splitter;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Set;
import lombok.Data;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.math3.util.ArithmeticUtils;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import com.google.common.base.Optional;
import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.ListMultimap;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.google.gson.Gson;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.IdentityConverter;
import org.apache.gobblin.data.management.copy.CopyableFile;
import org.apache.gobblin.data.management.copy.CopyConfiguration;
import org.apache.gobblin.data.management.copy.CopyEntity;
import org.apache.gobblin.data.management.copy.CopySource;
import org.apache.gobblin.data.management.copy.writer.FileAwareInputStreamDataWriter;
import org.apache.gobblin.data.management.copy.writer.FileAwareInputStreamDataWriterBuilder;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.util.guid.Guid;
/**
 * Helper class for splitting files for distcp. The property flag gobblin.copy.split.enabled should be used to enable
 * splitting of files (which is disabled by default). Splitting should only be used if the distcp job uses only the
 * IdentityConverter and should not be used for distcp jobs that require decryption/ungzipping.
 */
@Slf4j
public class DistcpFileSplitter {

  public static final String SPLIT_ENABLED = CopyConfiguration.COPY_PREFIX + ".split.enabled";
  public static final String MAX_SPLIT_SIZE_KEY = CopyConfiguration.COPY_PREFIX + ".file.max.split.size";
  public static final long DEFAULT_MAX_SPLIT_SIZE = Long.MAX_VALUE;
  // Only filesystems known to implement concat() can re-assemble split files.
  public static final Set<String> KNOWN_SCHEMES_SUPPORTING_CONCAT = Sets.newHashSet("hdfs", "adl");

  /**
   * A split for a distcp file. Represents a section of a file; split should be aligned to block boundaries.
   */
  @Data
  public static class Split {
    private final long lowPosition;
    private final long highPosition;
    private final int splitNumber;
    private final int totalSplits;
    private final String partName;

    /** @return whether this split is the final section of the file. */
    public final boolean isLastSplit() {
      return this.splitNumber == this.totalSplits - 1;
    }
  }

  private static final String SPLIT_KEY = CopyConfiguration.COPY_PREFIX + ".file.splitter.split";
  private static final Gson GSON = new Gson();

  /**
   * Split an input {@link CopyableFile} into multiple splits aligned with block boundaries.
   *
   * @param file {@link CopyableFile} to split.
   * @param workUnit {@link WorkUnit} generated for this file.
   * @param targetFs destination {@link FileSystem} where file is to be copied.
   * @return a list of {@link WorkUnit}, each for a split of this file.
   * @throws IOException
   */
  public static Collection<WorkUnit> splitFile(CopyableFile file, WorkUnit workUnit, FileSystem targetFs)
      throws IOException {
    long len = file.getFileStatus().getLen();
    // Use the lcm of source and target block sizes so splits align with block boundaries for both
    // extract and write.
    long blockSize = ArithmeticUtils.lcm(file.getFileStatus().getBlockSize(), file.getBlockSize(targetFs));
    long maxSplitSize = workUnit.getPropAsLong(MAX_SPLIT_SIZE_KEY, DEFAULT_MAX_SPLIT_SIZE);
    if (maxSplitSize < blockSize) {
      log.warn(String.format("Max split size must be at least block size. Adjusting to %d.", blockSize));
      maxSplitSize = blockSize;
    }
    if (len < maxSplitSize) {
      // Small enough to copy in one piece; reuse the original work unit unchanged.
      return Lists.newArrayList(workUnit);
    }
    Collection<WorkUnit> newWorkUnits = Lists.newArrayList();
    long lengthPerSplit = (maxSplitSize / blockSize) * blockSize;
    // Ceiling division. The previous (len / lengthPerSplit + 1) produced a zero-length trailing
    // split when len was an exact multiple of lengthPerSplit; HDFS concat rejects empty sources.
    int splits = (int) ((len + lengthPerSplit - 1) / lengthPerSplit);
    for (int i = 0; i < splits; i++) {
      WorkUnit newWorkUnit = WorkUnit.copyOf(workUnit);
      long lowPos = lengthPerSplit * i;
      long highPos = Math.min(lengthPerSplit * (i + 1), len);
      Split split = new Split(lowPos, highPos, i, splits,
          String.format("%s.__PART%d__", file.getDestination().getName(), i));
      String serializedSplit = GSON.toJson(split);
      newWorkUnit.setProp(SPLIT_KEY, serializedSplit);
      Guid oldGuid = CopySource.getWorkUnitGuid(newWorkUnit).get();
      Guid newGuid = oldGuid.append(Guid.fromStrings(serializedSplit));
      // Bug fix: the split-specific guid must be set on the split's own work unit. The original
      // code set it on the shared input workUnit, leaving every split without its own guid and
      // compounding prior splits' guids into later copies.
      CopySource.setWorkUnitGuid(newWorkUnit, newGuid);
      newWorkUnits.add(newWorkUnit);
    }
    return newWorkUnits;
  }

  /**
   * Finds all split work units in the input collection and merges the file parts into the expected output files.
   * @param fs {@link FileSystem} where file parts exist.
   * @param workUnits Collection of {@link WorkUnitState}s possibly containing split work units.
   * @return The collection of {@link WorkUnitState}s where split work units for each file have been merged.
   * @throws IOException
   */
  public static Collection<WorkUnitState> mergeAllSplitWorkUnits(FileSystem fs, Collection<WorkUnitState> workUnits)
      throws IOException {
    // Group the split work units by the file they are parts of.
    ListMultimap<CopyableFile, WorkUnitState> splitWorkUnitsMap = ArrayListMultimap.create();
    for (WorkUnitState workUnit : workUnits) {
      if (isSplitWorkUnit(workUnit)) {
        CopyableFile copyableFile = (CopyableFile) CopySource.deserializeCopyEntity(workUnit);
        splitWorkUnitsMap.put(copyableFile, workUnit);
      }
    }
    for (CopyableFile file : splitWorkUnitsMap.keySet()) {
      log.info(String.format("Merging split file %s.", file.getDestination()));
      WorkUnitState oldWorkUnit = splitWorkUnitsMap.get(file).get(0);
      Path outputDir = FileAwareInputStreamDataWriter.getOutputDir(oldWorkUnit);
      CopyEntity.DatasetAndPartition datasetAndPartition =
          file.getDatasetAndPartition(CopySource.deserializeCopyableDataset(oldWorkUnit));
      Path parentPath = FileAwareInputStreamDataWriter.getOutputFilePath(file, outputDir, datasetAndPartition)
          .getParent();
      // Concatenate the parts and collapse the split work units into a single one.
      WorkUnitState newWorkUnit = mergeSplits(fs, file, splitWorkUnitsMap.get(file), parentPath);
      for (WorkUnitState wu : splitWorkUnitsMap.get(file)) {
        // Set to committed so that task states will not fail
        wu.setWorkingState(WorkUnitState.WorkingState.COMMITTED);
        workUnits.remove(wu);
      }
      workUnits.add(newWorkUnit);
    }
    return workUnits;
  }

  /**
   * Merges all the splits for a given file.
   * Should be called on the target/destination file system (after blocks have been copied to targetFs).
   * @param fs {@link FileSystem} where file parts exist.
   * @param file {@link CopyableFile} to merge.
   * @param workUnits {@link WorkUnitState}s for all parts of this file.
   * @param parentPath {@link Path} where the parts of the file are located.
   * @return a {@link WorkUnit} equivalent to the distcp work unit if the file had not been split.
   * @throws IOException
   */
  private static WorkUnitState mergeSplits(FileSystem fs, CopyableFile file, Collection<WorkUnitState> workUnits,
      Path parentPath) throws IOException {
    log.info(String.format("File %s was written in %d parts. Merging.", file.getDestination(), workUnits.size()));
    Path[] parts = new Path[workUnits.size()];
    for (WorkUnitState workUnit : workUnits) {
      if (!isSplitWorkUnit(workUnit)) {
        throw new IOException("Not a split work unit.");
      }
      Split split = getSplit(workUnit).get();
      parts[split.getSplitNumber()] = new Path(parentPath, split.getPartName());
    }
    // Part 0 becomes the final file; the remaining parts are appended to it via concat.
    Path target = new Path(parentPath, file.getDestination().getName());
    fs.rename(parts[0], target);
    fs.concat(target, Arrays.copyOfRange(parts, 1, parts.length));
    WorkUnitState finalWorkUnit = workUnits.iterator().next();
    finalWorkUnit.removeProp(SPLIT_KEY);
    return finalWorkUnit;
  }

  /**
   * @return whether the {@link WorkUnit} is a split work unit.
   */
  public static boolean isSplitWorkUnit(State workUnit) {
    return workUnit.contains(SPLIT_KEY);
  }

  /**
   * @return the {@link Split} object contained in the {@link WorkUnit}.
   */
  public static Optional<Split> getSplit(State workUnit) {
    return workUnit.contains(SPLIT_KEY) ? Optional.of(GSON.fromJson(workUnit.getProp(SPLIT_KEY), Split.class))
        : Optional.<Split>absent();
  }

  /**
   * @param state {@link State} containing properties for a job.
   * @param targetFs destination {@link FileSystem} where file is to be copied
   * @return whether to allow for splitting of work units based on the filesystem, state, converter/writer config.
   */
  public static boolean allowSplit(State state, FileSystem targetFs) {
    // Don't allow distcp jobs that use decrypt/ungzip converters or tararchive/encrypt writers to split work units
    Collection<String> converterClassNames = Collections.emptyList();
    if (state.contains(ConfigurationKeys.CONVERTER_CLASSES_KEY)) {
      converterClassNames = state.getPropAsList(ConfigurationKeys.CONVERTER_CLASSES_KEY);
    }
    return state.getPropAsBoolean(SPLIT_ENABLED, false)
        && KNOWN_SCHEMES_SUPPORTING_CONCAT.contains(targetFs.getUri().getScheme())
        && state.getProp(ConfigurationKeys.WRITER_BUILDER_CLASS, "")
            .equals(FileAwareInputStreamDataWriterBuilder.class.getName())
        // allMatch replaces the original double-negative noneMatch(!equals); identical semantics.
        && converterClassNames.stream().allMatch(s -> s.equals(IdentityConverter.class.getName()));
  }
}
| 2,595 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/extractor/FileAwareInputStreamExtractorWithCheckSchema.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.extractor;
import java.io.IOException;
import java.io.InputStream;
import org.apache.avro.Schema;
import org.apache.avro.file.DataFileReader;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.io.DatumReader;
import org.apache.avro.mapred.FsInput;
import org.apache.gobblin.data.management.copy.CopySource;
import org.apache.gobblin.util.schema_check.AvroSchemaCheckStrategy;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.data.management.copy.CopyableFile;
import org.apache.gobblin.data.management.copy.FileAwareInputStream;
import org.apache.gobblin.source.extractor.DataRecordException;
/**
 * Used instead of {@link FileAwareInputStreamExtractor} that extracts {@link InputStream}s. This extractor will first
 * check if the schema matches the expected schema. If not it will abort the job.
 */
public class FileAwareInputStreamExtractorWithCheckSchema extends FileAwareInputStreamExtractor {

  public FileAwareInputStreamExtractorWithCheckSchema(FileSystem fs, CopyableFile file, WorkUnitState state) {
    super(fs, file, state);
  }

  public FileAwareInputStreamExtractorWithCheckSchema(FileSystem fs, CopyableFile file) {
    this(fs, file, null);
  }

  /**
   * Builds the stream only after the file's Avro schema passes the configured schema check.
   *
   * @throws DataRecordException if the file's schema does not match the expected schema
   */
  @Override
  protected FileAwareInputStream buildStream(FileSystem fsFromFile) throws DataRecordException, IOException {
    if (!schemaChecking(fsFromFile)) {
      throw new DataRecordException("Schema does not match the expected schema");
    }
    return super.buildStream(fsFromFile);
  }

  /**
   * Use {@link AvroSchemaCheckStrategy} to make sure the real schema and the expected schema have matching field names
   * and types.
   *
   * @param fsFromFile filesystem the file belongs to (unused here; the file is opened via its own status path)
   * @return true if checking is disabled or the schemas match
   * @throws IOException if the expected schema or the check strategy is not configured, or the file cannot be read
   */
  protected boolean schemaChecking(FileSystem fsFromFile) throws IOException {
    if (!this.state.getPropAsBoolean(CopySource.SCHEMA_CHECK_ENABLED, CopySource.DEFAULT_SCHEMA_CHECK_ENABLED)) {
      return true;
    }
    DatumReader<GenericRecord> datumReader = new GenericDatumReader<>();
    Schema schema;
    // try-with-resources fixes a resource leak: the original never closed the DataFileReader,
    // leaking the underlying file handle. Also uses the parameterized type instead of a raw one.
    try (DataFileReader<GenericRecord> dataFileReader =
        new DataFileReader<>(new FsInput(this.file.getFileStatus().getPath(), new Configuration()), datumReader)) {
      schema = dataFileReader.getSchema();
    }
    if (this.state.getProp(ConfigurationKeys.COPY_EXPECTED_SCHEMA) == null) {
      throw new IOException("Expected schema is not set properly");
    }
    Schema expectedSchema = new Schema.Parser().parse(this.state.getProp(ConfigurationKeys.COPY_EXPECTED_SCHEMA));
    AvroSchemaCheckStrategy strategy = AvroSchemaCheckStrategy.AvroSchemaCheckStrategyFactory.create(this.state);
    if (strategy == null) {
      throw new IOException("schema check strategy cannot be initialized");
    }
    return strategy.compare(expectedSchema, schema);
  }
}
| 2,596 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/extractor/CloseableFsFileAwareInputStreamExtractor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.extractor;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.data.management.copy.CopyableFile;
import org.apache.gobblin.source.extractor.extract.sftp.SftpLightWeightFileSystem;
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import com.google.common.io.Closer;
/**
 * Variant of {@link FileAwareInputStreamExtractor} for {@link FileSystem}s that must be closed after use,
 * e.g. {@link SftpLightWeightFileSystem}. The wrapped file system is registered with a {@link Closer}
 * and released when {@link #close()} is invoked.
 */
public class CloseableFsFileAwareInputStreamExtractor extends FileAwareInputStreamExtractor {

  /** Tracks the file system so it is closed exactly once when this extractor closes. */
  private final Closer closer = Closer.create();

  public CloseableFsFileAwareInputStreamExtractor(FileSystem fs, CopyableFile file, WorkUnitState state)
      throws IOException {
    super(fs, file, state);
    this.closer.register(fs);
  }

  public CloseableFsFileAwareInputStreamExtractor(FileSystem fs, CopyableFile file)
      throws IOException {
    // Delegate to the three-arg constructor (the parent's two-arg constructor does the same with a null state),
    // so the closer registration lives in a single place.
    this(fs, file, null);
  }

  @Override
  public void close() throws IOException {
    this.closer.close();
  }
}
| 2,597 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/extractor/EmptyExtractor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.extractor;
import lombok.AllArgsConstructor;
import java.io.IOException;
import org.apache.gobblin.source.extractor.DataRecordException;
import org.apache.gobblin.source.extractor.Extractor;
/**
 * An {@link Extractor} that yields no records: {@link #readRecord(Object)} always returns {@code null},
 * signaling immediate end of data. Only the schema supplied at construction time is exposed.
 */
public class EmptyExtractor<S, D> implements Extractor<S, D> {

  private final S schema;

  // Explicit constructor equivalent to Lombok's @AllArgsConstructor on the original.
  public EmptyExtractor(S schema) {
    this.schema = schema;
  }

  @Override
  public S getSchema() throws IOException {
    return this.schema;
  }

  @Override
  public D readRecord(@Deprecated D reuse) throws DataRecordException, IOException {
    // No records to produce; null terminates extraction.
    return null;
  }

  @Override
  public long getExpectedRecordCount() {
    return 0;
  }

  @Override
  public long getHighWatermark() {
    return 0;
  }

  @Override
  public void close() throws IOException {
    // Nothing to release.
  }
}
| 2,598 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/extractor/FileAwareInputStreamExtractor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.extractor;
import com.google.common.base.Optional;
import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.data.management.copy.CopyableFile;
import org.apache.gobblin.data.management.copy.FileAwareInputStream;
import org.apache.gobblin.data.management.copy.splitter.DistcpFileSplitter;
import org.apache.gobblin.source.extractor.DataRecordException;
import org.apache.gobblin.source.extractor.Extractor;
import org.apache.gobblin.util.HadoopUtils;
import org.apache.gobblin.util.io.EmptyInputStream;
import org.apache.gobblin.util.io.MeteredInputStream;
/**
 * An implementation of {@link Extractor} that extracts {@link InputStream}s. This extractor is suitable for copy jobs
 * where files from any source to a sink. The extractor extracts a {@link FileAwareInputStream} which encompasses an
 * {@link InputStream} and a {@link org.apache.gobblin.data.management.copy.CopyEntity} for every file that needs to be copied.
 *
 * <p>
 * In Gobblin {@link Extractor} terms, each {@link FileAwareInputStream} is a record. i.e one record per copyable file.
 * The extractor is capable of extracting multiple files
 * <p>
 */
public class FileAwareInputStreamExtractor implements Extractor<String, FileAwareInputStream> {

  protected final FileSystem fs;
  protected final CopyableFile file;
  protected final WorkUnitState state;
  /** True indicates the unique record has already been read. */
  protected boolean recordRead;

  public FileAwareInputStreamExtractor(FileSystem fs, CopyableFile file, WorkUnitState state) {
    this.fs = fs;
    this.file = file;
    this.state = state;
    this.recordRead = false;
  }

  public FileAwareInputStreamExtractor(FileSystem fs, CopyableFile file) {
    this(fs, file, null);
  }

  /**
   * @return Constant string schema (the {@link FileAwareInputStream} class name).
   * @throws IOException never thrown here; declared by the {@link Extractor} contract
   */
  @Override
  public String getSchema()
      throws IOException {
    return FileAwareInputStream.class.getName();
  }

  /**
   * Returns the single {@link FileAwareInputStream} record on the first call, {@code null} afterwards.
   *
   * @param reuse ignored (deprecated by the {@link Extractor} contract)
   */
  @Override
  public FileAwareInputStream readRecord(@Deprecated FileAwareInputStream reuse)
      throws DataRecordException, IOException {
    if (!this.recordRead) {
      Configuration conf =
          this.state == null ? HadoopUtils.newConfiguration() : HadoopUtils.getConfFromState(this.state);
      // Resolve the file system from the file's own origin path, which may differ from this.fs.
      FileSystem fsFromFile = this.file.getOrigin().getPath().getFileSystem(conf);
      return buildStream(fsFromFile);
    }
    return null;
  }

  /**
   * Builds the {@link FileAwareInputStream} record for {@link #file}, opening the underlying data stream.
   * Directories get an empty stream; split work units are seeked to the split's low position.
   */
  protected FileAwareInputStream buildStream(FileSystem fsFromFile)
      throws DataRecordException, IOException {
    this.recordRead = true;
    FileAwareInputStream.FileAwareInputStreamBuilder builder = FileAwareInputStream.builder().file(this.file);
    if (this.file.getFileStatus().isDirectory()) {
      // Directories carry no data; an empty stream lets the writer create the directory.
      return builder.inputStream(EmptyInputStream.instance).build();
    }
    FSDataInputStream dataInputStream = fsFromFile.open(this.file.getFileStatus().getPath());
    try {
      if (this.state != null && DistcpFileSplitter.isSplitWorkUnit(this.state)) {
        Optional<DistcpFileSplitter.Split> split = DistcpFileSplitter.getSplit(this.state);
        builder.split(split);
        if (split.isPresent()) {
          dataInputStream.seek(split.get().getLowPosition());
        }
      }
      builder.inputStream(MeteredInputStream.builder().in(dataInputStream).build());
      return builder.build();
    } catch (IOException | RuntimeException e) {
      // Close-on-failure: the original leaked the open stream if the split lookup or seek threw.
      dataInputStream.close();
      throw e;
    }
  }

  /**
   * Each {@link FileAwareInputStreamExtractor} processes exactly one record.
   */
  @Override
  public long getExpectedRecordCount() {
    return 1;
  }

  @Override
  public long getHighWatermark() {
    return 0;
  }

  @Override
  public void close()
      throws IOException {
    // The input stream's lifecycle is owned by the record's consumer, not this extractor.
  }
}
| 2,599 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.