index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/iceberg/IcebergHiveCatalog.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.iceberg;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.iceberg.TableOperations;
import org.apache.iceberg.catalog.TableIdentifier;
import org.apache.iceberg.hive.HiveCatalog;
import lombok.extern.slf4j.Slf4j;
/**
 * Hive-Metastore-backed {@link IcebergCatalog}: resolves tables through a companion
 * {@link HiveCatalog} and reports the metastore URI as the catalog URI.
 */
@Slf4j
public class IcebergHiveCatalog extends BaseIcebergCatalog {
  public static final String HIVE_CATALOG_NAME = "HiveCatalog";

  // NOTE: must be typed as the concrete `HiveCatalog`, since `BaseMetastoreCatalog.newTableOps` is `protected`!
  private HiveCatalog hiveCatalog;

  public IcebergHiveCatalog() {
    super(HIVE_CATALOG_NAME, HiveCatalog.class);
  }

  @Override
  public void initialize(Map<String, String> properties, Configuration configuration) {
    // the companion catalog loaded for `HiveCatalog.class` is guaranteed to be a `HiveCatalog`
    this.hiveCatalog = (HiveCatalog) createCompanionCatalog(properties, configuration);
  }

  @Override
  public String getCatalogUri() {
    return this.hiveCatalog.getConf().get(HiveConf.ConfVars.METASTOREURIS.varname, "<<not set>>");
  }

  @Override
  protected TableOperations createTableOperations(TableIdentifier tableId) {
    return this.hiveCatalog.newTableOps(tableId);
  }

  @Override
  public boolean tableAlreadyExists(IcebergTable icebergTable) {
    return this.hiveCatalog.tableExists(icebergTable.getTableId());
  }
}
| 2,600 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/iceberg/BaseIcebergCatalog.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.iceberg;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.iceberg.CatalogUtil;
import org.apache.iceberg.TableOperations;
import org.apache.iceberg.catalog.Catalog;
import org.apache.iceberg.catalog.TableIdentifier;
/**
 * Skeletal {@link IcebergCatalog} implementation that opens {@link IcebergTable}s by delegating
 * to a concrete companion catalog (e.g. {@link org.apache.iceberg.hive.HiveCatalog}), which
 * subclasses load via {@link #createCompanionCatalog(Map, Configuration)}.
 */
public abstract class BaseIcebergCatalog implements IcebergCatalog {
  protected final String catalogName;
  protected final Class<? extends Catalog> companionCatalogClass;

  protected BaseIcebergCatalog(String catalogName, Class<? extends Catalog> companionCatalogClass) {
    this.catalogName = catalogName;
    this.companionCatalogClass = companionCatalogClass;
  }

  @Override
  public IcebergTable openTable(String dbName, String tableName) {
    TableIdentifier tableIdentifier = TableIdentifier.of(dbName, tableName);
    return new IcebergTable(tableIdentifier, createTableOperations(tableIdentifier), getCatalogUri());
  }

  /** Instantiate the companion catalog class through Iceberg's own catalog loading machinery. */
  protected Catalog createCompanionCatalog(Map<String, String> properties, Configuration configuration) {
    return CatalogUtil.loadCatalog(companionCatalogClass.getName(), catalogName, properties, configuration);
  }

  /** @return {@link TableOperations} for {@code tableId}, sourced from the companion catalog */
  protected abstract TableOperations createTableOperations(TableIdentifier tableId);
}
| 2,601 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/iceberg/IcebergDatasetFinder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.iceberg;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.iceberg.CatalogProperties;
import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigValue;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.config.ConfigBuilder;
import org.apache.gobblin.dataset.DatasetConstants;
import org.apache.gobblin.dataset.IterableDatasetFinder;
import org.apache.gobblin.util.HadoopUtils;
/**
* Finds {@link IcebergDataset}s. Will look for tables in a database using a {@link IcebergCatalog},
* and creates a {@link IcebergDataset} for each one.
*/
@Slf4j
@RequiredArgsConstructor
public class IcebergDatasetFinder implements IterableDatasetFinder<IcebergDataset> {
public static final String ICEBERG_DATASET_PREFIX = DatasetConstants.PLATFORM_ICEBERG + ".dataset";
public static final String DEFAULT_ICEBERG_CATALOG_CLASS = "org.apache.gobblin.data.management.copy.iceberg.IcebergHiveCatalog";
public static final String ICEBERG_CATALOG_KEY = "catalog";
/**
* This is used with a prefix: "{@link IcebergDatasetFinder#ICEBERG_DATASET_PREFIX}" + "." + "(source or destination)" + "." + "{@link IcebergDatasetFinder#ICEBERG_CATALOG_KEY}" + "..."
* It is an open-ended pattern used to pass arbitrary catalog specific properties
*/
public static final String ICEBERG_CATALOG_CLASS_KEY = "class";
public static final String ICEBERG_DB_NAME = ICEBERG_DATASET_PREFIX + ".database.name";
public static final String ICEBERG_TABLE_NAME = ICEBERG_DATASET_PREFIX + ".table.name";
public enum CatalogLocation {
SOURCE,
DESTINATION;
/**
* Provides prefix for configs based on the catalog location to filter catalog specific properties
*/
public String getConfigPrefix() {
return ICEBERG_DATASET_PREFIX + "." + this.toString().toLowerCase() + "." + ICEBERG_CATALOG_KEY + ".";
}
}
protected final FileSystem sourceFs;
private final Properties properties;
/**
* Finds all {@link IcebergDataset}s in the file system using the Iceberg Catalog.
* Both Iceberg database name and table name are mandatory based on current implementation.
* Later we may explore supporting datasets similar to Hive
* @return List of {@link IcebergDataset}s in the file system.
* @throws IOException
*/
@Override
public List<IcebergDataset> findDatasets() throws IOException {
List<IcebergDataset> matchingDatasets = new ArrayList<>();
if (StringUtils.isBlank(properties.getProperty(ICEBERG_DB_NAME)) || StringUtils.isBlank(properties.getProperty(ICEBERG_TABLE_NAME))) {
throw new IllegalArgumentException(String.format("Iceberg database name: {%s} or Iceberg table name: {%s} is missing",
ICEBERG_DB_NAME, ICEBERG_TABLE_NAME));
}
String dbName = properties.getProperty(ICEBERG_DB_NAME);
String tblName = properties.getProperty(ICEBERG_TABLE_NAME);
IcebergCatalog sourceIcebergCatalog = createIcebergCatalog(this.properties, CatalogLocation.SOURCE);
IcebergCatalog destinationIcebergCatalog = createIcebergCatalog(this.properties, CatalogLocation.DESTINATION);
/* Each Iceberg dataset maps to an Iceberg table */
matchingDatasets.add(createIcebergDataset(dbName, tblName, sourceIcebergCatalog, destinationIcebergCatalog, this.properties, this.sourceFs));
log.info("Found {} matching datasets: {} for the database name: {} and table name: {}", matchingDatasets.size(),
matchingDatasets, dbName, tblName); // until future support added to specify multiple icebergs, count expected always to be one
return matchingDatasets;
}
@Override
public Path commonDatasetRoot() {
return new Path("/");
}
@Override
public Iterator<IcebergDataset> getDatasetsIterator() throws IOException {
return findDatasets().iterator();
}
/**
* Requires both source and destination catalogs to connect to their respective {@link IcebergTable}
* Note: the destination side {@link IcebergTable} should be present before initiating replication
* @return {@link IcebergDataset} with its corresponding source and destination {@link IcebergTable}
*/
protected IcebergDataset createIcebergDataset(String dbName, String tblName, IcebergCatalog sourceIcebergCatalog, IcebergCatalog destinationIcebergCatalog, Properties properties, FileSystem fs) throws IOException {
IcebergTable srcIcebergTable = sourceIcebergCatalog.openTable(dbName, tblName);
Preconditions.checkArgument(sourceIcebergCatalog.tableAlreadyExists(srcIcebergTable), String.format("Missing Source Iceberg Table: {%s}.{%s}", dbName, tblName));
IcebergTable destIcebergTable = destinationIcebergCatalog.openTable(dbName, tblName);
// TODO: Rethink strategy to enforce dest iceberg table
Preconditions.checkArgument(destinationIcebergCatalog.tableAlreadyExists(destIcebergTable), String.format("Missing Destination Iceberg Table: {%s}.{%s}", dbName, tblName));
return new IcebergDataset(dbName, tblName, srcIcebergTable, destIcebergTable, properties, fs);
}
protected static IcebergCatalog createIcebergCatalog(Properties properties, CatalogLocation location) throws IOException {
String prefix = location.getConfigPrefix();
Map<String, String> catalogProperties = buildMapFromPrefixChildren(properties, prefix);
// TODO: Filter properties specific to Hadoop
Configuration configuration = HadoopUtils.getConfFromProperties(properties);
String icebergCatalogClassName = catalogProperties.getOrDefault(ICEBERG_CATALOG_CLASS_KEY, DEFAULT_ICEBERG_CATALOG_CLASS);
return IcebergCatalogFactory.create(icebergCatalogClassName, catalogProperties, configuration);
}
/**
* Filters the properties based on a prefix using {@link ConfigBuilder#loadProps(Properties, String)} and creates a {@link Map}
*/
protected static Map<String, String> buildMapFromPrefixChildren(Properties properties, String configPrefix) {
Map<String, String> catalogProperties = new HashMap<>();
Config config = ConfigBuilder.create().loadProps(properties, configPrefix).build();
for (Map.Entry<String, ConfigValue> entry : config.entrySet()) {
catalogProperties.put(entry.getKey(), entry.getValue().unwrapped().toString());
}
String catalogUri = config.getString(CatalogProperties.URI);
Preconditions.checkNotNull(catalogUri, "Provide: {%s} as Catalog Table Service URI is required", configPrefix + "." + CatalogProperties.URI);
return catalogProperties;
}
}
| 2,602 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/iceberg/IcebergCatalog.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.iceberg;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
/**
 * Any catalog from which to access {@link IcebergTable}s.
 */
public interface IcebergCatalog {

  /** @return handle for the table identified by {@code dbName} and {@code tableName}; existence is NOT verified here (see {@link #tableAlreadyExists}) */
  IcebergTable openTable(String dbName, String tableName);

  /** @return URI locating this catalog's backing service (e.g. a Hive metastore URI) */
  String getCatalogUri();

  /** Prepares the catalog with implementation-specific {@code properties} and Hadoop {@code configuration}; expected to be called before other methods */
  void initialize(Map<String, String> properties, Configuration configuration);

  /** @return whether {@code icebergTable} actually exists in this catalog */
  boolean tableAlreadyExists(IcebergTable icebergTable);
}
| 2,603 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/iceberg/IcebergTable.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.iceberg;
import java.io.IOException;
import java.net.URI;
import java.time.Instant;
import java.util.Iterator;
import java.util.List;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.hadoop.fs.FileSystem;
import org.apache.iceberg.ManifestFile;
import org.apache.iceberg.ManifestFiles;
import org.apache.iceberg.Snapshot;
import org.apache.iceberg.TableMetadata;
import org.apache.iceberg.TableOperations;
import org.apache.iceberg.catalog.TableIdentifier;
import org.apache.iceberg.io.CloseableIterable;
import org.apache.iceberg.io.FileIO;
import com.google.common.collect.Iterators;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import lombok.AllArgsConstructor;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.dataset.DatasetConstants;
import org.apache.gobblin.dataset.DatasetDescriptor;
import static org.apache.gobblin.data.management.copy.iceberg.IcebergSnapshotInfo.ManifestFileInfo;
/**
 * Exposes metadata information for a single Iceberg table.
 */
@Slf4j
@AllArgsConstructor
public class IcebergTable {

  /** Indicate the table identified by `tableId` does not (or does no longer) exist in the catalog */
  public static class TableNotFoundException extends IOException {
    @Getter
    private final TableIdentifier tableId; // stored purely for logging / diagnostics

    public TableNotFoundException(TableIdentifier tableId) {
      super("Not found: '" + tableId + "'");
      this.tableId = tableId;
    }
  }

  /** Identifier of the table this instance exposes */
  @Getter
  private final TableIdentifier tableId;
  /** Access point to the table's {@link TableMetadata}: read via `current()`, write via `commit()` */
  private final TableOperations tableOps;
  /** URI of the owning catalog; embedded in the {@link DatasetDescriptor} for lineage */
  private final String catalogUri;

  /** @return metadata info limited to the most recent (current) snapshot */
  public IcebergSnapshotInfo getCurrentSnapshotInfo() throws IOException {
    TableMetadata current = accessTableMetadata();
    return createSnapshotInfo(current.currentSnapshot(), Optional.of(current.metadataFileLocation()));
  }

  /** @return metadata info for most recent snapshot, wherein manifests and their child data files ARE NOT listed */
  public IcebergSnapshotInfo getCurrentSnapshotInfoOverviewOnly() throws IOException {
    TableMetadata current = accessTableMetadata();
    return createSnapshotInfo(current.currentSnapshot(), Optional.of(current.metadataFileLocation()), true);
  }

  /** @return metadata info for all known snapshots, ordered historically, with *most recent last* */
  public Iterator<IcebergSnapshotInfo> getAllSnapshotInfosIterator() throws IOException {
    TableMetadata current = accessTableMetadata();
    long currentSnapshotId = current.currentSnapshot().snapshotId();
    List<Snapshot> snapshots = current.snapshots();
    return Iterators.transform(snapshots.iterator(), snapshot -> {
      try {
        return IcebergTable.this.createSnapshotInfo(
            snapshot,
            // only the snapshot deemed current carries the metadata file location; all others get `empty()`
            currentSnapshotId == snapshot.snapshotId() ? Optional.of(current.metadataFileLocation()) : Optional.empty()
        );
      } catch (IOException e) {
        // tunnel the checked exception out of the non-throwing `Function` required by `Iterators.transform`
        throw new RuntimeException(e);
      }
    });
  }

  /**
   * @return metadata info for all known snapshots, but incrementally, so overlapping entries within snapshots appear
   * only with the first as they're ordered historically, with *most recent last*.
   *
   * This means the {@link IcebergSnapshotInfo#getManifestFiles()} for the (n+1)-th element of the iterator will omit
   * all manifest files and listed data files, already reflected in a {@link IcebergSnapshotInfo#getManifestFiles()}
   * from the n-th or prior elements. Given the order of the {@link Iterator<IcebergSnapshotInfo>} returned, this
   * mirrors the snapshot-to-file dependencies: each file is returned exactly once with the (oldest) snapshot from
   * which it first becomes reachable.
   *
   * Only the final {@link IcebergSnapshotInfo#getMetadataPath()} is present (for the snapshot it itself deems current).
   */
  public Iterator<IcebergSnapshotInfo> getIncrementalSnapshotInfosIterator() throws IOException {
    // TODO: investigate using `.addedFiles()`, `.deletedFiles()` to calc this
    Set<String> knownFilePaths = Sets.newHashSet(); // as absolute paths are clearly unique, use a single set for all
    return Iterators.filter(Iterators.transform(getAllSnapshotInfosIterator(), snapshotInfo -> {
      log.info("~{}~ before snapshot '{}' - '{}' total known iceberg paths",
          tableId, snapshotInfo.getSnapshotId(), knownFilePaths.size());
      if (false == knownFilePaths.add(snapshotInfo.getManifestListPath())) { // already known manifest list!
        return snapshotInfo.toBuilder().manifestListPath(null).build(); // use `null` as marker to surrounding `filter`
      }
      // accumulate only the manifests (and listed files) not yet seen with an older snapshot
      List<IcebergSnapshotInfo.ManifestFileInfo> novelManifestInfos = Lists.newArrayList();
      for (ManifestFileInfo mfi : snapshotInfo.getManifestFiles()) {
        if (true == knownFilePaths.add(mfi.getManifestFilePath())) { // heretofore unknown
          List<String> novelListedPaths = mfi.getListedFilePaths().stream()
              .filter(fpath -> true == knownFilePaths.add(fpath)) // heretofore unknown
              .collect(Collectors.toList());
          if (novelListedPaths.size() == mfi.getListedFilePaths().size()) { // nothing filtered
            novelManifestInfos.add(mfi); // reuse orig
          } else {
            novelManifestInfos.add(new ManifestFileInfo(mfi.getManifestFilePath(), novelListedPaths));
          }
        } // else, whenever recognized manifest file, skip w/ all its listed paths--which also all would be recognized
      }
      if (novelManifestInfos.size() == snapshotInfo.getManifestFiles().size()) { // nothing filtered
        return snapshotInfo; // reuse orig
      } else {
        return snapshotInfo.toBuilder().manifestFiles(novelManifestInfos).build(); // replace manifestFiles
      }
    }), snapshotInfo -> snapshotInfo.getManifestListPath() != null); // remove marked-as-repeat-manifest-list snapshots
  }

  /** @throws {@link IcebergTable.TableNotFoundException} when table does not exist */
  protected TableMetadata accessTableMetadata() throws TableNotFoundException {
    TableMetadata current = this.tableOps.current();
    // `TableOperations::current` returns null for a non-existent table; surface that as a typed exception
    return Optional.ofNullable(current).orElseThrow(() -> new TableNotFoundException(this.tableId));
  }

  /** Builds full {@link IcebergSnapshotInfo}, including every manifest file and its listed data files */
  protected IcebergSnapshotInfo createSnapshotInfo(Snapshot snapshot, Optional<String> metadataFileLocation) throws IOException {
    return createSnapshotInfo(snapshot, metadataFileLocation, false);
  }

  /** Builds {@link IcebergSnapshotInfo}; when {@code skipManifestFileInfo}, manifests and their data files are left unlisted */
  protected IcebergSnapshotInfo createSnapshotInfo(Snapshot snapshot, Optional<String> metadataFileLocation, boolean skipManifestFileInfo) throws IOException {
    // TODO: verify correctness, even when handling 'delete manifests'!
    return new IcebergSnapshotInfo(
        snapshot.snapshotId(),
        Instant.ofEpochMilli(snapshot.timestampMillis()),
        metadataFileLocation,
        snapshot.manifestListLocation(),
        // NOTE: unable to `.stream().map(m -> calcManifestFileInfo(m, tableOps.io()))` due to checked exception
        skipManifestFileInfo ? Lists.newArrayList() : calcAllManifestFileInfos(snapshot.allManifests(tableOps.io()), tableOps.io())
    );
  }

  /** @return per-manifest info (manifest path plus listed data file paths) for each of {@code manifests} */
  protected static List<IcebergSnapshotInfo.ManifestFileInfo> calcAllManifestFileInfos(List<ManifestFile> manifests, FileIO io) throws IOException {
    List<ManifestFileInfo> result = Lists.newArrayList();
    for (ManifestFile manifest : manifests) {
      result.add(calcManifestFileInfo(manifest, io));
    }
    return result;
  }

  /** @return info pairing {@code manifest}'s own path with the data file paths it lists */
  protected static IcebergSnapshotInfo.ManifestFileInfo calcManifestFileInfo(ManifestFile manifest, FileIO io) throws IOException {
    return new ManifestFileInfo(manifest.path(), discoverDataFilePaths(manifest, io));
  }

  /** @return the data file paths recorded inside {@code manifest} */
  protected static List<String> discoverDataFilePaths(ManifestFile manifest, FileIO io) throws IOException {
    try (CloseableIterable<String> manifestPathsIterable = ManifestFiles.readPaths(manifest, io)) {
      return Lists.newArrayList(manifestPathsIterable);
    }
  }

  /** @return lineage descriptor for this table: iceberg platform, {@link #catalogUri} as URI, plus {@code fs}'s URI as metadata */
  protected DatasetDescriptor getDatasetDescriptor(FileSystem fs) {
    DatasetDescriptor descriptor = new DatasetDescriptor(
        DatasetConstants.PLATFORM_ICEBERG,
        URI.create(this.catalogUri),
        this.tableId.name()
    );
    descriptor.addMetadata(DatasetConstants.FS_URI, fs.getUri().toString());
    return descriptor;
  }

  /** Registers {@link IcebergTable} after publishing data.
   * @param dstMetadata is null if destination {@link IcebergTable} is absent, in which case registration is skipped */
  protected void registerIcebergTable(TableMetadata srcMetadata, TableMetadata dstMetadata) {
    if (dstMetadata != null) {
      // use current destination metadata as 'base metadata' and source as 'updated metadata' while committing
      this.tableOps.commit(dstMetadata, srcMetadata.replaceProperties(dstMetadata.properties()));
    }
  }
}
| 2,604 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/iceberg/IcebergDataset.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.iceberg;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Properties;
import java.util.function.Function;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.Iterators;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import javax.annotation.concurrent.NotThreadSafe;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.data.management.copy.CopyConfiguration;
import org.apache.gobblin.data.management.copy.CopyEntity;
import org.apache.gobblin.data.management.copy.CopyableDataset;
import org.apache.gobblin.data.management.copy.CopyableFile;
import org.apache.gobblin.data.management.copy.OwnerAndPermission;
import org.apache.gobblin.data.management.copy.entities.PostPublishStep;
import org.apache.gobblin.data.management.copy.prioritization.PrioritizedCopyableDataset;
import org.apache.gobblin.data.management.partition.FileSet;
import org.apache.gobblin.dataset.DatasetDescriptor;
import org.apache.gobblin.util.PathUtils;
import org.apache.gobblin.util.function.CheckedExceptionFunction;
import org.apache.gobblin.util.measurement.GrowthMilestoneTracker;
import org.apache.gobblin.util.request_allocation.PushDownRequestor;
/**
* Iceberg dataset implementing {@link CopyableDataset}.
*/
@Slf4j
@Getter
public class IcebergDataset implements PrioritizedCopyableDataset {
  /** Iceberg database name this dataset replicates */
  private final String dbName;
  /** Iceberg table name this dataset replicates */
  private final String inputTableName;
  /** Table to replicate from */
  private final IcebergTable srcIcebergTable;
  /** Presumed destination {@link IcebergTable} exists */
  private final IcebergTable destIcebergTable;
  /** Job properties; also handed to the post-publish registration step */
  protected final Properties properties;
  /** {@link FileSystem} housing the source table's files */
  protected final FileSystem sourceFs;
  private final boolean shouldTolerateMissingSourceFiles = true; // TODO: make parameterizable, if desired

  /** Destination database name */
  public static final String DESTINATION_DATABASE_KEY = IcebergDatasetFinder.ICEBERG_DATASET_PREFIX + ".destination.database";
  /**
   * @param db Iceberg database name
   * @param table Iceberg table name
   * @param srcIcebergTable table to replicate from
   * @param destIcebergTable destination table (presumed by callers to already exist)
   * @param properties job properties
   * @param sourceFs {@link FileSystem} of the source table's files
   */
  public IcebergDataset(String db, String table, IcebergTable srcIcebergTable, IcebergTable destIcebergTable, Properties properties, FileSystem sourceFs) {
    this.dbName = db;
    this.inputTableName = table;
    this.srcIcebergTable = srcIcebergTable;
    this.destIcebergTable = destIcebergTable;
    this.properties = properties;
    this.sourceFs = sourceFs;
  }
  /** @return the dataset URN, which is simply the file set ID ("db.table") */
  @Override
  public String datasetURN() {
    return this.getFileSetId();
  }
  /** @return the destination table's root location; any {@link IcebergTable.TableNotFoundException} is rethrown unchecked */
  @Override
  public String getDatasetPath() {
    try {
      return this.destIcebergTable.accessTableMetadata().location();
    } catch (IcebergTable.TableNotFoundException e) {
      // dest table existence is verified by the dataset finder before construction; absence here is unexpected
      throw new RuntimeException(e);
    }
  }
  /**
   * Finds all files read by the table and generates CopyableFiles.
   * For the specific semantics see {@link #createFileSets}.
   * @param targetFs destination {@link FileSystem}
   * @param configuration copy job configuration
   */
  @Override
  public Iterator<FileSet<CopyEntity>> getFileSetIterator(FileSystem targetFs, CopyConfiguration configuration) {
    return createFileSets(targetFs, configuration);
  }
  /**
   * Finds all files read by the table and generates CopyableFiles.
   * For the specific semantics see {@link #createFileSets}.
   * NOTE: {@code prioritizer} and {@code requestor} are currently ignored (see TODO below).
   */
  @Override
  public Iterator<FileSet<CopyEntity>> getFileSetIterator(FileSystem targetFs, CopyConfiguration configuration,
      Comparator<FileSet<CopyEntity>> prioritizer, PushDownRequestor<FileSet<CopyEntity>> requestor) {
    // TODO: Implement PushDownRequestor and priority based copy entity iteration
    return createFileSets(targetFs, configuration);
  }
/** @return unique ID for this dataset, usable as a {@link CopyEntity}.fileset, for atomic publication grouping */
protected String getFileSetId() {
return this.dbName + "." + this.inputTableName;
}
  /**
   * Generates {@link FileSet}s, being themselves able to generate {@link CopyEntity}s for all files, data and metadata,
   * comprising the iceberg/table, so as to fully specify remaining table replication.
   * Currently a single {@link IcebergTableFileSet} covers the entire table.
   */
  protected Iterator<FileSet<CopyEntity>> createFileSets(FileSystem targetFs, CopyConfiguration configuration) {
    FileSet<CopyEntity> fileSet = new IcebergTableFileSet(this.getInputTableName(), this, targetFs, configuration);
    return Iterators.singletonIterator(fileSet);
  }
  /**
   * Finds all files, data and metadata, as {@link CopyEntity}s that comprise the table and fully specify remaining
   * table replication. Appends a post-publish step to register the table at the destination after files are copied.
   * @param targetFs destination {@link FileSystem}
   * @param copyConfig copy job configuration (also provides the file-status cache via its {@code CopyContext})
   */
  @VisibleForTesting
  Collection<CopyEntity> generateCopyEntities(FileSystem targetFs, CopyConfiguration copyConfig) throws IOException {
    String fileSet = this.getFileSetId();
    List<CopyEntity> copyEntities = Lists.newArrayList();
    Map<Path, FileStatus> pathToFileStatus = getFilePathsToFileStatus(targetFs, copyConfig);
    log.info("~{}.{}~ found {} candidate source paths", dbName, inputTableName, pathToFileStatus.size());
    Configuration defaultHadoopConfiguration = new Configuration();
    for (Map.Entry<Path, FileStatus> entry : pathToFileStatus.entrySet()) {
      Path srcPath = entry.getKey();
      FileStatus srcFileStatus = entry.getValue();
      // TODO: should be the same FS each time; try creating once, reusing thereafter, to not recreate wastefully
      FileSystem actualSourceFs = getSourceFileSystemFromFileStatus(srcFileStatus, defaultHadoopConfiguration);
      Path greatestAncestorPath = PathUtils.getRootPathChild(srcPath);
      // preserving ancestor permissions till root path's child between src and dest
      List<OwnerAndPermission> ancestorOwnerAndPermissionList =
          CopyableFile.resolveReplicatedOwnerAndPermissionsRecursively(actualSourceFs,
              srcPath.getParent(), greatestAncestorPath, copyConfig);
      // NOTE(review): destination path mirrors the source path on the target FS (`targetFs.makeQualified(srcPath)`),
      // i.e. replication appears to preserve absolute path layout — confirm this is intended for all deployments
      CopyableFile fileEntity = CopyableFile.fromOriginAndDestination(
          actualSourceFs, srcFileStatus, targetFs.makeQualified(srcPath), copyConfig)
          .fileSet(fileSet)
          .datasetOutputPath(targetFs.getUri().getPath())
          .ancestorsOwnerAndPermission(ancestorOwnerAndPermissionList)
          .build();
      // lineage: record where the bytes come from and where they land
      fileEntity.setSourceData(getSourceDataset(this.sourceFs));
      fileEntity.setDestinationData(getDestinationDataset(targetFs));
      copyEntities.add(fileEntity);
    }
    // TODO: Filter properties specific to iceberg registration and avoid serializing every global property
    copyEntities.add(createPostPublishStep(this.dbName, this.inputTableName, this.properties));
    log.info("~{}.{}~ generated {} copy entities", dbName, inputTableName, copyEntities.size());
    return copyEntities;
  }
/**
 * Finds all files of the Iceberg's current snapshot that still require replication, walking the
 * snapshots incrementally and short-circuiting any subtree whose apex file already exists on target
 * (files are presumed immutable and uniquely named, so presence at dest implies the subtree copied).
 * @param targetFs destination {@link FileSystem}, consulted (with caching via {@code CopyContext}) for existence checks
 * @param copyConfig supplies the copy context whose `getFileStatus` results are cached
 * @return a map of path, file status (taken from the source FS) for each file that needs to be copied
 * @throws IOException on source/target FS access failure; a source-side {@link FileNotFoundException}
 *         is swallowed (logged, consolidated per-directory) when `shouldTolerateMissingSourceFiles`
 */
protected Map<Path, FileStatus> getFilePathsToFileStatus(FileSystem targetFs, CopyConfiguration copyConfig) throws IOException {
  IcebergTable icebergTable = this.getSrcIcebergTable();
  /** @return whether `pathStr` is present on `targetFs`, caching results while tunneling checked exceptions outward */
  Function<String, Boolean> isPresentOnTarget = CheckedExceptionFunction.wrapToTunneled(pathStr ->
      // omit considering timestamp (or other markers of freshness), as files should be immutable
      // ATTENTION: `CopyContext.getFileStatus()`, to partake in caching
      copyConfig.getCopyContext().getFileStatus(targetFs, new Path(pathStr)).isPresent()
  );
  // check first for case of nothing to replicate, to avoid needless scanning of a potentially massive iceberg
  IcebergSnapshotInfo currentSnapshotOverview = icebergTable.getCurrentSnapshotInfoOverviewOnly();
  if (currentSnapshotOverview.getMetadataPath().map(isPresentOnTarget).orElse(false) &&
      isPresentOnTarget.apply(currentSnapshotOverview.getManifestListPath())) {
    log.info("~{}.{}~ skipping entire iceberg, since snapshot '{}' at '{}' and metadata '{}' both present on target",
        dbName, inputTableName, currentSnapshotOverview.getSnapshotId(),
        currentSnapshotOverview.getManifestListPath(),
        currentSnapshotOverview.getMetadataPath().orElse("<<ERROR: MISSING!>>"));
    return Maps.newHashMap();
  }
  Iterator<IcebergSnapshotInfo> icebergIncrementalSnapshotInfos = icebergTable.getIncrementalSnapshotInfosIterator();
  // lazily flatten each snapshot into the paths it contributes; `Iterators.concat/transform` defer all work
  Iterator<String> filePathsIterator = Iterators.concat(
      Iterators.transform(icebergIncrementalSnapshotInfos, snapshotInfo -> {
        // log each snapshot, for context, in case of `FileNotFoundException` during `FileSystem.getFileStatus()`
        String manListPath = snapshotInfo.getManifestListPath();
        log.info("~{}.{}~ loaded snapshot '{}' at '{}' from metadata path: '{}'", dbName, inputTableName,
            snapshotInfo.getSnapshotId(), manListPath, snapshotInfo.getMetadataPath().orElse("<<inherited>>"));
        // ALGO: an iceberg's files form a tree of four levels: metadata.json -> manifest-list -> manifest -> data;
        // most critically, all are presumed immutable and uniquely named, although any may be replaced.  we depend
        // also on incremental copy being run always atomically: to commit each iceberg only upon its full success.
        // thus established, the presence of a file at dest (identified by path/name) guarantees its entire subtree is
        // already copied--and, given immutability, completion of a prior copy naturally renders that file up-to-date.
        // hence, its entire subtree may be short-circuited.  nevertheless, absence of a file at dest cannot imply
        // its entire subtree necessarily requires copying, because it is possible, even likely in practice, that some
        // metadata files would have been replaced (e.g. during snapshot compaction).  in such instances, at least
        // some of the children pointed to within could have been copied prior, when they previously appeared as a
        // child of the current file's predecessor (which this new meta file now replaces).
        if (!isPresentOnTarget.apply(manListPath)) {
          List<String> missingPaths = snapshotInfo.getSnapshotApexPaths();
          for (IcebergSnapshotInfo.ManifestFileInfo mfi : snapshotInfo.getManifestFiles()) {
            if (!isPresentOnTarget.apply(mfi.getManifestFilePath())) {
              missingPaths.add(mfi.getManifestFilePath());
              // being incremental info, no listed paths would have appeared prior w/ other snapshots, so add all now.
              // skip verification despite corner case of a snapshot having reorganized/rebalanced manifest contents
              // during a period where replication fell so far behind that no snapshots listed among current metadata
              // are yet at dest.  since the consequence of unnecessary copy is merely wasted data transfer and
              // compute--and overall, potential is small--prefer sidestepping expense of exhaustive checking, since
              // file count may run into 100k+ (even beyond!)
              missingPaths.addAll(mfi.getListedFilePaths());
            }
          }
          log.info("~{}.{}~ snapshot '{}': collected {} additional source paths",
              dbName, inputTableName, snapshotInfo.getSnapshotId(), missingPaths.size());
          return missingPaths.iterator();
        } else {
          log.info("~{}.{}~ snapshot '{}' already present on target... skipping (including contents)",
              dbName, inputTableName, snapshotInfo.getSnapshotId());
          // IMPORTANT: separately consider metadata path, to handle case of 'metadata-only' snapshot reusing mf-list
          Optional<String> metadataPath = snapshotInfo.getMetadataPath();
          Optional<String> nonReplicatedMetadataPath = metadataPath.filter(p -> !isPresentOnTarget.apply(p));
          metadataPath.ifPresent(ignore ->
              log.info("~{}.{}~ metadata IS {} already present on target", dbName, inputTableName,
                  nonReplicatedMetadataPath.isPresent() ? "NOT" : "ALSO")
          );
          return nonReplicatedMetadataPath.map(p -> Lists.newArrayList(p).iterator()).orElse(Collections.emptyIterator());
        }
      })
  );
  Map<Path, FileStatus> results = Maps.newHashMap();
  long numSourceFilesNotFound = 0L;
  // adapt the one-shot iterator for the enhanced-for below (iterable must only be traversed once)
  Iterable<String> filePathsIterable = () -> filePathsIterator;
  try {
    // TODO: investigate whether streaming initialization of `Map` preferable--`getFileStatus()` network calls likely
    // to benefit from parallelism
    GrowthMilestoneTracker growthTracker = new GrowthMilestoneTracker();
    PathErrorConsolidator errorConsolidator = new PathErrorConsolidator();
    for (String pathString : filePathsIterable) {
      Path path = new Path(pathString);
      try {
        results.put(path, this.sourceFs.getFileStatus(path));
        if (growthTracker.isAnotherMilestone(results.size())) {
          log.info("~{}.{}~ collected file status on '{}' source paths", dbName, inputTableName, results.size());
        }
      } catch (FileNotFoundException fnfe) {
        if (!shouldTolerateMissingSourceFiles) {
          throw fnfe;
        } else {
          // log, but otherwise swallow... to continue on
          String total = ++numSourceFilesNotFound + " total";
          String speculation = "either premature deletion broke time-travel or metadata read interleaved among delete";
          errorConsolidator.prepLogMsg(path).ifPresent(msg ->
              log.warn("~{}.{}~ source {} ({}... {})", dbName, inputTableName, msg, speculation, total)
          );
        }
      }
    }
  } catch (CheckedExceptionFunction.WrappedIOException wrapper) {
    // unwrap and rethrow the checked IOException tunneled out of `isPresentOnTarget`
    wrapper.rethrowWrapped();
  }
  return results;
}
/**
 * Stateful object to consolidate error messages (e.g. for logging), per a {@link Path} consolidation strategy.
 * OVERVIEW: to avoid run-away logging into the 1000s of lines, consolidate to parent (directory) level:
 *   1. on the first path within the dir, log that specific path
 *   2. on the second path within the dir, log the dir path as a summarization (with ellipsis)
 *   3. thereafter, skip, logging nothing
 * The directory, parent path is the default consolidation strategy, yet may be overridden.
 */
@NotThreadSafe
protected static class PathErrorConsolidator {
  // per consolidated path: `false` once its first (specific) path was logged; `true` once summarized
  private final Map<Path, Boolean> consolidatedPathToWhetherErrorLogged = Maps.newHashMap();

  /** @return consolidated message to log, iff appropriate; else `Optional.empty()` when deserves inhibition */
  public Optional<String> prepLogMsg(Path path) {
    Path consolidatedPath = calcPathConsolidation(path);
    Boolean hadAlreadyLoggedConsolidation = this.consolidatedPathToWhetherErrorLogged.get(consolidatedPath);
    if (!Boolean.TRUE.equals(hadAlreadyLoggedConsolidation)) {
      // `null` => first sighting in this consolidation (log specific path); `false` => second (summarize now)
      boolean shouldLogConsolidationNow = hadAlreadyLoggedConsolidation != null;
      consolidatedPathToWhetherErrorLogged.put(consolidatedPath, shouldLogConsolidationNow);
      String pathLogString = shouldLogConsolidationNow ? (consolidatedPath.toString() + "/...") : path.toString();
      // (empty-string alternative, NOT a space, to avoid a doubled space in the singular message)
      return Optional.of("path" + (shouldLogConsolidationNow ? "s" : "") + " not found: '" + pathLogString + "'");
    } else {
      return Optional.empty();
    }
  }

  /** @return a {@link Path} to consolidate around; default is: {@link Path#getParent()} */
  protected Path calcPathConsolidation(Path path) {
    return path.getParent();
  }
}
/** Exposed for test use: supplies a fresh {@link PathErrorConsolidator} with the default (parent-dir) strategy. */
@VisibleForTesting
static PathErrorConsolidator createPathErrorConsolidator() {
  PathErrorConsolidator consolidator = new PathErrorConsolidator();
  return consolidator;
}
/**
 * Indirection point for resolving the {@link FileSystem} backing {@code fileStatus}; overridable so tests
 * can mock what would otherwise require the `static` (un-mockable) `FileSystem.get()`.
 */
protected FileSystem getSourceFileSystemFromFileStatus(FileStatus fileStatus, Configuration hadoopConfig) throws IOException {
  Path sourcePath = fileStatus.getPath();
  return sourcePath.getFileSystem(hadoopConfig);
}
/** @return descriptor of the source-side dataset, resolved against {@code sourceFs} */
protected DatasetDescriptor getSourceDataset(FileSystem sourceFs) {
  IcebergTable sourceTable = this.srcIcebergTable;
  return sourceTable.getDatasetDescriptor(sourceFs);
}
/** @return descriptor of the destination-side dataset, resolved against {@code targetFs} */
protected DatasetDescriptor getDestinationDataset(FileSystem targetFs) {
  IcebergTable destinationTable = this.destIcebergTable;
  return destinationTable.getDatasetDescriptor(targetFs);
}
/** Builds the {@link PostPublishStep} that performs iceberg registration at the destination after publish. */
private PostPublishStep createPostPublishStep(String dbName, String inputTableName, Properties properties) {
  return new PostPublishStep(getFileSetId(), Maps.newHashMap(),
      new IcebergRegisterStep(dbName, inputTableName, properties), 0);
}
}
| 2,605 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/iceberg/IcebergCatalogFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.iceberg;
import java.io.IOException;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
/**
 * Factory that reflectively instantiates and initializes an {@link IcebergCatalog}.
 */
public class IcebergCatalogFactory {
  /**
   * Instantiates the named {@link IcebergCatalog} impl class and calls `initialize` on it before returning.
   * @param icebergCatalogClassName fully-qualified name of the catalog class to load
   * @throws IOException wrapping any reflective failure (unknown class, no usable constructor, etc.)
   */
  public static IcebergCatalog create(String icebergCatalogClassName, Map<String, String> properties, Configuration configuration) throws IOException {
    try {
      Class<?> clazz = Class.forName(icebergCatalogClassName);
      IcebergCatalog catalog = (IcebergCatalog) GobblinConstructorUtils.invokeConstructor(clazz, icebergCatalogClassName);
      catalog.initialize(properties, configuration);
      return catalog;
    } catch (ReflectiveOperationException roe) {
      throw new IOException(roe);
    }
  }
}
| 2,606 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/iceberg/IcebergTableFileSet.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.iceberg;
import java.io.IOException;
import java.util.Collection;
import org.apache.hadoop.fs.FileSystem;
import org.apache.gobblin.data.management.copy.CopyConfiguration;
import org.apache.gobblin.data.management.copy.CopyEntity;
import org.apache.gobblin.data.management.partition.FileSet;
/**
 * A {@link FileSet} for Iceberg datasets containing information associated with an Iceberg table and generates {@link CopyEntity}
 */
public class IcebergTableFileSet extends FileSet<CopyEntity> {
  private final CopyConfiguration copyConfiguration;
  private final FileSystem targetFs;
  private final IcebergDataset icebergDataset;

  public IcebergTableFileSet(String name, IcebergDataset icebergDataset, FileSystem targetFs, CopyConfiguration configuration) {
    super(name, icebergDataset);
    this.icebergDataset = icebergDataset;
    this.targetFs = targetFs;
    this.copyConfiguration = configuration;
  }

  /** Delegates to the dataset itself to enumerate {@link CopyEntity}s bound for {@code targetFs}. */
  @Override
  protected Collection<CopyEntity> generateCopyEntities() throws IOException {
    return this.icebergDataset.generateCopyEntities(this.targetFs, this.copyConfiguration);
  }
}
| 2,607 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/iceberg/IcebergSnapshotInfo.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.iceberg;
import java.time.Instant;
import java.util.List;
import java.util.Optional;
import java.util.stream.Collectors;
import lombok.Builder;
import lombok.Data;
import com.google.common.collect.Lists;
/**
 * Information about the metadata file and data file paths of a single Iceberg Snapshot.
 */
@Builder(toBuilder = true)
@Data
public class IcebergSnapshotInfo {

  /** Pairs a manifest file's own path with the (data) file paths listed within it. */
  @Data
  public static class ManifestFileInfo {
    private final String manifestFilePath;
    private final List<String> listedFilePaths;
  }

  private final Long snapshotId;
  private final Instant timestamp;
  /** only for the current snapshot, being whom the metadata file 'belongs to'; `isEmpty()` for all other snapshots */
  private final Optional<String> metadataPath;
  private final String manifestListPath;
  private final List<ManifestFileInfo> manifestFiles;

  /** @return every manifest file's own path (one per {@link ManifestFileInfo}) */
  public List<String> getManifestFilePaths() {
    return manifestFiles.stream().map(ManifestFileInfo::getManifestFilePath).collect(Collectors.toList());
  }

  /** @return every data file path listed across all manifest files */
  public List<String> getAllDataFilePaths() {
    return manifestFiles.stream().map(ManifestFileInfo::getListedFilePaths).flatMap(List::stream).collect(Collectors.toList());
  }

  /** @return the `manifestListPath` and `metadataPath`, if present */
  public List<String> getSnapshotApexPaths() {
    // `orElseGet`, to avoid eagerly allocating the fallback list even when `metadataPath` is present
    List<String> result = metadataPath.map(Lists::newArrayList).orElseGet(Lists::newArrayList);
    result.add(manifestListPath);
    return result;
  }

  /** @return apex paths, then manifest file paths, then data file paths (in that order) */
  public List<String> getAllPaths() {
    List<String> result = getSnapshotApexPaths();
    result.addAll(getManifestFilePaths());
    result.addAll(getAllDataFilePaths());
    return result;
  }
}
| 2,608 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/iceberg/IcebergRegisterStep.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.iceberg;
import java.io.IOException;
import java.util.Properties;
import org.apache.iceberg.TableMetadata;
import lombok.AllArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.commit.CommitStep;
/**
 * {@link CommitStep} to perform Iceberg registration.
 */
@Slf4j
@AllArgsConstructor
public class IcebergRegisterStep implements CommitStep {

  private final String dbName;
  private final String tblName;
  private final Properties properties;

  /** Never reports completion: registration is always (re-)attempted on execute. */
  @Override
  public boolean isCompleted() throws IOException {
    return false;
  }

  /**
   * Opens the table via both source and destination catalogs, then registers the source's current
   * metadata at the destination (passing along the destination's prior metadata, when it exists).
   */
  @Override
  public void execute() throws IOException {
    IcebergTable srcTable =
        IcebergDatasetFinder.createIcebergCatalog(this.properties, IcebergDatasetFinder.CatalogLocation.SOURCE)
            .openTable(this.dbName, this.tblName);
    IcebergTable destTable =
        IcebergDatasetFinder.createIcebergCatalog(this.properties, IcebergDatasetFinder.CatalogLocation.DESTINATION)
            .openTable(this.dbName, this.tblName);
    TableMetadata priorDestMetadata = null;
    try {
      priorDestMetadata = destTable.accessTableMetadata();
    } catch (IcebergTable.TableNotFoundException tnfe) {
      // tolerable: the table may not yet exist at the destination; registration below handles that
      log.warn("Destination TableMetadata doesn't exist because: ", tnfe);
    }
    destTable.registerIcebergTable(srcTable.accessTableMetadata(), priorDestMetadata);
  }

  @Override
  public String toString() {
    return String.format("Registering Iceberg Table: {%s}.{%s} ", this.dbName, this.tblName);
  }
}
| 2,609 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/watermark/FullPathCopyableFileWatermarkGenerator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.watermark;
import java.io.IOException;
import com.google.common.base.Optional;
import org.apache.gobblin.data.management.copy.CopyableFile;
import org.apache.gobblin.source.extractor.ComparableWatermark;
import org.apache.gobblin.source.extractor.WatermarkInterval;
/**
 * Implementation of {@link CopyableFileWatermarkGenerator} that generates {@link StringWatermark} based on {@link CopyableFile}'s full path.
 */
public class FullPathCopyableFileWatermarkGenerator implements CopyableFileWatermarkGenerator {

  /** Emits an interval whose low and high watermarks are both the file's full path string. */
  @Override
  public Optional<WatermarkInterval> generateWatermarkIntervalForCopyableFile(CopyableFile copyableFile)
      throws IOException {
    String fullPath = copyableFile.getFileStatus().getPath().toString();
    StringWatermark watermark = new StringWatermark(fullPath);
    return Optional.of(new WatermarkInterval(watermark, watermark));
  }

  @Override
  public Class<? extends ComparableWatermark> getWatermarkClass() {
    return StringWatermark.class;
  }
}
| 2,610 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/watermark/CopyableFileWatermarkHelper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.watermark;
import java.io.IOException;
import com.google.common.base.Optional;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.data.management.copy.CopyConfiguration;
import org.apache.gobblin.data.management.copy.CopyableFile;
import org.apache.gobblin.source.extractor.WatermarkInterval;
/**
 * Helper class for {@link CopyableFile} based watermark.
 */
public class CopyableFileWatermarkHelper {
  /**
   * Watermark creator for workunits created from CopyEntities.
   */
  public static final String WATERMARK_CREATOR = CopyConfiguration.COPY_PREFIX + ".watermarkCreator";

  /**
   * Get Optional {@link CopyableFileWatermarkGenerator} from {@link State}.
   * Reflectively instantiates the class named by {@link #WATERMARK_CREATOR}, when configured.
   * @throws IOException (wrapping the reflective failure) if the configured class cannot be loaded or instantiated
   */
  public static Optional<CopyableFileWatermarkGenerator> getCopyableFileWatermarkGenerator(State state)
      throws IOException {
    if (!state.contains(WATERMARK_CREATOR)) {
      return Optional.absent();
    }
    try {
      Class<?> watermarkCreatorClass = Class.forName(state.getProp(WATERMARK_CREATOR));
      // `getDeclaredConstructor().newInstance()` rather than the deprecated `Class.newInstance()`,
      // which propagates any checked exception thrown by the constructor undeclared
      return Optional.of(
          (CopyableFileWatermarkGenerator) watermarkCreatorClass.getDeclaredConstructor().newInstance());
    } catch (ReflectiveOperationException e) {
      // preserve the cause, so the underlying reflective failure remains diagnosable
      throw new IOException("Failed to instantiate watermarkCreator.", e);
    }
  }

  /**
   * Return Optional {@link WatermarkInterval} for {@link CopyableFile} using {@link CopyableFileWatermarkGenerator}.
   */
  public static Optional<WatermarkInterval> getCopyableFileWatermark(CopyableFile copyableFile,
      Optional<CopyableFileWatermarkGenerator> watermarkGenerator)
      throws IOException {
    if (!watermarkGenerator.isPresent()) {
      return Optional.absent();
    }
    return watermarkGenerator.get().generateWatermarkIntervalForCopyableFile(copyableFile);
  }
}
| 2,611 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/watermark/CopyableFileWatermarkGenerator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.watermark;
import java.io.IOException;
import com.google.common.base.Optional;
import org.apache.gobblin.data.management.copy.CopyableFile;
import org.apache.gobblin.source.extractor.ComparableWatermark;
import org.apache.gobblin.source.extractor.WatermarkInterval;
/**
 * Watermark generator for {@link CopyableFile}.
 */
public interface CopyableFileWatermarkGenerator {

  /**
   * Generate optional {@link WatermarkInterval} for a given {@link CopyableFile}.
   */
  Optional<WatermarkInterval> generateWatermarkIntervalForCopyableFile(CopyableFile copyableFile) throws IOException;

  /**
   * @return the implementation class of {@link ComparableWatermark}. It needs to be comparable for tracking/filtering {@link CopyableFile}s.
   */
  Class<? extends ComparableWatermark> getWatermarkClass();
}
| 2,612 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/watermark/StringWatermark.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.watermark;
import com.google.common.base.Preconditions;
import com.google.gson.JsonElement;
import org.apache.gobblin.source.extractor.ComparableWatermark;
import org.apache.gobblin.source.extractor.Watermark;
import org.apache.gobblin.source.extractor.WatermarkSerializerHelper;
import lombok.AllArgsConstructor;
import lombok.EqualsAndHashCode;
import lombok.Getter;
/**
 * String based {@link ComparableWatermark} implementation.
 */
@AllArgsConstructor
@EqualsAndHashCode
public class StringWatermark implements ComparableWatermark {
  @Getter
  String value;

  /** Lexicographic comparison of underlying values; `other` must itself be a {@link StringWatermark}. */
  @Override
  public int compareTo(ComparableWatermark other) {
    Preconditions.checkArgument(other instanceof StringWatermark);
    StringWatermark that = (StringWatermark) other;
    return this.value.compareTo(that.getValue());
  }

  @Override
  public JsonElement toJson() {
    return WatermarkSerializerHelper.convertWatermarkToJson(this);
  }

  /** Percent completion is not meaningfully computable for string watermarks; always reports 0. */
  @Override
  public short calculatePercentCompletion(Watermark lowWatermark, Watermark highWatermark) {
    return 0;
  }
}
| 2,613 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/writer/FileAwareInputStreamDataWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.writer;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URI;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import com.codahale.metrics.Meter;
import com.google.common.base.Optional;
import com.google.common.base.Predicate;
import com.google.common.base.Strings;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.broker.EmptyKey;
import org.apache.gobblin.broker.gobblin_scopes.GobblinScopeTypes;
import org.apache.gobblin.broker.iface.NotConfiguredException;
import org.apache.gobblin.broker.iface.SharedResourcesBroker;
import org.apache.gobblin.commit.SpeculativeAttemptAwareConstruct;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.crypto.EncryptionConfigParser;
import org.apache.gobblin.crypto.EncryptionFactory;
import org.apache.gobblin.data.management.copy.CopyConfiguration;
import org.apache.gobblin.data.management.copy.CopyEntity;
import org.apache.gobblin.data.management.copy.CopySource;
import org.apache.gobblin.data.management.copy.CopyableDatasetMetadata;
import org.apache.gobblin.data.management.copy.CopyableFile;
import org.apache.gobblin.data.management.copy.FileAwareInputStream;
import org.apache.gobblin.data.management.copy.OwnerAndPermission;
import org.apache.gobblin.data.management.copy.recovery.RecoveryHelper;
import org.apache.gobblin.data.management.copy.splitter.DistcpFileSplitter;
import org.apache.gobblin.instrumented.writer.InstrumentedDataWriter;
import org.apache.gobblin.state.ConstructState;
import org.apache.gobblin.util.FileListUtils;
import org.apache.gobblin.util.FinalState;
import org.apache.gobblin.util.ForkOperatorUtils;
import org.apache.gobblin.util.PathUtils;
import org.apache.gobblin.util.WriterUtils;
import org.apache.gobblin.util.io.StreamCopier;
import org.apache.gobblin.util.io.StreamThrottler;
import org.apache.gobblin.util.io.ThrottledInputStream;
import org.apache.gobblin.writer.DataWriter;
/**
* A {@link DataWriter} to write {@link FileAwareInputStream}
*/
@Slf4j
public class FileAwareInputStreamDataWriter extends InstrumentedDataWriter<FileAwareInputStream> implements FinalState, SpeculativeAttemptAwareConstruct {
public static final String GOBBLIN_COPY_BYTES_COPIED_METER = "gobblin.copy.bytesCopiedMeter";
public static final String GOBBLIN_COPY_CHECK_FILESIZE = "gobblin.copy.checkFileSize";
// setting GOBBLIN_COPY_CHECK_FILESIZE to true may result in failures because the calculation of
// expected bytes to be copied and actual bytes copied may have bugs
public static final boolean DEFAULT_GOBBLIN_COPY_CHECK_FILESIZE = false;
public static final String GOBBLIN_COPY_TASK_OVERWRITE_ON_COMMIT = "gobblin.copy.task.overwrite.on.commit";
public static final boolean DEFAULT_GOBBLIN_COPY_TASK_OVERWRITE_ON_COMMIT = false;
protected final AtomicLong bytesWritten = new AtomicLong();
protected final AtomicLong filesWritten = new AtomicLong();
protected final WorkUnitState state;
protected final FileSystem fs;
protected final Path stagingDir;
protected final Path outputDir;
private final Map<String, Object> encryptionConfig;
protected CopyableDatasetMetadata copyableDatasetMetadata;
protected final RecoveryHelper recoveryHelper;
protected final SharedResourcesBroker<GobblinScopeTypes> taskBroker;
protected final int bufferSize;
private final boolean checkFileSize;
private final Options.Rename renameOptions;
private final URI uri;
private final Configuration conf;
protected final Meter copySpeedMeter;
protected final Optional<String> writerAttemptIdOptional;
/**
* The copyable file in the WorkUnit might be modified by converters (e.g. output extensions added / removed).
* This field is set when {@link #write} is called, and points to the actual, possibly modified {@link org.apache.gobblin.data.management.copy.CopyEntity}
* that was written by this writer.
*/
protected Optional<CopyableFile> actualProcessedCopyableFile;
/**
 * Convenience constructor that leaves {@link FileSystem} resolution to configuration: delegates with a
 * `null` fileSystem, so the main constructor derives one from the configured writer FS URI.
 */
public FileAwareInputStreamDataWriter(State state, int numBranches, int branchId, String writerAttemptId)
    throws IOException {
  this(state, null, numBranches, branchId, writerAttemptId);
}
public FileAwareInputStreamDataWriter(State state, FileSystem fileSystem, int numBranches, int branchId, String writerAttemptId)
throws IOException {
super(state);
if (numBranches > 1) {
throw new IOException("Distcp can only operate with one branch.");
}
if (!(state instanceof WorkUnitState)) {
throw new RuntimeException(String.format("Distcp requires a %s on construction.", WorkUnitState.class.getSimpleName()));
}
this.state = (WorkUnitState) state;
this.taskBroker = this.state.getTaskBroker();
this.writerAttemptIdOptional = Optional.fromNullable(writerAttemptId);
String uriStr = this.state.getProp(
ForkOperatorUtils.getPropertyNameForBranch(ConfigurationKeys.WRITER_FILE_SYSTEM_URI, numBranches, branchId),
ConfigurationKeys.LOCAL_FS_URI);
this.conf = WriterUtils.getFsConfiguration(state);
this.uri = URI.create(uriStr);
if (fileSystem != null) {
this.fs = fileSystem;
} else {
this.fs = FileSystem.get(uri, conf);
}
if (state.getPropAsBoolean(ConfigurationKeys.USER_DEFINED_STAGING_DIR_FLAG,false)) {
this.stagingDir = new Path(state.getProp(ConfigurationKeys.USER_DEFINED_STATIC_STAGING_DIR));
} else {
this.stagingDir = this.writerAttemptIdOptional.isPresent() ? WriterUtils.getWriterStagingDir(state, numBranches, branchId, this.writerAttemptIdOptional.get())
: WriterUtils.getWriterStagingDir(state, numBranches, branchId);
}
this.copyableDatasetMetadata =
CopyableDatasetMetadata.deserialize(state.getProp(CopySource.SERIALIZED_COPYABLE_DATASET));
this.outputDir = getOutputDir(state);
this.recoveryHelper = new RecoveryHelper(this.fs, state);
this.actualProcessedCopyableFile = Optional.absent();
// remove the old metric which counts how many bytes are copied, because in case of retries, this can give incorrect value
if (getMetricContext().getMetrics().containsKey(GOBBLIN_COPY_BYTES_COPIED_METER)) {
getMetricContext().remove(GOBBLIN_COPY_BYTES_COPIED_METER);
}
this.copySpeedMeter = getMetricContext().meter(GOBBLIN_COPY_BYTES_COPIED_METER);
this.bufferSize = state.getPropAsInt(CopyConfiguration.BUFFER_SIZE, StreamCopier.DEFAULT_BUFFER_SIZE);
this.encryptionConfig = EncryptionConfigParser
.getConfigForBranch(EncryptionConfigParser.EntityType.WRITER, this.state, numBranches, branchId);
this.checkFileSize = state.getPropAsBoolean(GOBBLIN_COPY_CHECK_FILESIZE, DEFAULT_GOBBLIN_COPY_CHECK_FILESIZE);
boolean taskOverwriteOnCommit = state.getPropAsBoolean(GOBBLIN_COPY_TASK_OVERWRITE_ON_COMMIT, DEFAULT_GOBBLIN_COPY_TASK_OVERWRITE_ON_COMMIT);
if (taskOverwriteOnCommit) {
this.renameOptions = Options.Rename.OVERWRITE;
} else {
this.renameOptions = Options.Rename.NONE;
}
}
public FileAwareInputStreamDataWriter(State state, int numBranches, int branchId)
throws IOException {
this(state, numBranches, branchId, null);
}
@Override
public final void writeImpl(FileAwareInputStream fileAwareInputStream)
throws IOException {
CopyableFile copyableFile = fileAwareInputStream.getFile();
if (encryptionConfig != null) {
copyableFile.setDestination(PathUtils.addExtension(copyableFile.getDestination(),
"." + EncryptionConfigParser.getEncryptionType(encryptionConfig)));
}
Path stagingFile = getStagingFilePath(copyableFile);
if (this.actualProcessedCopyableFile.isPresent()) {
throw new IOException(this.getClass().getCanonicalName() + " can only process one file and cannot be reused.");
}
this.fs.mkdirs(stagingFile.getParent());
writeImpl(fileAwareInputStream.getInputStream(), stagingFile, copyableFile, fileAwareInputStream);
this.actualProcessedCopyableFile = Optional.of(copyableFile);
this.filesWritten.incrementAndGet();
}
/**
* Write the contents of input stream into staging path.
*
* <p>
* WriteAt indicates the path where the contents of the input stream should be written. When this method is called,
* the path writeAt.getParent() will exist already, but the path writeAt will not exist. When this method is returned,
* the path writeAt must exist. Any data written to any location other than writeAt or a descendant of writeAt
* will be ignored.
* </p>
*
* @param inputStream {@link FSDataInputStream} whose contents should be written to staging path.
* @param writeAt {@link Path} at which contents should be written.
* @param copyableFile {@link org.apache.gobblin.data.management.copy.CopyEntity} that generated this copy operation.
* @param record The actual {@link FileAwareInputStream} passed to the write method.
* @throws IOException
*/
protected void writeImpl(InputStream inputStream, Path writeAt, CopyableFile copyableFile,
FileAwareInputStream record) throws IOException {
final short replication = this.state.getPropAsShort(ConfigurationKeys.WRITER_FILE_REPLICATION_FACTOR,
copyableFile.getReplication(this.fs));
final long blockSize = copyableFile.getBlockSize(this.fs);
final long fileSize = copyableFile.getFileStatus().getLen();
long expectedBytes = fileSize;
Long maxBytes = null;
// Whether writer must write EXACTLY maxBytes.
boolean mustMatchMaxBytes = false;
if (record.getSplit().isPresent()) {
maxBytes = record.getSplit().get().getHighPosition() - record.getSplit().get().getLowPosition();
if (record.getSplit().get().isLastSplit()) {
expectedBytes = fileSize % blockSize;
mustMatchMaxBytes = false;
} else {
expectedBytes = maxBytes;
mustMatchMaxBytes = true;
}
}
Predicate<FileStatus> fileStatusAttributesFilter = new Predicate<FileStatus>() {
@Override
public boolean apply(FileStatus input) {
return input.getReplication() == replication && input.getBlockSize() == blockSize;
}
};
Optional<FileStatus> persistedFile =
this.recoveryHelper.findPersistedFile(this.state, copyableFile, fileStatusAttributesFilter);
if (persistedFile.isPresent()) {
log.info(String.format("Recovering persisted file %s to %s.", persistedFile.get().getPath(), writeAt));
this.fs.rename(persistedFile.get().getPath(), writeAt);
} else {
// Copy empty directories
if (copyableFile.getFileStatus().isDirectory()) {
this.fs.mkdirs(writeAt);
return;
}
OutputStream os =
this.fs.create(writeAt, true, this.fs.getConf().getInt("io.file.buffer.size", 4096), replication, blockSize);
if (encryptionConfig != null) {
os = EncryptionFactory.buildStreamCryptoProvider(encryptionConfig).encodeOutputStream(os);
}
try {
FileSystem defaultFS = FileSystem.get(new Configuration());
StreamThrottler<GobblinScopeTypes> throttler =
this.taskBroker.getSharedResource(new StreamThrottler.Factory<GobblinScopeTypes>(), new EmptyKey());
ThrottledInputStream throttledInputStream = throttler.throttleInputStream().inputStream(inputStream)
.sourceURI(copyableFile.getOrigin().getPath().makeQualified(defaultFS.getUri(), defaultFS.getWorkingDirectory()).toUri())
.targetURI(this.fs.makeQualified(writeAt).toUri()).build();
StreamCopier copier = new StreamCopier(throttledInputStream, os, maxBytes).withBufferSize(this.bufferSize);
log.info("File {}: Starting copy", copyableFile.getOrigin().getPath());
if (isInstrumentationEnabled()) {
copier.withCopySpeedMeter(this.copySpeedMeter);
}
long numBytes = copier.copy();
if ((this.checkFileSize || mustMatchMaxBytes) && numBytes != expectedBytes) {
throw new IOException(String.format("Incomplete write: expected %d, wrote %d bytes.",
expectedBytes, numBytes));
}
this.bytesWritten.addAndGet(numBytes);
if (isInstrumentationEnabled()) {
log.info("File {}: copied {} bytes, average rate: {} B/s", copyableFile.getOrigin().getPath(),
this.copySpeedMeter.getCount(), this.copySpeedMeter.getMeanRate());
} else {
log.info("File {} copied.", copyableFile.getOrigin().getPath());
}
} catch (NotConfiguredException nce) {
log.warn("Broker error. Some features of stream copier may not be available.", nce);
} finally {
os.close();
log.info("OutputStream for file {} is closed.", writeAt);
inputStream.close();
}
}
}
/**
* Sets the owner/group and permission for the file in the task staging directory
*/
protected void setFilePermissions(CopyableFile file)
throws IOException {
setRecursivePermission(getStagingFilePath(file), file.getDestinationOwnerAndPermission());
}
protected Path getStagingFilePath(CopyableFile file) {
if (DistcpFileSplitter.isSplitWorkUnit(this.state)) {
return new Path(this.stagingDir, DistcpFileSplitter.getSplit(this.state).get().getPartName());
}
return new Path(this.stagingDir, file.getDestination().getName());
}
protected static Path getPartitionOutputRoot(Path outputDir, CopyEntity.DatasetAndPartition datasetAndPartition) {
return new Path(outputDir, datasetAndPartition.identifier());
}
public static Path getOutputFilePath(CopyableFile file, Path outputDir,
CopyEntity.DatasetAndPartition datasetAndPartition) {
Path destinationWithoutSchemeAndAuthority = PathUtils.getPathWithoutSchemeAndAuthority(file.getDestination());
return new Path(getPartitionOutputRoot(outputDir, datasetAndPartition),
PathUtils.withoutLeadingSeparator(destinationWithoutSchemeAndAuthority));
}
public static Path getSplitOutputFilePath(CopyableFile file, Path outputDir,
CopyableFile.DatasetAndPartition datasetAndPartition, State workUnit) {
if (DistcpFileSplitter.isSplitWorkUnit(workUnit)) {
return new Path(getOutputFilePath(file, outputDir, datasetAndPartition).getParent(),
DistcpFileSplitter.getSplit(workUnit).get().getPartName());
} else {
return getOutputFilePath(file, outputDir, datasetAndPartition);
}
}
public static Path getOutputDir(State state) {
return new Path(
state.getProp(ForkOperatorUtils.getPropertyNameForBranch(ConfigurationKeys.WRITER_OUTPUT_DIR, 1, 0)));
}
/**
* Sets the {@link FsPermission}, owner, group for the path passed. It will not throw exceptions, if operations
* cannot be executed, will warn and continue.
*/
public static void safeSetPathPermission(FileSystem fs, FileStatus file, OwnerAndPermission ownerAndPermission) {
Path path = file.getPath();
OwnerAndPermission targetOwnerAndPermission = setOwnerExecuteBitIfDirectory(file, ownerAndPermission);
try {
if (targetOwnerAndPermission.getFsPermission() != null) {
fs.setPermission(path, targetOwnerAndPermission.getFsPermission());
}
if (!targetOwnerAndPermission.getAclEntries().isEmpty()) {
// use modify acls instead of setAcl since latter requires all three acl entry types: user, group and others
// while overwriting the acls for a given path. If anyone is absent it fails acl transformation validation.
fs.modifyAclEntries(path, targetOwnerAndPermission.getAclEntries());
}
} catch (IOException ioe) {
log.warn("Failed to set permission for directory " + path, ioe);
}
String owner = Strings.isNullOrEmpty(targetOwnerAndPermission.getOwner()) ? null : targetOwnerAndPermission.getOwner();
String group = Strings.isNullOrEmpty(targetOwnerAndPermission.getGroup()) ? null : targetOwnerAndPermission.getGroup();
try {
if (owner != null || group != null) {
fs.setOwner(path, owner, group);
}
} catch (IOException ioe) {
log.warn("Failed to set owner and/or group for path " + path + " to " + owner + ":" + group, ioe);
}
}
/**
* Sets the {@link FsPermission}, owner, group for the path passed. And recursively to all directories and files under
* it.
*/
private void setRecursivePermission(Path path, OwnerAndPermission ownerAndPermission)
throws IOException {
List<FileStatus> files = FileListUtils.listPathsRecursively(this.fs, path, FileListUtils.NO_OP_PATH_FILTER);
// Set permissions bottom up. Permissions are set to files first and then directories
Collections.reverse(files);
for (FileStatus file : files) {
safeSetPathPermission(this.fs, file, ownerAndPermission);
}
}
/**
* The method makes sure it always grants execute permissions for an owner if the <code>file</code> passed is a
* directory. The publisher needs it to publish it to the final directory and list files under this directory.
*/
private static OwnerAndPermission setOwnerExecuteBitIfDirectory(FileStatus file,
OwnerAndPermission ownerAndPermission) {
if (ownerAndPermission.getFsPermission() == null) {
return ownerAndPermission;
}
if (!file.isDir()) {
return ownerAndPermission;
}
return new OwnerAndPermission(ownerAndPermission.getOwner(), ownerAndPermission.getGroup(),
addExecutePermissionToOwner(ownerAndPermission.getFsPermission()), ownerAndPermission.getAclEntries());
}
static FsPermission addExecutePermissionToOwner(FsPermission fsPermission) {
FsAction newOwnerAction = fsPermission.getUserAction().or(FsAction.EXECUTE);
return new FsPermission(newOwnerAction, fsPermission.getGroupAction(), fsPermission.getOtherAction(), fsPermission.getStickyBit());
}
@Override
public long recordsWritten() {
return this.filesWritten.get();
}
@Override
public long bytesWritten()
throws IOException {
return this.bytesWritten.get();
}
/**
* Moves the file from task staging to task output. Each task has its own staging directory but all the tasks share
* the same task output directory.
*
* {@inheritDoc}
*
* @see DataWriter#commit()
*/
@Override
public void commit()
throws IOException {
if (!this.actualProcessedCopyableFile.isPresent()) {
return;
}
CopyableFile copyableFile = this.actualProcessedCopyableFile.get();
Path stagingFilePath = getStagingFilePath(copyableFile);
Path outputFilePath = getSplitOutputFilePath(copyableFile, this.outputDir,
copyableFile.getDatasetAndPartition(this.copyableDatasetMetadata), this.state);
log.info(String.format("Committing data from %s to %s", stagingFilePath, outputFilePath));
try {
setFilePermissions(copyableFile);
Iterator<OwnerAndPermission> ancestorOwnerAndPermissionIt =
copyableFile.getAncestorsOwnerAndPermission() == null ? Collections.emptyIterator()
: copyableFile.getAncestorsOwnerAndPermission().iterator();
ensureDirectoryExists(this.fs, outputFilePath.getParent(), ancestorOwnerAndPermissionIt);
if (copyableFile.getFileStatus().isDirectory() && this.fs.exists(outputFilePath)) {
log.info(String.format("CopyableFile %s is a directory which already exists at %s - skipping overwrite; if necessary, publisher will sync metadata",
stagingFilePath, outputFilePath));
} else {
// Do not store the FileContext after doing to rename because FileContexts are not cached and a new object
// is created for every task's commit
FileContext.getFileContext(this.uri, this.conf).rename(stagingFilePath, outputFilePath, renameOptions);
}
} catch (IOException ioe) {
log.error("Could not commit file {}.", outputFilePath);
// persist file
this.recoveryHelper.persistFile(this.state, copyableFile, stagingFilePath);
throw ioe;
} finally {
try {
this.fs.delete(this.stagingDir, true);
} catch (IOException ioe) {
log.warn("Failed to delete staging path at " + this.stagingDir);
}
}
}
private void ensureDirectoryExists(FileSystem fs, Path path, Iterator<OwnerAndPermission> ownerAndPermissionIterator)
throws IOException {
if (fs.exists(path)) {
return;
}
if (ownerAndPermissionIterator.hasNext()) {
OwnerAndPermission ownerAndPermission = ownerAndPermissionIterator.next();
if (path.getParent() != null) {
ensureDirectoryExists(fs, path.getParent(), ownerAndPermissionIterator);
}
if (!fs.mkdirs(path)) {
// fs.mkdirs returns false if path already existed. Do not overwrite permissions
return;
}
if (ownerAndPermission.getFsPermission() != null) {
log.debug("Applying permissions {} to path {}.", ownerAndPermission.getFsPermission(), path);
fs.setPermission(path, addExecutePermissionToOwner(ownerAndPermission.getFsPermission()));
}
String group = ownerAndPermission.getGroup();
String owner = ownerAndPermission.getOwner();
List<AclEntry> aclEntries = ownerAndPermission.getAclEntries();
try {
if (group != null || owner != null) {
log.debug("Applying owner {} and group {} to path {}.", owner, group, path);
fs.setOwner(path, owner, group);
}
} catch (IOException ioe) {
log.warn("Failed to set owner and/or group for path " + path + " to " + owner + ":" + group, ioe);
}
if (!aclEntries.isEmpty()) {
// use modify acls instead of setAcl since latter requires all three acl entry types: user, group and others
// while overwriting the acls for a given path. If anyone is absent it fails acl transformation validation.
fs.modifyAclEntries(path, aclEntries);
}
} else {
fs.mkdirs(path);
}
}
@Override
public void cleanup()
throws IOException {
// Do nothing
}
@Override
public State getFinalState() {
State state = new State();
if (this.actualProcessedCopyableFile.isPresent()) {
CopySource.serializeCopyEntity(state, this.actualProcessedCopyableFile.get());
}
ConstructState constructState = new ConstructState();
constructState.addOverwriteProperties(state);
return constructState;
}
@Override
public boolean isSpeculativeAttemptSafe() {
return this.writerAttemptIdOptional.isPresent() && this.getClass() == FileAwareInputStreamDataWriter.class;
}
}
| 2,614 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/writer/TarArchiveInputStreamDataWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.writer;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.data.management.copy.CopyableFile;
import org.apache.gobblin.data.management.copy.FileAwareInputStream;
import org.apache.gobblin.util.FileUtils;
import org.apache.gobblin.util.io.StreamCopier;
import java.io.IOException;
import java.io.InputStream;
import java.nio.channels.Channels;
import java.nio.channels.ReadableByteChannel;
import java.nio.channels.WritableByteChannel;
import java.util.zip.GZIPInputStream;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
/**
 * An {@link FileAwareInputStreamDataWriter} to write archived {@link InputStream}s. The {@link #write(FileAwareInputStream)}
 * method receives a {@link GZIPInputStream} and converts it to a {@link TarArchiveInputStream}. Each
 * {@link TarArchiveEntry} is then written to the {@link FileSystem}.
 */
@Slf4j
public class TarArchiveInputStreamDataWriter extends FileAwareInputStreamDataWriter {

  public TarArchiveInputStreamDataWriter(State state, int numBranches, int branchId) throws IOException {
    super(state, numBranches, branchId);
  }

  /**
   * Untars the passed in {@link FileAwareInputStream} to the task's staging directory. Uses the name of the root
   * {@link TarArchiveEntry} in the stream as the directory name for the untarred file. The method also commits the data
   * by moving the file from staging to output directory.
   *
   * @see org.apache.gobblin.data.management.copy.writer.FileAwareInputStreamDataWriter#write(org.apache.gobblin.data.management.copy.FileAwareInputStream)
   */
  @Override
  public void writeImpl(InputStream inputStream, Path writeAt, CopyableFile copyableFile, FileAwareInputStream record)
      throws IOException {
    this.closer.register(inputStream);
    TarArchiveInputStream tarIn = new TarArchiveInputStream(inputStream);
    // A single channel over the tar stream; reads stop at the end of the current entry, so the
    // same channel is reused for every entry in the loop below.
    final ReadableByteChannel inputChannel = Channels.newChannel(tarIn);
    TarArchiveEntry tarEntry;

    // flush the first entry in the tar, which is just the root directory
    tarEntry = tarIn.getNextTarEntry();
    // Root entry name with path separators stripped; used to rewrite each entry's path below.
    String tarEntryRootName = StringUtils.remove(tarEntry.getName(), Path.SEPARATOR);

    log.info("Unarchiving at " + writeAt);

    try {
      while ((tarEntry = tarIn.getNextTarEntry()) != null) {

        // the API tarEntry.getName() is misleading, it is actually the path of the tarEntry in the tar file
        String newTarEntryPath = tarEntry.getName().replace(tarEntryRootName, writeAt.getName());
        Path tarEntryStagingPath = new Path(writeAt.getParent(), newTarEntryPath);
        // Guard against entries that try to escape the staging directory ("zip slip").
        if (!FileUtils.isSubPath(writeAt.getParent(), tarEntryStagingPath)) {
          throw new IOException(String.format("Extracted file: %s is trying to write outside of output directory: %s",
              tarEntryStagingPath, writeAt.getParent()));
        }

        if (tarEntry.isDirectory() && !this.fs.exists(tarEntryStagingPath)) {
          this.fs.mkdirs(tarEntryStagingPath);
        } else if (!tarEntry.isDirectory()) {
          FSDataOutputStream out = this.fs.create(tarEntryStagingPath, true);
          final WritableByteChannel outputChannel = Channels.newChannel(out);
          try {
            StreamCopier copier = new StreamCopier(inputChannel, outputChannel);
            if (isInstrumentationEnabled()) {
              copier.withCopySpeedMeter(this.copySpeedMeter);
            }
            this.bytesWritten.addAndGet(copier.copy());
            if (isInstrumentationEnabled()) {
              log.info("File {}: copied {} bytes, average rate: {} B/s", copyableFile.getOrigin().getPath(), this.copySpeedMeter.getCount(), this.copySpeedMeter.getMeanRate());
            } else {
              log.info("File {} copied.", copyableFile.getOrigin().getPath());
            }
          } finally {
            out.close();
            outputChannel.close();
          }
        }
      }
    } finally {
      tarIn.close();
      inputChannel.close();
      inputStream.close();
    }
  }
}
| 2,615 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/writer/TarArchiveInputStreamDataWriterBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.writer;
import org.apache.gobblin.data.management.copy.FileAwareInputStream;
import org.apache.gobblin.writer.DataWriter;
import org.apache.gobblin.writer.DataWriterBuilder;
import java.io.IOException;
/**
 * A {@link DataWriterBuilder} for {@link TarArchiveInputStreamDataWriter}
 */
public class TarArchiveInputStreamDataWriterBuilder extends FileAwareInputStreamDataWriterBuilder {

  /**
   * Builds a {@link TarArchiveInputStreamDataWriter}; staging/output path setup is inherited
   * from {@link FileAwareInputStreamDataWriterBuilder#build()}.
   */
  @Override
  protected DataWriter<FileAwareInputStream> buildWriter() throws IOException {
    return new TarArchiveInputStreamDataWriter(this.destination.getProperties(), this.branches, this.branch);
  }
}
| 2,616 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/writer/FileAwareInputStreamDataWriterBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.writer;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.data.management.copy.FileAwareInputStream;
import org.apache.gobblin.writer.DataWriter;
import org.apache.gobblin.writer.DataWriterBuilder;
import java.io.IOException;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.fs.Path;
/**
 * A {@link DataWriterBuilder} for {@link FileAwareInputStreamDataWriter}
 */
public class FileAwareInputStreamDataWriterBuilder extends DataWriterBuilder<String, FileAwareInputStream> {

  @Override
  public final DataWriter<FileAwareInputStream> build() throws IOException {
    State destinationState = this.destination.getProperties();
    setJobSpecificOutputPaths(destinationState);
    // Each writer/mapper gets its own task-staging directory
    destinationState.setProp(ConfigurationKeys.WRITER_FILE_PATH, this.writerId);
    return buildWriter();
  }

  /** Instantiates the actual writer; subclasses override to supply specialized writers. */
  protected DataWriter<FileAwareInputStream> buildWriter() throws IOException {
    return new FileAwareInputStreamDataWriter(this.destination.getProperties(), this.branches, this.branch,
        this.writerAttemptId);
  }

  /**
   * Scopes the task-staging and task-output directories to the current job id, so that uncleaned
   * data from a previous execution cannot corrupt the data published by this execution.
   */
  public synchronized static void setJobSpecificOutputPaths(State state) {
    String jobId = state.getProp(ConfigurationKeys.JOB_ID_KEY);
    String stagingDir = state.getProp(ConfigurationKeys.WRITER_STAGING_DIR);
    // Another task may already have appended the job id; only do it once.
    if (!StringUtils.containsIgnoreCase(stagingDir, jobId)) {
      state.setProp(ConfigurationKeys.WRITER_STAGING_DIR, new Path(stagingDir, jobId));
      state.setProp(ConfigurationKeys.WRITER_OUTPUT_DIR,
          new Path(state.getProp(ConfigurationKeys.WRITER_OUTPUT_DIR), jobId));
    }
  }
}
| 2,617 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/hive/HiveFileSet.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.hive;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.gobblin.data.management.copy.CopyEntity;
import org.apache.gobblin.data.management.partition.FileSet;
import lombok.Getter;
/**
 * A {@link FileSet} of {@link CopyEntity}s backed by a Hive dataset. Exposes (via Lombok
 * {@code @Getter}) the Hive {@link Table} and the owning {@link HiveDataset}.
 */
@Getter
public abstract class HiveFileSet extends FileSet<CopyEntity> {

  private final Table table;
  private final HiveDataset hiveDataset;

  public HiveFileSet(String name, HiveDataset dataset) {
    super(name, dataset);
    this.hiveDataset = dataset;
    this.table = dataset.getTable();
  }
}
| 2,618 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/hive/PathBasedPartitionFilter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.hive;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.hadoop.hive.metastore.api.Partition;
/**
 * One simple implementation of {@link HivePartitionExtendedFilter}: looks at each partition's
 * storage location and keeps the partition iff the location matches the configured regex.
 */
public class PathBasedPartitionFilter implements HivePartitionExtendedFilter {

  private final String filterRegex;
  // Compiled once up front so accept() does not re-compile the regex for every partition.
  private final Pattern pattern;

  public PathBasedPartitionFilter(String filterRegex) {
    this.filterRegex = filterRegex;
    this.pattern = Pattern.compile(filterRegex);
  }

  /**
   * Accepts (keeps) a partition whose SD location contains a match of the filter regex.
   * Note the previous comment here claimed matching partitions "will be filtered out" —
   * the code does the opposite: partitions that do NOT match are dropped.
   *
   * @param partition Hive partition whose {@code sd.location} is tested against the regex
   * @return true iff the partition's location matches and the partition should be kept
   */
  @Override
  public boolean accept(Partition partition) {
    Matcher matcher = this.pattern.matcher(partition.getSd().getLocation());
    return matcher.find();
  }
}
| 2,619 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/hive/HiveTableLocationNotMatchException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.hive;
import java.io.IOException;
import org.apache.hadoop.fs.Path;
/**
 * Denotes that the desired target table location in Hive does not match the location of the
 * already-registered target table. Carries both locations for the caller to inspect.
 */
public class HiveTableLocationNotMatchException extends IOException {

  private static final long serialVersionUID = 1L;

  private final Path desiredTargetTableLocation;
  private final Path existingTargetTableLocation;

  public HiveTableLocationNotMatchException(Path desired, Path existing) {
    super(String.format("Desired target location %s and already registered target location %s do not agree.",
        desired, existing));
    this.desiredTargetTableLocation = desired;
    this.existingTargetTableLocation = existing;
  }

  /** @return the location the copy flow wanted the target table to have */
  public Path getDesiredTargetTableLocation() {
    return this.desiredTargetTableLocation;
  }

  /** @return the location of the target table as already registered in Hive */
  public Path getExistingTargetTableLocation() {
    return this.existingTargetTableLocation;
  }
}
| 2,620 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/hive/HiveDataset.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.hive;
import java.io.IOException;
import java.util.Collections;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Splitter;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Lists;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValue;
import com.typesafe.config.ConfigValueType;
import lombok.Getter;
import lombok.Setter;
import lombok.ToString;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.data.management.copy.CopyConfiguration;
import org.apache.gobblin.data.management.copy.CopyEntity;
import org.apache.gobblin.data.management.copy.CopyableDataset;
import org.apache.gobblin.data.management.copy.hive.HiveDatasetFinder.DbAndTable;
import org.apache.gobblin.data.management.copy.prioritization.PrioritizedCopyableDataset;
import org.apache.gobblin.data.management.partition.FileSet;
import org.apache.gobblin.hive.HiveMetastoreClientPool;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.Tag;
import org.apache.gobblin.util.AutoReturnableObject;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.PathUtils;
import org.apache.gobblin.util.request_allocation.PushDownRequestor;
/**
 * Hive dataset implementing {@link CopyableDataset}.
 *
 * <p>Wraps a Hive {@link Table} together with the source {@link FileSystem} and a metastore
 * client pool, and produces {@link FileSet}s of {@link CopyEntity}s for Hive distcp. Also
 * resolves the DB/table tokens ({@value #DATABASE_TOKEN}, {@value #TABLE_TOKEN},
 * {@value #LOGICAL_DB_TOKEN}, {@value #LOGICAL_TABLE_TOKEN}) in the dataset config.</p>
 */
@Slf4j
@Alpha
@Getter
@ToString
public class HiveDataset implements PrioritizedCopyableDataset {
  // final: shared immutable splitter for "db.table" style names — previously non-final,
  // which allowed accidental reassignment of shared static state.
  private static final Splitter SPLIT_ON_DOT = Splitter.on(".").omitEmptyStrings().trimResults();

  public static final ImmutableSet<TableType> COPYABLE_TABLES = ImmutableSet.of(TableType.EXTERNAL_TABLE, TableType.MANAGED_TABLE);

  public static final String REGISTERER = "registerer";
  public static final String REGISTRATION_GENERATION_TIME_MILLIS = "registrationGenerationTimeMillis";
  public static final String DATASET_NAME_PATTERN_KEY = "hive.datasetNamePattern";
  public static final String DATABASE = "Database";
  public static final String TABLE = "Table";

  // Tokens replaced with real / logical DB and table names in configs (see resolveConfig).
  public static final String DATABASE_TOKEN = "$DB";
  public static final String TABLE_TOKEN = "$TABLE";
  public static final String LOGICAL_DB_TOKEN = "$LOGICAL_DB";
  public static final String LOGICAL_TABLE_TOKEN = "$LOGICAL_TABLE";

  @Getter
  @Setter
  private String datasetPath;

  // Will not be serialized/de-serialized
  @Getter
  protected transient final Properties properties;
  protected transient final FileSystem fs;
  protected transient final HiveMetastoreClientPool clientPool;
  private transient final MetricContext metricContext;
  protected transient final Table table;
  protected transient final Config datasetConfig;

  // Only set if table has exactly one location
  protected final Optional<Path> tableRootPath;
  protected final String tableIdentifier;
  protected final Optional<String> datasetNamePattern;
  protected final DbAndTable dbAndTable;
  protected final DbAndTable logicalDbAndTable;

  /** Convenience constructor with an empty dataset config. */
  public HiveDataset(FileSystem fs, HiveMetastoreClientPool clientPool, Table table, Properties properties) {
    this(fs, clientPool, table, properties, ConfigFactory.empty());
  }

  /** Convenience constructor with empty properties. */
  public HiveDataset(FileSystem fs, HiveMetastoreClientPool clientPool, Table table, Config datasetConfig) {
    this(fs, clientPool, table, new Properties(), datasetConfig);
  }

  /**
   * Full constructor.
   *
   * @param fs source {@link FileSystem} the table data lives on
   * @param clientPool pool of metastore clients used for partition listing
   * @param table the Hive table backing this dataset
   * @param properties job-level properties (not serialized with the dataset)
   * @param datasetConfig per-dataset config; DB/table tokens in its values are resolved here
   */
  public HiveDataset(FileSystem fs, HiveMetastoreClientPool clientPool, Table table, Properties properties, Config datasetConfig) {
    this.fs = fs;
    this.clientPool = clientPool;
    this.table = table;
    this.properties = properties;

    // A glob data location means the table spans multiple paths, so there is no single root.
    this.tableRootPath = PathUtils.isGlob(this.table.getDataLocation()) ? Optional.<Path> absent() :
        Optional.fromNullable(this.table.getDataLocation());

    this.tableIdentifier = this.table.getDbName() + "." + this.table.getTableName();

    this.datasetNamePattern = Optional.fromNullable(ConfigUtils.getString(datasetConfig, DATASET_NAME_PATTERN_KEY, null));
    this.dbAndTable = new DbAndTable(table.getDbName(), table.getTableName());
    if (this.datasetNamePattern.isPresent()) {
      this.logicalDbAndTable = parseLogicalDbAndTable(this.datasetNamePattern.get(), this.dbAndTable, LOGICAL_DB_TOKEN, LOGICAL_TABLE_TOKEN);
    } else {
      // No pattern configured: the logical name is just the physical name.
      this.logicalDbAndTable = this.dbAndTable;
    }
    this.datasetConfig = resolveConfig(datasetConfig, dbAndTable, logicalDbAndTable);
    this.metricContext = Instrumented.getMetricContext(new State(properties), HiveDataset.class,
        Lists.<Tag<?>> newArrayList(new Tag<>(DATABASE, table.getDbName()), new Tag<>(TABLE, table.getTableName())));
  }

  /**
   * Finds all files read by the table and generates CopyableFiles.
   * Returns an empty iterator if the table type is not copyable. IO failures are logged and
   * swallowed unless {@link CopyConfiguration#isAbortOnSingleDatasetFailure()} is set.
   */
  @Override
  public Iterator<FileSet<CopyEntity>> getFileSetIterator(FileSystem targetFs, CopyConfiguration configuration)
      throws IOException {
    if (!canCopyTable(configuration)) {
      return Collections.emptyIterator();
    }
    try {
      return new HiveCopyEntityHelper(this, configuration, targetFs).getCopyEntities(configuration);
    } catch (IOException ioe) {
      log.error("Failed to copy table " + this.table, ioe);
      if (configuration.isAbortOnSingleDatasetFailure()) {
        // Wrapped as unchecked so a single-dataset failure aborts the whole job run.
        throw new RuntimeException(ioe);
      }
      return Collections.emptyIterator();
    }
  }

  /**
   * Finds all files read by the table and generates CopyableFiles.
   * For the specific semantics see {@link HiveCopyEntityHelper#getCopyEntities}.
   * The returned file sets are sorted with the given prioritizer.
   */
  @Override
  public Iterator<FileSet<CopyEntity>> getFileSetIterator(FileSystem targetFs, CopyConfiguration configuration,
      Comparator<FileSet<CopyEntity>> prioritizer, PushDownRequestor<FileSet<CopyEntity>> requestor)
      throws IOException {
    if (!canCopyTable(configuration)) {
      return Collections.emptyIterator();
    }
    try {
      List<FileSet<CopyEntity>> fileSetList = Lists.newArrayList(new HiveCopyEntityHelper(this, configuration, targetFs)
          .getCopyEntities(configuration, prioritizer, requestor));
      Collections.sort(fileSetList, prioritizer);
      return fileSetList.iterator();
    } catch (IOException ioe) {
      log.error("Failed to copy table " + this.table, ioe);
      if (configuration.isAbortOnSingleDatasetFailure()) {
        // Wrapped as unchecked so a single-dataset failure aborts the whole job run.
        throw new RuntimeException(ioe);
      }
      return Collections.emptyIterator();
    }
  }

  /** @return the fully qualified table name (db@table form from {@link Table#getCompleteName()}) */
  @Override
  public String datasetURN() {
    return this.table.getCompleteName();
  }

  /**
   * Resolve {@value #DATABASE_TOKEN} and {@value #TABLE_TOKEN} in <code>rawString</code> to {@link Table#getDbName()}
   * and {@link Table#getTableName()}. Blank input is returned unchanged.
   */
  public static String resolveTemplate(String rawString, Table table) {
    if (StringUtils.isBlank(rawString)) {
      return rawString;
    }
    return StringUtils.replaceEach(rawString, new String[] { DATABASE_TOKEN, TABLE_TOKEN }, new String[] { table.getDbName(), table.getTableName() });
  }

  /***
   * Parse logical Database and Table name from a given DbAndTable object.
   *
   * Eg.
   * Dataset Name Pattern   : prod_$LOGICAL_DB_linkedin.prod_$LOGICAL_TABLE_linkedin
   * Source DB and Table    : prod_dbName_linkedin.prod_tableName_linkedin
   * Logical DB Token       : $LOGICAL_DB
   * Logical Table Token    : $LOGICAL_TABLE
   * Parsed Logical DB and Table : dbName.tableName
   *
   * @param datasetNamePattern Dataset name pattern.
   * @param dbAndTable Source DB and Table.
   * @param logicalDbToken Logical DB token.
   * @param logicalTableToken Logical Table token.
   * @return Parsed logical DB and Table.
   * @throws IllegalArgumentException if the pattern is blank or not of the form "dbPattern.tablePattern"
   */
  @VisibleForTesting
  protected static DbAndTable parseLogicalDbAndTable(String datasetNamePattern, DbAndTable dbAndTable,
      String logicalDbToken, String logicalTableToken) {
    Preconditions.checkArgument(StringUtils.isNotBlank(datasetNamePattern), "Dataset name pattern must not be empty.");

    List<String> datasetNameSplit = Lists.newArrayList(SPLIT_ON_DOT.split(datasetNamePattern));
    Preconditions.checkArgument(datasetNameSplit.size() == 2, "Dataset name pattern must of the format: "
        + "dbPrefix_$LOGICAL_DB_dbPostfix.tablePrefix_$LOGICAL_TABLE_tablePostfix (prefix / postfix are optional)");

    String dbNamePattern = datasetNameSplit.get(0);
    String tableNamePattern = datasetNameSplit.get(1);

    String logicalDb = extractTokenValueFromEntity(dbAndTable.getDb(), dbNamePattern, logicalDbToken);
    String logicalTable = extractTokenValueFromEntity(dbAndTable.getTable(), tableNamePattern, logicalTableToken);

    return new DbAndTable(logicalDb, logicalTable);
  }

  /***
   * Extract token value from source entity, where token value is represented by a token in the source entity.
   *
   * Eg.
   * Source Entity  : prod_tableName_avro
   * Source Template: prod_$LOGICAL_TABLE_avro
   * Token          : $LOGICAL_TABLE
   * Extracted Value: tableName
   *
   * @param sourceEntity Source entity (typically a table or database name).
   * @param sourceTemplate Source template representing the source entity.
   * @param token Token representing the value to extract from the source entity using the template.
   * @return Extracted token value from the source entity.
   * @throws IllegalArgumentException if either input is blank or the template does not contain the token
   */
  @VisibleForTesting
  protected static String extractTokenValueFromEntity(String sourceEntity, String sourceTemplate, String token) {
    Preconditions.checkArgument(StringUtils.isNotBlank(sourceEntity), "Source entity should not be blank");
    Preconditions.checkArgument(StringUtils.isNotBlank(sourceTemplate), "Source template should not be blank");
    Preconditions.checkArgument(sourceTemplate.contains(token), String.format("Source template: %s should contain token: %s", sourceTemplate, token));

    String extractedValue = sourceEntity;

    // Splitting the template on the token yields the literal prefix and postfix around it;
    // stripping those from the entity leaves the token's value.
    List<String> preAndPostFix = Lists.newArrayList(Splitter.on(token).trimResults().split(sourceTemplate));
    extractedValue = StringUtils.removeStart(extractedValue, preAndPostFix.get(0));
    extractedValue = StringUtils.removeEnd(extractedValue, preAndPostFix.get(1));

    return extractedValue;
  }

  /***
   * Replace various tokens (DB, TABLE, LOGICAL_DB, LOGICAL_TABLE) with their values.
   *
   * @param datasetConfig The config object that needs to be resolved with final values.
   * @param realDbAndTable Real DB and Table .
   * @param logicalDbAndTable Logical DB and Table.
   * @return Resolved config object.
   */
  @VisibleForTesting
  protected static Config resolveConfig(Config datasetConfig, DbAndTable realDbAndTable, DbAndTable logicalDbAndTable) {
    Preconditions.checkNotNull(datasetConfig, "Dataset config should not be null");
    Preconditions.checkNotNull(realDbAndTable, "Real DB and table should not be null");
    Preconditions.checkNotNull(logicalDbAndTable, "Logical DB and table should not be null");

    ImmutableMap.Builder<String, Object> immutableMapBuilder = ImmutableMap.builder();
    Config resolvedConfig = datasetConfig.resolve();

    for (Map.Entry<String, ConfigValue> entry : resolvedConfig.entrySet()) {
      if (ConfigValueType.LIST.equals(entry.getValue().valueType())) {
        // Resolve tokens element-wise, then re-encode the list as a quoted, comma-joined
        // string so ConfigFactory.parseMap can rebuild it.
        List<String> rawValueList = resolvedConfig.getStringList(entry.getKey());
        List<String> resolvedValueList = Lists.newArrayList();
        for (String rawValue : rawValueList) {
          String resolvedValue = StringUtils.replaceEach(rawValue,
              new String[] { DATABASE_TOKEN, TABLE_TOKEN, LOGICAL_DB_TOKEN, LOGICAL_TABLE_TOKEN },
              new String[] { realDbAndTable.getDb(), realDbAndTable.getTable(), logicalDbAndTable.getDb(), logicalDbAndTable.getTable() });
          resolvedValueList.add(resolvedValue);
        }
        StringBuilder listToStringWithQuotes = new StringBuilder();
        for (String resolvedValueStr : resolvedValueList) {
          if (listToStringWithQuotes.length() > 0) {
            listToStringWithQuotes.append(",");
          }
          listToStringWithQuotes.append("\"").append(resolvedValueStr).append("\"");
        }
        immutableMapBuilder.put(entry.getKey(), listToStringWithQuotes.toString());
      } else {
        String resolvedValue = StringUtils.replaceEach(resolvedConfig.getString(entry.getKey()),
            new String[] { DATABASE_TOKEN, TABLE_TOKEN, LOGICAL_DB_TOKEN, LOGICAL_TABLE_TOKEN },
            new String[] { realDbAndTable.getDb(), realDbAndTable.getTable(), logicalDbAndTable.getDb(), logicalDbAndTable.getTable() });
        immutableMapBuilder.put(entry.getKey(), resolvedValue);
      }
    }
    return ConfigFactory.parseMap(immutableMapBuilder.build());
  }

  /**
   * Sort all partitions inplace on the basis of complete name ie dbName.tableName.partitionName
   */
  public static List<Partition> sortPartitions(List<Partition> partitions) {
    Collections.sort(partitions, new Comparator<Partition>() {
      @Override
      public int compare(Partition o1, Partition o2) {
        return o1.getCompleteName().compareTo(o2.getCompleteName());
      }
    });
    return partitions;
  }

  /**
   * This method returns a sorted list of partitions, fetched from the metastore.
   * The borrowed client is returned to the pool by try-with-resources.
   */
  public List<Partition> getPartitionsFromDataset() throws IOException{
    try (AutoReturnableObject<IMetaStoreClient> client = getClientPool().getClient()) {
      List<Partition> partitions =
          HiveUtils.getPartitions(client.get(), getTable(), Optional.<String>absent());
      return sortPartitions(partitions);
    }
  }

  /**
   * @return true iff the table type is in {@link #COPYABLE_TABLES}; otherwise logs a warning
   *         (and throws if {@link CopyConfiguration#isAbortOnSingleDatasetFailure()} is set)
   */
  private boolean canCopyTable(CopyConfiguration configuration) {
    if (!COPYABLE_TABLES.contains(this.table.getTableType())) {
      String message = String.format("Not copying %s: tables of type %s are not copyable.", this.table.getCompleteName(),
          this.table.getTableType());
      log.warn(message);
      if (configuration.isAbortOnSingleDatasetFailure()) {
        throw new RuntimeException(message);
      }
      return false;
    }
    return true;
  }
}
| 2,621 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/hive/HiveTargetPathHelper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.hive;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.metadata.Partition;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import org.apache.gobblin.util.PathUtils;
public class HiveTargetPathHelper {
/**
* Specifies a root path for the data in a table. All files containing table data will be placed under this directory.
* <p>
* Does some token replacement in the input path. For example, if the table myTable is in DB myDatabase:
* /data/$DB/$TABLE -> /data/myDatabase/myTable.
* /data/$TABLE -> /data/myTable
* /data -> /data/myTable
* </p>
*
* See javadoc for {@link #getTargetPath} for further explanation.
*/
public static final String COPY_TARGET_TABLE_ROOT = HiveDatasetFinder.HIVE_DATASET_PREFIX + ".copy.target.table.root";
/**
* These two options, in pair, specify the output location of the data files on copy
* {@link #COPY_TARGET_TABLE_PREFIX_TOBE_REPLACED} specified the prefix of the path (without Scheme and Authority ) to be replaced
* {@link #COPY_TARGET_TABLE_PREFIX_REPLACEMENT} specified the replacement of {@link #COPY_TARGET_TABLE_PREFIX_TOBE_REPLACED}
* <p>
* for example, if the data file is $sourceFs/data/databases/DB/Table/Snapshot/part-00000.avro ,
* {@link #COPY_TARGET_TABLE_PREFIX_TOBE_REPLACED} is /data/databases
* {@link #COPY_TARGET_TABLE_PREFIX_REPLACEMENT} is /data/databases/_parallel
*
* then, the output location for that file will be
* $targetFs/data/databases/_parallel/DB/Table/Snapshot/part-00000.avro
* </p>
*/
public static final String COPY_TARGET_TABLE_PREFIX_TOBE_REPLACED =
HiveDatasetFinder.HIVE_DATASET_PREFIX + ".copy.target.table.prefixToBeReplaced";
public static final String COPY_TARGET_TABLE_PREFIX_REPLACEMENT =
HiveDatasetFinder.HIVE_DATASET_PREFIX + ".copy.target.table.prefixReplacement";
/**
* Specifies that, on copy, data files for this table should all be relocated to a single directory per partition.
* See javadoc for {@link #getTargetPath} for further explanation.
*/
public static final String RELOCATE_DATA_FILES_KEY =
HiveDatasetFinder.HIVE_DATASET_PREFIX + ".copy.relocate.data.files";
public static final String DEFAULT_RELOCATE_DATA_FILES = Boolean.toString(false);
private final boolean relocateDataFiles;
private final Optional<Path> targetTableRoot;
private final Optional<Path> targetTablePrefixTobeReplaced;
private final Optional<Path> targetTablePrefixReplacement;
private final HiveDataset dataset;
public HiveTargetPathHelper(HiveDataset dataset) {
this.dataset = dataset;
this.relocateDataFiles = Boolean
.valueOf(this.dataset.getProperties().getProperty(RELOCATE_DATA_FILES_KEY, DEFAULT_RELOCATE_DATA_FILES));
this.targetTableRoot = this.dataset.getProperties().containsKey(COPY_TARGET_TABLE_ROOT)
? Optional.of(resolvePath(this.dataset.getProperties().getProperty(COPY_TARGET_TABLE_ROOT),
this.dataset.getTable().getDbName(), this.dataset.getTable().getTableName()))
: Optional.<Path> absent();
this.targetTablePrefixTobeReplaced =
this.dataset.getProperties().containsKey(COPY_TARGET_TABLE_PREFIX_TOBE_REPLACED)
? Optional.of(new Path(this.dataset.getProperties().getProperty(COPY_TARGET_TABLE_PREFIX_TOBE_REPLACED)))
: Optional.<Path> absent();
this.targetTablePrefixReplacement = this.dataset.getProperties().containsKey(COPY_TARGET_TABLE_PREFIX_REPLACEMENT)
? Optional.of(new Path(this.dataset.getProperties().getProperty(COPY_TARGET_TABLE_PREFIX_REPLACEMENT)))
: Optional.<Path> absent();
}
private static Path addPartitionToPath(Path path, Partition partition) {
for (String partitionValue : partition.getValues()) {
path = new Path(path, partitionValue);
}
return path;
}
/**
* Takes a path with tokens {@link #databaseToken} or {@link #tableToken} and replaces these tokens with the actual
* database names and table name. For example, if db is myDatabase, table is myTable, then /data/$DB/$TABLE will be
* resolved to /data/myDatabase/myTable.
*/
protected static Path resolvePath(String pattern, String database, String table) {
pattern = pattern.replace(HiveDataset.DATABASE_TOKEN, database);
if (pattern.contains(HiveDataset.TABLE_TOKEN)) {
pattern = pattern.replace(HiveDataset.TABLE_TOKEN, table);
return new Path(pattern);
} else {
return new Path(pattern, table);
}
}
/**
* Compute the target {@link Path} for a file or directory copied by Hive distcp.
*
* <p>
* The target locations of data files for this table depend on the values of the resolved table root (e.g.
* the value of {@link #COPY_TARGET_TABLE_ROOT} with tokens replaced) and {@link #RELOCATE_DATA_FILES_KEY}:
* * if {@link #RELOCATE_DATA_FILES_KEY} is true, then origin file /path/to/file/myFile will be written to
* /resolved/table/root/<partition>/myFile
* * if {@link #COPY_TARGET_TABLE_PREFIX_TOBE_REPLACED} and {@link #COPY_TARGET_TABLE_PREFIX_REPLACEMENT} are defined,
* then the specified prefix in each file will be replaced by the specified replacement.
* * otherwise, if the resolved table root is defined (e.g. {@link #COPY_TARGET_TABLE_ROOT} is defined in the
* properties), we define:
* origin_table_root := the deepest non glob ancestor of table.getSc().getLocation() iff getLocation() points to
* a single glob. (e.g. /path/to/*/files -> /path/to). If getLocation() contains none
* or multiple globs, job will fail.
* relative_path := path of the file relative to origin_table_root. If the path of the file is not a descendant
* of origin_table_root, job will fail.
* target_path := /resolved/table/root/relative/path
* This mode is useful when moving a table with a complicated directory structure to a different base directory.
* * otherwise the target is identical to the origin path.
* </p>
*
*
* @param sourcePath Source path to be transformed.
* @param targetFs target {@link FileSystem}
* @param partition partition this file belongs to.
* @param isConcreteFile true if this is a path to an existing file in HDFS.
*/
public Path getTargetPath(Path sourcePath, FileSystem targetFs, Optional<Partition> partition, boolean isConcreteFile) {
if (this.relocateDataFiles) {
Preconditions.checkArgument(this.targetTableRoot.isPresent(), "Must define %s to relocate data files.",
COPY_TARGET_TABLE_ROOT);
Path path = this.targetTableRoot.get();
if (partition.isPresent()) {
path = addPartitionToPath(path, partition.get());
}
if (!isConcreteFile) {
return targetFs.makeQualified(path);
}
return targetFs.makeQualified(new Path(path, sourcePath.getName()));
}
// both prefixs must be present as the same time
// can not used with option {@link #COPY_TARGET_TABLE_ROOT}
if (this.targetTablePrefixTobeReplaced.isPresent() || this.targetTablePrefixReplacement.isPresent()) {
Preconditions.checkState(this.targetTablePrefixTobeReplaced.isPresent(),
String.format("Must specify both %s option and %s option together", COPY_TARGET_TABLE_PREFIX_TOBE_REPLACED,
COPY_TARGET_TABLE_PREFIX_REPLACEMENT));
Preconditions.checkState(this.targetTablePrefixReplacement.isPresent(),
String.format("Must specify both %s option and %s option together", COPY_TARGET_TABLE_PREFIX_TOBE_REPLACED,
COPY_TARGET_TABLE_PREFIX_REPLACEMENT));
Preconditions.checkState(!this.targetTableRoot.isPresent(),
String.format("Can not specify the option %s with option %s ", COPY_TARGET_TABLE_ROOT,
COPY_TARGET_TABLE_PREFIX_REPLACEMENT));
Path targetPathWithoutSchemeAndAuthority =
HiveCopyEntityHelper.replacedPrefix(sourcePath, this.targetTablePrefixTobeReplaced.get(), this.targetTablePrefixReplacement.get());
return targetFs.makeQualified(targetPathWithoutSchemeAndAuthority);
} else if (this.targetTableRoot.isPresent()) {
Preconditions.checkArgument(this.dataset.getTableRootPath().isPresent(),
"Cannot move paths to a new root unless table has exactly one location.");
Preconditions.checkArgument(PathUtils.isAncestor(this.dataset.getTableRootPath().get(), sourcePath),
"When moving paths to a new root, all locations must be descendants of the table root location. "
+ "Table root location: %s, file location: %s.", this.dataset.getTableRootPath(), sourcePath);
Path relativePath = PathUtils.relativizePath(sourcePath, this.dataset.getTableRootPath().get());
return targetFs.makeQualified(new Path(this.targetTableRoot.get(), relativePath));
} else {
return targetFs.makeQualified(PathUtils.getPathWithoutSchemeAndAuthority(sourcePath));
}
}
}
| 2,622 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/hive/HiveCopyEntityHelper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.hive;
import java.io.IOException;
import java.net.URI;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.mapred.InputFormat;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.thrift.TException;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Predicate;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Iterators;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.io.Closer;
import com.google.gson.Gson;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import lombok.Builder;
import lombok.Data;
import lombok.Getter;
import lombok.Singular;
import lombok.ToString;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.commit.CommitStep;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.data.management.copy.CopyConfiguration;
import org.apache.gobblin.data.management.copy.CopyEntity;
import org.apache.gobblin.data.management.copy.CopyableFile;
import org.apache.gobblin.data.management.copy.OwnerAndPermission;
import org.apache.gobblin.data.management.copy.entities.PostPublishStep;
import org.apache.gobblin.data.management.copy.hive.avro.HiveAvroCopyEntityHelper;
import org.apache.gobblin.data.management.partition.FileSet;
import org.apache.gobblin.dataset.DatasetConstants;
import org.apache.gobblin.dataset.DatasetDescriptor;
import org.apache.gobblin.hive.HiveConstants;
import org.apache.gobblin.hive.HiveMetastoreClientPool;
import org.apache.gobblin.hive.HiveRegProps;
import org.apache.gobblin.hive.HiveRegisterStep;
import org.apache.gobblin.hive.PartitionDeregisterStep;
import org.apache.gobblin.hive.TableDeregisterStep;
import org.apache.gobblin.hive.metastore.HiveMetaStoreUtils;
import org.apache.gobblin.hive.spec.HiveSpec;
import org.apache.gobblin.hive.spec.SimpleHiveSpec;
import org.apache.gobblin.metrics.event.EventSubmitter;
import org.apache.gobblin.metrics.event.MultiTimingEvent;
import org.apache.gobblin.util.ClassAliasResolver;
import org.apache.gobblin.util.PathUtils;
import org.apache.gobblin.util.commit.DeleteFileCommitStep;
import org.apache.gobblin.util.filesystem.ModTimeDataFileVersionStrategy;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
import org.apache.gobblin.util.request_allocation.PushDownRequestor;
/**
* Creates {@link CopyEntity}s for copying a Hive table.
*/
@Slf4j
@Getter
public class HiveCopyEntityHelper {
  /** Policy (one of {@link ExistingEntityPolicy}) applied when the target entity already exists. */
  public static final String EXISTING_ENTITY_POLICY_KEY =
      HiveDatasetFinder.HIVE_DATASET_PREFIX + ".existing.entity.conflict.policy";
  public static final String DEFAULT_EXISTING_ENTITY_POLICY = ExistingEntityPolicy.ABORT.name();

  /** Policy applied to data under the target location that the target table does not manage. */
  public static final String UNMANAGED_DATA_POLICY_KEY =
      HiveDatasetFinder.HIVE_DATASET_PREFIX + ".unmanaged.data.conflict.policy";
  public static final String DEFAULT_UNMANAGED_DATA_POLICY = UnmanagedDataPolicy.ABORT.name();

  /**
   * Source metastore URI.
   * NOTE(review): this key's value is ".copy.target.metastore.uri" — byte-identical to
   * {@link #TARGET_METASTORE_URI_KEY} below. This looks like a copy/paste bug (expected
   * ".copy.source.metastore.uri"), but changing the literal could break existing job
   * configs; confirm intent before fixing.
   */
  public static final String SOURCE_METASTORE_URI_KEY =
      HiveDatasetFinder.HIVE_DATASET_PREFIX + ".copy.target.metastore.uri";

  /** Target metastore URI */
  public static final String TARGET_METASTORE_URI_KEY =
      HiveDatasetFinder.HIVE_DATASET_PREFIX + ".copy.target.metastore.uri";

  /** Target database name */
  public static final String TARGET_DATABASE_KEY = HiveDatasetFinder.HIVE_DATASET_PREFIX + ".copy.target.database";

  /** A filter to select partitions to copy */
  public static final String COPY_PARTITIONS_FILTER_CONSTANT =
      HiveDatasetFinder.HIVE_DATASET_PREFIX + ".copy.partition.filter.constant";

  /** Use an implementation of {@link PartitionFilterGenerator} to dynamically create partition filter. The value should
   * be the name of the implementation to use. */
  public static final String COPY_PARTITION_FILTER_GENERATOR =
      HiveDatasetFinder.HIVE_DATASET_PREFIX + ".copy.partition.filter.generator";

  /** A predicate applied to each partition before any file listing.
   * If the predicate returns true, the partition will be skipped. */
  public static final String FAST_PARTITION_SKIP_PREDICATE =
      HiveDatasetFinder.HIVE_DATASET_PREFIX + ".copy.fast.partition.skip.predicate";

  /** A predicate applied to non partition table before any file listing.
   * If the predicate returns true, the table will be skipped. */
  public static final String FAST_TABLE_SKIP_PREDICATE =
      HiveDatasetFinder.HIVE_DATASET_PREFIX + ".copy.fast.table.skip.predicate";

  /** Method for deleting files on deregister. One of {@link DeregisterFileDeleteMethod}. */
  public static final String DELETE_FILES_ON_DEREGISTER =
      HiveDatasetFinder.HIVE_DATASET_PREFIX + ".copy.deregister.fileDeleteMethod";
  public static final DeregisterFileDeleteMethod DEFAULT_DEREGISTER_DELETE_METHOD =
      DeregisterFileDeleteMethod.NO_DELETE;

  /**
   * Config key to specify if {@link IMetaStoreClient }'s filtering method {@link IMetaStoreClient#listPartitionsByFilter} is not enough
   * for filtering out specific partitions.
   * For example, if you specify "Path" as the filter type and "Hourly" as the filtering condition,
   * partitions with Path containing '/Hourly/' will be kept.
   */
  public static final String HIVE_PARTITION_EXTENDED_FILTER_TYPE = HiveDatasetFinder.HIVE_DATASET_PREFIX + ".extendedFilterType";

  // Shared JSON serializer for copy-entity metadata.
  static final Gson gson = new Gson();

  // NOTE(review): lower_snake_case breaks the UPPER_SNAKE_CASE constant convention used
  // elsewhere in this class; left unchanged here since renaming is out of scope for a
  // documentation pass.
  private static final String source_client = "source_client";
  private static final String target_client = "target_client";

  public static final String GOBBLIN_DISTCP = "gobblin-distcp";
  /**
   * Names of the timing stages reported through {@link MultiTimingEvent} while computing
   * copy entities. Pure constant holder — never instantiated.
   */
  public static class Stages {
    public static final String EXISTING_PARTITION = "ExistingPartition";
    public static final String PARTITION_SKIP_PREDICATE = "PartitionSkipPredicate";
    public static final String CREATE_LOCATIONS = "CreateLocations";
    public static final String FULL_PATH_DIFF = "FullPathDiff";
    public static final String CREATE_DELETE_UNITS = "CreateDeleteUnits";
    public static final String CREATE_COPY_UNITS = "CreateCopyUnits";
    public static final String SOURCE_PATH_LISTING = "SourcePathListing";
    public static final String TARGET_EXISTING_PATH_LISTING = "TargetExistingPathListing";
    public static final String DESIRED_PATHS_LISTING = "DesiredPathsListing";
    public static final String PATH_DIFF = "PathDiff";
    public static final String COMPUTE_DELETE_PATHS = "ComputeDeletePaths";
    public static final String GET_TABLES = "GetTables";
    public static final String COMPUTE_TARGETS = "ComputeTargets";

  }
  // Wall-clock time when this helper was created; recorded as the registration generation time.
  private final long startTime;
  private final HiveDataset dataset;
  private final CopyConfiguration configuration;
  private FileSystem targetFs;
  private final HiveMetastoreClientPool targetClientPool;
  private final String targetDatabase;
  private final HiveRegProps hiveRegProps;
  // Target-side table as currently registered, if any; drives compatibility checks and path diffs.
  private Optional<Table> existingTargetTable;
  // Target-side table as it should look after the copy completes.
  private final Table targetTable;
  private final Optional<String> sourceMetastoreURI;
  private final Optional<String> targetMetastoreURI;
  private final ExistingEntityPolicy existingEntityPolicy;
  private final UnmanagedDataPolicy unmanagedDataPolicy;
  // Optional partition filter expression passed to the metastore partition listing.
  private final Optional<String> partitionFilter;
  private final Optional<? extends HivePartitionExtendedFilter> hivePartitionExtendedFilter;
  // Optional predicates allowing individual partitions / whole tables to be skipped cheaply.
  private final Optional<Predicate<HivePartitionFileSet>> fastPartitionSkip;
  private final Optional<Predicate<HiveCopyEntityHelper>> fastTableSkip;
  private final DeregisterFileDeleteMethod deleteMethod;
  private final Optional<CommitStep> tableRegistrationStep;
  // Partitions keyed by partition values; targetPartitions is mutated while generating file sets
  // (matched entries are removed, leftovers are deregistered).
  private Map<List<String>, Partition> sourcePartitions;
  private Map<List<String>, Partition> targetPartitions;
  private final boolean enforceFileSizeMatch;
  private final EventSubmitter eventSubmitter;
  @Getter
  protected final HiveTargetPathHelper targetPathHelper;
/**
* Defines what should be done for partitions that exist in the target but are not compatible with the source.
*/
public enum ExistingEntityPolicy {
/** Deregister target partition, delete its files, and create a new partition with correct values. */
REPLACE_PARTITIONS,
/** Deregister target table, do NOT delete its files, and create a new table with correct values. */
REPLACE_TABLE,
/** A combination of {@link #REPLACE_TABLE} and {@link #REPLACE_PARTITIONS}*/
REPLACE_TABLE_AND_PARTITIONS,
/** Keep the target table as registered while updating the file location */
UPDATE_TABLE,
/** Abort copying of conflict table. */
ABORT
}
/**
* Defines what should be done for data that is not managed by the existing target table / partition.
*/
public enum UnmanagedDataPolicy {
/** Delete any data that is not managed by the existing target table / partition. */
DELETE_UNMANAGED_DATA,
/** Abort copying of conflict table / partition. */
ABORT
}
public enum DeregisterFileDeleteMethod {
/** Delete the files pointed at by the input format. */
INPUT_FORMAT,
/** Delete all files at the partition location recursively. */
RECURSIVE,
/** Don't delete files, just deregister partition. */
NO_DELETE
}
  /**
   * A container for the differences between desired and existing files, as computed by
   * {@link #fullPathDiff}.
   */
  @Builder
  @ToString
  protected static class DiffPathSet {
    /** Desired files that don't exist on target */
    @Singular(value = "copyFile")
    Collection<FileStatus> filesToCopy;
    /** Files in target that are not desired */
    @Singular(value = "deleteFile")
    Collection<Path> pathsToDelete;
  }
  /**
   * Represents a source {@link FileStatus} and a {@link Path} destination.
   */
  @Data
  private static class SourceAndDestination {
    // Source file status (includes path, length, mod time).
    private final FileStatus source;
    // Fully-resolved destination path on the target file system.
    private final Path destination;
  }
  /**
   * Sets up all state needed to generate copy entities for {@code dataset}: loads policies and filters
   * from the dataset properties, resolves source / target metastore clients, computes the target table
   * (and its registration commit step), verifies compatibility with any existing target table, and
   * lists source / target partitions.
   *
   * @param dataset the hive dataset (table) to copy
   * @param configuration copy configuration
   * @param targetFs file system files will be copied to
   * @throws IOException on metastore failures, reflection failures while instantiating configured
   *         filter/predicate classes, or an incompatible existing target table
   */
  HiveCopyEntityHelper(HiveDataset dataset, CopyConfiguration configuration, FileSystem targetFs) throws IOException {
    // Closer guarantees the timing event is closed even if setup fails part-way.
    try (Closer closer = Closer.create()) {
      log.info("Finding copy entities for table " + dataset.table.getCompleteName());
      this.eventSubmitter = new EventSubmitter.Builder(dataset.getMetricContext(), "hive.dataset.copy").build();
      MultiTimingEvent multiTimer = closer.register(new MultiTimingEvent(this.eventSubmitter, "HiveCopySetup", true));
      this.startTime = System.currentTimeMillis();
      this.dataset = dataset;
      this.configuration = configuration;
      this.targetFs = targetFs;
      this.targetPathHelper = new HiveTargetPathHelper(this.dataset);
      this.enforceFileSizeMatch = configuration.isEnforceFileLengthMatch();
      this.hiveRegProps = new HiveRegProps(new State(this.dataset.getProperties()));
      this.sourceMetastoreURI =
          Optional.fromNullable(this.dataset.getProperties().getProperty(HiveDatasetFinder.HIVE_METASTORE_URI_KEY));
      this.targetMetastoreURI =
          Optional.fromNullable(this.dataset.getProperties().getProperty(TARGET_METASTORE_URI_KEY));
      this.targetClientPool = HiveMetastoreClientPool.get(this.dataset.getProperties(), this.targetMetastoreURI);
      // Target database defaults to the source database unless explicitly overridden.
      this.targetDatabase = Optional.fromNullable(this.dataset.getProperties().getProperty(TARGET_DATABASE_KEY))
          .or(this.dataset.table.getDbName());
      this.existingEntityPolicy = ExistingEntityPolicy.valueOf(this.dataset.getProperties()
          .getProperty(EXISTING_ENTITY_POLICY_KEY, DEFAULT_EXISTING_ENTITY_POLICY).toUpperCase());
      this.unmanagedDataPolicy = UnmanagedDataPolicy.valueOf(
          this.dataset.getProperties().getProperty(UNMANAGED_DATA_POLICY_KEY, DEFAULT_UNMANAGED_DATA_POLICY)
              .toUpperCase());
      this.deleteMethod = this.dataset.getProperties().containsKey(DELETE_FILES_ON_DEREGISTER)
          ? DeregisterFileDeleteMethod
          .valueOf(this.dataset.getProperties().getProperty(DELETE_FILES_ON_DEREGISTER).toUpperCase())
          : DEFAULT_DEREGISTER_DELETE_METHOD;
      // Filters and skip predicates are instantiated reflectively from configured class names;
      // reflection failures abort construction with an IOException.
      try {
        this.partitionFilter = this.initializePartitionFilter();
        this.hivePartitionExtendedFilter = this.initializeExtendedPartitionFilter();
        this.fastPartitionSkip = this.initializePartitionSkipper();
        this.fastTableSkip = this.initializeTableSkipper();
      } catch (ReflectiveOperationException e) {
        closer.close();
        throw new IOException(e);
      }
      Map<String, HiveMetastoreClientPool> namedPools =
          ImmutableMap.of(source_client, this.dataset.clientPool, target_client, this.targetClientPool);
      multiTimer.nextStage(Stages.GET_TABLES);
      // Acquire source and target metastore clients atomically (avoids pool deadlock).
      try (HiveMetastoreClientPool.MultiClient multiClient = HiveMetastoreClientPool.safeGetClients(namedPools)) {
        if (multiClient.getClient(target_client).tableExists(this.targetDatabase, this.dataset.table.getTableName())) {
          this.existingTargetTable = Optional.of(new Table(
              multiClient.getClient(target_client).getTable(this.targetDatabase, this.dataset.table.getTableName())));
        } else {
          this.existingTargetTable = Optional.absent();
        }
        Path targetPath = getTargetLocation(this.targetFs, this.dataset.table.getDataLocation(), Optional.<Partition>absent());
        this.dataset.setDatasetPath(targetPath.toUri().getRawPath());
        this.targetTable = getTargetTable(this.dataset.table, targetPath);
        HiveSpec tableHiveSpec = new SimpleHiveSpec.Builder<>(targetPath)
            .withTable(HiveMetaStoreUtils.getHiveTable(this.targetTable.getTTable())).build();
        // Constructing CommitStep object for table registration
        CommitStep tableRegistrationStep =
            new HiveRegisterStep(this.targetMetastoreURI, tableHiveSpec, this.hiveRegProps);
        this.tableRegistrationStep = Optional.of(tableRegistrationStep);
        // Only partitioned existing tables need a compatibility check up front.
        if (this.existingTargetTable.isPresent() && this.existingTargetTable.get().isPartitioned()) {
          checkPartitionedTableCompatibility(this.targetTable, this.existingTargetTable.get());
        }
        initializeSourceAndTargetTablePartitions(multiClient);
      } catch (TException te) {
        closer.close();
        throw new IOException("Failed to generate work units for table " + dataset.table.getCompleteName(), te);
      }
    }
  }
/**
* Checks {@value COPY_PARTITION_FILTER_GENERATOR} in configuration to determine which class to use for hive filtering
* Default is to filter based on {@value COPY_PARTITIONS_FILTER_CONSTANT}, a constant regex
* @throws ReflectiveOperationException if the generator class in the configuration is not found
*/
private Optional<String> initializePartitionFilter() throws ReflectiveOperationException {
if (this.dataset.getProperties().containsKey(COPY_PARTITION_FILTER_GENERATOR)) {
PartitionFilterGenerator generator = GobblinConstructorUtils.invokeFirstConstructor(
(Class<PartitionFilterGenerator>) Class.forName(
this.dataset.getProperties().getProperty(COPY_PARTITION_FILTER_GENERATOR)),
Lists.<Object>newArrayList(this.dataset.getProperties()), Lists.newArrayList());
Optional<String> partitionFilter = Optional.of(generator.getFilter(this.dataset));
log.info(String.format("Dynamic partition filter for table %s: %s.", this.dataset.table.getCompleteName(),
partitionFilter.get()));
return partitionFilter;
} else {
return Optional.fromNullable(this.dataset.getProperties().getProperty(COPY_PARTITIONS_FILTER_CONSTANT));
}
}
/**
* Checks {@value HIVE_PARTITION_EXTENDED_FILTER_TYPE} in configuration to initialize more granular filtering class
* Default is to use none
* @throws ReflectiveOperationException if the filter class in the configuration is not found
*/
private Optional<HivePartitionExtendedFilter> initializeExtendedPartitionFilter() throws IOException, ReflectiveOperationException {
if (this.dataset.getProperties().containsKey(HIVE_PARTITION_EXTENDED_FILTER_TYPE)){
String filterType = dataset.getProperties().getProperty(HIVE_PARTITION_EXTENDED_FILTER_TYPE);
Config config = ConfigFactory.parseProperties(this.dataset.getProperties());
return Optional.of(new ClassAliasResolver<>(HivePartitionExtendedFilterFactory.class).resolveClass(filterType).newInstance().createFilter(config));
} else {
return Optional.absent();
}
}
/**
* Checks {@value FAST_PARTITION_SKIP_PREDICATE} in configuration to determine the class used to find which hive partitions to skip
* Default is to skip none
* @throws ReflectiveOperationException if class in configuration is not found
*/
private Optional<Predicate<HivePartitionFileSet>> initializePartitionSkipper() throws ReflectiveOperationException {
return this.dataset.getProperties().containsKey(FAST_PARTITION_SKIP_PREDICATE)
? Optional.of(GobblinConstructorUtils.invokeFirstConstructor(
(Class<Predicate<HivePartitionFileSet>>) Class
.forName(this.dataset.getProperties().getProperty(FAST_PARTITION_SKIP_PREDICATE)),
Lists.<Object> newArrayList(this), Lists.newArrayList()))
: Optional.<Predicate<HivePartitionFileSet>> absent();
}
/**
* Checks {@value FAST_TABLE_SKIP_PREDICATE} in configuration to determine the class used to find which hive tables to skip
* Default is to skip none
* @throws ReflectiveOperationException if class in configuration is not found
*/
private Optional<Predicate<HiveCopyEntityHelper>> initializeTableSkipper() throws ReflectiveOperationException {
return this.dataset.getProperties().containsKey(FAST_TABLE_SKIP_PREDICATE)
? Optional.of(GobblinConstructorUtils.invokeFirstConstructor(
(Class<Predicate<HiveCopyEntityHelper>>) Class
.forName(this.dataset.getProperties().getProperty(FAST_TABLE_SKIP_PREDICATE)),
Lists.newArrayList()))
: Optional.<Predicate<HiveCopyEntityHelper>> absent();
}
/**
* Initializes the corresponding source and target partitions after applying the hive partition filters
* @param multiClient a map of {@link IMetaStoreClient}
* @throws IOException if encountering a hive error when determining partitions
*/
private void initializeSourceAndTargetTablePartitions(HiveMetastoreClientPool.MultiClient multiClient) throws IOException {
if (this.dataset.table.isPartitioned()) {
this.sourcePartitions = HiveUtils.getPartitionsMap(multiClient.getClient(source_client), this.dataset.table, this.partitionFilter,
this.hivePartitionExtendedFilter);
HiveAvroCopyEntityHelper.updatePartitionAttributesIfAvro(this.targetTable, this.sourcePartitions, this);
// Note: this must be mutable, so we copy the map
this.targetPartitions = this.existingTargetTable.isPresent() ? Maps.newHashMap(
HiveUtils.getPartitionsMap(multiClient.getClient(target_client), this.existingTargetTable.get(), this.partitionFilter,
this.hivePartitionExtendedFilter))
: Maps.<List<String>, Partition>newHashMap();
} else {
this.sourcePartitions = Maps.newHashMap();
this.targetPartitions = Maps.newHashMap();
}
}
  /**
   * See {@link #getCopyEntities(CopyConfiguration, Comparator, PushDownRequestor)}. This method does not pushdown any prioritizer.
   * @return an iterator of {@link FileSet}s of {@link CopyEntity}, in default (unprioritized) order
   */
  Iterator<FileSet<CopyEntity>> getCopyEntities(CopyConfiguration configuration) throws IOException {
    return getCopyEntities(configuration, null, null);
  }
/**
* Finds all files read by the table and generates {@link CopyEntity}s for duplicating the table. The semantics are as follows:
* 1. Find all valid {@link org.apache.hadoop.hive.metastore.api.StorageDescriptor}. If the table is partitioned, the
* {@link org.apache.hadoop.hive.metastore.api.StorageDescriptor} of the base
* table will be ignored, and we will instead process the {@link org.apache.hadoop.hive.metastore.api.StorageDescriptor} of each partition.
* 2. For each {@link org.apache.hadoop.hive.metastore.api.StorageDescriptor} find all files referred by it.
* 3. Generate a {@link CopyableFile} for each file referred by a {@link org.apache.hadoop.hive.metastore.api.StorageDescriptor}.
* 4. If the table is partitioned, create a file set for each partition.
* 5. Create work units for registering, deregistering partitions / tables, and deleting unnecessary files in the target.
*
* For computation of target locations see {@link HiveTargetPathHelper#getTargetPath}
*/
Iterator<FileSet<CopyEntity>> getCopyEntities(CopyConfiguration configuration, Comparator<FileSet<CopyEntity>> prioritizer,
PushDownRequestor<FileSet<CopyEntity>> requestor) throws IOException {
if (this.dataset.table.isPartitioned()) {
return new PartitionIterator(this.sourcePartitions, configuration, prioritizer, requestor);
} else {
FileSet<CopyEntity> fileSet = new UnpartitionedTableFileSet(this.dataset.table.getCompleteName(), this.dataset, this);
return Iterators.singletonIterator(fileSet);
}
}
  /**
   * An iterator producing a {@link FileSet} of {@link CopyEntity} for each partition in this table. The files
   * are not scanned or the {@link FileSet} materialized until {@link #next} is called.
   */
  private class PartitionIterator implements Iterator<FileSet<CopyEntity>> {
    // Suffix appended to the table name for the file set that deregisters stale target partitions.
    static final String DEREGISTER_FILE_SET = "deregister";
    private final List<FileSet<CopyEntity>> allFileSets;
    private final Iterator<FileSet<CopyEntity>> fileSetIterator;
    /**
     * @param partitionMap source partitions keyed by partition values
     * @param configuration copy configuration (not read directly here)
     * @param prioritizer optional ordering of file sets; when non-null the file sets are sorted with it
     * @param requestor pushed down to every generated file set
     */
    public PartitionIterator(Map<List<String>, Partition> partitionMap, CopyConfiguration configuration,
        Comparator<FileSet<CopyEntity>> prioritizer, PushDownRequestor<FileSet<CopyEntity>> requestor) {
      this.allFileSets = generateAllFileSets(partitionMap);
      for (FileSet<CopyEntity> fileSet : this.allFileSets) {
        fileSet.setRequestor(requestor);
      }
      if (prioritizer != null) {
        Collections.sort(this.allFileSets, prioritizer);
      }
      this.fileSetIterator = this.allFileSets.iterator();
    }
    @Override
    public boolean hasNext() {
      return this.fileSetIterator.hasNext();
    }
    @Override
    public FileSet<CopyEntity> next() {
      return this.fileSetIterator.next();
    }
    @Override
    public void remove() {
      throw new UnsupportedOperationException();
    }
    /**
     * Builds one file set per source partition. Side effect: every matched key is removed from the
     * outer {@code targetPartitions} map, so whatever remains there exists only on the target and is
     * collected into a single trailing deregister file set.
     */
    private List<FileSet<CopyEntity>> generateAllFileSets(Map<List<String>, Partition> partitionMap) {
      List<FileSet<CopyEntity>> fileSets = Lists.newArrayList();
      for (Map.Entry<List<String>, Partition> partition : partitionMap.entrySet()) {
        fileSets.add(fileSetForPartition(partition.getValue()));
        HiveCopyEntityHelper.this.targetPartitions.remove(partition.getKey());
      }
      if (!HiveCopyEntityHelper.this.targetPartitions.isEmpty()) {
        // Leftover target-only partitions must be deregistered.
        fileSets.add(new HivePartitionsDeregisterFileSet(
            HiveCopyEntityHelper.this.dataset.getTable().getCompleteName() + DEREGISTER_FILE_SET,
            HiveCopyEntityHelper.this.dataset, HiveCopyEntityHelper.this.targetPartitions.values(), HiveCopyEntityHelper.this));
      }
      return fileSets;
    }
    /** Wraps a single source partition in a lazily-materialized {@link HivePartitionFileSet}. */
    private FileSet<CopyEntity> fileSetForPartition(final Partition partition) {
      return new HivePartitionFileSet(HiveCopyEntityHelper.this, partition, HiveCopyEntityHelper.this.dataset.getProperties());
    }
  }
private Table getTargetTable(Table originTable, Path targetLocation) throws IOException {
try {
Table targetTable = originTable.copy();
HiveCopyEntityHelper.addMetadataToTargetTable(targetTable, targetLocation, this.targetDatabase, this.startTime);
HiveAvroCopyEntityHelper.updateTableAttributesIfAvro(targetTable, this);
return targetTable;
} catch (HiveException he) {
throw new IOException(he);
}
}
@VisibleForTesting
static void addMetadataToTargetTable(Table targetTable, Path targetLocation, String targetDatabase, long startTime)
throws IOException {
targetTable.setDbName(targetDatabase);
targetTable.setDataLocation(targetLocation);
/*
* Need to set the table owner as the flow executor
*/
targetTable.setOwner(UserGroupInformation.getCurrentUser().getShortUserName());
targetTable.getTTable().putToParameters(HiveDataset.REGISTERER, GOBBLIN_DISTCP);
targetTable.getTTable().putToParameters(HiveDataset.REGISTRATION_GENERATION_TIME_MILLIS,
Long.toString(startTime));
/**
* Only set the this constants when source table has it.
*/
targetTable.getTTable().getSd().getSerdeInfo().getParameters()
.computeIfPresent(HiveConstants.PATH, (k,v) -> targetLocation.toString());
targetTable.getTTable().unsetCreateTime();
}
int addPartitionDeregisterSteps(List<CopyEntity> copyEntities, String fileSet, int initialPriority,
Table table, Partition partition) throws IOException {
int stepPriority = initialPriority;
Collection<Path> partitionPaths = Lists.newArrayList();
if (this.deleteMethod == DeregisterFileDeleteMethod.RECURSIVE) {
partitionPaths = Lists.newArrayList(partition.getDataLocation());
} else if (this.deleteMethod == DeregisterFileDeleteMethod.INPUT_FORMAT) {
InputFormat<?, ?> inputFormat = HiveUtils.getInputFormat(partition.getTPartition().getSd());
HiveLocationDescriptor targetLocation = new HiveLocationDescriptor(partition.getDataLocation(), inputFormat,
this.targetFs, this.dataset.getProperties());
partitionPaths = targetLocation.getPaths().keySet();
} else if (this.deleteMethod == DeregisterFileDeleteMethod.NO_DELETE) {
partitionPaths = Lists.newArrayList();
}
if (!partitionPaths.isEmpty()) {
DeleteFileCommitStep deletePaths = DeleteFileCommitStep.fromPaths(this.targetFs, partitionPaths,
this.dataset.getProperties(), table.getDataLocation());
copyEntities.add(new PostPublishStep(fileSet, Maps.<String, String> newHashMap(), deletePaths, stepPriority++));
}
PartitionDeregisterStep deregister =
new PartitionDeregisterStep(table.getTTable(), partition.getTPartition(), this.targetMetastoreURI, this.hiveRegProps);
copyEntities.add(new PostPublishStep(fileSet, Maps.<String, String> newHashMap(), deregister, stepPriority++));
return stepPriority;
}
@VisibleForTesting
protected int addTableDeregisterSteps(List<CopyEntity> copyEntities, String fileSet, int initialPriority, Table table)
throws IOException {
int stepPriority = initialPriority;
Collection<Path> tablePaths = Lists.newArrayList();
switch (this.getDeleteMethod()) {
case RECURSIVE:
tablePaths = Lists.newArrayList(table.getDataLocation());
break;
case INPUT_FORMAT:
InputFormat<?, ?> inputFormat = HiveUtils.getInputFormat(table.getSd());
HiveLocationDescriptor targetLocation = new HiveLocationDescriptor(table.getDataLocation(), inputFormat,
this.getTargetFs(), this.getDataset().getProperties());
tablePaths = targetLocation.getPaths().keySet();
break;
case NO_DELETE:
tablePaths = Lists.newArrayList();
break;
default:
tablePaths = Lists.newArrayList();
}
if (!tablePaths.isEmpty()) {
DeleteFileCommitStep deletePaths = DeleteFileCommitStep.fromPaths(this.getTargetFs(), tablePaths,
this.getDataset().getProperties(), table.getDataLocation());
copyEntities.add(new PostPublishStep(fileSet, Maps.<String, String> newHashMap(), deletePaths, stepPriority++));
}
TableDeregisterStep deregister =
new TableDeregisterStep(table.getTTable(), this.getTargetMetastoreURI(), this.getHiveRegProps());
copyEntities.add(new PostPublishStep(fileSet, Maps.<String, String> newHashMap(), deregister, stepPriority++));
return stepPriority;
}
int addSharedSteps(List<CopyEntity> copyEntities, String fileSet, int initialPriority) {
int priority = initialPriority;
if (this.tableRegistrationStep.isPresent()) {
copyEntities.add(new PostPublishStep(fileSet, Maps.<String, String> newHashMap(), this.tableRegistrationStep.get(),
priority++));
}
return priority;
}
  /**
   * Compares three entities to figure out which files should be copied and which files should be deleted in the target
   * file system.
   * @param sourceLocation Represents the source table or partition.
   * @param desiredTargetLocation Represents the new desired table or partition.
   * @param currentTargetLocation Represents the corresponding existing table or partition in the target hcat if it exists.
   * @param partition If present, contains partition information.
   * @param multiTimer Timer used to report the duration of each diff stage.
   * @param helper Helper providing target-path mapping and the copy policies consulted here.
   * @return A {@link DiffPathSet} with data on files to copy and delete.
   * @throws IOException if the copy of this table / partition should be aborted.
   */
  @VisibleForTesting
  protected static DiffPathSet fullPathDiff(HiveLocationDescriptor sourceLocation,
      HiveLocationDescriptor desiredTargetLocation, Optional<HiveLocationDescriptor> currentTargetLocation,
      Optional<Partition> partition, MultiTimingEvent multiTimer, HiveCopyEntityHelper helper) throws IOException {
    // populate version strategy before analyzing diffs
    sourceLocation.populateDataFileVersionStrategy();
    desiredTargetLocation.populateDataFileVersionStrategy();
    DiffPathSet.DiffPathSetBuilder builder = DiffPathSet.builder();
    // Without a version strategy on both sides no diff can be computed; return an empty diff
    // (nothing copied, nothing deleted).
    if (!sourceLocation.versionStrategy.isPresent() || !desiredTargetLocation.versionStrategy.isPresent()) {
      log.warn("Version strategy doesn't exist ({},{}), cannot handle copy.",
          sourceLocation.versionStrategy.isPresent(),
          desiredTargetLocation.versionStrategy.isPresent());
      return builder.build();
    }
    // Versions produced by different strategies are not comparable, so the strategies must match.
    if (!sourceLocation.versionStrategy.get().getClass().getName()
        .equals(desiredTargetLocation.versionStrategy.get().getClass().getName())) {
      log.warn("Version strategy src: {} and dst: {} doesn't match, cannot handle copy.",
          sourceLocation.versionStrategy.get().getClass().getName(),
          desiredTargetLocation.versionStrategy.get().getClass().getName());
      return builder.build();
    }
    multiTimer.nextStage(Stages.SOURCE_PATH_LISTING);
    // These are the paths at the source
    Map<Path, FileStatus> sourcePaths = sourceLocation.getPaths();
    multiTimer.nextStage(Stages.TARGET_EXISTING_PATH_LISTING);
    // These are the paths that the existing target table / partition uses now
    Map<Path, FileStatus> targetExistingPaths = currentTargetLocation.isPresent()
        ? currentTargetLocation.get().getPaths() : Maps.<Path, FileStatus> newHashMap();
    multiTimer.nextStage(Stages.DESIRED_PATHS_LISTING);
    // These are the paths that exist at the destination and the new table / partition would pick up
    Map<Path, FileStatus> desiredTargetExistingPaths;
    try {
      desiredTargetExistingPaths = desiredTargetLocation.getPaths();
    } catch (IOException ioe) {
      // Thrown if inputFormat cannot find location in target. Since location doesn't exist, this set is empty.
      desiredTargetExistingPaths = Maps.newHashMap();
    }
    multiTimer.nextStage(Stages.PATH_DIFF);
    for (FileStatus sourcePath : sourcePaths.values()) {
      // For each source path
      Path newPath = helper.getTargetPathHelper().getTargetPath(sourcePath.getPath(), desiredTargetLocation.getFileSystem(), partition, true);
      boolean shouldCopy = true;
      // When the strategy is mod-time based, reuse the mod time already present on the FileStatus
      // instead of issuing another call per file.
      boolean useDirectGetModTime = sourceLocation.versionStrategy.isPresent()
          && sourceLocation.versionStrategy.get().getClass().getName().equals(
          ModTimeDataFileVersionStrategy.class.getName());
      if (desiredTargetExistingPaths.containsKey(newPath)) {
        // If the file exists at the destination, check whether it should be replaced, if not, no need to copy
        FileStatus existingTargetStatus = desiredTargetExistingPaths.get(newPath);
        Comparable srcVer = useDirectGetModTime ? sourcePath.getModificationTime() :
            sourceLocation.versionStrategy.get().getVersion(sourcePath.getPath());
        Comparable dstVer = useDirectGetModTime ? existingTargetStatus.getModificationTime() :
            desiredTargetLocation.versionStrategy.get().getVersion(existingTargetStatus.getPath());
        // destination has higher version, skip the copy
        if (srcVer.compareTo(dstVer) <= 0) {
          if (!helper.isEnforceFileSizeMatch() || existingTargetStatus.getLen() == sourcePath.getLen()) {
            log.debug("Copy from src {} (version:{}) to dst {} (version:{}) can be skipped since file size ({} bytes) is matching",
                sourcePath.getPath(), srcVer, existingTargetStatus.getPath(), dstVer, sourcePath.getLen());
            shouldCopy = false;
          } else {
            log.debug("Copy from src {} (version:{}) to dst {} (version:{}) can not be skipped because the file size is not matching or it is enforced by this config: {}",
                sourcePath.getPath(), srcVer, existingTargetStatus.getPath(), dstVer, CopyConfiguration.ENFORCE_FILE_LENGTH_MATCH);
          }
        } else {
          log.debug("Copy from src {} (v:{}) to dst {} (v:{}) is needed due to a higher version.",
              sourcePath.getPath(), srcVer, existingTargetStatus.getPath(), dstVer);
        }
      }
      if (shouldCopy) {
        builder.copyFile(sourcePath);
      } else {
        // If not copying, we want to keep the file in the target
        // at the end of this loop, all files in targetExistingPaths will be marked for deletion, so remove this file
        targetExistingPaths.remove(newPath);
        desiredTargetExistingPaths.remove(newPath);
      }
    }
    multiTimer.nextStage(Stages.COMPUTE_DELETE_PATHS);
    // At this point, targetExistingPaths contains only paths managed by the target partition / table
    // that are no longer desired; mark each of them for deletion.
    for (Path delete : targetExistingPaths.keySet()) {
      builder.deleteFile(delete);
      desiredTargetExistingPaths.remove(delete);
    }
    // Now desiredTargetExistingPaths contains paths that we don't want, but which are not managed by the existing
    // table / partition.
    // Ideally, we shouldn't delete them (they're not managed by Hive), and we don't want to pick
    // them up in the new table / partition, so if there are any leftover files, we should abort copying
    // this table / partition.
    if (desiredTargetExistingPaths.size() > 0 && helper.getUnmanagedDataPolicy() != UnmanagedDataPolicy.DELETE_UNMANAGED_DATA) {
      throw new IOException(String.format(
          "New table / partition would pick up existing, undesired files in target file system. " + "%s, files %s.",
          partition.isPresent() ? partition.get().getCompleteName() : helper.getDataset().getTable().getCompleteName(),
          Arrays.toString(desiredTargetExistingPaths.keySet().toArray())));
    }
    // Unless, the policy requires us to delete such un-managed files - in which case: we will add the leftover files
    // to the deletion list.
    else if (desiredTargetExistingPaths.size() > 0) {
      for (Path delete : desiredTargetExistingPaths.keySet()) {
        builder.deleteFile(delete);
      }
      log.warn(String.format("Un-managed files detected in target file system, however deleting them "
          + "because of the policy: %s Files to be deleted are: %s", UnmanagedDataPolicy.DELETE_UNMANAGED_DATA,
          StringUtils.join(desiredTargetExistingPaths.keySet(), ",")));
    }
    return builder.build();
  }
@VisibleForTesting
protected void checkPartitionedTableCompatibility(Table desiredTargetTable, Table existingTargetTable)
throws IOException {
if (!HiveUtils.areTablePathsEquivalent(getTargetFs(), desiredTargetTable.getDataLocation(), existingTargetTable.getDataLocation())) {
throw new HiveTableLocationNotMatchException(desiredTargetTable.getDataLocation(), existingTargetTable.getDataLocation());
}
if (desiredTargetTable.isPartitioned() != existingTargetTable.isPartitioned()) {
throw new IOException(String.format(
"%s: Desired target table %s partitioned, existing target table %s partitioned. Tables are incompatible.",
this.dataset.tableIdentifier, desiredTargetTable.isPartitioned() ? "is" : "is not",
existingTargetTable.isPartitioned() ? "is" : "is not"));
}
if (desiredTargetTable.isPartitioned()
&& !desiredTargetTable.getPartitionKeys().equals(existingTargetTable.getPartitionKeys())) {
throw new IOException(String.format(
"%s: Desired target table has partition keys %s, existing target table has partition keys %s. "
+ "Tables are incompatible.",
this.dataset.tableIdentifier, gson.toJson(desiredTargetTable.getPartitionKeys()),
gson.toJson(existingTargetTable.getPartitionKeys())));
}
}
/**
* Get builders for a {@link CopyableFile} for each file referred to by a {@link org.apache.hadoop.hive.metastore.api.StorageDescriptor}.
*/
List<CopyableFile.Builder> getCopyableFilesFromPaths(Collection<FileStatus> paths,
CopyConfiguration configuration, Optional<Partition> partition) throws IOException {
List<CopyableFile.Builder> builders = Lists.newArrayList();
List<SourceAndDestination> dataFiles = Lists.newArrayList();
Configuration hadoopConfiguration = new Configuration();
FileSystem actualSourceFs = null;
String referenceScheme = null;
String referenceAuthority = null;
for (FileStatus status : paths) {
dataFiles.add(new SourceAndDestination(status, getTargetPathHelper().getTargetPath(status.getPath(), this.targetFs, partition, true)));
}
for (SourceAndDestination sourceAndDestination : dataFiles) {
URI uri = sourceAndDestination.getSource().getPath().toUri();
if (actualSourceFs == null || !StringUtils.equals(referenceScheme, uri.getScheme())
|| !StringUtils.equals(referenceAuthority, uri.getAuthority())) {
actualSourceFs = sourceAndDestination.getSource().getPath().getFileSystem(hadoopConfiguration);
referenceScheme = uri.getScheme();
referenceAuthority = uri.getAuthority();
}
if (!this.dataset.getTableRootPath().isPresent()) {
// The logic for computing ancestor owner and permissions for hive copies depends on tables having a non-glob
// location. Currently, this restriction is also imposed by Hive, so this is not a problem. If this ever changes
// on the Hive side, and we try to copy a table with a glob location, this logic will have to change.
throw new IOException(String.format("Table %s does not have a concrete table root path.",
this.dataset.getTable().getCompleteName()));
}
List<OwnerAndPermission> ancestorOwnerAndPermission =
CopyableFile.resolveReplicatedOwnerAndPermissionsRecursively(actualSourceFs,
sourceAndDestination.getSource().getPath().getParent(), this.dataset.getTableRootPath().get().getParent(), configuration);
builders.add(CopyableFile.fromOriginAndDestination(actualSourceFs, sourceAndDestination.getSource(),
sourceAndDestination.getDestination(), configuration).
ancestorsOwnerAndPermission(ancestorOwnerAndPermission));
}
return builders;
}
  /**
   * Compute the target location for a Hive location.
   * @param targetFs target {@link FileSystem} the returned path is intended for.
   * @param path source {@link Path} in Hive location.
   * @param partition partition these paths correspond to.
   * @return transformed location in the target.
   */
  Path getTargetLocation(FileSystem targetFs, Path path, Optional<Partition> partition) {
    return getTargetPathHelper().getTargetPath(path, targetFs, partition, false);
  }
protected static Path replacedPrefix(Path sourcePath, Path prefixTobeReplaced, Path prefixReplacement) {
Path sourcePathWithoutSchemeAndAuthority = PathUtils.getPathWithoutSchemeAndAuthority(sourcePath);
Preconditions.checkArgument(PathUtils.isAncestor(prefixTobeReplaced, sourcePathWithoutSchemeAndAuthority),
"When replacing prefix, all locations must be descendants of the prefix. "
+ "The prefix: %s, file location: %s.",
prefixTobeReplaced, sourcePathWithoutSchemeAndAuthority);
Path relativePath = PathUtils.relativizePath(sourcePathWithoutSchemeAndAuthority, prefixTobeReplaced);
Path result = new Path(prefixReplacement, relativePath);
return result;
}
  /** @return the {@link FileSystem} files are copied to. */
  public FileSystem getTargetFileSystem() {
    return this.targetFs;
  }
DatasetDescriptor getSourceDataset() {
String sourceTable = dataset.getTable().getDbName() + "." + dataset.getTable().getTableName();
URI hiveMetastoreURI = null;
if (sourceMetastoreURI.isPresent()) {
hiveMetastoreURI = URI.create(sourceMetastoreURI.get());
}
DatasetDescriptor sourceDataset =
new DatasetDescriptor(DatasetConstants.PLATFORM_HIVE, hiveMetastoreURI, sourceTable);
sourceDataset.addMetadata(DatasetConstants.FS_URI, dataset.getFs().getUri().toString());
return sourceDataset;
}
DatasetDescriptor getDestinationDataset() {
String destinationTable = this.getTargetDatabase() + "." + this.getTargetTable();
URI hiveMetastoreURI = null;
if (targetMetastoreURI.isPresent()) {
hiveMetastoreURI = URI.create(targetMetastoreURI.get());
}
DatasetDescriptor destinationDataset =
new DatasetDescriptor(DatasetConstants.PLATFORM_HIVE, hiveMetastoreURI, destinationTable);
destinationDataset.addMetadata(DatasetConstants.FS_URI, this.getTargetFs().getUri().toString());
return destinationDataset;
}
}
| 2,623 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/hive/HivePartitionsDeregisterFileSet.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.hive;
import java.io.IOException;
import java.util.Collection;
import java.util.List;
import org.apache.hadoop.hive.ql.metadata.Partition;
import com.google.common.collect.Lists;
import org.apache.gobblin.data.management.copy.CopyEntity;
import lombok.extern.slf4j.Slf4j;
/**
* A {@link HiveFileSet} for deregistering partitions in the target.
*/
@Slf4j
public class HivePartitionsDeregisterFileSet extends HiveFileSet {
private final Collection<Partition> partitionsToDeregister;
private final HiveCopyEntityHelper helper;
public HivePartitionsDeregisterFileSet(String name, HiveDataset dataset, Collection<Partition> partitionsToDeregister,
HiveCopyEntityHelper helper) {
super(name, dataset);
this.partitionsToDeregister = partitionsToDeregister;
this.helper = helper;
}
@Override
protected Collection<CopyEntity> generateCopyEntities()
throws IOException {
List<CopyEntity> deregisterCopyEntities = Lists.newArrayList();
int priority = 1;
for (Partition partition : partitionsToDeregister) {
try {
priority = this.helper.addPartitionDeregisterSteps(deregisterCopyEntities, getName(), priority,
this.helper.getTargetTable(), partition);
} catch (IOException ioe) {
log.error(
"Could not create work unit to deregister partition " + partition.getCompleteName());
}
}
return deregisterCopyEntities;
}
}

/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.hive;
import java.util.Properties;
import com.typesafe.config.Config;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.annotation.Alias;
/**
* A path based specific filter factory for generation of {@link PathBasedPartitionFilter}
*/
@Alias("PathPartition")
public class PathBasedHivePartitionFilterFactory implements HivePartitionExtendedFilterFactory {

  /** Config key holding the regular expression used to filter partitions by their path. */
  public static final String HIVE_PARTITION_PATH_FILTER_REGEX = HiveDatasetFinder.HIVE_DATASET_PREFIX + ".pathFilterRegex";

  /**
   * Creates a {@link PathBasedPartitionFilter} from the regex configured under
   * {@link #HIVE_PARTITION_PATH_FILTER_REGEX}, or returns {@code null} when no regex is configured.
   */
  @Override
  public HivePartitionExtendedFilter createFilter(Config config) {
    Properties props = ConfigUtils.configToProperties(config);
    if (!props.containsKey(HIVE_PARTITION_PATH_FILTER_REGEX)) {
      return null;
    }
    return new PathBasedPartitionFilter(props.getProperty(HIVE_PARTITION_PATH_FILTER_REGEX));
  }
}

/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.hive;
import org.apache.hadoop.hive.metastore.api.Partition;
/**
* The {@link org.apache.hadoop.hive.metastore.IMetaStoreClient} provides `listPartitionsByFilter` method which
* contains a String-type filter parameter. Given the fact that this filter is limited on simple arithmetic filtering
* on partition column, the {@link HivePartitionExtendedFilter} interface extends the semantics of partition filters.
* One example is that you can filter partitions based on the partition location.
*/
public interface HivePartitionExtendedFilter {
  /**
   * Decides whether the given metastore-level partition passes this filter.
   *
   * @param partition the thrift-level {@link Partition} to evaluate
   * @return {@code true} if the partition should be accepted, {@code false} to drop it
   */
  boolean accept(Partition partition);
}

/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.hive;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.Set;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.reflect.ConstructorUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.InputFormat;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobConfigurable;
import org.apache.thrift.TException;
import com.google.common.base.Function;
import com.google.common.base.Optional;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import javax.annotation.Nullable;
/**
* Utilities for {@link org.apache.hadoop.hive.ql} classes.
*/
@Slf4j
public class HiveUtils {

  /**
   * @param client an {@link IMetaStoreClient} for the correct metastore.
   * @param table the {@link Table} for which we should get partitions.
   * @param filter an optional filter for partitions as would be used in Hive. Can only filter on String columns.
   *        (e.g. "part = \"part1\"" or "date > \"2015\"").
   * @param hivePartitionExtendedFilterOptional an optional client-side filter (e.g. path based) applied after
   *        the metastore listing.
   * @return a map of partition values to {@link Partition} for input {@link Table}, filtered and non-nullified.
   */
  public static Map<List<String>, Partition> getPartitionsMap(IMetaStoreClient client, Table table,
      Optional<String> filter, Optional<? extends HivePartitionExtendedFilter> hivePartitionExtendedFilterOptional) throws IOException {
    return Maps.uniqueIndex(getPartitions(client, table, filter, hivePartitionExtendedFilterOptional), new Function<Partition, List<String>>() {
      @Override
      public List<String> apply(@Nullable Partition partition) {
        if (partition == null) {
          return null;
        }
        return partition.getValues();
      }
    });
  }

  /**
   * Get a list of {@link Partition}s for the <code>table</code> that matches an optional <code>filter</code>.
   *
   * @param client an {@link IMetaStoreClient} for the correct metastore.
   * @param table the {@link Table} for which we should get partitions.
   * @param filter an optional filter for partitions as would be used in Hive. Can only filter on String columns.
   *        (e.g. "part = \"part1\"" or "date > \"2015\"").
   * @param hivePartitionExtendedFilterOptional an optional client-side filter applied to each listed partition.
   * @return a list of {@link Partition}s
   * @throws IOException wrapping any metastore ({@link TException}) or Hive ({@link HiveException}) failure.
   */
  public static List<Partition> getPartitions(IMetaStoreClient client, Table table,
      Optional<String> filter, Optional<? extends HivePartitionExtendedFilter> hivePartitionExtendedFilterOptional)
      throws IOException {
    try {
      List<Partition> partitions = Lists.newArrayList();
      // Server-side filtering when a Hive filter string is given; otherwise list everything.
      List<org.apache.hadoop.hive.metastore.api.Partition> partitionsList = filter.isPresent()
          ? client.listPartitionsByFilter(table.getDbName(), table.getTableName(), filter.get(), (short) -1)
          : client.listPartitions(table.getDbName(), table.getTableName(), (short) -1);
      for (org.apache.hadoop.hive.metastore.api.Partition p : partitionsList) {
        // Apply the extended (client-side) filter, if any; absent filter accepts everything.
        if (!hivePartitionExtendedFilterOptional.isPresent() ||
            hivePartitionExtendedFilterOptional.get().accept(p)) {
          Partition partition = new Partition(table, p);
          partitions.add(partition);
        }
      }
      return partitions;
    } catch (TException | HiveException te) {
      throw new IOException("Hive Error", te);
    }
  }

  /**
   * For backward compatibility: lists partitions without an extended client-side filter.
   *
   * @param client an {@link IMetaStoreClient} for the correct metastore.
   * @param table the {@link Table} for which we should get partitions.
   * @param filter an optional Hive filter string (String columns only).
   * @return a list of {@link Partition}s
   * @throws IOException wrapping any metastore or Hive failure.
   */
  public static List<Partition> getPartitions(IMetaStoreClient client, Table table, Optional<String> filter)
      throws IOException {
    return getPartitions(client, table, filter, Optional.<HivePartitionExtendedFilter>absent());
  }

  /**
   * @return an instance of the {@link InputFormat} in this {@link StorageDescriptor}.
   * @throws IOException if the input format class cannot be found or instantiated.
   */
  public static InputFormat<?, ?> getInputFormat(StorageDescriptor sd) throws IOException {
    try {
      // Unchecked: the metastore stores the input format as a plain class name; a class that is not
      // actually an InputFormat surfaces as a runtime ClassCastException at this assignment.
      @SuppressWarnings("unchecked")
      InputFormat<?, ?> inputFormat =
          ConstructorUtils.invokeConstructor((Class<? extends InputFormat>) Class.forName(sd.getInputFormat()));
      if (inputFormat instanceof JobConfigurable) {
        ((JobConfigurable) inputFormat).configure(new JobConf(getHadoopConfiguration()));
      }
      return inputFormat;
    } catch (ReflectiveOperationException re) {
      throw new IOException("Failed to instantiate input format.", re);
    }
  }

  /**
   * Get paths from a Hive location using the provided input format.
   *
   * @throws IOException if splitting fails or a non-file split is produced.
   */
  public static Set<Path> getPaths(InputFormat<?, ?> inputFormat, Path location) throws IOException {
    JobConf jobConf = new JobConf(getHadoopConfiguration());
    Set<Path> paths = Sets.newHashSet();
    FileInputFormat.addInputPaths(jobConf, location.toString());
    InputSplit[] splits = inputFormat.getSplits(jobConf, 1000);
    for (InputSplit split : splits) {
      if (!(split instanceof FileSplit)) {
        throw new IOException("Not a file split. Found " + split.getClass().getName());
      }
      FileSplit fileSplit = (FileSplit) split;
      paths.add(fileSplit.getPath());
    }
    return paths;
  }

  /** Builds a Hadoop {@link Configuration}, propagating the delegation token file when present. */
  private static Configuration getHadoopConfiguration() {
    Configuration conf = new Configuration();
    if (System.getenv("HADOOP_TOKEN_FILE_LOCATION") != null) {
      conf.set("mapreduce.job.credentials.binary", System.getenv("HADOOP_TOKEN_FILE_LOCATION"));
    }
    return conf;
  }

  /**
   * @return true if {@link Table} is partitioned.
   * @deprecated use {@link Table}'s isPartitioned method directly.
   */
  @Deprecated
  public static boolean isPartitioned(Table table) {
    return table.isPartitioned();
  }

  /**
   * First check if the user path is exactly the same as the existing path, if so then just return true.
   * Otherwise there could be a fs mismatch, resolve this through the filesystem.
   * If the paths do not exist, then recurse up the parent directories until there is a match, and compare the children.
   *
   * @param fs User configured filesystem of the target table
   * @param userSpecifiedPath user specified path of the copy table location or partition
   * @param existingTablePath path of an already registered Hive table or partition
   * @return true if the filesystem resolves them to be equivalent, false otherwise
   */
  public static boolean areTablePathsEquivalent(FileSystem fs, Path userSpecifiedPath, Path existingTablePath) throws IOException {
    if (userSpecifiedPath == null || existingTablePath == null) {
      log.error("User path or existing hive table path is null");
      return false;
    }
    if (userSpecifiedPath.toString().equals(existingTablePath.toString())) {
      return true;
    }
    try {
      return fs.resolvePath(existingTablePath).equals(fs.resolvePath(userSpecifiedPath));
    } catch (FileNotFoundException e) {
      // Check the edge case where the hive registration path folder does not exist, but the hive table does exist.
      // If the child paths aren't equal, then the paths can't be equal.
      if (!userSpecifiedPath.getName().equals(existingTablePath.getName())) {
        return false;
      }
      // Recurse up the parents to check if there exists a path such that the fs will resolve as equivalent.
      // Parameterized logging avoids the eager String.format and follows SLF4J convention.
      log.warn("User specified path {} or existing table path {} does not exist, checking parents for equality",
          userSpecifiedPath, existingTablePath);
      return areTablePathsEquivalent(fs, userSpecifiedPath.getParent(), existingTablePath.getParent());
    }
  }
}

/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.hive;
import com.google.common.base.Optional;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.io.Closer;
import java.io.IOException;
import java.util.Collection;
import java.util.List;
import java.util.Properties;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.data.management.copy.CopyEntity;
import org.apache.gobblin.data.management.copy.CopyableFile;
import org.apache.gobblin.data.management.copy.entities.PostPublishStep;
import org.apache.gobblin.data.management.copy.entities.PrePublishStep;
import org.apache.gobblin.dataset.DatasetDescriptor;
import org.apache.gobblin.dataset.PartitionDescriptor;
import org.apache.gobblin.hive.HiveConstants;
import org.apache.gobblin.hive.HiveRegisterStep;
import org.apache.gobblin.hive.metastore.HiveMetaStoreUtils;
import org.apache.gobblin.hive.spec.HiveSpec;
import org.apache.gobblin.hive.spec.SimpleHiveSpec;
import org.apache.gobblin.metrics.event.EventSubmitter;
import org.apache.gobblin.metrics.event.MultiTimingEvent;
import org.apache.gobblin.source.extractor.JobCommitPolicy;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.commit.DeleteFileCommitStep;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Partition;
/**
* A {@link HiveFileSet} for Hive partitions. Creates {@link CopyEntity}s for a single Hive partition.
*/
@Getter
@Slf4j
public class HivePartitionFileSet extends HiveFileSet {
  // Table-level helper shared by all file sets of this copy (target table, filesystems, policies).
  private HiveCopyEntityHelper hiveCopyEntityHelper;
  // The source partition this file set copies.
  private final Partition partition;
  private final Properties properties;
  // Partition already registered at the target for the same partition values, if any.
  // Reset to absent once deregister steps for it have been generated below.
  private Optional<Partition> existingTargetPartition;
  private final EventSubmitter eventSubmitter;

  public HivePartitionFileSet(HiveCopyEntityHelper hiveCopyEntityHelper, Partition partition, Properties properties) {
    super(partition.getCompleteName(), hiveCopyEntityHelper.getDataset());
    this.hiveCopyEntityHelper = hiveCopyEntityHelper;
    this.partition = partition;
    this.properties = properties;
    // Look up a pre-existing target partition keyed by this partition's values.
    this.existingTargetPartition =
        Optional.fromNullable(this.hiveCopyEntityHelper.getTargetPartitions().get(this.partition.getValues()));
    this.eventSubmitter =
        new EventSubmitter.Builder(this.hiveCopyEntityHelper.getDataset().getMetricContext(), "hive.dataset.copy")
            .addMetadata("Partition", this.partition.getName()).build();
  }

  /**
   * Generates all {@link CopyEntity}s needed to copy this partition: optional deregister steps for an
   * incompatible pre-existing target partition, a post-publish Hive registration step, pre-publish
   * delete steps for stale target files, and one {@link CopyableFile} per file to copy.
   *
   * @return the copy entities, or an empty list when the partition is skipped (fast-skip predicate,
   *         or incompatible target partition under the partial-success commit policy).
   * @throws IOException if source and target partitions are incompatible and policy forbids replacement.
   */
  @Override
  protected Collection<CopyEntity> generateCopyEntities() throws IOException {
    try (Closer closer = Closer.create()) {
      MultiTimingEvent multiTimer = closer.register(new MultiTimingEvent(this.eventSubmitter, "PartitionCopy", true));
      int stepPriority = 0;
      String fileSet = HiveCopyEntityHelper.gson.toJson(this.partition.getValues());
      List<CopyEntity> copyEntities = Lists.newArrayList();
      stepPriority = hiveCopyEntityHelper.addSharedSteps(copyEntities, fileSet, stepPriority);
      multiTimer.nextStage(HiveCopyEntityHelper.Stages.COMPUTE_TARGETS);
      Path targetPath = hiveCopyEntityHelper.getTargetLocation(hiveCopyEntityHelper.getTargetFs(),
          this.partition.getDataLocation(), Optional.of(this.partition));
      Partition targetPartition = getTargetPartition(this.partition, targetPath);
      multiTimer.nextStage(HiveCopyEntityHelper.Stages.EXISTING_PARTITION);
      if (this.existingTargetPartition.isPresent()) {
        // Remove from the helper's map so this partition is not later treated as an orphan.
        hiveCopyEntityHelper.getTargetPartitions().remove(this.partition.getValues());
        try {
          checkPartitionCompatibility(targetPartition, this.existingTargetPartition.get());
        } catch (IOException ioe) {
          // Incompatible locations: abort unless policy allows replacing partitions.
          if (hiveCopyEntityHelper.getExistingEntityPolicy() != HiveCopyEntityHelper.ExistingEntityPolicy.REPLACE_PARTITIONS &&
              hiveCopyEntityHelper.getExistingEntityPolicy() != HiveCopyEntityHelper.ExistingEntityPolicy.REPLACE_TABLE_AND_PARTITIONS) {
            log.error("Source and target partitions are not compatible. Aborting copy of partition " + this.partition,
                ioe);
            // Silence error and continue processing workunits if we allow partial success
            if (ConfigUtils.getString(hiveCopyEntityHelper.getConfiguration().getConfig(), ConfigurationKeys.JOB_COMMIT_POLICY_KEY,
                JobCommitPolicy.COMMIT_ON_FULL_SUCCESS.toString()).equals(JobCommitPolicy.COMMIT_SUCCESSFUL_TASKS.toString())) {
              return Lists.newArrayList();
            } else {
              throw ioe;
            }
          }
          log.warn("Source and target partitions are not compatible. Will override target partition: " + ioe.getMessage());
          log.debug("Incompatibility details: ", ioe);
          // Policy allows replacement: deregister the existing target partition before re-registering.
          stepPriority = hiveCopyEntityHelper.addPartitionDeregisterSteps(copyEntities, fileSet, stepPriority,
              hiveCopyEntityHelper.getTargetTable(), this.existingTargetPartition.get());
          this.existingTargetPartition = Optional.absent();
        }
      }
      multiTimer.nextStage(HiveCopyEntityHelper.Stages.PARTITION_SKIP_PREDICATE);
      if (hiveCopyEntityHelper.getFastPartitionSkip().isPresent()
          && hiveCopyEntityHelper.getFastPartitionSkip().get().apply(this)) {
        log.info(String.format("Skipping copy of partition %s due to fast partition skip predicate.",
            this.partition.getCompleteName()));
        return Lists.newArrayList();
      }
      // Register the target partition in the metastore after the files are published.
      HiveSpec partitionHiveSpec = new SimpleHiveSpec.Builder<>(targetPath)
          .withTable(HiveMetaStoreUtils.getHiveTable(hiveCopyEntityHelper.getTargetTable().getTTable()))
          .withPartition(Optional.of(HiveMetaStoreUtils.getHivePartition(targetPartition.getTPartition()))).build();
      HiveRegisterStep register = new HiveRegisterStep(hiveCopyEntityHelper.getTargetMetastoreURI(), partitionHiveSpec,
          hiveCopyEntityHelper.getHiveRegProps());
      copyEntities.add(new PostPublishStep(fileSet, Maps.<String, String> newHashMap(), register, stepPriority++));
      multiTimer.nextStage(HiveCopyEntityHelper.Stages.CREATE_LOCATIONS);
      HiveLocationDescriptor sourceLocation =
          HiveLocationDescriptor.forPartition(this.partition, hiveCopyEntityHelper.getDataset().fs, this.properties);
      HiveLocationDescriptor desiredTargetLocation =
          HiveLocationDescriptor.forPartition(targetPartition, hiveCopyEntityHelper.getTargetFs(), this.properties);
      Optional<HiveLocationDescriptor> existingTargetLocation = this.existingTargetPartition.isPresent()
          ? Optional.of(HiveLocationDescriptor.forPartition(this.existingTargetPartition.get(),
              hiveCopyEntityHelper.getTargetFs(), this.properties))
          : Optional.<HiveLocationDescriptor> absent();
      multiTimer.nextStage(HiveCopyEntityHelper.Stages.FULL_PATH_DIFF);
      // Diff source vs. target to find files that must be copied and stale target files to delete.
      HiveCopyEntityHelper.DiffPathSet
          diffPathSet = HiveCopyEntityHelper.fullPathDiff(sourceLocation, desiredTargetLocation, existingTargetLocation,
          Optional.<Partition> absent(), multiTimer, hiveCopyEntityHelper);
      multiTimer.nextStage(HiveCopyEntityHelper.Stages.CREATE_DELETE_UNITS);
      if (diffPathSet.pathsToDelete.size() > 0) {
        DeleteFileCommitStep deleteStep = DeleteFileCommitStep.fromPaths(hiveCopyEntityHelper.getTargetFs(),
            diffPathSet.pathsToDelete, hiveCopyEntityHelper.getDataset().properties);
        copyEntities.add(new PrePublishStep(fileSet, Maps.<String, String> newHashMap(), deleteStep, stepPriority++));
      }
      multiTimer.nextStage(HiveCopyEntityHelper.Stages.CREATE_COPY_UNITS);
      for (CopyableFile.Builder builder : hiveCopyEntityHelper.getCopyableFilesFromPaths(diffPathSet.filesToCopy,
          hiveCopyEntityHelper.getConfiguration(), Optional.of(this.partition))) {
        CopyableFile fileEntity =
            builder.fileSet(fileSet).checksum(new byte[0]).datasetOutputPath(desiredTargetLocation.location.toString())
                .build();
        // Attach lineage information: source and destination partition descriptors.
        DatasetDescriptor sourceDataset = this.hiveCopyEntityHelper.getSourceDataset();
        PartitionDescriptor source = new PartitionDescriptor(partition.getName(), sourceDataset);
        fileEntity.setSourceData(source);
        DatasetDescriptor destinationDataset = this.hiveCopyEntityHelper.getDestinationDataset();
        Partition destinationPartition =
            this.existingTargetPartition.isPresent() ? this.existingTargetPartition.get() : partition;
        PartitionDescriptor destination =
            new PartitionDescriptor(destinationPartition.getName(), destinationDataset);
        fileEntity.setDestinationData(destination);
        copyEntities.add(fileEntity);
      }
      log.info("Created {} copy entities for partition {}", copyEntities.size(), this.partition.getCompleteName());
      return copyEntities;
    }
  }

  /**
   * Builds the target-side {@link Partition}: a deep copy of the origin's metadata re-homed to the
   * target database and location, stamped with distcp registration markers, and with creation time cleared.
   */
  private Partition getTargetPartition(Partition originPartition, Path targetLocation) throws IOException {
    try {
      Partition targetPartition = new Partition(this.hiveCopyEntityHelper.getTargetTable(), originPartition.getTPartition().deepCopy());
      targetPartition.getTable().setDbName(this.hiveCopyEntityHelper.getTargetDatabase());
      targetPartition.getTPartition().setDbName(this.hiveCopyEntityHelper.getTargetDatabase());
      targetPartition.getTPartition().putToParameters(HiveDataset.REGISTERER, HiveCopyEntityHelper.GOBBLIN_DISTCP);
      targetPartition.getTPartition().putToParameters(HiveDataset.REGISTRATION_GENERATION_TIME_MILLIS,
          Long.toString(this.hiveCopyEntityHelper.getStartTime()));
      targetPartition.setLocation(targetLocation.toString());
      // Only rewrite the serde 'path' property when the source partition already carries it.
      targetPartition.getTPartition().getSd().getSerdeInfo().getParameters()
          .computeIfPresent(HiveConstants.PATH, (k,v) -> targetLocation.toString());
      targetPartition.getTPartition().unsetCreateTime();
      return targetPartition;
    } catch (HiveException he) {
      throw new IOException(he);
    }
  }

  /**
   * Verifies that the desired and existing target partitions resolve to equivalent data locations.
   *
   * @throws IOException (as {@link HiveTableLocationNotMatchException}) when the locations differ.
   */
  private void checkPartitionCompatibility(Partition desiredTargetPartition, Partition existingTargetPartition)
      throws IOException {
    if (!HiveUtils.areTablePathsEquivalent(hiveCopyEntityHelper.getTargetFs(), desiredTargetPartition.getDataLocation(),
        existingTargetPartition.getDataLocation())) {
      throw new HiveTableLocationNotMatchException(desiredTargetPartition.getDataLocation(), existingTargetPartition.getDataLocation());
    }
  }
}

/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.hive;
import com.typesafe.config.Config;
/**
* A factory for {@link HivePartitionExtendedFilter} instances.
*/
public interface HivePartitionExtendedFilterFactory {
  /**
   * Builds a {@link HivePartitionExtendedFilter} from the given configuration.
   *
   * @param config Config object contains partition condition.
   *               e.g. config could contain several keywords; partitions with any of these keywords
   *               appearing in their path will be filtered out.
   * @return a partition filter based on the filtering condition contained in {@code config}; may be
   *         {@code null} when the configuration carries no applicable condition (implementation-dependent).
   */
  HivePartitionExtendedFilter createFilter(Config config);
}

/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.hive;
import com.google.common.base.Throwables;
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.net.URISyntaxException;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.Properties;
import javax.annotation.Nonnull;
import lombok.Data;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.reflect.ConstructorUtils;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Table;
import com.google.common.base.Function;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Predicate;
import com.google.common.collect.AbstractIterator;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.config.client.ConfigClient;
import org.apache.gobblin.config.client.ConfigClientCache;
import org.apache.gobblin.config.client.ConfigClientUtils;
import org.apache.gobblin.config.client.api.ConfigStoreFactoryDoesNotExistsException;
import org.apache.gobblin.config.client.api.VersionStabilityPolicy;
import org.apache.gobblin.config.store.api.ConfigStoreCreationException;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.data.management.hive.HiveConfigClientUtils;
import org.apache.gobblin.dataset.IterableDatasetFinder;
import org.apache.gobblin.hive.HiveMetastoreClientPool;
import org.apache.gobblin.metrics.event.EventSubmitter;
import org.apache.gobblin.metrics.event.sla.SlaEventSubmitter;
import org.apache.gobblin.util.AutoReturnableObject;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
/**
* Finds {@link HiveDataset}s. Will look for tables in a database using a {@link WhitelistBlacklist},
* and creates a {@link HiveDataset} for each one.
*/
@Slf4j
public class HiveDatasetFinder implements IterableDatasetFinder<HiveDataset> {
public static final String HIVE_DATASET_PREFIX = "hive.dataset";
public static final String HIVE_METASTORE_URI_KEY = HIVE_DATASET_PREFIX + ".hive.metastore.uri";
public static final String DB_KEY = HIVE_DATASET_PREFIX + ".database";
public static final String TABLE_PATTERN_KEY = HIVE_DATASET_PREFIX + ".table.pattern";
public static final String DEFAULT_TABLE_PATTERN = "*";
public static final String TABLE_FILTER = HIVE_DATASET_PREFIX + ".tableFilter";
/*
* By setting the prefix, only config keys with this prefix will be used to build a HiveDataset.
* By passing scoped configurations the same config keys can be used in different contexts.
*
* E.g
* 1. For CopySource, prefix is gobblin.dataset.copy
* 2. For avro to Orc conversion, prefix is hive.dataset.conversion.avro.orc
* 3. For retention, prefix is gobblin.retention.
*
*/
public static final String HIVE_DATASET_CONFIG_PREFIX_KEY = "hive.dataset.configPrefix";
private static final String DEFAULT_HIVE_DATASET_CONIFG_PREFIX = StringUtils.EMPTY;
public static final String HIVE_DATASET_IS_BLACKLISTED_KEY = "is.blacklisted";
private static final boolean DEFAULT_HIVE_DATASET_IS_BLACKLISTED_KEY = false;
/**
* This is an optional key.
* The fully qualified name of a {@link Function} class which returns the relative uri of a dataset in the config store
*/
public static final String CONFIG_STORE_DATASET_URI_BUILDER_CLASS = "org.apache.gobblin.config.management.datasetUriBuilderClass";
// Event names
private static final String DATASET_FOUND = "DatasetFound";
private static final String DATASET_ERROR = "DatasetError";
private static final String FAILURE_CONTEXT = "FailureContext";
@Getter
protected final Properties properties;
protected final HiveMetastoreClientPool clientPool;
protected final FileSystem fs;
private final WhitelistBlacklist whitelistBlacklist;
private final Optional<EventSubmitter> eventSubmitter;
protected Optional<String> configStoreUri;
protected final Function<Table, String> configStoreDatasetUriBuilder;
protected final Optional<Predicate<Table>> tableFilter;
protected final String datasetConfigPrefix;
protected final ConfigClient configClient;
private final Config jobConfig;
  /** Creates a finder whose metastore client pool is built from {@code properties}. */
  public HiveDatasetFinder(FileSystem fs, Properties properties) throws IOException {
    this(fs, properties, createClientPool(properties));
  }
  /** Creates a finder with an explicit {@link ConfigClient} and no event submitter. */
  protected HiveDatasetFinder(FileSystem fs, Properties properties, ConfigClient configClient) throws IOException {
    this(fs, properties, createClientPool(properties), null, configClient);
  }
  /** Creates a finder that reports dataset discovery events through {@code eventSubmitter}. */
  public HiveDatasetFinder(FileSystem fs, Properties properties, EventSubmitter eventSubmitter) throws IOException {
    this(fs, properties, createClientPool(properties), eventSubmitter);
  }
  /** Creates a finder with an explicit metastore client pool and no event submitter. */
  protected HiveDatasetFinder(FileSystem fs, Properties properties, HiveMetastoreClientPool clientPool)
      throws IOException {
    this(fs, properties, clientPool, null);
  }
  /** Creates a finder with an explicit client pool, using the cached strongly-stable {@link ConfigClient}. */
  protected HiveDatasetFinder(FileSystem fs, Properties properties, HiveMetastoreClientPool clientPool,
      EventSubmitter eventSubmitter) throws IOException {
    this(fs, properties, clientPool, eventSubmitter, ConfigClientCache.getClient(VersionStabilityPolicy.STRONG_LOCAL_STABILITY));
  }
/**
 * Primary constructor: validates that either a database key or a whitelist is configured,
 * builds the whitelist/blacklist, resolves the (optional) config store and dataset URI builder,
 * and instantiates the optional table filter predicate.
 *
 * @throws IOException propagated from client-pool / filesystem setup by delegating constructors
 * @throws RuntimeException if the configured dataset URI builder class cannot be instantiated
 */
@SuppressWarnings("unchecked")
//SupressWarning justification : CONFIG_STORE_DATASET_URI_BUILDER_CLASS must be of type Function<DbAndTable, String>.
//It is safe to throw RuntimeException otherwise
protected HiveDatasetFinder(FileSystem fs, Properties properties, HiveMetastoreClientPool clientPool,
    EventSubmitter eventSubmitter, ConfigClient configClient) throws IOException {
  this.properties = properties;
  this.clientPool = clientPool;
  this.fs = fs;

  String whitelistKey = HIVE_DATASET_PREFIX + "." + WhitelistBlacklist.WHITELIST;
  Preconditions.checkArgument(properties.containsKey(DB_KEY) || properties.containsKey(whitelistKey),
      String.format("Must specify %s or %s.", DB_KEY, whitelistKey));
  Config config = ConfigFactory.parseProperties(properties);

  if (properties.containsKey(DB_KEY)) {
    // Single-db mode: whitelist is "<db>.<tablePattern>" with an empty blacklist.
    this.whitelistBlacklist = new WhitelistBlacklist(this.properties.getProperty(DB_KEY) + "."
        + this.properties.getProperty(TABLE_PATTERN_KEY, DEFAULT_TABLE_PATTERN), "");
  } else {
    this.whitelistBlacklist = new WhitelistBlacklist(config.getConfig(HIVE_DATASET_PREFIX));
  }

  this.eventSubmitter = Optional.fromNullable(eventSubmitter);

  this.configStoreUri = StringUtils.isNotBlank(properties.getProperty(ConfigurationKeys.CONFIG_MANAGEMENT_STORE_URI)) ?
      Optional.of(properties.getProperty(ConfigurationKeys.CONFIG_MANAGEMENT_STORE_URI)) : Optional.<String>absent();
  // Explicitly disabling the config store overrides any configured store URI.
  if (!Boolean.valueOf(properties.getProperty(ConfigurationKeys.CONFIG_MANAGEMENT_STORE_ENABLED,
      ConfigurationKeys.DEFAULT_CONFIG_MANAGEMENT_STORE_ENABLED))) {
    this.configStoreUri = Optional.<String>absent();
  }
  this.datasetConfigPrefix = properties.getProperty(HIVE_DATASET_CONFIG_PREFIX_KEY, DEFAULT_HIVE_DATASET_CONIFG_PREFIX);
  this.configClient = configClient;
  try {
    // Instantiate the user-supplied URI builder via its no-arg constructor, else use the default.
    this.configStoreDatasetUriBuilder =
        properties.containsKey(CONFIG_STORE_DATASET_URI_BUILDER_CLASS) ? (Function<Table, String>) ConstructorUtils
            .invokeConstructor(Class.forName(properties.getProperty(CONFIG_STORE_DATASET_URI_BUILDER_CLASS)))
            : DEFAULT_CONFIG_STORE_DATASET_URI_BUILDER;
  } catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException | InstantiationException
      | ClassNotFoundException e) {
    throw new RuntimeException(e);
  }
  this.jobConfig = ConfigUtils.propertiesToConfig(properties);

  // Optional Predicate<Table> instantiated reflectively with (String, Properties)-style ctor args.
  String tableFilterPredicate = properties.getProperty(TABLE_FILTER);
  if (StringUtils.isNotEmpty(tableFilterPredicate)) {
    this.tableFilter = Optional.of((Predicate<Table>)GobblinConstructorUtils.invokeConstructor(
        Predicate.class, tableFilterPredicate, properties));
  } else {
    this.tableFilter = Optional.absent();
  }
}
/**
 * Obtains a {@link HiveMetastoreClientPool} for the metastore URI configured under
 * {@code HIVE_METASTORE_URI_KEY} (absent URI means the default metastore).
 */
protected static HiveMetastoreClientPool createClientPool(Properties properties) throws IOException {
  return HiveMetastoreClientPool.get(properties,
      Optional.fromNullable(properties.getProperty(HIVE_METASTORE_URI_KEY)));
}
/**
 * Enumerates every database/table pair in the metastore that passes the configured
 * whitelist/blacklist.
 *
 * @return accepted {@link DbAndTable} pairs
 * @throws IOException if the metastore cannot be queried
 */
public Collection<DbAndTable> getTables() throws IOException {
  List<DbAndTable> tables = Lists.newArrayList();

  try (AutoReturnableObject<IMetaStoreClient> client = this.clientPool.getClient()) {
    for (String db : client.get().getAllDatabases()) {
      if (!this.whitelistBlacklist.acceptDb(db)) {
        continue;
      }
      for (String tableName : client.get().getAllTables(db)) {
        if (this.whitelistBlacklist.acceptTable(db, tableName)) {
          tables.add(new DbAndTable(db, tableName));
        }
      }
    }
  } catch (Exception exc) {
    throw new IOException(exc);
  }

  return tables;
}
/** Immutable value holder for a Hive database name / table name pair. */
@Data
public static class DbAndTable {
  private final String db;
  private final String table;

  @Override
  public String toString() {
    // Rendered as "db.table"; used as the dataset URN in SLA events.
    return String.format("%s.%s", this.db, this.table);
  }
}
@Override
public List<HiveDataset> findDatasets() throws IOException {
  // Eagerly drain the lazy iterator into a concrete list.
  List<HiveDataset> datasets = Lists.newArrayList();
  Iterator<HiveDataset> iterator = getDatasetsIterator();
  while (iterator.hasNext()) {
    datasets.add(iterator.next());
  }
  return datasets;
}
/**
 * Returns a lazy iterator over {@link HiveDataset}s: for each accepted db/table it fetches the
 * metastore {@link Table}, applies the optional table filter and blacklist config, emits an SLA
 * event (when an event submitter is configured), and builds the dataset. Failures for a single
 * table are logged and reported as a DatasetError event without aborting the iteration.
 */
@Override
public Iterator<HiveDataset> getDatasetsIterator() throws IOException {
  return new AbstractIterator<HiveDataset>() {
    private Iterator<DbAndTable> tables = getTables().iterator();

    @Override
    protected HiveDataset computeNext() {
      while (this.tables.hasNext()) {
        DbAndTable dbAndTable = this.tables.next();

        try (AutoReturnableObject<IMetaStoreClient> client = HiveDatasetFinder.this.clientPool.getClient()) {
          Table table = client.get().getTable(dbAndTable.getDb(), dbAndTable.getTable());
          // Skip tables rejected by the optional filter predicate.
          if (tableFilter.isPresent() && !tableFilter.get().apply(table)) {
            continue;
          }

          Config datasetConfig = getDatasetConfig(table);
          // Datasets can opt out via their dataset config.
          if (ConfigUtils.getBoolean(datasetConfig, HIVE_DATASET_IS_BLACKLISTED_KEY, DEFAULT_HIVE_DATASET_IS_BLACKLISTED_KEY)) {
            continue;
          }

          if (HiveDatasetFinder.this.eventSubmitter.isPresent()) {
            SlaEventSubmitter.builder().datasetUrn(dbAndTable.toString())
                .eventSubmitter(HiveDatasetFinder.this.eventSubmitter.get()).eventName(DATASET_FOUND).build().submit();
          }

          return createHiveDataset(table, datasetConfig);
        } catch (IllegalArgumentException e) {
          // Rethrow directly instead of the deprecated Throwables.propagate: the exception is
          // already unchecked, so the propagation semantics are identical.
          throw e;
        } catch (Throwable t) {
          log.error(String.format("Failed to create HiveDataset for table %s.%s", dbAndTable.getDb(), dbAndTable.getTable()), t);

          if (HiveDatasetFinder.this.eventSubmitter.isPresent()) {
            SlaEventSubmitter.builder().datasetUrn(dbAndTable.toString())
                .eventSubmitter(HiveDatasetFinder.this.eventSubmitter.get()).eventName(DATASET_ERROR)
                .additionalMetadata(FAILURE_CONTEXT, t.toString()).build().submit();
          }
        }
      }
      return endOfData();
    }
  };
}
/**
 * @deprecated Use {@link #createHiveDataset(Table, Config)} instead
 */
@Deprecated
protected HiveDataset createHiveDataset(Table table) throws IOException {
  return createHiveDataset(table, ConfigFactory.empty());
}

/**
 * Builds a {@link HiveDataset} from a metastore table and its resolved dataset config.
 * Protected so subclasses can substitute specialized dataset implementations.
 */
protected HiveDataset createHiveDataset(Table table, Config datasetConfig) throws IOException {
  return new HiveDataset(this.fs, this.clientPool, new org.apache.hadoop.hive.ql.metadata.Table(table), this.properties, datasetConfig);
}
@Override
public Path commonDatasetRoot() {
  // Hive tables may live anywhere on the filesystem, so the only safe common root is "/".
  return new Path("/");
}
/**
 * Gets the {@link Config} for this <code>dbAndTable</code>.
 * Cases:
 * <ul>
 * <li>If {@link #configStoreUri} is available it gets the dataset config from the config store at this uri
 * <li>If {@link #configStoreUri} is not available it uses the job config as dataset config
 * <li>If {@link #datasetConfigPrefix} is specified, only configs with this prefix is returned
 * <li>If {@link #datasetConfigPrefix} is not specified, all configs are returned
 * </ul>
 * @param table of the dataset to get config
 * @return the {@link Config} for <code>dbAndTable</code>
 */
private Config getDatasetConfig(Table table) throws ConfigStoreFactoryDoesNotExistsException,
    ConfigStoreCreationException, URISyntaxException {
  Config datasetConfig;
  Optional<Config> runtimeConfig = ConfigClientUtils.getOptionalRuntimeConfig(properties);

  // Config store enabled: dataset URI = "<storeUri>/<relative dataset uri>".
  if (this.configStoreUri.isPresent()) {
    if (runtimeConfig.isPresent()) {
      datasetConfig = this.configClient.getConfig(
          this.configStoreUri.get() + Path.SEPARATOR + this.configStoreDatasetUriBuilder.apply(table),
          runtimeConfig);
    } else {
      datasetConfig = this.configClient.getConfig(
          this.configStoreUri.get() + Path.SEPARATOR + this.configStoreDatasetUriBuilder.apply(table));
    }
    // If config store is not enabled use job config
  } else {
    datasetConfig = this.jobConfig;
  }

  // Scope down to the configured prefix when one is set; otherwise return everything.
  return StringUtils.isBlank(this.datasetConfigPrefix) ? datasetConfig : ConfigUtils.getConfig(datasetConfig,
      this.datasetConfigPrefix, ConfigFactory.empty());
}
// Default dataset URI builder: delegates to HiveConfigClientUtils.getDatasetUri(table).
private static final Function<Table, String> DEFAULT_CONFIG_STORE_DATASET_URI_BUILDER =
    new Function<Table, String>() {
      @Override
      public String apply(@Nonnull Table table) {
        return HiveConfigClientUtils.getDatasetUri(table);
      }
    };
}
| 2,630 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/hive/UnpartitionedTableFileSet.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.hive;
import java.io.IOException;
import java.util.Collection;
import java.util.List;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;
import com.google.common.base.Optional;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import org.apache.gobblin.data.management.copy.CopyEntity;
import org.apache.gobblin.data.management.copy.CopyableFile;
import org.apache.gobblin.data.management.copy.entities.PrePublishStep;
import org.apache.gobblin.metrics.event.MultiTimingEvent;
import org.apache.gobblin.util.commit.DeleteFileCommitStep;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import lombok.extern.slf4j.Slf4j;
/**
* A {@link HiveFileSet} that generates {@link CopyEntity}s for an unpartitioned Hive table.
*/
@Slf4j
public class UnpartitionedTableFileSet extends HiveFileSet {

  // Carries copy configuration, target table/filesystem and step-generation utilities.
  private final HiveCopyEntityHelper helper;

  public UnpartitionedTableFileSet(String name, HiveDataset dataset, HiveCopyEntityHelper helper) {
    super(name, dataset);
    this.helper = helper;
  }

  /**
   * Computes the {@link CopyEntity}s required to replicate this unpartitioned table:
   * resolves location conflicts with any pre-existing target table according to the configured
   * existing-entity policy, diffs source vs. target paths, schedules deletions as a pre-publish
   * step and emits one {@link CopyableFile} per file to copy.
   */
  // Suppress warnings for "stepPriority++" in the PrePublishStep constructor, as stepPriority may be used later
  @SuppressFBWarnings("DLS_DEAD_LOCAL_STORE")
  @Override
  protected Collection<CopyEntity> generateCopyEntities()
      throws IOException {
    MultiTimingEvent multiTimer = new MultiTimingEvent(this.helper.getEventSubmitter(), "TableCopy", true);

    int stepPriority = 0;
    String fileSet = getTable().getTableName();
    List<CopyEntity> copyEntities = Lists.newArrayList();

    Optional<Table> existingTargetTable = this.helper.getExistingTargetTable();
    if (existingTargetTable.isPresent()) {
      // Use update policy if user defined table path for their copy location does not match pre-existing table path
      if (!HiveUtils.areTablePathsEquivalent(this.helper.getTargetFs(), this.helper.getTargetTable().getDataLocation(),
          existingTargetTable.get().getDataLocation())) {
        switch (this.helper.getExistingEntityPolicy()){
        case UPDATE_TABLE:
          // Update the location of files while keep the existing table entity.
          log.warn("Source table will not be deregistered while file location has been changed, update source table's"
              + " file location to" + this.helper.getTargetTable().getDataLocation());
          // Treating the existing table as absent makes the path diff below target the new location.
          existingTargetTable = Optional.absent();
          break ;
        case REPLACE_TABLE:
        case REPLACE_TABLE_AND_PARTITIONS:
          // Required to de-register the original table.
          log.warn("Source and target table are not compatible. Will override target table " + existingTargetTable.get()
              .getDataLocation());
          stepPriority = this.helper.addTableDeregisterSteps(copyEntities, fileSet, stepPriority, this.helper.getTargetTable());
          existingTargetTable = Optional.absent();
          break ;
        default:
          // Any other policy aborts the copy of this table.
          log.error("Source and target table are not compatible. Aborting copy of table " + this.helper.getTargetTable());
          multiTimer.close();
          throw new HiveTableLocationNotMatchException(this.helper.getTargetTable().getDataLocation(),
              existingTargetTable.get().getDataLocation());
        }
      }
    }

    stepPriority = this.helper.addSharedSteps(copyEntities, fileSet, stepPriority);

    HiveLocationDescriptor sourceLocation =
        HiveLocationDescriptor.forTable(getTable(), getHiveDataset().getFs(), getHiveDataset().getProperties());
    HiveLocationDescriptor desiredTargetLocation =
        HiveLocationDescriptor.forTable(this.helper.getTargetTable(), this.helper.getTargetFs(), getHiveDataset().getProperties());
    Optional<HiveLocationDescriptor> existingTargetLocation = existingTargetTable.isPresent() ? Optional.of(
        HiveLocationDescriptor.forTable(existingTargetTable.get(), this.helper.getTargetFs(), getHiveDataset().getProperties()))
        : Optional.<HiveLocationDescriptor> absent();

    // Optional fast-skip predicate lets users bypass the (potentially expensive) path diff.
    if (this.helper.getFastTableSkip().isPresent() && this.helper.getFastTableSkip().get().apply(this.helper)) {
      log.info(String.format("Skipping copy of table %s due to fast table skip predicate.", getTable().getDbName()+"." + getTable().getTableName()));
      multiTimer.close();
      return Lists.newArrayList();
    }

    HiveCopyEntityHelper.DiffPathSet
        diffPathSet = HiveCopyEntityHelper.fullPathDiff(sourceLocation, desiredTargetLocation, existingTargetLocation,
        Optional.<Partition> absent(), multiTimer, this.helper);

    multiTimer.nextStage(HiveCopyEntityHelper.Stages.FULL_PATH_DIFF);

    // Could used to delete files for the existing snapshot
    DeleteFileCommitStep deleteStep =
        DeleteFileCommitStep.fromPaths(this.helper.getTargetFs(), diffPathSet.pathsToDelete, getHiveDataset().getProperties());
    copyEntities.add(new PrePublishStep(fileSet, Maps.<String, String> newHashMap(), deleteStep, stepPriority++));

    for (CopyableFile.Builder builder : this.helper.getCopyableFilesFromPaths(diffPathSet.filesToCopy, this.helper.getConfiguration(),
        Optional.<Partition> absent())) {
      CopyableFile fileEntity =
          builder.fileSet(fileSet).datasetOutputPath(desiredTargetLocation.location.toString()).build();
      fileEntity.setSourceData(this.helper.getSourceDataset());
      fileEntity.setDestinationData(this.helper.getDestinationDataset());
      copyEntities.add(fileEntity);
    }

    multiTimer.close();
    return copyEntities;
  }
}
| 2,631 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/hive/HiveLocationDescriptor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.hive;
import java.io.IOException;
import java.util.Map;
import java.util.Properties;
import lombok.Data;
import lombok.extern.slf4j.Slf4j;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.mapred.InputFormat;
import com.google.common.base.Optional;
import com.google.common.collect.Maps;
import org.apache.gobblin.data.management.copy.RecursivePathFinder;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.PathUtils;
import org.apache.gobblin.util.filesystem.DataFileVersionStrategy;
/**
* Contains data for a Hive location as well as additional data if {@link #HIVE_DATASET_COPY_ADDITIONAL_PATHS_RECURSIVELY_ENABLED} set to true.
*/
@Data
@Slf4j
public class HiveLocationDescriptor {
public static final String HIVE_DATASET_COPY_ADDITIONAL_PATHS_RECURSIVELY_ENABLED =
HiveDatasetFinder.HIVE_DATASET_PREFIX + ".copy.additional.paths.recursively.enabled";
public static final String HIVE_LOCATION_LISTING_METHOD =
HiveDatasetFinder.HIVE_DATASET_PREFIX + ".copy.location.listing.method";
public static final String SKIP_HIDDEN_PATHS =
HiveDatasetFinder.HIVE_DATASET_PREFIX + ".copy.locations.listing.skipHiddenPaths";
public static final String DEFAULT_SKIP_HIDDEN_PATHS = Boolean.toString(false);
public static final String DEFAULT_HIVE_LOCATION_LISTING_METHOD = PathFindingMethod.INPUT_FORMAT.name();
public enum PathFindingMethod {
INPUT_FORMAT, RECURSIVE
}
protected final Path location;
protected final InputFormat<?, ?> inputFormat;
protected final FileSystem fileSystem;
protected final Properties properties;
protected Optional<DataFileVersionStrategy> versionStrategy;
protected void populateDataFileVersionStrategy() {
try {
this.versionStrategy = Optional.of(DataFileVersionStrategy
.instantiateDataFileVersionStrategy(fileSystem, ConfigUtils.propertiesToConfig(properties)));
} catch (IOException e) {
log.error("Cannot generate version strategy due to {}", e);
}
}
public Map<Path, FileStatus> getPaths() throws IOException {
PathFindingMethod pathFindingMethod = PathFindingMethod.valueOf(
this.properties.getProperty(HIVE_LOCATION_LISTING_METHOD, DEFAULT_HIVE_LOCATION_LISTING_METHOD).toUpperCase());
Map<Path, FileStatus> result = Maps.newHashMap();
if (pathFindingMethod == PathFindingMethod.INPUT_FORMAT) {
for (Path path : HiveUtils.getPaths(this.inputFormat, this.location)) {
result.put(path, this.fileSystem.getFileStatus(path));
}
boolean useHiveLocationDescriptorWithAdditionalData =
Boolean.valueOf(this.properties.getProperty(HIVE_DATASET_COPY_ADDITIONAL_PATHS_RECURSIVELY_ENABLED, "false"));
if (useHiveLocationDescriptorWithAdditionalData) {
if (PathUtils.isGlob(this.location)) {
throw new IOException("can not get additional data for glob pattern path " + this.location);
}
RecursivePathFinder finder = new RecursivePathFinder(this.fileSystem, this.location, this.properties);
for (FileStatus status : finder.getPaths(false)) {
result.put(status.getPath(), status);
}
}
return result;
} else if (pathFindingMethod == PathFindingMethod.RECURSIVE) {
if (PathUtils.isGlob(this.location)) {
throw new IOException("Cannot use recursive listing for globbed locations.");
}
boolean skipHiddenPaths = Boolean.parseBoolean(this.properties.getProperty(SKIP_HIDDEN_PATHS, DEFAULT_SKIP_HIDDEN_PATHS));
RecursivePathFinder finder = new RecursivePathFinder(this.fileSystem, this.location, this.properties);
for (FileStatus status : finder.getPaths(skipHiddenPaths)) {
result.put(status.getPath(), status);
}
return result;
} else {
throw new IOException("Hive location listing method not recognized: " + pathFindingMethod);
}
}
public static HiveLocationDescriptor forTable(Table table, FileSystem fs, Properties properties) throws IOException {
return new HiveLocationDescriptor(table.getDataLocation(), HiveUtils.getInputFormat(table.getTTable().getSd()), fs, properties);
}
public static HiveLocationDescriptor forPartition(Partition partition, FileSystem fs, Properties properties) throws IOException {
return new HiveLocationDescriptor(partition.getDataLocation(),
HiveUtils.getInputFormat(partition.getTPartition().getSd()), fs, properties);
}
}
| 2,632 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/hive/PartitionFilterGenerator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.hive;
/**
 * Generates a Hive partition filter string dynamically for a dataset.
 */
public interface PartitionFilterGenerator {

  /**
   * Builds a partition filter expression for the given dataset.
   *
   * @param hiveDataset the dataset whose partitions should be filtered
   * @return the filter string, or {@code null} when no filter can be produced (the
   *         implementations in this package return {@code null} on invalid configuration)
   */
  // The redundant 'public' modifier was dropped: interface members are implicitly public.
  String getFilter(HiveDataset hiveDataset);
}
| 2,633 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/hive | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/hive/avro/HiveAvroCopyEntityHelper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.hive.avro;
import java.io.IOException;
import java.net.URI;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat;
import org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.serde2.avro.AvroSerDe;
import com.google.common.base.Optional;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.data.management.copy.hive.HiveCopyEntityHelper;
import org.apache.gobblin.util.PathUtils;
/**
* Update avro related entries in creating {@link org.apache.gobblin.data.management.copy.CopyEntity}s for copying a Hive table.
*/
@Slf4j
public class HiveAvroCopyEntityHelper {

  private static final String HIVE_TABLE_AVRO_SCHEMA_URL = "avro.schema.url";

  /** Static utility class: not meant to be instantiated. */
  private HiveAvroCopyEntityHelper() {
  }

  /**
   * Currently updated the {@link #HIVE_TABLE_AVRO_SCHEMA_URL} location for new hive table
   * @param targetTable, new Table to be registered in hive
   * @throws IOException
   */
  public static void updateTableAttributesIfAvro(Table targetTable, HiveCopyEntityHelper hiveHelper) throws IOException {
    if (isHiveTableAvroType(targetTable)) {
      updateAvroSchemaURL(targetTable.getCompleteName(), targetTable.getTTable().getSd(), hiveHelper);
    }
  }

  /**
   * Currently updated the {@link #HIVE_TABLE_AVRO_SCHEMA_URL} location for new hive partitions
   * @param targetTable, new Table to be registered in hive
   * @param sourcePartitions, source partitions
   * @throws IOException
   */
  public static void updatePartitionAttributesIfAvro(Table targetTable, Map<List<String>, Partition> sourcePartitions, HiveCopyEntityHelper hiveHelper) throws IOException {
    if (isHiveTableAvroType(targetTable)) {
      // Only the partition objects are needed here; the map keys (partition values) are unused,
      // so iterate values() directly instead of entrySet().
      for (Partition partition : sourcePartitions.values()) {
        updateAvroSchemaURL(partition.getCompleteName(), partition.getTPartition().getSd(), hiveHelper);
      }
    }
  }

  /**
   * Rewrites the {@link #HIVE_TABLE_AVRO_SCHEMA_URL} serde parameter so it points at the copy
   * target, but only when the existing URL lives on the source filesystem (or has no
   * scheme/authority). URLs on unrelated filesystems are left untouched.
   *
   * @param entity, name of the entity to be changed, e.g. hive table or partition
   * @param sd, StorageDescriptor of the entity
   */
  public static void updateAvroSchemaURL(String entity, StorageDescriptor sd, HiveCopyEntityHelper hiveHelper) {
    String oldAvroSchemaURL = sd.getSerdeInfo().getParameters().get(HIVE_TABLE_AVRO_SCHEMA_URL);
    if (oldAvroSchemaURL != null) {
      Path oldAvroSchemaPath = new Path(oldAvroSchemaURL);
      URI sourceFileSystemURI = hiveHelper.getDataset().getFs().getUri();

      // NOTE(review): getScheme()/getAuthority() can be null for relative URLs that are not
      // absolute — looks like callers always supply absolute URLs here; confirm.
      if (PathUtils.isAbsoluteAndSchemeAuthorityNull(oldAvroSchemaPath)
          || (oldAvroSchemaPath.toUri().getScheme().equals(sourceFileSystemURI.getScheme())
          && oldAvroSchemaPath.toUri().getAuthority().equals(sourceFileSystemURI.getAuthority()))) {
        String newAvroSchemaURL = hiveHelper.getTargetPathHelper().getTargetPath(oldAvroSchemaPath, hiveHelper.getTargetFileSystem(),
            Optional.<Partition>absent(), true).toString();
        sd.getSerdeInfo().getParameters().put(HIVE_TABLE_AVRO_SCHEMA_URL, newAvroSchemaURL);
        log.info(String.format("For entity %s, change %s from %s to %s", entity,
            HIVE_TABLE_AVRO_SCHEMA_URL, oldAvroSchemaURL, newAvroSchemaURL));
      }
    }
  }

  /**
   * Tell whether a hive table is actually an Avro table
   * @param table a hive {@link Table}
   * @return true if the table's input format, output format or serde indicates Avro
   */
  public static boolean isHiveTableAvroType(Table table) {
    String serializationLib = table.getTTable().getSd().getSerdeInfo().getSerializationLib();
    String inputFormat = table.getTTable().getSd().getInputFormat();
    String outputFormat = table.getTTable().getSd().getOutputFormat();

    return inputFormat.endsWith(AvroContainerInputFormat.class.getSimpleName())
        || outputFormat.endsWith(AvroContainerOutputFormat.class.getSimpleName())
        || serializationLib.endsWith(AvroSerDe.class.getSimpleName());
  }
}
| 2,634 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/hive | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/hive/filter/DateRangePartitionFilterGenerator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.hive.filter;
import java.util.Arrays;
import java.util.Properties;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.data.management.copy.hive.HiveDataset;
import org.apache.gobblin.data.management.copy.hive.HiveDatasetFinder;
import org.apache.gobblin.data.management.copy.hive.PartitionFilterGenerator;
/**
 * {@link PartitionFilterGenerator} that restricts Hive partitions to a fixed date window via a
 * BETWEEN clause. Requires {@link #PARTITION_COLUMN}, {@link #START_DATE} and {@link #END_DATE}.
 *
 * <p>
 * The generated filter has the form {@code datePartition between "start_date" and "end_date"}.
 * </p>
 */
@Slf4j
public class DateRangePartitionFilterGenerator implements PartitionFilterGenerator {

  public static final String PARTITION_COLUMN = HiveDatasetFinder.HIVE_DATASET_PREFIX + ".partition.filter.datetime.column";
  public static final String START_DATE = HiveDatasetFinder.HIVE_DATASET_PREFIX + ".partition.filter.datetime.startdate";
  public static final String END_DATE = HiveDatasetFinder.HIVE_DATASET_PREFIX + ".partition.filter.datetime.enddate";

  private final Properties prop;

  public DateRangePartitionFilterGenerator(Properties properties) {
    // Fall back to system properties when none are supplied.
    this.prop = (properties == null) ? System.getProperties() : properties;
  }

  @Override
  public String getFilter(HiveDataset hiveDataset) {
    // Guard clause: without the three required keys no filter can be built.
    if (!isValidConfig()) {
      log.error(DateRangePartitionFilterGenerator.class.getName()
          + " requires the following properties " + Arrays.toString(new String[]{PARTITION_COLUMN, START_DATE, END_DATE}));
      return null;
    }

    String column = this.prop.getProperty(PARTITION_COLUMN);
    String rangeStart = this.prop.getProperty(START_DATE);
    String rangeEnd = this.prop.getProperty(END_DATE);
    String partitionFilter = String.format("%s between \"%s\" and \"%s\"", column, rangeStart, rangeEnd);

    String datasetName = (hiveDataset == null) ? "null" : hiveDataset.getTable().getCompleteName();
    log.info(String.format("Getting partitions for %s using partition filter %s", datasetName, partitionFilter));
    return partitionFilter;
  }

  /** True when all three required datetime properties are present. */
  private boolean isValidConfig() {
    return this.prop.containsKey(PARTITION_COLUMN)
        && this.prop.containsKey(START_DATE)
        && this.prop.containsKey(END_DATE);
  }
}
| 2,635 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/hive | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/hive/filter/LookbackPartitionFilterGenerator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.hive.filter;
import java.util.Arrays;
import java.util.Properties;
import lombok.extern.slf4j.Slf4j;
import org.joda.time.DateTime;
import org.joda.time.Period;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import org.apache.gobblin.data.management.copy.hive.HiveDataset;
import org.apache.gobblin.data.management.copy.hive.HiveDatasetFinder;
import org.apache.gobblin.data.management.copy.hive.PartitionFilterGenerator;
/**
 * {@link PartitionFilterGenerator} that keeps only partitions newer than a lookback window. The
 * user must supply the partition column, an ISO 8601 lookback period, and the datetime format of
 * the column values.
 *
 * <p>
 * The generated filter has the form {@code datePartition >= "date"}, so the column must be a
 * string whose format keeps lexicographic and chronological ordering compatible.
 * </p>
 */
@Slf4j
public class LookbackPartitionFilterGenerator implements PartitionFilterGenerator {

  public static final String PARTITION_COLUMN = HiveDatasetFinder.HIVE_DATASET_PREFIX + ".partition.filter.datetime.column";
  public static final String LOOKBACK = HiveDatasetFinder.HIVE_DATASET_PREFIX + ".partition.filter.datetime.lookback";
  public static final String DATETIME_FORMAT = HiveDatasetFinder.HIVE_DATASET_PREFIX + ".partition.filter.datetime.format";

  private final Properties prop;

  public LookbackPartitionFilterGenerator(Properties properties) {
    // Fall back to system properties when none are supplied.
    this.prop = (properties == null) ? System.getProperties() : properties;
  }

  @Override
  public String getFilter(HiveDataset hiveDataset) {
    // Guard clause: without the three required keys no filter can be built.
    if (!isValidConfig()) {
      log.error(LookbackPartitionFilterGenerator.class.getName()
          + " requires the following properties " + Arrays.toString(new String[]{PARTITION_COLUMN, LOOKBACK, DATETIME_FORMAT}));
      return null;
    }

    DateTimeFormatter formatter = DateTimeFormat.forPattern(this.prop.getProperty(DATETIME_FORMAT));
    DateTime cutoff = new DateTime().minus(Period.parse(this.prop.getProperty(LOOKBACK)));
    String partitionFilter =
        String.format("%s >= \"%s\"", this.prop.getProperty(PARTITION_COLUMN), formatter.print(cutoff));

    String datasetName = (hiveDataset == null) ? "null" : hiveDataset.getTable().getCompleteName();
    log.info(String.format("Getting partitions for %s using partition filter %s", datasetName, partitionFilter));
    return partitionFilter;
  }

  /** True when all three required datetime properties are present. */
  private boolean isValidConfig() {
    return this.prop.containsKey(PARTITION_COLUMN)
        && this.prop.containsKey(DATETIME_FORMAT)
        && this.prop.containsKey(LOOKBACK);
  }
}
| 2,636 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/replication/ConfigBasedDataset.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.replication;
import java.io.IOException;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import org.apache.gobblin.util.filesystem.ModTimeDataFileVersionStrategy;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.data.management.copy.CopyConfiguration;
import org.apache.gobblin.data.management.copy.CopyEntity;
import org.apache.gobblin.data.management.copy.CopyableDataset;
import org.apache.gobblin.data.management.copy.CopyableFile;
import org.apache.gobblin.data.management.copy.entities.PostPublishStep;
import org.apache.gobblin.data.management.copy.entities.PrePublishStep;
import org.apache.gobblin.data.management.dataset.DatasetUtils;
import org.apache.gobblin.util.HadoopUtils;
import org.apache.gobblin.util.PathUtils;
import org.apache.gobblin.util.commit.DeleteFileCommitStep;
import org.apache.gobblin.util.filesystem.DataFileVersionStrategy;
import lombok.Getter;
import lombok.Setter;
import lombok.extern.slf4j.Slf4j;
/**
* Extends {@link CopyableDataset} to represent data replication dataset based on {@link Config}
*
* Detail logics
* <ul>
* <li>Picked the preferred topology
* <li>Based on current running cluster and CopyMode (push or pull) pick the routes
* <li>Based on optimization policy to pick the CopyFrom and CopyTo pair
* <li>Generated the CopyEntity based on CopyFrom and CopyTo pair
* </ul>
* @author mitu
*
*/
@Slf4j
public class ConfigBasedDataset implements CopyableDataset {

  private final Properties props;
  private final CopyRoute copyRoute;
  private final ReplicationConfiguration rc;
  // URN identifying this dataset: the fully qualified copy-to path for Hadoop FS end points,
  // or EndPoint#toString() otherwise.
  private String datasetURN;
  // When true, the whole copy is skipped if the destination watermark already covers the source's.
  private boolean watermarkEnabled;
  private final PathFilter pathFilter;

  // Version strategies used to decide whether a destination file is already up to date.
  private final Optional<DataFileVersionStrategy> srcDataFileVersionStrategy;
  private final Optional<DataFileVersionStrategy> dstDataFileVersionStrategy;

  @Setter @Getter
  protected String expectedSchema = null;

  //Apply filter to directories
  private final boolean applyFilterToDirectories;

  //Version strategy from config store, if one was declared there (set in initDataFileVersionStrategy)
  private Optional<String> versionStrategyFromCS = Optional.absent();

  public ConfigBasedDataset(ReplicationConfiguration rc, Properties props, CopyRoute copyRoute) {
    this.props = props;
    this.copyRoute = copyRoute;
    this.rc = rc;
    calculateDatasetURN();
    this.watermarkEnabled =
        Boolean.parseBoolean(this.props.getProperty(ConfigBasedDatasetsFinder.WATERMARK_ENABLE, "true"));
    this.pathFilter = DatasetUtils.instantiatePathFilter(this.props);
    this.applyFilterToDirectories =
        Boolean.parseBoolean(this.props.getProperty(CopyConfiguration.APPLY_FILTER_TO_DIRECTORIES, "false"));
    this.srcDataFileVersionStrategy = initDataFileVersionStrategy(this.copyRoute.getCopyFrom(), rc, props);
    this.dstDataFileVersionStrategy = initDataFileVersionStrategy(this.copyRoute.getCopyTo(), rc, props);
  }

  public ConfigBasedDataset(ReplicationConfiguration rc, Properties props, CopyRoute copyRoute, String datasetURN) {
    this.props = props;
    this.copyRoute = copyRoute;
    this.rc = rc;
    this.datasetURN = datasetURN;
    // NOTE(review): unlike the 3-arg constructor, watermarkEnabled is not initialized here and
    // defaults to false, so watermark checking is effectively disabled — confirm this is intended.
    this.pathFilter = DatasetUtils.instantiatePathFilter(this.props);
    this.applyFilterToDirectories =
        Boolean.parseBoolean(this.props.getProperty(CopyConfiguration.APPLY_FILTER_TO_DIRECTORIES, "false"));
    this.srcDataFileVersionStrategy = initDataFileVersionStrategy(this.copyRoute.getCopyFrom(), rc, props);
    this.dstDataFileVersionStrategy = initDataFileVersionStrategy(this.copyRoute.getCopyTo(), rc, props);
  }

  /**
   * Get the version strategy that can retrieve the data file version from the end point.
   * Resolution order: strategy declared in the config store, then job properties, then the
   * default (modification-time) strategy.
   *
   * @return the version strategy. Empty value when the version is not supported for this end point.
   */
  private Optional<DataFileVersionStrategy> initDataFileVersionStrategy(EndPoint endPoint, ReplicationConfiguration rc, Properties props) {
    if (!(endPoint instanceof HadoopFsEndPoint)) {
      log.warn("Data file version currently only handle the Hadoop Fs EndPoint replication");
      return Optional.absent();
    }

    Configuration conf = HadoopUtils.newConfiguration();
    try {
      HadoopFsEndPoint hEndpoint = (HadoopFsEndPoint) endPoint;
      FileSystem fs = FileSystem.get(hEndpoint.getFsURI(), conf);
      // If configStore doesn't contain the strategy, check from job properties.
      // If no strategy is found, default to the modification time strategy.
      this.versionStrategyFromCS = rc.getVersionStrategyFromConfigStore();
      String nonEmptyStrategy = versionStrategyFromCS.isPresent()? versionStrategyFromCS.get() :
          props.getProperty(DataFileVersionStrategy.DATA_FILE_VERSION_STRATEGY_KEY, DataFileVersionStrategy.DEFAULT_DATA_FILE_VERSION_STRATEGY);
      Config versionStrategyConfig = ConfigFactory.parseMap(ImmutableMap.of(
          DataFileVersionStrategy.DATA_FILE_VERSION_STRATEGY_KEY, nonEmptyStrategy));
      DataFileVersionStrategy strategy = DataFileVersionStrategy.instantiateDataFileVersionStrategy(fs, versionStrategyConfig);
      log.debug("{} has version strategy {}", hEndpoint.getClusterName(), strategy.getClass().getName());
      return Optional.of(strategy);
    } catch (IOException e) {
      // Pass the exception as the Throwable argument (not a "{}" placeholder) so the full
      // stack trace is logged instead of being swallowed by the message formatter.
      log.error("Version strategy cannot be created", e);
      return Optional.absent();
    }
  }

  /** Derives {@link #datasetURN} from the copy-to end point. */
  private void calculateDatasetURN() {
    EndPoint e = this.copyRoute.getCopyTo();
    if (e instanceof HadoopFsEndPoint) {
      HadoopFsEndPoint copyTo = (HadoopFsEndPoint) e;
      Configuration conf = HadoopUtils.newConfiguration();
      try {
        FileSystem copyToFs = FileSystem.get(copyTo.getFsURI(), conf);
        this.datasetURN = copyToFs.makeQualified(copyTo.getDatasetPath()).toString();
      } catch (IOException e1) {
        // ignored: datasetURN stays null when the copy-to file system is unreachable
      }
    } else {
      this.datasetURN = e.toString();
    }
  }

  public boolean schemaCheckEnabled() { return this.rc.isSchemaCheckEnabled(); }

  @Override
  public String datasetURN() {
    return this.datasetURN;
  }

  /**
   * Computes the copy entities for this dataset: the files that need to be copied from source to
   * destination, a pre-publish step deleting stale destination files, and a post-publish step
   * that materializes the source watermark on the destination.
   *
   * <p>Returns an empty collection when the end points are not both Hadoop file systems, when the
   * version strategies are missing or mismatched, or when the destination watermark already
   * covers the source watermark.</p>
   */
  @Override
  public Collection<? extends CopyEntity> getCopyableFiles(FileSystem targetFs, CopyConfiguration copyConfiguration)
      throws IOException {
    // Config-store setting overrides the job-level file-length-match enforcement.
    boolean enforceFileSizeMatch = this.rc.getEnforceFileSizeMatchFromConfigStore().isPresent()?
        this.rc.getEnforceFileSizeMatchFromConfigStore().get() :
        copyConfiguration.isEnforceFileLengthMatch();
    List<CopyEntity> copyableFiles = Lists.newArrayList();
    EndPoint copyFromRaw = copyRoute.getCopyFrom();
    EndPoint copyToRaw = copyRoute.getCopyTo();
    if (!(copyFromRaw instanceof HadoopFsEndPoint && copyToRaw instanceof HadoopFsEndPoint)) {
      log.warn("Currently only handle the Hadoop Fs EndPoint replication");
      return copyableFiles;
    }

    // Both end points must share the same version strategy, otherwise versions are not comparable.
    if (!this.srcDataFileVersionStrategy.isPresent() || !this.dstDataFileVersionStrategy.isPresent()) {
      log.warn("Version strategy doesn't exist, cannot handle copy");
      return copyableFiles;
    }
    if (!this.srcDataFileVersionStrategy.get().getClass().getName()
        .equals(this.dstDataFileVersionStrategy.get().getClass().getName())) {
      log.warn("Version strategy src: {} and dst: {} doesn't match, cannot handle copy.",
          this.srcDataFileVersionStrategy.get().getClass().getName(),
          this.dstDataFileVersionStrategy.get().getClass().getName());
      return copyableFiles;
    }

    //For {@link HadoopFsEndPoint}s, set pathfilter and applyFilterToDirectories
    HadoopFsEndPoint copyFrom = (HadoopFsEndPoint) copyFromRaw;
    HadoopFsEndPoint copyTo = (HadoopFsEndPoint) copyToRaw;
    copyFrom.setPathFilter(pathFilter);
    copyFrom.setApplyFilterToDirectories(applyFilterToDirectories);
    copyTo.setPathFilter(pathFilter);
    copyTo.setApplyFilterToDirectories(applyFilterToDirectories);

    if (this.watermarkEnabled) {
      // Skip the copy when the destination watermark is at or beyond the source watermark.
      if ((!copyFromRaw.getWatermark().isPresent() && copyToRaw.getWatermark().isPresent()) || (
          copyFromRaw.getWatermark().isPresent() && copyToRaw.getWatermark().isPresent()
              && copyFromRaw.getWatermark().get().compareTo(copyToRaw.getWatermark().get()) <= 0)) {
        log.info(
            "No need to copy as destination watermark >= source watermark with source watermark {}, for dataset with metadata {}",
            copyFromRaw.getWatermark().isPresent() ? copyFromRaw.getWatermark().get().toJson() : "N/A",
            this.rc.getMetaData());
        return copyableFiles;
      }
    }

    Configuration conf = HadoopUtils.newConfiguration();
    FileSystem copyFromFs = FileSystem.get(copyFrom.getFsURI(), conf);
    FileSystem copyToFs = FileSystem.get(copyTo.getFsURI(), conf);

    Collection<FileStatus> allFilesInSource = copyFrom.getFiles();
    Collection<FileStatus> allFilesInTarget = copyTo.getFiles();

    Set<FileStatus> copyFromFileStatuses = Sets.newHashSet(allFilesInSource);
    // Index the destination files by scheme/authority-less path for O(1) lookup per source file.
    Map<Path, FileStatus> copyToFileMap = Maps.newHashMap();
    for (FileStatus f : allFilesInTarget) {
      copyToFileMap.put(PathUtils.getPathWithoutSchemeAndAuthority(f.getPath()), f);
    }

    Collection<Path> deletedPaths = Lists.newArrayList();

    boolean watermarkMetadataCopied = false;

    boolean deleteTargetIfNotExistOnSource = rc.isDeleteTargetIfNotExistOnSource();

    for (FileStatus originFileStatus : copyFromFileStatuses) {
      Path relative = PathUtils.relativizePath(PathUtils.getPathWithoutSchemeAndAuthority(originFileStatus.getPath()),
          PathUtils.getPathWithoutSchemeAndAuthority(copyFrom.getDatasetPath()));
      // construct the new path in the target file system
      Path newPath = new Path(copyTo.getDatasetPath(), relative);

      if (relative.toString().equals(ReplicaHadoopFsEndPoint.WATERMARK_FILE)) {
        watermarkMetadataCopied = true;
      }

      boolean shouldCopy = true;
      // Can optimize by using the mod time that has already been fetched
      boolean useDirectGetModTime = this.srcDataFileVersionStrategy.isPresent()
          && this.srcDataFileVersionStrategy.get().getClass().getName().equals(
          ModTimeDataFileVersionStrategy.class.getName());
      if (copyToFileMap.containsKey(newPath)) {
        Comparable srcVer = useDirectGetModTime ? originFileStatus.getModificationTime() :
            this.srcDataFileVersionStrategy.get().getVersion(originFileStatus.getPath());
        Comparable dstVer = useDirectGetModTime ? copyToFileMap.get(newPath).getModificationTime() :
            this.dstDataFileVersionStrategy.get().getVersion(copyToFileMap.get(newPath).getPath());

        // destination has higher version, skip the copy
        if (srcVer.compareTo(dstVer) <= 0) {
          if (!enforceFileSizeMatch || copyToFileMap.get(newPath).getLen() == originFileStatus.getLen()) {
            log.debug("Copy from src {} (v:{}) to dst {} (v:{}) can be skipped.",
                originFileStatus.getPath(), srcVer, copyToFileMap.get(newPath).getPath(), dstVer);
            shouldCopy = false;
          } else {
            log.debug("Copy from src {} (v:{}) to dst {} (v:{}) can not be skipped due to unmatched file length.",
                originFileStatus.getPath(), srcVer, copyToFileMap.get(newPath).getPath(), dstVer);
          }
        } else {
          log.debug("Copy from src {} (v:{}) to dst {} (v:{}) is needed due to a higher version.",
              originFileStatus.getPath(), srcVer, copyToFileMap.get(newPath).getPath(), dstVer);
        }
      } else {
        log.debug("Copy from src {} to dst {} is needed because dst doesn't contain the file",
            originFileStatus.getPath(), copyToFileMap.get(newPath));
      }

      if (shouldCopy) {
        // need to remove those files in the target File System
        if (copyToFileMap.containsKey(newPath)) {
          deletedPaths.add(newPath);
        }

        CopyableFile copyableFile = CopyableFile
            .fromOriginAndDestination(copyFromFs, originFileStatus, copyToFs.makeQualified(newPath), copyConfiguration)
            .fileSet(PathUtils.getPathWithoutSchemeAndAuthority(copyTo.getDatasetPath()).toString())
            .dataFileVersionStrategy(this.versionStrategyFromCS.isPresent()? this.versionStrategyFromCS.get(): null)
            .build();
        copyableFile.setFsDatasets(copyFromFs, copyToFs);
        copyableFiles.add(copyableFile);
      }
      // clean up already checked paths
      copyToFileMap.remove(newPath);
    }

    // delete the paths on target directory if NOT exists on source
    if (deleteTargetIfNotExistOnSource) {
      deletedPaths.addAll(copyToFileMap.keySet());
    }

    // delete old files first
    if (!deletedPaths.isEmpty()) {
      DeleteFileCommitStep deleteCommitStep = DeleteFileCommitStep.fromPaths(copyToFs, deletedPaths, this.props);
      copyableFiles.add(
          new PrePublishStep(copyTo.getDatasetPath().toString(), Maps.<String, String>newHashMap(), deleteCommitStep,
              0));
    }

    // generate the watermark file even if watermark checking is disabled, so it can become functional once desired
    if ((!watermarkMetadataCopied) && copyFrom.getWatermark().isPresent()) {
      copyableFiles.add(new PostPublishStep(copyTo.getDatasetPath().toString(), Maps.<String, String>newHashMap(),
          new WatermarkMetadataGenerationCommitStep(copyTo.getFsURI().toString(), copyTo.getDatasetPath(),
              copyFrom.getWatermark().get()), 1));
    }
    return copyableFiles;
  }
}
| 2,637 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/replication/DataFlowTopology.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.replication;
import java.util.ArrayList;
import java.util.List;
import com.google.common.base.Function;
import com.google.common.base.Joiner;
import com.google.common.base.MoreObjects;
import com.google.common.collect.Lists;
import lombok.Data;
import lombok.AllArgsConstructor;;
/**
* Class to represent the data flow topology from copy source to copy destinations. Each {@link DataFlowTopology} contains
* a list of {@link DataFlowTopology.DataFlowPath}s
*
*
*/
/**
 * Represents the data flow topology from a copy source to its copy destinations, as a list of
 * {@link DataFlowTopology.DataFlowPath}s.
 *
 * In config-store terms (see e.g.
 * {@link ReplicationConfiguration#DEFAULT_DATA_FLOW_TOPOLOGIES_PULLMODE}): the topology is the
 * whole block, each {@link #dataFlowPaths} entry corresponds to a line such as
 * {@code tarock:[source,holdem]}, and each path expands into a list of {@link CopyRoute}s.
 */
@Data
public class DataFlowTopology {

  private List<DataFlowPath> dataFlowPaths = new ArrayList<>();

  public void addDataFlowPath(DataFlowPath p){
    this.dataFlowPaths.add(p);
  }

  @Override
  public String toString(){
    // Render each path and join them; output format matches MoreObjects' toStringHelper.
    List<String> renderedPaths = new ArrayList<>();
    for (DataFlowPath path : this.dataFlowPaths) {
      renderedPaths.add(path.toString());
    }
    return MoreObjects.toStringHelper(this.getClass())
        .add("dataFlows:", Joiner.on(",").join(renderedPaths)).toString();
  }

  /** A single path in the topology: an ordered collection of {@link CopyRoute}s. */
  @Data
  @AllArgsConstructor
  public static class DataFlowPath{

    private List<CopyRoute> copyRoutes;

    @Override
    public String toString(){
      List<String> renderedRoutes = new ArrayList<>();
      for (CopyRoute route : this.copyRoutes) {
        renderedRoutes.add(route.toString());
      }
      return MoreObjects.toStringHelper(this.getClass())
          .add("copyPairs:", Joiner.on(",").join(renderedRoutes)).toString();
    }
  }
}
| 2,638 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/replication/CopyRouteGeneratorOptimizer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.replication;
import java.util.List;
import com.google.common.base.Optional;
/**
* Provide the basic optimizer implementation in pull mode. The subclass should override the {@link #getOptimizedCopyRoute(List)} function
* @author mitu
*
*/
public class CopyRouteGeneratorOptimizer extends CopyRouteGeneratorBase {

  /**
   * Finds the path whose routes target {@code copyTo} and delegates route selection to
   * {@link #getOptimizedCopyRoute(List)}. Only applicable in pull mode.
   */
  @Override
  public Optional<CopyRoute> getPullRoute(ReplicationConfiguration rc, EndPoint copyTo) {
    if (rc.getCopyMode() == ReplicationCopyMode.PUSH) {
      return Optional.absent();
    }

    for (DataFlowTopology.DataFlowPath path : rc.getDataFlowToplogy().getDataFlowPaths()) {
      List<CopyRoute> candidates = path.getCopyRoutes();
      // All routes under one path point to the same copyTo (replica),
      // so inspecting the first route is sufficient.
      if (!candidates.isEmpty() && candidates.get(0).getCopyTo().equals(copyTo)) {
        return getOptimizedCopyRoute(candidates);
      }
    }
    return Optional.absent();
  }

  /** Default selection: none. Subclasses override to implement an optimization policy. */
  public Optional<CopyRoute> getOptimizedCopyRoute(List<CopyRoute> routes) {
    return Optional.absent();
  }
}
| 2,639 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/replication/HadoopFsEndPoint.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.replication;
import java.io.IOException;
import java.net.URI;
import lombok.Getter;
import lombok.Setter;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import com.typesafe.config.Config;
import org.apache.gobblin.util.HadoopUtils;
import lombok.extern.slf4j.Slf4j;
import org.apache.hadoop.fs.PathFilter;
@Slf4j
@Getter
@Setter
public abstract class HadoopFsEndPoint implements EndPoint {

  // Optional filter applied when this end point lists its dataset files.
  private PathFilter pathFilter;
  // Whether pathFilter is also applied to directories (not only plain files).
  private boolean applyFilterToDirectories;

  /**
   *
   * @return the hadoop cluster name for {@link EndPoint}s on Hadoop File System
   */
  public abstract String getClusterName();

  /**
   * @return the hadoop cluster FileSystem URI
   */
  public abstract URI getFsURI();

  /**
   *
   * @return Deepest {@link org.apache.hadoop.fs.Path} that contains all files in the dataset.
   */
  public abstract Path getDatasetPath();

  /** @return the selection configuration for this end point. */
  public abstract Config getSelectionConfig();

  /**
   * A helper utility for data/filesystem availability checking
   * @param path The path to be checked.
   * @return If the filesystem/path exists or not.
   */
  public boolean isPathAvailable(Path path) {
    try {
      Configuration conf = HadoopUtils.newConfiguration();
      FileSystem fs = FileSystem.get(this.getFsURI(), conf);
      if (fs.exists(path)) {
        return true;
      } else {
        log.warn("The data path [{}] is not available on FileSystem: {}", path, this.getFsURI());
        return false;
      }
    } catch (IOException ioe) {
      log.warn("Errors occurred while checking path [" + path + "] existence " + this.getFsURI(), ioe);
      return false;
    }
  }

  /** @return whether the file system behind {@link #getFsURI()} can be reached. */
  @Override
  public boolean isFileSystemAvailable() {
    try {
      // Use HadoopUtils.newConfiguration() (as isPathAvailable does) rather than a bare
      // Configuration, so configuration is created consistently across availability checks.
      FileSystem.get(this.getFsURI(), HadoopUtils.newConfiguration());
    } catch (IOException ioe){
      log.error(String.format("FileSystem %s is not available", this.getFsURI()), ioe);
      return false;
    }
    return true;
  }

  /** Convenience wrapper around {@link #isPathAvailable(Path)} for dataset roots. */
  public boolean isDatasetAvailable(Path datasetPath) {
    return isPathAvailable(datasetPath);
  }
}
| 2,640 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/replication/ConfigBasedDatasetsFinder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.replication;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Properties;
import java.util.Set;
import java.util.TreeSet;
import java.util.concurrent.Callable;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.ExecutionException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Splitter;
import com.google.common.collect.ImmutableSet;
import com.google.common.base.Function;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterators;
import com.typesafe.config.Config;
import org.apache.gobblin.config.client.ConfigClient;
import org.apache.gobblin.config.client.api.ConfigStoreFactoryDoesNotExistsException;
import org.apache.gobblin.config.client.api.VersionStabilityPolicy;
import org.apache.gobblin.config.store.api.ConfigStoreCreationException;
import org.apache.gobblin.config.store.api.VersionDoesNotExistException;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.dataset.DatasetsFinder;
import org.apache.gobblin.util.PathUtils;
import org.apache.gobblin.data.management.copy.CopyConfiguration;
import org.apache.gobblin.data.management.copy.CopySource;
import org.apache.gobblin.dataset.Dataset;
import org.apache.gobblin.util.Either;
import org.apache.gobblin.util.ExecutorsUtils;
import org.apache.gobblin.util.executors.IteratorExecutor;
import lombok.extern.slf4j.Slf4j;
/**
* Based on the configuration store to find all {@link ConfigBasedDataset}
* @author mitu
*
*/
@Slf4j
public abstract class ConfigBasedDatasetsFinder implements DatasetsFinder {
// specify the whitelist tag in the config store used by data replication or data retention
// the datasets which import this tag will be processed by data replication or data retention
public static final String GOBBLIN_CONFIG_STORE_WHITELIST_TAG =
ConfigurationKeys.CONFIG_BASED_PREFIX + ".whitelist.tag";
// specify the blacklist tags in the config store used by data replication or data retention
// the datasets which import these tags will NOT be processed by data replication or data retention
// and blacklist override the whitelist
public static final String GOBBLIN_CONFIG_STORE_BLACKLIST_TAGS =
ConfigurationKeys.CONFIG_BASED_PREFIX + ".blacklist.tags";
// specify the common root for all the datasets which will be processed by data replication/data retention
public static final String GOBBLIN_CONFIG_STORE_DATASET_COMMON_ROOT =
ConfigurationKeys.CONFIG_BASED_PREFIX + ".dataset.common.root";
// In addition to the white/blacklist tags, this configuration let the user to whitelist some datasets
// in the job-level configuration, which is not specified in configStore
// as to have easier approach to black/whitelist some datasets on operation side.
  // While job-level blacklist is different from tag-based blacklist since the latter is part of dataset discovery
// but the former is filtering process.
// Tag-based dataset discover happens at the first, before the job-level glob-pattern based filtering.
public static final String JOB_LEVEL_BLACKLIST = CopyConfiguration.COPY_PREFIX + ".configBased.blacklist" ;
// There are some cases that WATERMARK checking is desired, like
// Unexpected data loss on target while not changing watermark accordingly.
// This configuration make WATERMARK checking configurable for operation convenience, default true
public static final String WATERMARK_ENABLE = CopyConfiguration.COPY_PREFIX + ".configBased.watermark.enabled" ;
protected final String storeRoot;
protected final Path commonRoot;
protected final Path whitelistTag;
protected final Optional<List<Path>> blacklistTags;
protected final ConfigClient configClient;
protected final Properties props;
private final int threadPoolSize;
/**
* The blacklist Pattern, will be used in ConfigBasedDataset class which has the access to FileSystem.
*/
private final Optional<List<String>> blacklistPatterns;
public ConfigBasedDatasetsFinder(FileSystem fs, Properties jobProps) throws IOException {
// ignore the input FileSystem , the source file system could be different for different datasets
Preconditions.checkArgument(jobProps.containsKey(ConfigurationKeys.CONFIG_MANAGEMENT_STORE_URI),
"missing required config entery " + ConfigurationKeys.CONFIG_MANAGEMENT_STORE_URI);
Preconditions.checkArgument(jobProps.containsKey(GOBBLIN_CONFIG_STORE_WHITELIST_TAG),
"missing required config entery " + GOBBLIN_CONFIG_STORE_WHITELIST_TAG);
Preconditions.checkArgument(jobProps.containsKey(GOBBLIN_CONFIG_STORE_DATASET_COMMON_ROOT),
"missing required config entery " + GOBBLIN_CONFIG_STORE_DATASET_COMMON_ROOT);
this.storeRoot = jobProps.getProperty(ConfigurationKeys.CONFIG_MANAGEMENT_STORE_URI);
this.commonRoot = PathUtils.mergePaths(new Path(this.storeRoot),
new Path(jobProps.getProperty(GOBBLIN_CONFIG_STORE_DATASET_COMMON_ROOT)));
this.whitelistTag = PathUtils.mergePaths(new Path(this.storeRoot),
new Path(jobProps.getProperty(GOBBLIN_CONFIG_STORE_WHITELIST_TAG)));
this.threadPoolSize = jobProps.containsKey(CopySource.MAX_CONCURRENT_LISTING_SERVICES)
? Integer.parseInt(jobProps.getProperty(CopySource.MAX_CONCURRENT_LISTING_SERVICES))
: CopySource.DEFAULT_MAX_CONCURRENT_LISTING_SERVICES;
if (jobProps.containsKey(GOBBLIN_CONFIG_STORE_BLACKLIST_TAGS)) {
List<String> disableStrs = Splitter.on(",").omitEmptyStrings()
.splitToList(jobProps.getProperty(GOBBLIN_CONFIG_STORE_BLACKLIST_TAGS));
List<Path> disablePaths = new ArrayList<Path>();
for (String s : disableStrs) {
disablePaths.add(PathUtils.mergePaths(new Path(this.storeRoot), new Path(s)));
}
this.blacklistTags = Optional.of(disablePaths);
} else {
this.blacklistTags = Optional.absent();
}
configClient = ConfigClient.createConfigClient(VersionStabilityPolicy.WEAK_LOCAL_STABILITY);
this.props = jobProps;
if (props.containsKey(JOB_LEVEL_BLACKLIST)) {
this.blacklistPatterns = Optional.of(Splitter.on(",").omitEmptyStrings().splitToList(props.getProperty(JOB_LEVEL_BLACKLIST)));
} else {
this.blacklistPatterns = Optional.absent();
}
}
/**
* Semantic of black/whitelist:
* - Whitelist always respect blacklist.
   * - Job-level blacklist is responsible for dataset filtering instead of dataset discovery. i.e.
* There's no implementation of job-level whitelist currently.
*/
protected Set<URI> getValidDatasetURIs(Path datasetCommonRoot) {
Collection<URI> allDatasetURIs;
Set<URI> disabledURISet = new HashSet();
// This try block basically populate the Valid dataset URI set.
try {
allDatasetURIs = configClient.getImportedBy(new URI(whitelistTag.toString()), true);
enhanceDisabledURIsWithBlackListTag(disabledURISet);
} catch ( ConfigStoreFactoryDoesNotExistsException | ConfigStoreCreationException
| URISyntaxException e) {
log.error("Caught error while getting all the datasets URIs " + e.getMessage());
throw new RuntimeException(e);
}
return getValidDatasetURIsHelper(allDatasetURIs, disabledURISet, datasetCommonRoot);
}
/**
* Extended signature for testing convenience.
*/
protected static Set<URI> getValidDatasetURIsHelper(Collection<URI> allDatasetURIs, Set<URI> disabledURISet, Path datasetCommonRoot){
if (allDatasetURIs == null || allDatasetURIs.isEmpty()) {
return ImmutableSet.of();
}
Comparator<URI> pathLengthComparator = new Comparator<URI>() {
public int compare(URI c1, URI c2) {
return c1.getPath().length() - c2.getPath().length();
}
};
List<URI> sortedDatasetsList = new ArrayList<URI>(allDatasetURIs);
// sort the URI based on the path length to make sure the parent path appear before children
Collections.sort(sortedDatasetsList, pathLengthComparator);
TreeSet<URI> uriSet = new TreeSet<URI>();
Set<URI> noneLeaf = new HashSet<URI>();
for (URI u : sortedDatasetsList) {
// filter out none common root
if (PathUtils.isAncestor(datasetCommonRoot, new Path(u.getPath()))) {
URI floor = uriSet.floor(u);
// check for ancestor Paths
if (floor != null && PathUtils.isAncestor(new Path(floor.getPath()), new Path(u.getPath()))) {
noneLeaf.add(floor);
}
uriSet.add(u);
}
}
// only get the leaf nodes
Set<URI> validURISet = new HashSet<URI>();
for (URI u : uriSet) {
if (!noneLeaf.contains(u)) {
validURISet.add(u);
}
}
// remove disabled URIs
for (URI disable : disabledURISet) {
if (validURISet.remove(disable)) {
log.info("skip disabled dataset " + disable);
} else {
log.info("There's no URI " + disable + " available in validURISet.");
}
}
return validURISet;
}
private void enhanceDisabledURIsWithBlackListTag(Set<URI> disabledURIs) throws
URISyntaxException,
ConfigStoreFactoryDoesNotExistsException,
ConfigStoreCreationException,
VersionDoesNotExistException {
if (this.blacklistTags.isPresent()) {
for (Path s : this.blacklistTags.get()) {
disabledURIs.addAll(configClient.getImportedBy(new URI(s.toString()), true));
}
}
}
  @Override
  public Path commonDatasetRoot() {
    // The common ancestor path that every dataset discovered by this finder lives under.
    return this.commonRoot;
  }
/**
* Based on the {@link #whitelistTag}, find all URI which imports the tag. Then filter out
*
* 1. disabled dataset URI
* 2. None leaf dataset URI
*
* Then created {@link ConfigBasedDataset} based on the {@link Config} of the URIs
*/
@Override
public List<Dataset> findDatasets() throws IOException {
Set<URI> leafDatasets = getValidDatasetURIs(this.commonRoot);
if (leafDatasets.isEmpty()) {
return ImmutableList.of();
}
// Parallel execution for copyDataset for performance consideration.
final List<Dataset> result = new CopyOnWriteArrayList<>();
Iterator<Callable<Void>> callableIterator =
Iterators.transform(leafDatasets.iterator(), new Function<URI, Callable<Void>>() {
@Override
public Callable<Void> apply(final URI datasetURI) {
return findDatasetsCallable(configClient, datasetURI, props, blacklistPatterns, result);
}
});
this.executeItertorExecutor(callableIterator);
log.info("found {} datasets in ConfigBasedDatasetsFinder", result.size());
return result;
}
protected void executeItertorExecutor(Iterator<Callable<Void>> callableIterator) throws IOException {
try {
IteratorExecutor<Void> executor = new IteratorExecutor<>(callableIterator, this.threadPoolSize,
ExecutorsUtils.newDaemonThreadFactory(Optional.of(log), Optional.of(this.getClass().getSimpleName())));
List<Either<Void, ExecutionException>> results = executor.executeAndGetResults();
IteratorExecutor.logFailures(results, log, 10);
} catch (InterruptedException ie) {
throw new IOException("Dataset finder is interrupted.", ie);
}
}
/**
* Helper funcition for converting datasetURN into URI
* Note that here the URN can possibly being specified with pattern, i.e. with wildcards like `*`
* It will be resolved by configStore.
*/
private URI datasetURNtoURI(String datasetURN) {
try {
return new URI(PathUtils.mergePaths(new Path(this.storeRoot), new Path(datasetURN)).toString());
}catch (URISyntaxException e) {
log.error("Dataset with URN:" + datasetURN + " cannot be converted into URI. Skip the dataset");
return null;
}
}
  /**
   * Creates a {@link Callable} that, when run, resolves the dataset(s) behind config URI
   * {@code u} via the given {@link ConfigClient} and adds them to {@code datasets}.
   * Implementations decide how a config URI maps to concrete {@link Dataset}s;
   * {@code blacklistPatterns}, when present, are used to exclude matching datasets.
   */
  protected abstract Callable<Void> findDatasetsCallable(final ConfigClient confClient,
      final URI u, final Properties p, Optional<List<String>> blacklistPatterns,
      final Collection<Dataset> datasets);
}
| 2,641 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/replication/HadoopFsEndPointDataset.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.replication;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.gobblin.dataset.FileSystemDataset;
import org.apache.gobblin.util.HadoopUtils;
/**
* {@link FileSystemDataset} wrapper class for {@link HadoopFsEndPoint}
* @author mitu
*
*/
public class HadoopFsEndPointDataset implements FileSystemDataset {
  private final HadoopFsEndPoint endPoint;
  private Path qualifiedDatasetRoot;

  /**
   * Wraps the given end point, resolving its dataset path to a fully-qualified {@link Path}
   * against the end point's file system. Falls back to the raw (unqualified) dataset path
   * when the file system cannot be reached.
   */
  public HadoopFsEndPointDataset(HadoopFsEndPoint endPoint) {
    this.endPoint = endPoint;
    Path datasetPath = endPoint.getDatasetPath();
    try {
      FileSystem fs = FileSystem.get(endPoint.getFsURI(), HadoopUtils.newConfiguration());
      this.qualifiedDatasetRoot = fs.makeQualified(datasetPath);
    } catch (IOException ignored) {
      // Best effort: if the file system is unreachable, use the unqualified path as-is.
      this.qualifiedDatasetRoot = datasetPath;
    }
  }

  /** The fully-qualified dataset root rendered as a string. */
  @Override
  public String datasetURN() {
    return this.qualifiedDatasetRoot.toString();
  }

  /** The fully-qualified dataset root {@link Path}. */
  @Override
  public Path datasetRoot() {
    return this.qualifiedDatasetRoot;
  }
}
| 2,642 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/replication/DataFlowTopologyPickerByHadoopFsSource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.replication;
import com.google.common.base.Preconditions;
import com.typesafe.config.Config;
import org.apache.gobblin.annotation.Alias;
@Alias(value="DataFlowTopologyPickerByHadoopFsSource")
public class DataFlowTopologyPickerByHadoopFsSource implements DataFlowTopologyPickerBySource {

  /**
   * Returns the preferred topology {@link Config} for a {@link HadoopFsEndPoint} source,
   * looked up by the source's cluster name.
   *
   * @throws IllegalArgumentException if {@code source} is not a {@link HadoopFsEndPoint},
   *         or no topology is configured for its cluster name
   */
  @Override
  public Config getPreferredRoutes(Config allTopologies, EndPoint source) {
    // Fixed typo in the error message: "expectecd" -> "expected".
    Preconditions.checkArgument(source instanceof HadoopFsEndPoint,
        "source is NOT expected class " + HadoopFsEndPoint.class.getCanonicalName());

    HadoopFsEndPoint hadoopFsSource = (HadoopFsEndPoint) source;
    String clusterName = hadoopFsSource.getClusterName();

    Preconditions.checkArgument(allTopologies.hasPath(clusterName),
        "Can not find preferred topology for cluster name " + clusterName);

    return allTopologies.getConfig(clusterName);
  }
}
| 2,643 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/replication/DataFlowTopologyPickerBySource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.replication;
import com.typesafe.config.Config;
/**
* Used to pick preferred {@link DataFlowTopology} in {@link com.typesafe.config.Config} format
* @author mitu
*
*/
public interface DataFlowTopologyPickerBySource {
  /**
   * Picks the preferred routes for {@code source} out of {@code allDataFlowTopologies}.
   *
   * @param allDataFlowTopologies all configured data flow topologies
   * @param source the {@link EndPoint} the data is copied from
   * @return the topology {@link Config} preferred for the given source
   */
  public Config getPreferredRoutes(Config allDataFlowTopologies, EndPoint source);
}
| 2,644 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/replication/CopyRoute.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.replication;
import com.google.common.base.MoreObjects;
import lombok.AllArgsConstructor;
import lombok.Data;
/**
 * A single copy route: data flows from {@link #copyFrom} to {@link #copyTo}.
 * Lombok generates the constructor, getters, equals/hashCode; toString is customized below.
 */
@AllArgsConstructor
@Data
public class CopyRoute {
  // Source end point of this copy route.
  private final EndPoint copyFrom;
  // Destination end point of this copy route.
  private final EndPoint copyTo;
  @Override
  public String toString() {
    return MoreObjects.toStringHelper(this.getClass()).add("copyFrom", this.getCopyFrom()).add("copyTo", this.getCopyTo())
        .toString();
  }
}
| 2,645 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/replication/HadoopFsEndPointFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.replication;
import com.google.common.base.Preconditions;
import com.typesafe.config.Config;
import org.apache.gobblin.annotation.Alias;
@Alias(value="HadoopFsEndPointFactory")
public class HadoopFsEndPointFactory implements EndPointFactory {
  public static final String HADOOP_FS_CONFIG_KEY = "hadoopfs";

  /**
   * Builds the source {@link HadoopFsEndPoint} from the {@value #HADOOP_FS_CONFIG_KEY}
   * sub-config of {@code sourceConfig}.
   *
   * @throws IllegalArgumentException if {@code sourceConfig} has no {@value #HADOOP_FS_CONFIG_KEY} entry
   */
  @Override
  public HadoopFsEndPoint buildSource(Config sourceConfig, Config selectionConfig) {
    // Fixed typo in the error message: "entery" -> "entry".
    Preconditions.checkArgument(sourceConfig.hasPath(HADOOP_FS_CONFIG_KEY),
        "missing required config entry " + HADOOP_FS_CONFIG_KEY);

    return new SourceHadoopFsEndPoint(new HadoopFsReplicaConfig(sourceConfig.getConfig(HADOOP_FS_CONFIG_KEY)), selectionConfig);
  }

  /**
   * Builds a replica {@link HadoopFsEndPoint} named {@code replicaName} from the
   * {@value #HADOOP_FS_CONFIG_KEY} sub-config of {@code replicaConfig}.
   *
   * @throws IllegalArgumentException if {@code replicaConfig} has no {@value #HADOOP_FS_CONFIG_KEY} entry
   */
  @Override
  public HadoopFsEndPoint buildReplica(Config replicaConfig, String replicaName, Config selectionConfig) {
    // Fixed typo in the error message: "entery" -> "entry".
    Preconditions.checkArgument(replicaConfig.hasPath(HADOOP_FS_CONFIG_KEY),
        "missing required config entry " + HADOOP_FS_CONFIG_KEY);

    return new ReplicaHadoopFsEndPoint(new HadoopFsReplicaConfig(replicaConfig.getConfig(HADOOP_FS_CONFIG_KEY)),
        replicaName, selectionConfig);
  }
}
| 2,646 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/replication/CopyRouteGeneratorBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.replication;
import java.util.List;
import com.google.common.base.Optional;
/**
 * Basic implementation for picking a {@link CopyRoute} in Push mode.
 * @author mitu
 *
 */
public class CopyRouteGeneratorBase implements CopyRouteGenerator {

  /** Pull routes are not supported by this base implementation. */
  @Override
  public Optional<CopyRoute> getPullRoute(ReplicationConfiguration rc, EndPoint copyTo) {
    return Optional.absent();
  }

  /**
   * For push mode there is no optimization: returns the routes of the first non-empty data
   * flow path whose copy-from end point matches {@code copyFrom}, or absent when the
   * configuration is in pull mode or no path matches.
   */
  @Override
  public Optional<List<CopyRoute>> getPushRoutes(ReplicationConfiguration rc, EndPoint copyFrom) {
    if (rc.getCopyMode() == ReplicationCopyMode.PULL) {
      return Optional.absent();
    }

    /*
     * Routes are lists of pairs generated from the topology-specification config. For example,
     * source:[holdem, war] ends up as List<(source, holdem), (source, war)>. All routes within
     * a path share the same copyFrom but have different copyTo end points, so checking the
     * first route's copyFrom is sufficient.
     */
    for (DataFlowTopology.DataFlowPath path : rc.getDataFlowToplogy().getDataFlowPaths()) {
      List<CopyRoute> routes = path.getCopyRoutes();
      if (!routes.isEmpty() && routes.get(0).getCopyFrom().equals(copyFrom)) {
        return Optional.of(routes);
      }
    }
    return Optional.absent();
  }
}
| 2,647 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/replication/EndPointFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.replication;
import com.typesafe.config.Config;
public interface EndPointFactory {
  /** Builds the source {@link EndPoint} from its config and the dataset selection config. */
  public EndPoint buildSource(Config sourceConfig, Config selectionConfig);
  /** Builds a replica {@link EndPoint} named {@code replicaName} from its config and the dataset selection config. */
  public EndPoint buildReplica(Config replicasConfig, String replicaName, Config selectionConfig);
}
| 2,648 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/replication/ConfigBasedMultiDatasets.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.replication;
import com.google.common.annotations.VisibleForTesting;
import org.apache.gobblin.dataset.Dataset;
import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import java.util.regex.Pattern;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import com.google.common.base.Optional;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigRenderOptions;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.data.management.copy.CopyConfiguration;
import lombok.extern.slf4j.Slf4j;
/**
* Based on single dataset configuration in {@link Config} format, in Pull mode replication, there could be multiple
* {@link ConfigBasedDataset} generated. For example, if two replicas exists on the same copyTo cluster,
* say replica1 and replica2, then there will be 2 {@link ConfigBasedDataset} generated, one for replication data from
* copyFrom {@link EndPoint} to replica1, the other from copyFrom {@link EndPoint} to replica2
*
* This class will be responsible to generate those {@link ConfigBasedDataset}s
*
* @author mitu
*/
@Slf4j
public class ConfigBasedMultiDatasets {
  // Job-level properties supplied by the caller; consulted for push-mode and writer-FS-URI settings.
  private final Properties props;
  // Accumulates every ConfigBasedDataset generated for this single dataset config.
  private final List<Dataset> datasets = new ArrayList<>();
  // Compiled blacklist patterns; datasets whose URN matches any pattern are dropped.
  private Optional<List<Pattern>> blacklist = Optional.of(new ArrayList<>());
  /**
   * if push mode is set in property, only replicate data when
   * 1. Push mode is set in Config store
   * 2. CopyTo cluster in sync with property with 'writer.fs.uri'
   */
  public static final String REPLICATION_PUSH_MODE = CopyConfiguration.COPY_PREFIX + ".replicationPushMode";
  // Dummy constructor, returns empty datasets.
  public ConfigBasedMultiDatasets(){
    this.props = new Properties();
  }
  /**
   * Builds the {@link ConfigBasedDataset} list for a single dataset {@link Config}.
   * Determines the execution cluster from the default Hadoop FileSystem, then generates
   * datasets in push or pull mode depending on the REPLICATION_PUSH_MODE job property.
   * Errors are logged (leaving the dataset list empty) rather than propagated.
   */
  public ConfigBasedMultiDatasets (Config c, Properties props,
      Optional<List<String>> blacklistPatterns){
    this.props = props;
    blacklist = patternListInitHelper(blacklistPatterns);
    try {
      // The cluster this job is executing on; used to decide which routes are relevant.
      FileSystem executionCluster = FileSystem.get(new Configuration());
      URI executionClusterURI = executionCluster.getUri();
      ReplicationConfiguration rc = ReplicationConfiguration.buildFromConfig(c);
      // push mode
      if(this.props.containsKey(REPLICATION_PUSH_MODE) && Boolean.parseBoolean(this.props.getProperty(REPLICATION_PUSH_MODE))){
        generateDatasetInPushMode(rc, executionClusterURI);
      }
      // default pull mode
      else{
        generateDatasetInPullMode(rc, executionClusterURI);
      }
    } catch (InstantiationException | IllegalAccessException | ClassNotFoundException e) {
      log.error("Can not create Replication Configuration from raw config "
          + c.root().render(ConfigRenderOptions.defaults().setComments(false).setOriginComments(false)), e);
    } catch (IOException ioe) {
      log.error("Can not decide current execution cluster ", ioe);
    }
  }
  // Compiles the raw blacklist strings into Patterns; absent when no patterns were given.
  private Optional<List<Pattern>> patternListInitHelper(Optional<List<String>> patterns){
    if (patterns.isPresent() && patterns.get().size() >= 1) {
      List<Pattern> tmpPatterns = new ArrayList<>();
      for (String pattern : patterns.get()){
        tmpPatterns.add(Pattern.compile(pattern));
      }
      return Optional.of(tmpPatterns);
    }
    else{
      return Optional.absent();
    }
  }
  // Push mode: from each candidate end point (all replicas plus the source) located on the
  // execution cluster, generate a ConfigBasedDataset for every push route whose copyTo matches
  // the 'writer.fs.uri' target cluster, subject to blacklist filtering.
  private void generateDatasetInPushMode(ReplicationConfiguration rc, URI executionClusterURI){
    if(rc.getCopyMode()== ReplicationCopyMode.PULL){
      log.info("Skip process pull mode dataset with meta data{} as job level property specify push mode ", rc.getMetaData());
      return;
    }
    // Push mode requires an explicit target cluster via 'writer.fs.uri'; otherwise nothing to do.
    if (!this.props.containsKey(ConfigurationKeys.WRITER_FILE_SYSTEM_URI)){
      return;
    }
    String pushModeTargetCluster = this.props.getProperty(ConfigurationKeys.WRITER_FILE_SYSTEM_URI);
    // PUSH mode
    CopyRouteGenerator cpGen = rc.getCopyRouteGenerator();
    List<EndPoint> replicas = rc.getReplicas();
    List<EndPoint> pushCandidates = new ArrayList<EndPoint>(replicas);
    pushCandidates.add(rc.getSource());
    for(EndPoint pushFrom: pushCandidates){
      // Only push from end points that live on the current execution cluster.
      if(needGenerateCopyEntity(pushFrom, executionClusterURI)){
        Optional<List<CopyRoute>> copyRoutes = cpGen.getPushRoutes(rc, pushFrom);
        if(!copyRoutes.isPresent()) {
          log.warn("In Push mode, did not found any copyRoute for dataset with meta data {}", rc.getMetaData());
          continue;
        }
        /**
         * For-Loop responsibility:
         * For each of the {@link CopyRoute}, generate a {@link ConfigBasedDataset}.
         */
        for(CopyRoute cr: copyRoutes.get()){
          if(cr.getCopyTo() instanceof HadoopFsEndPoint){
            HadoopFsEndPoint ep = (HadoopFsEndPoint)cr.getCopyTo();
            if(ep.getFsURI().toString().equals(pushModeTargetCluster)){
              // For a candidate dataset, iterate thru. all available blacklist patterns.
              ConfigBasedDataset configBasedDataset = new ConfigBasedDataset(rc, this.props, cr);
              if (blacklistFilteringHelper(configBasedDataset, this.blacklist)){
                this.datasets.add(configBasedDataset);
              }
              else{
                // NOTE: this branch is only reached when a pattern matched, so blacklist is present.
                log.info("Dataset" + configBasedDataset.datasetURN() + " has been filtered out because of blacklist pattern:"
                    + this.blacklist.get().toString());
              }
            }
          }
        }// inner for loops ends
      }
    }// outer for loop ends
  }
  // Pull mode: for each replica located on the execution cluster, generate one
  // ConfigBasedDataset from its pull route, subject to blacklist filtering.
  private void generateDatasetInPullMode(ReplicationConfiguration rc, URI executionClusterURI){
    if(rc.getCopyMode()== ReplicationCopyMode.PUSH){
      log.info("Skip process push mode dataset with meta data{} as job level property specify pull mode ", rc.getMetaData());
      return;
    }
    // PULL mode
    CopyRouteGenerator cpGen = rc.getCopyRouteGenerator();
    /**
     * Replicas come from the 'List' which will normally be set in gobblin.replicas.
     * Basically this is all possible destinations for this replication job.
     */
    List<EndPoint> replicas = rc.getReplicas();
    for(EndPoint replica: replicas){
      // Only pull the data from current execution cluster
      if(needGenerateCopyEntity(replica, executionClusterURI)){
        /*
         * CopyRoute represents a copyable Dataset to execute, e.g. if you specify source:[war, holdem],
         * there could be two {@link #ConfigBasedDataset} generated.
         */
        Optional<CopyRoute> copyRoute = cpGen.getPullRoute(rc, replica);
        if(copyRoute.isPresent()){
          ConfigBasedDataset configBasedDataset = new ConfigBasedDataset(rc, this.props, copyRoute.get());
          if (blacklistFilteringHelper(configBasedDataset, this.blacklist)){
            this.datasets.add(configBasedDataset);
          }
          else{
            // NOTE: this branch is only reached when a pattern matched, so blacklist is present.
            log.info("Dataset" + configBasedDataset.datasetURN() + " has been filtered out because of blacklist pattern:"
                + this.blacklist.get().toString());
          }
        }
      }
    }
  }
  @VisibleForTesting
  /**
   * Returns false when the dataset's URN matches any blacklist pattern (i.e. it should be
   * filtered out); true when no pattern matches or no blacklist was specified.
   */
  public boolean blacklistFilteringHelper(ConfigBasedDataset configBasedDataset, Optional<List<Pattern>> patternList){
    String datasetURN = configBasedDataset.datasetURN();
    if (patternList.isPresent()) {
      for(Pattern pattern: patternList.get()) {
        if (pattern.matcher(datasetURN).find()){
          return false;
        }
      }
      // If the dataset gets thru. all blacklist checks, accept it.
      return true;
    }
    // If blacklist not specified, automatically accept the dataset.
    else {
      return true;
    }
  }
  // Exposes the generated datasets (empty when construction failed or nothing matched).
  public List<Dataset> getConfigBasedDatasetList(){
    return this.datasets;
  }
  // True when the end point is a HadoopFsEndPoint residing on the current execution cluster.
  private boolean needGenerateCopyEntity(EndPoint e, URI executionClusterURI){
    if(!(e instanceof HadoopFsEndPoint)){
      return false;
    }
    HadoopFsEndPoint ep = (HadoopFsEndPoint)e;
    return ep.getFsURI().equals(executionClusterURI);
  }
}
| 2,649 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/replication/CopyRouteGeneratorOptimizedNetworkBandwidth.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.replication;
import java.util.List;
import com.google.common.base.Optional;
import org.apache.gobblin.annotation.Alias;
/**
 * In Pull mode, optimized for network bandwidth: pick the first available data source.
 * @author mitu
 *
 */
@Alias(value="OptimizedNetworkBandwidth")
public class CopyRouteGeneratorOptimizedNetworkBandwidth extends CopyRouteGeneratorOptimizer {

  /**
   * Scans the candidate routes in order and returns the first whose copy-from end point is a
   * {@link HadoopFsEndPoint} with its dataset currently available.
   *
   * @param routes candidate {@link CopyRoute}s, in preference order
   * @return the first available {@link CopyRoute}, or absent when none qualifies
   */
  @Override
  public Optional<CopyRoute> getOptimizedCopyRoute(List<CopyRoute> routes) {
    for (CopyRoute route : routes) {
      if (route.getCopyFrom() instanceof HadoopFsEndPoint) {
        HadoopFsEndPoint candidate = (HadoopFsEndPoint) route.getCopyFrom();
        if (candidate.isDatasetAvailable(candidate.getDatasetPath())) {
          return Optional.of(route);
        }
      }
    }
    return Optional.absent();
  }
}
| 2,650 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/replication/ConfigBasedCopyableDatasetFinder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.replication;
import java.io.IOException;
import java.net.URI;
import java.util.Collection;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.Callable;
import org.apache.hadoop.fs.FileSystem;
import com.typesafe.config.Config;
import com.google.common.base.Optional;
import org.apache.gobblin.config.client.ConfigClient;
import org.apache.gobblin.dataset.Dataset;
import lombok.extern.slf4j.Slf4j;
/**
 * Uses the ConfigStore object to find all {@link ConfigBasedMultiDatasets} to replicate.
 * Specifically for replication jobs.
 * A normal DistcpNG job which doesn't involve Dataflow concepts should not use this
 * DatasetFinder but a different implementation of {@link ConfigBasedDatasetsFinder}.
 */
@Slf4j
public class ConfigBasedCopyableDatasetFinder extends ConfigBasedDatasetsFinder {

  public ConfigBasedCopyableDatasetFinder(FileSystem fs, Properties jobProps) throws IOException{
    super(fs, jobProps);
  }

  /**
   * Returns a {@link Callable} that resolves the {@link Config} at {@code u} into its
   * {@link ConfigBasedMultiDatasets} and adds every resulting dataset to {@code datasets}.
   */
  protected Callable<Void> findDatasetsCallable(final ConfigClient confClient,
      final URI u, final Properties p, Optional<List<String>> blacklistPatterns, final Collection<Dataset> datasets) {
    return new Callable<Void>() {
      @Override
      public Void call() throws Exception {
        // Fetch the dataset's Config and expand it into one or more ConfigBasedDatasets.
        Config datasetConfig = confClient.getConfig(u);
        datasets.addAll(
            new ConfigBasedMultiDatasets(datasetConfig, p, blacklistPatterns).getConfigBasedDatasetList());
        return null;
      }
    };
  }
}
| 2,651 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/replication/CopyRouteGenerator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.replication;
import java.util.List;
import com.google.common.base.Optional;
/**
* Used to indicate the interface for generating the {@link CopyRoute} based on
*
* <ul>
* <li>{@link ReplicationConfiguration}
* <li>in Pull mode, the copy to {@link EndPoint}
* <li>in Push mode, the copy from {@link EndPoint}
* </ul>
* @author mitu
*
*/
public interface CopyRouteGenerator {
// implied for pull mode
public Optional<CopyRoute> getPullRoute(ReplicationConfiguration rc, EndPoint copyTo);
// implied for push mode
public Optional<List<CopyRoute>> getPushRoutes(ReplicationConfiguration rc, EndPoint copyFrom);
}
| 2,652 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/replication/ReplicationDataValidPathPicker.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.replication;
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import org.apache.commons.lang3.reflect.ConstructorUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import com.google.common.collect.Ordering;
import com.typesafe.config.Config;
import org.apache.gobblin.data.management.policy.VersionSelectionPolicy;
import org.apache.gobblin.data.management.version.FileSystemDatasetVersion;
import org.apache.gobblin.data.management.version.finder.VersionFinder;
import org.apache.gobblin.dataset.FileSystemDataset;
/**
* Used to pick the valid Paths for data replication based on {@link Config}
* @author mitu
*
*/
public class ReplicationDataValidPathPicker {
  public static final String POLICY_CLASS = "selection.policy.class";
  public static final String FINDER_CLASS = "version.finder.class";

  /** Utility class; not meant to be instantiated. */
  private ReplicationDataValidPathPicker() {
  }

  /**
   * Picks the valid {@link Path}s for data replication of the given end point.
   *
   * <p>The end point's selection config names a {@link VersionFinder} class
   * ({@value #FINDER_CLASS}) and a {@link VersionSelectionPolicy} class
   * ({@value #POLICY_CLASS}); both are instantiated reflectively. Dataset versions are
   * sorted newest-first before the selection policy is applied.
   *
   * @param hadoopFsEndPoint the end point whose dataset paths should be picked
   * @return the first path of every selected {@link FileSystemDatasetVersion}
   * @throws IOException on failure to access the file system or to find versions
   * @throws IllegalArgumentException if either configured class cannot be instantiated
   */
  public static Collection<Path> getValidPaths(HadoopFsEndPoint hadoopFsEndPoint) throws IOException {
    Config selectionConfig = hadoopFsEndPoint.getSelectionConfig();
    FileSystemDataset tmpDataset = new HadoopFsEndPointDataset(hadoopFsEndPoint);
    FileSystem theFs = FileSystem.get(hadoopFsEndPoint.getFsURI(), new Configuration());

    /**
     * Use {@link FileSystemDatasetVersion} as
     * {@link DateTimeDatasetVersionFinder} / {@link GlobModTimeDatasetVersionFinder} use {@link TimestampedDatasetVersion}
     * {@link SingleVersionFinder} uses {@link FileStatusDatasetVersion}
     */
    VersionFinder<FileSystemDatasetVersion> finder =
        instantiate(selectionConfig.getString(FINDER_CLASS), theFs, selectionConfig);

    List<FileSystemDatasetVersion> versions =
        Ordering.natural().reverse().sortedCopy(finder.findDatasetVersions(tmpDataset));

    VersionSelectionPolicy<FileSystemDatasetVersion> selector =
        instantiate(selectionConfig.getString(POLICY_CLASS), selectionConfig);

    Collection<FileSystemDatasetVersion> versionsSelected = selector.listSelectedVersions(versions);

    List<Path> result = new ArrayList<>();
    for (FileSystemDatasetVersion t : versionsSelected) {
      // get the first element out
      result.add(t.getPaths().iterator().next());
    }
    return result;
  }

  /**
   * Reflectively invokes the constructor of {@code className} matching {@code args}.
   * Consolidates the two identical try/catch blocks the original duplicated; any reflection
   * failure is rethrown as {@link IllegalArgumentException} with the cause preserved.
   */
  @SuppressWarnings("unchecked")
  private static <T> T instantiate(String className, Object... args) {
    try {
      return (T) ConstructorUtils.invokeConstructor(Class.forName(className), args);
    } catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException | InstantiationException
        | ClassNotFoundException e) {
      throw new IllegalArgumentException(e);
    }
  }
}
| 2,653 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/replication/WatermarkMetadataUtil.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.replication;
import java.util.List;
import com.google.common.base.Splitter;
import com.google.gson.JsonParser;
import org.apache.gobblin.source.extractor.Watermark;
import org.apache.gobblin.source.extractor.WatermarkSerializerHelper;
/**
* Utility class to serialize and deserialize the {@link Watermark}
* @author mitu
*
*/
public class WatermarkMetadataUtil {
  public static final String DELIMITER = "\n";

  /** Utility class; not meant to be instantiated. */
  private WatermarkMetadataUtil() {
  }

  /**
   * Serializes a {@link Watermark} as {@code <canonical class name>\n<json>}.
   */
  public static String serialize(Watermark watermark) {
    return watermark.getClass().getCanonicalName() + DELIMITER + watermark.toJson().toString();
  }

  /**
   * Deserializes content previously produced by {@link #serialize(Watermark)}.
   *
   * <p>NOTE(review): splitting on the delimiter assumes the serialized JSON contains no
   * newline; Gson's compact output satisfies this, but pretty-printed JSON would be
   * truncated to its first line — confirm all producers use compact output.
   *
   * @param content the serialized watermark text
   * @return the reconstructed {@link Watermark}
   * @throws WatermarkMetadataMulFormatException if the content is malformed or the
   *         watermark class cannot be loaded
   */
  public static Watermark deserialize(String content) throws WatermarkMetadataMulFormatException {
    List<String> tmp = Splitter.on(DELIMITER).trimResults().omitEmptyStrings().splitToList(content);
    if (tmp.size() < 2) {
      throw new WatermarkMetadataMulFormatException("wrong format " + content);
    }

    String classname = tmp.get(0);
    String jsonStr = tmp.get(1);
    try {
      @SuppressWarnings("unchecked")
      Class<? extends Watermark> watermarkClass = (Class<? extends Watermark>) Class.forName(classname);
      return WatermarkSerializerHelper.convertJsonToWatermark(new JsonParser().parse(jsonStr), watermarkClass);
    } catch (ClassNotFoundException e) {
      // Preserve the cause so callers can see the underlying class-loading failure
      // (the original kept only e.getMessage()).
      throw new WatermarkMetadataMulFormatException("wrong format " + e.getMessage(), e);
    }
  }

  /** Thrown when serialized watermark metadata cannot be parsed. */
  static class WatermarkMetadataMulFormatException extends Exception {
    private static final long serialVersionUID = 6748785718027224698L;

    public WatermarkMetadataMulFormatException(String s) {
      super(s);
    }

    public WatermarkMetadataMulFormatException(String s, Throwable cause) {
      super(s, cause);
    }
  }
}
| 2,654 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/replication/CopyRouteGeneratorOptimizedLatency.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Used to generate the {@link CopyRoute} based on the {@link DataFlowTopology} optimized for latency.
*
* In Pull mode, query multiple data sources and pick the data source with the highest watermark
*/
package org.apache.gobblin.data.management.copy.replication;
import java.io.Serializable;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import com.google.common.base.Optional;
import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.source.extractor.ComparableWatermark;
/**
* In Pull mode, Optimized for data replication latency: pick the highest watermark from all the data sources
* @author mitu
*
*/
@Alias(value = "OptimizedLatency")
public class CopyRouteGeneratorOptimizedLatency extends CopyRouteGeneratorOptimizer {

  /**
   * @param routes candidate routes; may be empty
   * @return the {@link CopyRoute} whose copy-from end point has the highest watermark,
   *         or absent if {@code routes} is null or empty
   */
  @Override
  public Optional<CopyRoute> getOptimizedCopyRoute(List<CopyRoute> routes) {
    // Guard: Collections.max throws NoSuchElementException on an empty collection.
    if (routes == null || routes.isEmpty()) {
      return Optional.absent();
    }
    CopyRoute preferred = Collections.max(routes, new CopyRouteComparatorBySourceWatermark());
    return Optional.of(preferred);
  }

  /**
   * Orders {@link CopyRoute}s by the watermark of their copy-from end point.
   * A route without a watermark sorts lowest; two absent watermarks compare equal.
   */
  static class CopyRouteComparatorBySourceWatermark implements Comparator<CopyRoute>, Serializable {
    private static final long serialVersionUID = 1439642339646179830L;

    @Override
    public int compare(CopyRoute o1, CopyRoute o2) {
      Optional<ComparableWatermark> w1 = o1.getCopyFrom().getWatermark();
      Optional<ComparableWatermark> w2 = o2.getCopyFrom().getWatermark();

      if (!w1.isPresent() && !w2.isPresent()) {
        return 0;
      }
      if (!w2.isPresent()) {
        return 1;
      }
      if (!w1.isPresent()) {
        return -1;
      }
      return w1.get().compareTo(w2.get());
    }
  }
}
| 2,655 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/replication/ReplicationCopyMode.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.replication;
import com.typesafe.config.Config;
/**
* Specify the replication copy mode, either Pull or Push
* @author mitu
*
*/
public enum ReplicationCopyMode {
  PULL("pull"),
  PUSH("push");

  private final String name;

  ReplicationCopyMode(String name) {
    this.name = name;
  }

  @Override
  public String toString() {
    return name;
  }

  /**
   * Looks up a {@link ReplicationCopyMode} by its (case-insensitive) name.
   *
   * @param name the mode name, e.g. {@code "pull"} or {@code "push"}
   * @return the matching {@link ReplicationCopyMode}
   */
  public static ReplicationCopyMode forName(String name) {
    return valueOf(name.toUpperCase());
  }

  /**
   * Reads the copy mode from the given config; defaults to {@link #PULL} when the
   * {@code ReplicationConfiguration.REPLICATION_COPY_MODE} key is absent.
   */
  public static ReplicationCopyMode getReplicationCopyMode(Config config) {
    if (!config.hasPath(ReplicationConfiguration.REPLICATION_COPY_MODE)) {
      return PULL;
    }
    return forName(config.getString(ReplicationConfiguration.REPLICATION_COPY_MODE));
  }
}
| 2,656 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/replication/EndPoint.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.replication;
import java.io.IOException;
import java.util.Collection;
import org.apache.hadoop.fs.FileStatus;
import com.google.common.base.Optional;
import org.apache.gobblin.source.extractor.ComparableWatermark;
import org.apache.gobblin.source.extractor.Watermark;
/**
* Used to encapsulate all the information of a replica, including original source, during replication process.
*
* <ul>
* <li>Configuration of the data reside on this replica
* <li>Whether the replica is original source
* <li>The {@link Watermark} of this replica
* </ul>
* @author mitu
*
*/
public interface EndPoint {

  /**
   * @return true iff this end point represents the original source
   */
  boolean isSource();

  /**
   * @return the end point name
   */
  String getEndPointName();

  /**
   * @return the {@link Watermark} of the replica, or absent if no watermark is available
   */
  Optional<ComparableWatermark> getWatermark();

  /**
   * @return whether this {@link EndPoint}'s file system is available for replicating data
   */
  boolean isFileSystemAvailable();

  /**
   * @return all the {@link FileStatus}s of this {@link EndPoint} in the context of data replication
   * @throws IOException on failure to list the files
   */
  Collection<FileStatus> getFiles() throws IOException;
}
| 2,657 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/replication/CopyRouteGeneratorOptimizedNetworkBandwidthForTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.replication;
import java.util.List;
import com.google.common.base.Optional;
/**
* This class is necessary for unit test purpose:
* - Mockito cannot mimic the situation where a file system contains a dataset path.
* - In {@link CopyRouteGeneratorOptimizedNetworkBandwidth} we neet to check if the dataset is really on the filesystem.
* - The {@link CopyRouteGeneratorTest} is testing for route selection,
* so no matter the dataset is indeed existed, the testing purpose can still be achieved.
*/
public class CopyRouteGeneratorOptimizedNetworkBandwidthForTest extends CopyRouteGeneratorOptimizer {

  /**
   * Returns the first route whose copy-from end point is a {@link HadoopFsEndPoint} with an
   * available file system, or absent if no candidate qualifies.
   */
  @Override
  public Optional<CopyRoute> getOptimizedCopyRoute(List<CopyRoute> routes) {
    for (CopyRoute candidate : routes) {
      EndPoint from = candidate.getCopyFrom();
      // Only Hadoop-backed end points can be probed for availability.
      if (from instanceof HadoopFsEndPoint && ((HadoopFsEndPoint) from).isFileSystemAvailable()) {
        return Optional.of(candidate);
      }
    }
    return Optional.absent();
  }
}
| 2,658 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/replication/SourceHadoopFsEndPoint.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.replication;
import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.Collection;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import com.google.common.base.MoreObjects;
import com.google.common.base.Optional;
import com.typesafe.config.Config;
import org.apache.gobblin.source.extractor.ComparableWatermark;
import org.apache.gobblin.source.extractor.extract.LongWatermark;
import org.apache.gobblin.util.FileListUtils;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
@Slf4j
public class SourceHadoopFsEndPoint extends HadoopFsEndPoint {
@Getter
private final HadoopFsReplicaConfig rc;
@Getter
private final Config selectionConfig;
private boolean initialized = false;
private Optional<ComparableWatermark> cachedWatermark = Optional.absent();
private Collection<FileStatus> allFileStatus = new ArrayList<>();
public SourceHadoopFsEndPoint(HadoopFsReplicaConfig rc, Config selectionConfig) {
this.rc = rc;
this.selectionConfig = selectionConfig;
}
@Override
public synchronized Collection<FileStatus> getFiles() throws IOException {
if (!this.initialized) {
this.getWatermark();
}
return this.allFileStatus;
}
@Override
public synchronized Optional<ComparableWatermark> getWatermark() {
if (this.initialized) {
return this.cachedWatermark;
}
try {
long curTs = -1;
FileSystem fs = FileSystem.get(rc.getFsURI(), new Configuration());
Collection<Path> validPaths = ReplicationDataValidPathPicker.getValidPaths(this);
for (Path p : validPaths) {
try {
this.allFileStatus.addAll(FileListUtils.listFilesRecursively(fs, p, super.getPathFilter(), super.isApplyFilterToDirectories()));
} catch (Exception e) {
log.error(String.format("Error while try read file in directory %s to get watermark", p.toString()));
}
}
for (FileStatus f : this.allFileStatus) {
if (f.getModificationTime() > curTs) {
curTs = f.getModificationTime();
}
}
ComparableWatermark result = new LongWatermark(curTs);
this.cachedWatermark = Optional.of(result);
if (this.cachedWatermark.isPresent()) {
this.initialized = true;
}
return this.cachedWatermark;
} catch (IOException e) {
log.error("Error while retrieve the watermark for " + this);
return this.cachedWatermark;
}
}
@Override
public boolean isSource() {
return true;
}
@Override
public String getEndPointName() {
return ReplicationConfiguration.REPLICATION_SOURCE;
}
@Override
public String getClusterName() {
return this.rc.getClustername();
}
@Override
public String toString() {
return MoreObjects.toStringHelper(this.getClass())
.add("is source", this.isSource())
.add("end point name", this.getEndPointName())
.add("hadoopfs config", this.rc)
.toString();
}
@Override
public URI getFsURI() {
return this.rc.getFsURI();
}
@Override
public Path getDatasetPath() {
return this.rc.getPath();
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((rc == null) ? 0 : rc.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
SourceHadoopFsEndPoint other = (SourceHadoopFsEndPoint) obj;
if (rc == null) {
if (other.rc != null) {
return false;
}
} else if (!rc.equals(other.rc)) {
return false;
}
return true;
}
}
| 2,659 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/replication/ReplicaHadoopFsEndPoint.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.replication;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.URI;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import com.google.common.base.Charsets;
import com.google.common.base.MoreObjects;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.io.CharStreams;
import com.typesafe.config.Config;
import org.apache.gobblin.source.extractor.ComparableWatermark;
import org.apache.gobblin.source.extractor.Watermark;
import org.apache.gobblin.util.FileListUtils;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
@Slf4j
public class ReplicaHadoopFsEndPoint extends HadoopFsEndPoint {
  // Metadata file, directly under the replica's dataset path, holding the serialized watermark.
  public static final String WATERMARK_FILE = "_metadata";
  public static final String LATEST_TIMESTAMP = "latestTimestamp";
  @Getter
  private final HadoopFsReplicaConfig rc;
  @Getter
  private final String replicaName;
  @Getter
  private final Config selectionConfig;
  // One-shot initialization flags: each is set at the START of the first attempt, so a
  // failed attempt is NOT retried on later calls. NOTE(review): presumably intentional
  // best-effort caching — confirm with callers before changing this ordering.
  private boolean watermarkInitialized = false;
  private boolean filesInitialized = false;
  private Optional<ComparableWatermark> cachedWatermark = Optional.absent();
  private Collection<FileStatus> allFileStatus = new ArrayList<>();
  /**
   * @param rc replica cluster/path configuration
   * @param replicaName name of this replica; must not equal the reserved source name
   * @param selectionConfig config used to pick the valid dataset paths
   */
  public ReplicaHadoopFsEndPoint(HadoopFsReplicaConfig rc, String replicaName, Config selectionConfig) {
    Preconditions.checkArgument(!replicaName.equals(ReplicationConfiguration.REPLICATION_SOURCE),
        "replicaName can not be " + ReplicationConfiguration.REPLICATION_SOURCE);
    this.rc = rc;
    this.replicaName = replicaName;
    this.selectionConfig = selectionConfig;
  }
  /**
   * Lists (once) all files under the valid paths of this replica.
   *
   * @return the cached file listing; empty if the dataset path does not exist
   */
  @Override
  public synchronized Collection<FileStatus> getFiles() throws IOException {
    if (filesInitialized) {
      return this.allFileStatus;
    }
    this.filesInitialized = true;
    FileSystem fs = FileSystem.get(rc.getFsURI(), new Configuration());
    if (!fs.exists(this.rc.getPath())) {
      return Collections.emptyList();
    }
    Collection<Path> validPaths = ReplicationDataValidPathPicker.getValidPaths(this);
    //ReplicationDataValidPathPicker.getValidPaths(fs, this.rc.getPath(), this.rdc);
    for (Path p : validPaths) {
      this.allFileStatus.addAll(
          FileListUtils.listFilesRecursively(fs, p, super.getPathFilter(), super.isApplyFilterToDirectories()));
    }
    return this.allFileStatus;
  }
  /**
   * Reads (once) the watermark from the {@value #WATERMARK_FILE} file under the dataset path.
   *
   * @return the deserialized watermark if the file exists and holds a
   *         {@link ComparableWatermark}; absent otherwise
   */
  @Override
  public synchronized Optional<ComparableWatermark> getWatermark() {
    if (this.watermarkInitialized) {
      return this.cachedWatermark;
    }
    this.watermarkInitialized = true;
    try {
      Path metaData = new Path(rc.getPath(), WATERMARK_FILE);
      FileSystem fs = FileSystem.get(rc.getFsURI(), new Configuration());
      if (fs.exists(metaData)) {
        // The reader need not be closed separately: closing fin (via try-with-resources)
        // releases the underlying stream.
        try (FSDataInputStream fin = fs.open(metaData)) {
          InputStreamReader reader = new InputStreamReader(fin, Charsets.UTF_8);
          String content = CharStreams.toString(reader);
          Watermark w = WatermarkMetadataUtil.deserialize(content);
          if (w instanceof ComparableWatermark) {
            this.cachedWatermark = Optional.of((ComparableWatermark) w);
          }
        }
        return this.cachedWatermark;
      }
      // for replica, can not use the file time stamp as that is different with original source time stamp
      return this.cachedWatermark;
    } catch (IOException e) {
      // NOTE(review): the exception is not logged as a cause; consider log.warn(msg, e).
      log.warn("Can not find " + WATERMARK_FILE + " for replica " + this);
      return this.cachedWatermark;
    } catch (WatermarkMetadataUtil.WatermarkMetadataMulFormatException e) {
      log.warn("Can not create watermark from " + WATERMARK_FILE + " for replica " + this);
      return this.cachedWatermark;
    }
  }
  @Override
  public boolean isSource() {
    return false;
  }
  @Override
  public String getEndPointName() {
    return this.replicaName;
  }
  @Override
  public String getClusterName() {
    return this.rc.getClustername();
  }
  @Override
  public String toString() {
    return MoreObjects.toStringHelper(this.getClass())
        .add("is source", this.isSource())
        .add("end point name", this.getEndPointName())
        .add("hadoopfs config", this.rc)
        .toString();
  }
  @Override
  public URI getFsURI() {
    return this.rc.getFsURI();
  }
  @Override
  public Path getDatasetPath() {
    return this.rc.getPath();
  }
  // hashCode/equals are based on the replica config and replica name only; the cached
  // state (watermark, file listing) deliberately does not participate.
  @Override
  public int hashCode() {
    final int prime = 31;
    int result = 1;
    result = prime * result + ((rc == null) ? 0 : rc.hashCode());
    result = prime * result + ((replicaName == null) ? 0 : replicaName.hashCode());
    return result;
  }
  @Override
  public boolean equals(Object obj) {
    if (this == obj) {
      return true;
    }
    if (obj == null) {
      return false;
    }
    if (getClass() != obj.getClass()) {
      return false;
    }
    ReplicaHadoopFsEndPoint other = (ReplicaHadoopFsEndPoint) obj;
    if (rc == null) {
      if (other.rc != null) {
        return false;
      }
    } else if (!rc.equals(other.rc)) {
      return false;
    }
    if (replicaName == null) {
      if (other.replicaName != null) {
        return false;
      }
    } else if (!replicaName.equals(other.replicaName)) {
      return false;
    }
    return true;
  }
}
| 2,660 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/replication/HadoopFsReplicaConfig.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.replication;
import java.net.URI;
import java.net.URISyntaxException;
import org.apache.hadoop.fs.Path;
import com.google.common.base.MoreObjects;
import com.google.common.base.Preconditions;
import com.typesafe.config.Config;
import lombok.Getter;
/**
* Used to encapsulate all the configuration for a hadoop file system replica
* @author mitu
*
*/
/**
 * Used to encapsulate all the configuration for a hadoop file system replica.
 */
public class HadoopFsReplicaConfig {
  public static final String HDFS_COLO_KEY = "cluster.colo";
  public static final String HDFS_CLUSTERNAME_KEY = "cluster.name";
  public static final String HDFS_FILESYSTEM_URI_KEY = "cluster.FsURI";
  public static final String HDFS_PATH_KEY = "path";

  @Getter
  private final String colo;

  @Getter
  private final String clustername;

  @Getter
  private final URI fsURI;

  @Getter
  private final Path path;

  /**
   * @param config must contain {@value #HDFS_COLO_KEY}, {@value #HDFS_CLUSTERNAME_KEY},
   *        {@value #HDFS_PATH_KEY} and {@value #HDFS_FILESYSTEM_URI_KEY}
   * @throws IllegalArgumentException if any required key is missing
   * @throws RuntimeException if the configured file system URI is malformed
   */
  public HadoopFsReplicaConfig(Config config) {
    Preconditions.checkArgument(config.hasPath(HDFS_COLO_KEY));
    Preconditions.checkArgument(config.hasPath(HDFS_CLUSTERNAME_KEY));
    Preconditions.checkArgument(config.hasPath(HDFS_PATH_KEY));
    Preconditions.checkArgument(config.hasPath(HDFS_FILESYSTEM_URI_KEY));

    this.colo = config.getString(HDFS_COLO_KEY);
    this.clustername = config.getString(HDFS_CLUSTERNAME_KEY);
    this.path = new Path(config.getString(HDFS_PATH_KEY));
    try {
      this.fsURI = new URI(config.getString(HDFS_FILESYSTEM_URI_KEY));
    } catch (URISyntaxException e) {
      // Preserve the cause (the original discarded it).
      throw new RuntimeException("can not build URI based on " + config.getString(HDFS_FILESYSTEM_URI_KEY), e);
    }
  }

  @Override
  public String toString() {
    return MoreObjects.toStringHelper(this.getClass()).add("colo", this.colo).add("name", this.clustername)
        .add("FilesystemURI", this.fsURI).add("rootPath", this.path).toString();
  }

  /** Null-safe equality for the members compared in {@link #equals(Object)}. */
  private static boolean memberEquals(Object a, Object b) {
    return (a == null) ? (b == null) : a.equals(b);
  }

  @Override
  public int hashCode() {
    final int prime = 31;
    int result = 1;
    result = prime * result + ((clustername == null) ? 0 : clustername.hashCode());
    result = prime * result + ((colo == null) ? 0 : colo.hashCode());
    result = prime * result + ((fsURI == null) ? 0 : fsURI.hashCode());
    result = prime * result + ((path == null) ? 0 : path.hashCode());
    return result;
  }

  @Override
  public boolean equals(Object obj) {
    if (this == obj) {
      return true;
    }
    if (obj == null || getClass() != obj.getClass()) {
      return false;
    }
    HadoopFsReplicaConfig other = (HadoopFsReplicaConfig) obj;
    return memberEquals(clustername, other.clustername) && memberEquals(colo, other.colo)
        && memberEquals(fsURI, other.fsURI) && memberEquals(path, other.path);
  }
}
| 2,661 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/replication/WatermarkMetadataGenerationCommitStep.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.replication;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import com.google.common.base.Charsets;
import com.google.common.base.MoreObjects;
import org.apache.gobblin.commit.CommitStep;
import org.apache.gobblin.source.extractor.Watermark;
/**
* A {@link CommitStep} to write watermark metadata to Hdfs
* @author mitu
*
*/
public class WatermarkMetadataGenerationCommitStep implements CommitStep {
  private final String fsUriString;
  private final Path targetDirPath;
  private final Watermark watermark;
  private boolean completed = false;

  /**
   * @param fsString URI string of the target file system
   * @param targetDirPath directory in which the watermark metadata file is written
   * @param wm the watermark to persist
   */
  public WatermarkMetadataGenerationCommitStep(String fsString, Path targetDirPath, Watermark wm) {
    this.fsUriString = fsString;
    this.targetDirPath = targetDirPath;
    this.watermark = wm;
  }

  @Override
  public boolean isCompleted() throws IOException {
    return this.completed;
  }

  @Override
  public String toString() {
    return MoreObjects.toStringHelper(this.getClass())
        .add("metafile", new Path(this.targetDirPath, ReplicaHadoopFsEndPoint.WATERMARK_FILE))
        .add("file system uri", this.fsUriString)
        .add("watermark class", this.watermark.getClass().getCanonicalName())
        .add("watermark json", this.watermark.toJson().toString())
        .toString();
  }

  /**
   * Writes the serialized watermark to {@code <targetDirPath>/_metadata}, replacing any
   * existing file, then marks this step completed.
   *
   * @throws IOException if the file system URI is malformed or the write fails
   */
  @Override
  public void execute() throws IOException {
    URI fsURI;
    try {
      fsURI = new URI(this.fsUriString);
    } catch (URISyntaxException e) {
      throw new IOException("can not build URI " + this.fsUriString, e);
    }

    FileSystem fs = FileSystem.get(fsURI, new Configuration());
    Path filenamePath = new Path(this.targetDirPath, ReplicaHadoopFsEndPoint.WATERMARK_FILE);
    if (fs.exists(filenamePath)) {
      fs.delete(filenamePath, false);
    }

    // try-with-resources: the original leaked the output stream if write() threw.
    try (FSDataOutputStream fout = fs.create(filenamePath)) {
      fout.write(WatermarkMetadataUtil.serialize(this.watermark).getBytes(Charsets.UTF_8));
    }
    this.completed = true;
  }
}
| 2,662 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/replication/ReplicationMetaData.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.replication;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import com.google.common.base.Joiner;
import com.google.common.base.MoreObjects;
import com.google.common.base.Optional;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigValue;
import lombok.Data;
/**
 * Represents the metadata of a replication flow (key/value pairs read from the
 * {@link ReplicationConfiguration#METADATA} section of the config, e.g. jira, owner, name).
 *
 * @author mitu
 */
@Data
public class ReplicationMetaData {
  // Absent when the config has no "metadata" section at all.
  private final Optional<Map<String, String>> values;

  /**
   * Parses the {@link ReplicationConfiguration#METADATA} sub-config into a
   * {@link ReplicationMetaData}.
   *
   * @param config the full replication config
   * @return metadata holder; contains {@link Optional#absent()} values when the
   *         metadata section is missing
   */
  public static ReplicationMetaData buildMetaData(Config config) {
    if (!config.hasPath(ReplicationConfiguration.METADATA)) {
      return new ReplicationMetaData(Optional.<Map<String, String>> absent());
    }

    Config metaDataConfig = config.getConfig(ReplicationConfiguration.METADATA);
    Map<String, String> metaDataValues = new HashMap<>();
    Set<Map.Entry<String, ConfigValue>> metaDataEntries = metaDataConfig.entrySet();
    for (Map.Entry<String, ConfigValue> entry : metaDataEntries) {
      metaDataValues.put(entry.getKey(), metaDataConfig.getString(entry.getKey()));
    }

    return new ReplicationMetaData(Optional.of(metaDataValues));
  }

  @Override
  public String toString() {
    Joiner.MapJoiner mapJoiner = Joiner.on(',').withKeyValueSeparator("=");
    // Bug fix: the previous version called this.values.get() unconditionally, which
    // throws IllegalStateException for instances built from a config without a
    // metadata section. Render "absent" instead of crashing.
    String rendered = this.values.isPresent() ? mapJoiner.join(this.values.get()) : "absent";
    return MoreObjects.toStringHelper(this.getClass()).add("metadata", rendered).toString();
  }
}
| 2,663 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/replication/ReplicationConfiguration.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.replication;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import com.google.common.base.Function;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.typesafe.config.Config;
import org.apache.gobblin.data.management.copy.CopyConfiguration;
import org.apache.gobblin.util.ClassAliasResolver;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.filesystem.DataFileVersionStrategy;
import lombok.Getter;
/**
* Class ReplicationConfiguration is used to describe the overall configuration of the replication flow for
* a dataset, including:
* <ul>
* <li>Replication copy mode {@link ReplicationCopyMode}
* <li>Meta data {@link ReplicationMetaData}
* <li>Replication source {@link EndPoint}
* <li>Replication replicas {@link EndPoint}
* <li>Replication topology {@link DataFlowTopology}
* </ul>
* @author mitu
*
*/
public class ReplicationConfiguration {
  // Config keys: copy mode and the optional "metadata" section.
  public static final String REPLICATION_COPY_MODE = "copymode";
  public static final String METADATA = "metadata";
  public static final String METADATA_JIRA = "jira";
  public static final String METADATA_OWNER = "owner";
  public static final String METADATA_NAME = "name";
  // Config keys: the single replication source and the list of replicas.
  public static final String REPLICATION_SOURCE = "source";
  public static final String REPLICATION_REPLICAS = "replicas";
  // NOTE(review): constant name contains a typo ("REPLICATOIN") but is public API — do not rename.
  public static final String REPLICATOIN_REPLICAS_LIST = "list";
  // Config keys: topology, either given inline under "routes" or picked from the per-mode defaults.
  public static final String DATA_FLOW_TOPOLOGY = "dataFlowTopology";
  public static final String DATA_FLOW_TOPOLOGY_ROUTES = "routes";
  public static final String DEFAULT_DATA_FLOW_TOPOLOGIES_PUSHMODE = "defaultDataFlowTopologies_PushMode";
  public static final String DEFAULT_DATA_FLOW_TOPOLOGIES_PULLMODE = "defaultDataFlowTopologies_PullMode";
  public static final String REPLICATION_DATA_CATETORY_TYPE = "replicationDataCategoryType";
  public static final String REPLICATION_DATA_FINITE_INSTANCE = "replicationDataFiniteInstance";
  public static final String COPY_SCHEMA_CHECK_ENABLED = "gobblin.copy.schemaCheck.enabled";
  public static final boolean DEFAULT_COPY_SCHEMA_CHECK_ENABLED = true;
  //copy route generator
  public static final String DELETE_TARGET_IFNOT_ON_SOURCE = "deleteTarget";
  // data flow picker
  public static final String DATA_FLOW_TOPOLOGY_PICKER_CLASS = "dataFlowTopologyPickerClass";
  public static final String DEFAULT_DATA_FLOW_TOPOLOGY_PICKER_CLASS =
      DataFlowTopologyPickerByHadoopFsSource.class.getCanonicalName();
  public static final ClassAliasResolver<DataFlowTopologyPickerBySource> dataFlowTopologyPickerResolver =
      new ClassAliasResolver<>(DataFlowTopologyPickerBySource.class);
  // end point factory
  public static final String END_POINT_FACTORY_CLASS = "endPointFactoryClass";
  public static final String DEFAULT_END_POINT_FACTORY_CLASS = HadoopFsEndPointFactory.class.getCanonicalName();
  public static final ClassAliasResolver<EndPointFactory> endPointFactoryResolver =
      new ClassAliasResolver<>(EndPointFactory.class);
  // copy route generator
  public static final String COPYROUTE_OPTIMIZER_CLASS = "copyRouteOptimizerClass";
  public static final String DEFAULT_COPYROUTE_OPTIMIZER_CLASS = CopyRouteGeneratorOptimizedNetworkBandwidth.class.getCanonicalName();
  public static final ClassAliasResolver<CopyRouteGenerator> copyRouteGeneratorResolver =
      new ClassAliasResolver<>(CopyRouteGenerator.class);

  /** PUSH or PULL; determines how the topology routes are interpreted. */
  @Getter
  private final ReplicationCopyMode copyMode;

  @Getter
  private final boolean schemaCheckEnabled;

  /** Selection-policy config (from "gobblin.selected.policy"), handed to the end point factories. */
  @Getter
  private final Config selectionConfig;

  @Getter
  private final ReplicationMetaData metaData;

  /** The single source end point of the replication. */
  @Getter
  private final EndPoint source;

  /** All replica end points of the replication. */
  @Getter
  private final List<EndPoint> replicas;

  // NOTE(review): field name contains a typo ("Toplogy"); lombok generates getDataFlowToplogy(),
  // so renaming would break callers.
  @Getter
  private final DataFlowTopology dataFlowToplogy;

  @Getter
  private final CopyRouteGenerator copyRouteGenerator;

  @Getter
  private final boolean deleteTargetIfNotExistOnSource;

  /** Optional data-file version strategy name, if declared in the config store. */
  @Getter
  private final Optional<String> versionStrategyFromConfigStore;

  /** Optional override for enforcing source/target file length match, if declared in the config store. */
  @Getter
  private final Optional<Boolean> enforceFileSizeMatchFromConfigStore;

  /**
   * Builds a {@link ReplicationConfiguration} from a raw (unresolved) config.
   * The builder-call order matters: the selection config must be set before the
   * source/replica end points, and the end points before the topology is built.
   *
   * @param input raw replication config; must not be null
   * @throws InstantiationException/IllegalAccessException/ClassNotFoundException when a
   *         configured factory, picker or route-generator class cannot be instantiated
   */
  public static ReplicationConfiguration buildFromConfig(Config input)
      throws InstantiationException, IllegalAccessException, ClassNotFoundException {
    Preconditions.checkArgument(input != null, "can not build ReplicationConfig from null");
    Config config = input.resolve();
    return new Builder().withReplicationMetaData(ReplicationMetaData.buildMetaData(config))
        .withReplicationCopyMode(ReplicationCopyMode.getReplicationCopyMode(config))
        .withSelectionConfig(config.getConfig("gobblin.selected.policy"))
        .withReplicationSource(config)
        .withReplicationReplica(config)
        .withDefaultDataFlowTopologyConfig_PullMode(config)
        .withDefaultDataFlowTopologyConfig_PushMode(config)
        .withDataFlowTopologyConfig(config)
        .withCopyRouteGenerator(config)
        .withDeleteTarget(config)
        .withVersionStrategyFromConfigStore(config)
        .withEnforceFileSizeMatchFromConfigStore(config)
        .withSchemaCheckEnabled(config)
        .build();
  }

  // Instances are only created through the Builder (see buildFromConfig).
  private ReplicationConfiguration(Builder builder) {
    this.metaData = builder.metaData;
    this.source = builder.source;
    this.replicas = builder.replicas;
    this.copyMode = builder.copyMode;
    this.selectionConfig = builder.selectionConfig;
    this.dataFlowToplogy = builder.dataFlowTopology;
    this.copyRouteGenerator = builder.copyRouteGenerator;
    this.deleteTargetIfNotExistOnSource = builder.deleteTargetIfNotExistOnSource;
    this.versionStrategyFromConfigStore = builder.versionStrategyFromConfigStore;
    this.enforceFileSizeMatchFromConfigStore = builder.enforceFileMatchFromConfigStore;
    this.schemaCheckEnabled = builder.schemaCheckEnabled;
  }

  private static class Builder {
    private boolean schemaCheckEnabled;
    private ReplicationMetaData metaData;
    private EndPoint source;
    private List<EndPoint> replicas = new ArrayList<EndPoint>();
    private ReplicationCopyMode copyMode;
    private Config selectionConfig;
    private Config dataFlowTopologyConfig;
    private Optional<Config> defaultDataFlowTopology_PushModeConfig;
    private Optional<Config> defaultDataFlowTopology_PullModeConfig;
    private DataFlowTopology dataFlowTopology = new DataFlowTopology();
    private CopyRouteGenerator copyRouteGenerator;
    private boolean deleteTargetIfNotExistOnSource = false;
    private Optional<String> versionStrategyFromConfigStore = Optional.absent();
    private Optional<Boolean> enforceFileMatchFromConfigStore = Optional.absent();

    /** Reads the optional file-length-match override; absent when the key is not set. */
    public Builder withEnforceFileSizeMatchFromConfigStore(Config config) {
      this.enforceFileMatchFromConfigStore = config.hasPath(CopyConfiguration.ENFORCE_FILE_LENGTH_MATCH)?
          Optional.of(config.getBoolean(CopyConfiguration.ENFORCE_FILE_LENGTH_MATCH)) :
          Optional.absent();
      return this;
    }

    /** Reads the schema-check flag, defaulting to {@link #DEFAULT_COPY_SCHEMA_CHECK_ENABLED}. */
    public Builder withSchemaCheckEnabled(Config config) {
      this.schemaCheckEnabled = ConfigUtils.getBoolean(config, COPY_SCHEMA_CHECK_ENABLED, DEFAULT_COPY_SCHEMA_CHECK_ENABLED);
      return this;
    }

    /** Reads the optional data-file version strategy; absent when the key is not set. */
    public Builder withVersionStrategyFromConfigStore(Config config) {
      this.versionStrategyFromConfigStore = config.hasPath(DataFileVersionStrategy.DATA_FILE_VERSION_STRATEGY_KEY)?
          Optional.of(config.getString(DataFileVersionStrategy.DATA_FILE_VERSION_STRATEGY_KEY)) :
          Optional.absent();
      return this;
    }

    public Builder withReplicationMetaData(ReplicationMetaData metaData) {
      this.metaData = metaData;
      return this;
    }

    /** Reads the optional delete-on-target flag; stays false when the key is not set. */
    public Builder withDeleteTarget(Config config){
      if(config.hasPath(DELETE_TARGET_IFNOT_ON_SOURCE)){
        this.deleteTargetIfNotExistOnSource = config.getBoolean(DELETE_TARGET_IFNOT_ON_SOURCE);
      }
      return this;
    }

    /** Instantiates the route generator, falling back to the network-bandwidth-optimized default. */
    public Builder withCopyRouteGenerator(Config config)
        throws InstantiationException, IllegalAccessException, ClassNotFoundException {
      String copyRouteGeneratorStr = config.hasPath(COPYROUTE_OPTIMIZER_CLASS)?
          config.getString(COPYROUTE_OPTIMIZER_CLASS): DEFAULT_COPYROUTE_OPTIMIZER_CLASS;
      this.copyRouteGenerator = copyRouteGeneratorResolver.resolveClass(copyRouteGeneratorStr).newInstance();
      return this;
    }

    /**
     * Builds the source {@link EndPoint} from the required "source" sub-config.
     * Requires {@link #withSelectionConfig(Config)} to have been called first,
     * since the selection config is passed to the factory.
     */
    public Builder withReplicationSource(Config config)
        throws InstantiationException, IllegalAccessException, ClassNotFoundException {
      Preconditions.checkArgument(config.hasPath(REPLICATION_SOURCE),
          "missing required config entry " + REPLICATION_SOURCE);

      Config sourceConfig = config.getConfig(REPLICATION_SOURCE);
      String endPointFactory = sourceConfig.hasPath(END_POINT_FACTORY_CLASS)
          ? sourceConfig.getString(END_POINT_FACTORY_CLASS) : DEFAULT_END_POINT_FACTORY_CLASS;

      EndPointFactory factory = endPointFactoryResolver.resolveClass(endPointFactory).newInstance();
      this.source = factory.buildSource(sourceConfig, this.selectionConfig);
      return this;
    }

    /**
     * Builds one replica {@link EndPoint} per name listed under "replicas.list".
     * Requires {@link #withSelectionConfig(Config)} to have been called first.
     */
    public Builder withReplicationReplica(Config config)
        throws InstantiationException, IllegalAccessException, ClassNotFoundException {
      Preconditions.checkArgument(config.hasPath(REPLICATION_REPLICAS),
          "missing required config entery " + REPLICATION_REPLICAS);

      Config replicasConfig = config.getConfig(REPLICATION_REPLICAS);
      Preconditions.checkArgument(replicasConfig.hasPath(REPLICATOIN_REPLICAS_LIST),
          "missing required config entery " + REPLICATOIN_REPLICAS_LIST);
      List<String> replicaNames = replicasConfig.getStringList(REPLICATOIN_REPLICAS_LIST);

      for (String replicaName : replicaNames) {
        Preconditions.checkArgument(replicasConfig.hasPath(replicaName), "missing replica name " + replicaName);
        Config subConfig = replicasConfig.getConfig(replicaName);

        // each replica could have own EndPointFactory resolver
        String endPointFactory = subConfig.hasPath(END_POINT_FACTORY_CLASS)
            ? subConfig.getString(END_POINT_FACTORY_CLASS) : DEFAULT_END_POINT_FACTORY_CLASS;
        EndPointFactory factory = endPointFactoryResolver.resolveClass(endPointFactory).newInstance();
        this.replicas.add(factory.buildReplica(subConfig, replicaName, this.selectionConfig));
      }
      return this;
    }

    /** Stores the required "dataFlowTopology" sub-config; parsed later in {@link #build()}. */
    public Builder withDataFlowTopologyConfig(Config config) {
      Preconditions.checkArgument(config.hasPath(DATA_FLOW_TOPOLOGY),
          "missing required config entery " + DATA_FLOW_TOPOLOGY);

      this.dataFlowTopologyConfig = config.getConfig(DATA_FLOW_TOPOLOGY);
      return this;
    }

    /** Stores the optional default push-mode topologies, used when no inline routes are given. */
    public Builder withDefaultDataFlowTopologyConfig_PushMode(Config config) {
      if (config.hasPath(DEFAULT_DATA_FLOW_TOPOLOGIES_PUSHMODE)) {
        this.defaultDataFlowTopology_PushModeConfig =
            Optional.of(config.getConfig(DEFAULT_DATA_FLOW_TOPOLOGIES_PUSHMODE));
      } else {
        this.defaultDataFlowTopology_PushModeConfig = Optional.absent();
      }
      return this;
    }

    /** Stores the optional default pull-mode topologies, used when no inline routes are given. */
    public Builder withDefaultDataFlowTopologyConfig_PullMode(Config config) {
      if (config.hasPath(DEFAULT_DATA_FLOW_TOPOLOGIES_PULLMODE)) {
        this.defaultDataFlowTopology_PullModeConfig =
            Optional.of(config.getConfig(DEFAULT_DATA_FLOW_TOPOLOGIES_PULLMODE));
      } else {
        this.defaultDataFlowTopology_PullModeConfig = Optional.absent();
      }
      return this;
    }

    public Builder withReplicationCopyMode(ReplicationCopyMode copyMode) {
      this.copyMode = copyMode;
      return this;
    }

    public Builder withSelectionConfig(Config selectionConfig) {
      this.selectionConfig = selectionConfig;
      return this;
    }

    /**
     * Resolves the routes to use: inline "routes" if present, otherwise a
     * {@link DataFlowTopologyPickerBySource} chooses among the per-mode defaults.
     */
    private void constructDataFlowTopology()
        throws InstantiationException, IllegalAccessException, ClassNotFoundException {
      if (this.dataFlowTopologyConfig.hasPath(DATA_FLOW_TOPOLOGY_ROUTES)) {
        Config routesConfig = dataFlowTopologyConfig.getConfig(DATA_FLOW_TOPOLOGY_ROUTES);
        constructDataFlowTopologyWithConfig(routesConfig);
        return;
      }

      // topology not specified in literal, need to pick one topology from the defaults
      String topologyPickerStr = this.dataFlowTopologyConfig.hasPath(DATA_FLOW_TOPOLOGY_PICKER_CLASS)?
          this.dataFlowTopologyConfig.getString(DATA_FLOW_TOPOLOGY_PICKER_CLASS): DEFAULT_DATA_FLOW_TOPOLOGY_PICKER_CLASS;
      DataFlowTopologyPickerBySource picker =
          dataFlowTopologyPickerResolver.resolveClass(topologyPickerStr).newInstance();

      if (this.copyMode == ReplicationCopyMode.PULL) {
        Preconditions.checkArgument(this.defaultDataFlowTopology_PullModeConfig.isPresent(),
            "No topology to pick in pull mode");
        Config preferredTopology =
            picker.getPreferredRoutes(this.defaultDataFlowTopology_PullModeConfig.get(), this.source);
        Config routesConfig = preferredTopology.getConfig(DATA_FLOW_TOPOLOGY_ROUTES);

        constructDataFlowTopologyWithConfig(routesConfig);
      } else {
        Preconditions.checkArgument(this.defaultDataFlowTopology_PushModeConfig.isPresent(),
            "No topology to pick in push mode");
        Config preferredTopology =
            picker.getPreferredRoutes(this.defaultDataFlowTopology_PushModeConfig.get(), this.source);
        Config routesConfig = preferredTopology.getConfig(DATA_FLOW_TOPOLOGY_ROUTES);

        constructDataFlowTopologyWithConfig(routesConfig);
      }
    }

    /**
     * Translates a routes config into {@link DataFlowTopology.DataFlowPath}s.
     * PULL mode: each replica lists the end points it copies from.
     * PUSH mode: each end point lists the replicas it copies to; a replica may be
     * pushed to by at most one end point, and every replica must be covered.
     */
    private void constructDataFlowTopologyWithConfig(Config routesConfig) {
      // NOTE(review): message wording is off — the check actually rejects a null/empty config.
      Preconditions.checkArgument(routesConfig != null && !routesConfig.isEmpty(),
          "Can not build topology without empty config");
      Preconditions.checkArgument(this.source != null, "Can not build topology without source");
      Preconditions.checkArgument(this.replicas.size() != 0, "Can not build topology without replicas");

      // Only end point names declared as source/replicas are valid route entries.
      final Map<String, EndPoint> validEndPoints = new HashMap<>();
      validEndPoints.put(this.source.getEndPointName(), this.source);
      for (EndPoint p : this.replicas) {
        validEndPoints.put(p.getEndPointName(), p);
      }

      // PULL mode
      if (this.copyMode == ReplicationCopyMode.PULL) {
        // copy to original source will be ignored
        for (final EndPoint replica : this.replicas) {
          Preconditions.checkArgument(routesConfig.hasPath(replica.getEndPointName()),
              "Can not find the pull flow for replia " + replica.getEndPointName());

          List<String> copyFromStringsRaw = routesConfig.getStringList(replica.getEndPointName());
          // filter out invalid entries
          List<String> copyFromStrings = new ArrayList<>();
          for(String s: copyFromStringsRaw){
            if(validEndPoints.containsKey(s)){
              copyFromStrings.add(s);
            }
          }

          List<CopyRoute> copyPairs = Lists.transform(copyFromStrings, new Function<String, CopyRoute>() {
            @Override
            public CopyRoute apply(String t) {
              // create CopyPair in Pull mode
              return new CopyRoute(validEndPoints.get(t), replica);
            }
          });

          DataFlowTopology.DataFlowPath dataFlowPath = new DataFlowTopology.DataFlowPath(copyPairs);
          this.dataFlowTopology.addDataFlowPath(dataFlowPath);
        }
      }
      // PUSH mode
      else {
        // Tracks replicas already claimed by some pusher, to forbid duplicate pushes.
        Set<String> currentCopyTo = new HashSet<>();
        for (final Map.Entry<String, EndPoint> valid : validEndPoints.entrySet()) {
          // Only generate copyRoute from the EndPoint that running this job.
          if (routesConfig.hasPath(valid.getKey())) {
            List<String> copyToStringsRaw = routesConfig.getStringList(valid.getKey());
            // Drop the source itself and any name not declared as a valid end point.
            List<String> copyToStrings = new ArrayList<>();
            for(String s: copyToStringsRaw){
              if(!s.equals(this.source.getEndPointName()) &&validEndPoints.containsKey(s)){
                copyToStrings.add(s);
              }
            }

            // filter out invalid entries
            for (String s : copyToStrings) {
              Preconditions.checkArgument(!currentCopyTo.contains(s),
                  "In Push mode, can not have multiple copies to " + s);
            }
            currentCopyTo.addAll(copyToStrings);

            List<CopyRoute> copyPairs = Lists.transform(copyToStrings, new Function<String, CopyRoute>() {
              @Override
              public CopyRoute apply(String t) {
                // create CopyPair in Push mode
                return new CopyRoute(valid.getValue(), validEndPoints.get(t));
              }
            });

            DataFlowTopology.DataFlowPath dataFlowPath = new DataFlowTopology.DataFlowPath(copyPairs);
            this.dataFlowTopology.addDataFlowPath(dataFlowPath);
          }
        }

        Preconditions.checkArgument(currentCopyTo.size() == this.replicas.size(),
            "Not all replicas have valid data flow in push mode");
      }
    }

    public ReplicationConfiguration build()
        throws InstantiationException, IllegalAccessException, ClassNotFoundException {
      // Topology can only be constructed once source, replicas and mode are all set.
      constructDataFlowTopology();
      return new ReplicationConfiguration(this);
    }
  }
}
| 2,664 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/entities/PrePublishStep.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.entities;
import java.util.Map;
import org.apache.gobblin.commit.CommitStep;
/**
 * A {@link CommitStepCopyEntity} whose {@link CommitStep} runs before any files are
 * published. Relative ordering among {@link PrePublishStep}s follows their priority.
 */
public class PrePublishStep extends CommitStepCopyEntity {

  public PrePublishStep(String fileSet, Map<String, String> additionalMetadata, CommitStep step, int priority) {
    super(fileSet, additionalMetadata, step, priority);
  }

  /** Human-readable description of this step, including its priority. */
  @Override
  public String explain() {
    CommitStep underlyingStep = getStep();
    return String.format("Pre publish step with priority %s: %s", this.getPriority(), underlyingStep.toString());
  }
}
| 2,665 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/entities/CommitStepCopyEntity.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.entities;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import java.util.Map;
import org.apache.gobblin.commit.CommitStep;
import org.apache.gobblin.data.management.copy.CopyEntity;
/**
 * A {@link CopyEntity} that wraps a {@link CommitStep}, allowing a commit action to be
 * carried through a copy flow alongside file entities.
 */
@EqualsAndHashCode(callSuper = true)
public class CommitStepCopyEntity extends CopyEntity {

  /** The wrapped commit step. */
  @Getter
  private final CommitStep step;

  /** A priority value that can be used for sorting {@link CommitStepCopyEntity}s. Lower values are higher priority.*/
  @Getter
  private final int priority;

  public CommitStepCopyEntity(String fileSet, Map<String, String> additionalMetadata, CommitStep step, int priority) {
    super(fileSet, additionalMetadata);
    this.step = step;
    this.priority = priority;
  }

  /** Describes this entity via the wrapped step's string form. */
  @Override
  public String explain() {
    return getStep().toString();
  }
}
| 2,666 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/entities/PostPublishStep.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.entities;
import java.util.Map;
import org.apache.gobblin.commit.CommitStep;
/**
 * A {@link CommitStepCopyEntity} whose {@link CommitStep} runs after the files have been
 * published. Relative ordering among {@link PostPublishStep}s follows their priority.
 */
public class PostPublishStep extends CommitStepCopyEntity {

  public PostPublishStep(String fileSet, Map<String, String> additionalMetadata, CommitStep step, int priority) {
    super(fileSet, additionalMetadata, step, priority);
  }

  /** Human-readable description of this step, including its priority. */
  @Override
  public String explain() {
    CommitStep underlyingStep = getStep();
    return String.format("Post publish step with priority %s: %s", this.getPriority(), underlyingStep.toString());
  }
}
| 2,667 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/partition/FileSet.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.partition;
import java.io.IOException;
import java.util.Collection;
import java.util.List;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import lombok.AccessLevel;
import lombok.Getter;
import lombok.NonNull;
import lombok.RequiredArgsConstructor;
import lombok.Setter;
import org.apache.gobblin.data.management.copy.CopyEntity;
import org.apache.gobblin.data.management.copy.CopyableFile;
import org.apache.gobblin.dataset.Dataset;
import org.apache.gobblin.util.request_allocation.Request;
import org.apache.gobblin.util.request_allocation.Requestor;
/**
* A named subset of {@link File}s in a {@link Dataset}. (Useful for partitions, versions, etc.).
*
* The actual list of files in this {@link FileSet} is, in ideal circumstances, generated lazily. As such, the method
* {@link #getFiles()} should only be called when the actual list of files is needed.
*/
@RequiredArgsConstructor(access = AccessLevel.PROTECTED)
public abstract class FileSet<T extends CopyEntity> implements Request<FileSet<CopyEntity>> {

  /**
   * A builder for {@link StaticFileSet} provided for backwards compatibility. The output of this builder is not lazy.
   */
  public static class Builder<T extends CopyEntity> {
    private final String name;
    private final List<T> files = Lists.newArrayList();
    private final Dataset dataset;

    public Builder(String name, Dataset dataset) {
      if (name == null) {
        throw new RuntimeException("Name cannot be null.");
      }
      this.name = name;
      this.dataset = dataset;
    }

    /** Adds a single entity to the file set under construction. */
    public Builder<T> add(T t) {
      this.files.add(t);
      return this;
    }

    /** Adds all entities in the collection to the file set under construction. */
    public Builder<T> add(Collection<T> collection) {
      this.files.addAll(collection);
      return this;
    }

    /** @return a non-lazy {@link StaticFileSet} over the accumulated entities. */
    public FileSet<T> build() {
      return new StaticFileSet<>(this.name, this.dataset, this.files);
    }
  }

  @Getter
  @NonNull private final String name;
  @Getter
  private final Dataset dataset;

  // Lazily-populated cache of the copy entities; null until first requested.
  private ImmutableList<T> generatedEntities;
  // -1 is a sentinel meaning "not yet computed".
  private long totalSize = -1;
  private int totalEntities = -1;

  @Setter
  @Getter
  private Requestor<FileSet<CopyEntity>> requestor;

  /**
   * @return the copy entities of this file set, generating them on first call
   *         (see {@link #generateCopyEntities()}).
   */
  public ImmutableList<T> getFiles() {
    ensureFilesGenerated();
    return this.generatedEntities;
  }

  /**
   * @return the total origin size in bytes across {@link CopyableFile} entities;
   *         other entity types contribute 0.
   */
  public long getTotalSizeInBytes() {
    ensureStatsComputed();
    return this.totalSize;
  }

  /** @return the number of copy entities in this file set. */
  public int getTotalEntities() {
    ensureStatsComputed();
    return this.totalEntities;
  }

  // Generates and caches the entities exactly once; any generation failure is
  // wrapped in a RuntimeException that names this file set.
  private void ensureFilesGenerated() {
    if (this.generatedEntities == null) {
      try {
        this.generatedEntities = ImmutableList.copyOf(generateCopyEntities());
      } catch (Exception exc) {
        throw new RuntimeException("Failed to generate files for file set " + name, exc);
      }
      recomputeStats();
    }
  }

  // Guarantees the size/count stats are valid, generating the entities first if needed.
  private void ensureStatsComputed() {
    ensureFilesGenerated();
    if (this.totalEntities < 0 || this.totalSize < 0) {
      recomputeStats();
    }
  }

  // Recounts entities and sums CopyableFile origin lengths into totalSize.
  private void recomputeStats() {
    this.totalEntities = this.generatedEntities.size();
    this.totalSize = 0;
    for (CopyEntity copyEntity : this.generatedEntities) {
      if (copyEntity instanceof CopyableFile) {
        this.totalSize += ((CopyableFile) copyEntity).getOrigin().getLen();
      }
    }
  }

  /**
   * This method is called lazily when needed and only once, it is intended to do the heavy work of generating the
   * {@link CopyEntity}s.
   * @return The {@link Collection} of {@link CopyEntity}s in this file set.
   * @throws IOException
   */
  protected abstract Collection<T> generateCopyEntities() throws IOException;

  /** @return "datasetURN@name", identifying this file set within its dataset. */
  @Override
  public String toString() {
    return this.dataset.datasetURN() + "@" + this.name;
  }
}
| 2,668 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/partition/File.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.partition;
import org.apache.hadoop.fs.FileStatus;
/**
* Interface representing a File.
*
* This interface is implemented by file abstractions such as {@link org.apache.gobblin.data.management.copy.CopyEntity}.
*
*/
public interface File {

  /** @return the Hadoop {@link FileStatus} describing this file. */
  public FileStatus getFileStatus();
}
| 2,669 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/partition/FileSetResourceEstimator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.partition;
import com.typesafe.config.Config;
import org.apache.gobblin.data.management.copy.CopyEntity;
import org.apache.gobblin.data.management.copy.CopyResourcePool;
import org.apache.gobblin.util.request_allocation.ResourceEstimator;
import org.apache.gobblin.util.request_allocation.ResourcePool;
import org.apache.gobblin.util.request_allocation.ResourceRequirement;
/**
 * A {@link ResourceEstimator} for distcp {@link FileSet}s: it obtains a requirement
 * builder from a {@link CopyResourcePool} and fills in the entity count and total bytes.
 */
public class FileSetResourceEstimator implements ResourceEstimator<FileSet<CopyEntity>> {

  static class Factory implements ResourceEstimator.Factory<FileSet<CopyEntity>> {
    /** Creates a stateless estimator; the supplied config is not consulted. */
    @Override
    public ResourceEstimator<FileSet<CopyEntity>> create(Config config) {
      return new FileSetResourceEstimator();
    }
  }

  @Override
  public ResourceRequirement estimateRequirement(FileSet<CopyEntity> copyEntityFileSet, ResourcePool pool) {
    // Only a CopyResourcePool knows how to build distcp resource requirements.
    if (!(pool instanceof CopyResourcePool)) {
      throw new IllegalArgumentException("Must use a " + CopyResourcePool.class.getSimpleName());
    }
    CopyResourcePool copyPool = (CopyResourcePool) pool;
    return copyPool.getCopyResourceRequirementBuilder()
        .setEntities(copyEntityFileSet.getTotalEntities())
        .setBytes(copyEntityFileSet.getTotalSizeInBytes())
        .build();
  }
}
| 2,670 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/partition/StaticFileSet.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.partition;
import java.io.IOException;
import java.util.Collection;
import java.util.List;
import org.apache.gobblin.data.management.copy.CopyEntity;
import org.apache.gobblin.dataset.Dataset;
/**
 * A non-lazy {@link FileSet} where the copy entities are a static, eagerly computed list of {@link CopyEntity}s.
 * @param <T> concrete {@link CopyEntity} type held by this file set
 */
public class StaticFileSet<T extends CopyEntity> extends FileSet<T> {
  // Entities supplied at construction; returned as-is (no defensive copy is made, so the
  // caller's list must not be mutated afterwards — TODO confirm callers honor this).
  private final List<T> files;

  public StaticFileSet(String name, Dataset dataset, List<T> files) {
    super(name, dataset);
    this.files = files;
  }

  // No lazy generation: simply hands back the pre-computed list.
  @Override
  protected Collection<T> generateCopyEntities()
      throws IOException {
    return this.files;
  }
}
| 2,671 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/partition/CopyableDatasetRequestor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.partition;
import java.io.IOException;
import java.util.Collections;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
import org.apache.hadoop.fs.FileSystem;
import org.slf4j.Logger;
import com.google.common.base.Function;
import com.google.common.collect.Iterators;
import com.google.common.collect.Lists;
import javax.annotation.Nullable;
import lombok.AllArgsConstructor;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.data.management.copy.CopyConfiguration;
import org.apache.gobblin.data.management.copy.CopyEntity;
import org.apache.gobblin.data.management.copy.CopyableDataset;
import org.apache.gobblin.data.management.copy.CopyableDatasetBase;
import org.apache.gobblin.data.management.copy.IterableCopyableDataset;
import org.apache.gobblin.data.management.copy.IterableCopyableDatasetImpl;
import org.apache.gobblin.data.management.copy.prioritization.PrioritizedCopyableDataset;
import org.apache.gobblin.util.request_allocation.PushDownRequestor;
/**
* A wrapper around a {@link CopyableDatasetBase} that makes it a {@link PushDownRequestor} for prioritization.
*/
@Slf4j
@AllArgsConstructor
@Getter
public class CopyableDatasetRequestor implements PushDownRequestor<FileSet<CopyEntity>> {
@AllArgsConstructor
public static class Factory implements Function<CopyableDatasetBase, CopyableDatasetRequestor> {
private final FileSystem targetFs;
private final CopyConfiguration copyConfiguration;
private final Logger log;
@Nullable
@Override
public CopyableDatasetRequestor apply(CopyableDatasetBase input) {
IterableCopyableDataset iterableCopyableDataset;
if (input instanceof IterableCopyableDataset) {
iterableCopyableDataset = (IterableCopyableDataset) input;
} else if (input instanceof CopyableDataset) {
iterableCopyableDataset = new IterableCopyableDatasetImpl((CopyableDataset) input);
} else {
log.error(String.format("Cannot process %s, can only copy %s or %s.",
input == null ? null : input.getClass().getName(),
CopyableDataset.class.getName(), IterableCopyableDataset.class.getName()));
return null;
}
return new CopyableDatasetRequestor(this.targetFs, this.copyConfiguration, iterableCopyableDataset);
}
}
private final FileSystem targetFs;
private final CopyConfiguration copyConfiguration;
private final IterableCopyableDataset dataset;
@Override
public Iterator<FileSet<CopyEntity>> iterator() {
try {
return injectRequestor(this.dataset.getFileSetIterator(this.targetFs, this.copyConfiguration));
} catch (Throwable exc) {
if (copyConfiguration.isAbortOnSingleDatasetFailure()) {
throw new RuntimeException(String.format("Could not get FileSets for dataset %s", this.dataset.datasetURN()), exc);
}
log.error(String.format("Could not get FileSets for dataset %s. Skipping.", this.dataset.datasetURN()), exc);
return Collections.emptyIterator();
}
}
@Override
public Iterator<FileSet<CopyEntity>> getRequests(Comparator<FileSet<CopyEntity>> prioritizer) throws IOException {
if (this.dataset instanceof PrioritizedCopyableDataset) {
return ((PrioritizedCopyableDataset) this.dataset)
.getFileSetIterator(this.targetFs, this.copyConfiguration, prioritizer, this);
}
List<FileSet<CopyEntity>> entities =
Lists.newArrayList(injectRequestor(this.dataset.getFileSetIterator(this.targetFs, this.copyConfiguration)));
Collections.sort(entities, prioritizer);
return entities.iterator();
}
private Iterator<FileSet<CopyEntity>> injectRequestor(Iterator<FileSet<CopyEntity>> iterator) {
return Iterators.transform(iterator, new Function<FileSet<CopyEntity>, FileSet<CopyEntity>>() {
@Override
public FileSet<CopyEntity> apply(FileSet<CopyEntity> input) {
input.setRequestor(CopyableDatasetRequestor.this);
return input;
}
});
}
}
| 2,672 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/policy/SelectAfterTimeBasedPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.policy;
import java.util.Properties;
import lombok.ToString;
import org.joda.time.Period;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.typesafe.config.Config;
import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.data.management.version.TimestampedDatasetVersion;
import org.apache.gobblin.util.ConfigUtils;
/**
 * Selects {@link TimestampedDatasetVersion}s newer than lookbackTime.
 */
@Alias("SelectAfterTimeBasedPolicy")
@ToString(callSuper=true)
public class SelectAfterTimeBasedPolicy extends SelectBetweenTimeBasedPolicy {

  /** Required config key holding the lookback period; versions older than it are not selected. */
  public static final String TIME_BASED_SELECTION_LOOK_BACK_TIME_KEY = "selection.timeBased.lookbackTime";

  public SelectAfterTimeBasedPolicy(Config conf) {
    // No min lookback bound: only the max lookback (how far back to select) is constrained.
    super(Optional.<Period>absent(), Optional.of(getMaxLookbackTime(conf)));
  }

  public SelectAfterTimeBasedPolicy(Properties props) {
    this(ConfigUtils.propertiesToConfig(props));
  }

  /** Parses the mandatory lookback period from config, failing fast when the key is missing. */
  private static Period getMaxLookbackTime(Config conf) {
    String errorMessage =
        String.format("Required property %s is not specified", TIME_BASED_SELECTION_LOOK_BACK_TIME_KEY);
    Preconditions.checkArgument(conf.hasPath(TIME_BASED_SELECTION_LOOK_BACK_TIME_KEY), errorMessage);
    return SelectBetweenTimeBasedPolicy.getLookBackPeriod(conf.getString(TIME_BASED_SELECTION_LOOK_BACK_TIME_KEY));
  }
}
| 2,673 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/policy/SelectNothingPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.policy;
import java.util.Collection;
import java.util.List;
import java.util.Properties;
import lombok.ToString;
import com.google.common.collect.Lists;
import org.apache.gobblin.data.management.version.FileSystemDatasetVersion;
/**
 * Implementation of {@link VersionSelectionPolicy} that selects nothing.
 */
@ToString
public class SelectNothingPolicy implements VersionSelectionPolicy<FileSystemDatasetVersion> {
  // Properties are accepted for reflective-construction compatibility but are not read.
  public SelectNothingPolicy(Properties properties) {}

  @Override
  public Class<? extends FileSystemDatasetVersion> versionClass() {
    return FileSystemDatasetVersion.class;
  }

  // Always returns a fresh, empty (mutable) list regardless of the input versions.
  @Override
  public Collection<FileSystemDatasetVersion> listSelectedVersions(List<FileSystemDatasetVersion> allVersions) {
    return Lists.newArrayList();
  }
}
| 2,674 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/policy/SelectBetweenTimeBasedPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.policy;
import java.util.Collection;
import java.util.List;
import lombok.ToString;
import org.joda.time.DateTime;
import org.joda.time.Period;
import org.joda.time.format.PeriodFormatter;
import org.joda.time.format.PeriodFormatterBuilder;
import com.google.common.base.Optional;
import com.google.common.base.Predicate;
import com.google.common.collect.Collections2;
import com.google.common.collect.Lists;
import com.typesafe.config.Config;
import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.data.management.version.FileSystemDatasetVersion;
import org.apache.gobblin.data.management.version.TimestampedDatasetVersion;
/**
 * Policy used to select versions in a time range. It selects {@link TimestampedDatasetVersion}s that are not older than
 * <code>maxLookBackPeriod</code> and not newer than <code>minLookBackPeriod</code>.
 * <ul>
 * <li> If minLookbackTime is absent, a zero period is used, i.e. any version timestamped before now qualifies
 * <li> If maxLookbackTime is absent, an effectively infinite period (the epoch millis of now) is used for max lookback
 * </ul>
 *
 */
@Alias("SelectBetweenTimeBasedPolicy")
@ToString
public class SelectBetweenTimeBasedPolicy implements VersionSelectionPolicy<TimestampedDatasetVersion> {
  // Bounds of the selection window, both measured backwards from "now" at evaluation time.
  protected final Optional<Period> minLookBackPeriod;
  protected final Optional<Period> maxLookBackPeriod;
  /**
   * Optional max lookback time. Versions older than this will not be selected
   */
  public static final String TIME_BASED_SELECTION_MAX_LOOK_BACK_TIME_KEY = "selection.timeBased.maxLookbackTime";
  /**
   * Optional min lookback time. Versions newer than this will not be selected
   */
  public static final String TIME_BASED_SELECTION_MIN_LOOK_BACK_TIME_KEY = "selection.timeBased.minLookbackTime";

  /** Parses the optional min/max lookback periods from config; an absent key leaves that bound open. */
  public SelectBetweenTimeBasedPolicy(Config conf) {
    this(conf.hasPath(TIME_BASED_SELECTION_MIN_LOOK_BACK_TIME_KEY) ? Optional.of(getLookBackPeriod(conf
        .getString(TIME_BASED_SELECTION_MIN_LOOK_BACK_TIME_KEY))) : Optional.<Period> absent(), conf
        .hasPath(TIME_BASED_SELECTION_MAX_LOOK_BACK_TIME_KEY) ? Optional.of(getLookBackPeriod(conf
        .getString(TIME_BASED_SELECTION_MAX_LOOK_BACK_TIME_KEY))) : Optional.<Period> absent());
  }

  public SelectBetweenTimeBasedPolicy(Optional<Period> minLookBackPeriod, Optional<Period> maxLookBackPeriod) {
    this.minLookBackPeriod = minLookBackPeriod;
    this.maxLookBackPeriod = maxLookBackPeriod;
  }

  @Override
  public Class<? extends FileSystemDatasetVersion> versionClass() {
    return TimestampedDatasetVersion.class;
  }

  @Override
  public Collection<TimestampedDatasetVersion> listSelectedVersions(List<TimestampedDatasetVersion> allVersions) {
    return Lists.newArrayList(Collections2.filter(allVersions, getSelectionPredicate()));
  }

  /**
   * A version is selected when both hold:
   * <ul>
   * <li> version.time + maxLookBack is after now  — the version is not older than maxLookBack;
   * <li> version.time + minLookBack is before now — the version is at least minLookBack old.
   * </ul>
   * Absent maxLookBack defaults to a Period of now's epoch millis (effectively unbounded);
   * absent minLookBack defaults to a zero Period.
   */
  private Predicate<TimestampedDatasetVersion> getSelectionPredicate() {
    return new Predicate<TimestampedDatasetVersion>() {
      @Override
      public boolean apply(TimestampedDatasetVersion version) {
        return version.getDateTime()
            .plus(SelectBetweenTimeBasedPolicy.this.maxLookBackPeriod.or(new Period(DateTime.now().getMillis())))
            .isAfterNow()
            && version.getDateTime().plus(SelectBetweenTimeBasedPolicy.this.minLookBackPeriod.or(new Period(0)))
            .isBeforeNow();
      }
    };
  }

  /** Parses period strings such as {@code 2y3M4d5h6m} (years, months, days, hours, minutes). */
  protected static Period getLookBackPeriod(String lookbackTime) {
    PeriodFormatter periodFormatter =
        new PeriodFormatterBuilder().appendYears().appendSuffix("y").appendMonths().appendSuffix("M").appendDays()
            .appendSuffix("d").appendHours().appendSuffix("h").appendMinutes().appendSuffix("m").toFormatter();
    return periodFormatter.parsePeriod(lookbackTime);
  }
}
| 2,675 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/policy/HiddenFilterSelectionPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.policy;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.Set;
import org.apache.hadoop.fs.Path;
import com.google.common.base.Predicate;
import com.google.common.collect.Collections2;
import com.google.common.collect.Lists;
import com.typesafe.config.Config;
import org.apache.gobblin.data.management.version.FileSystemDatasetVersion;
import org.apache.gobblin.util.ConfigUtils;
/*
 * Select dataset versions that pass the hidden path filter i.e. accept paths that do not have sub-dirs whose names start with "." or "_".
 */
public class HiddenFilterSelectionPolicy implements VersionSelectionPolicy<FileSystemDatasetVersion> {
  /** Optional config key listing prefixes that mark a path component as hidden. */
  public static final String HIDDEN_FILTER_HIDDEN_FILE_PREFIX_KEY = "selection.hiddenFilter.hiddenFilePrefix";
  private static final String[] DEFAULT_HIDDEN_FILE_PREFIXES = {".", "_"};

  // Prefixes that classify a path component as hidden; defaults to "." and "_".
  private final List<String> hiddenFilePrefixes;

  public HiddenFilterSelectionPolicy(Config config) {
    if (config.hasPath(HIDDEN_FILTER_HIDDEN_FILE_PREFIX_KEY)) {
      this.hiddenFilePrefixes = ConfigUtils.getStringList(config, HIDDEN_FILTER_HIDDEN_FILE_PREFIX_KEY);
    } else {
      this.hiddenFilePrefixes = Arrays.asList(DEFAULT_HIDDEN_FILE_PREFIXES);
    }
  }

  @Override
  public Class<? extends FileSystemDatasetVersion> versionClass() {
    return FileSystemDatasetVersion.class;
  }

  /** Returns true if any component of {@code path}, from the leaf up to the root, starts with a hidden prefix. */
  private boolean isPathHidden(Path path) {
    // Walk towards the root so that hidden ancestor directories also disqualify the path.
    while (path != null) {
      String name = path.getName();
      for (String prefix : this.hiddenFilePrefixes) {
        if (name.startsWith(prefix)) {
          return true;
        }
      }
      path = path.getParent();
    }
    return false;
  }

  /** Accepts a version only if none of its paths contain a hidden component. */
  private Predicate<FileSystemDatasetVersion> getSelectionPredicate() {
    return new Predicate<FileSystemDatasetVersion>() {
      @Override
      public boolean apply(FileSystemDatasetVersion version) {
        Set<Path> paths = version.getPaths();
        for (Path path : paths) {
          // Fix: getPathWithoutSchemeAndAuthority is a static method on Path; call it via the
          // class rather than through an instance reference (which misleadingly implied it
          // operated on the receiver).
          Path p = Path.getPathWithoutSchemeAndAuthority(path);
          if (isPathHidden(p)) {
            return false;
          }
        }
        return true;
      }
    };
  }

  @Override
  public Collection<FileSystemDatasetVersion> listSelectedVersions(List<FileSystemDatasetVersion> allVersions) {
    return Lists.newArrayList(Collections2.filter(allVersions, getSelectionPredicate()));
  }
}
| 2,676 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/policy/EmbeddedRetentionSelectionPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.policy;
import java.util.Collection;
import java.util.List;
import lombok.AllArgsConstructor;
import lombok.ToString;
import org.apache.gobblin.data.management.retention.policy.RetentionPolicy;
import org.apache.gobblin.data.management.version.FileSystemDatasetVersion;
/**
 * A wrapper {@link VersionSelectionPolicy} that delegates calls to deprecated {@link RetentionPolicy}
 */
@AllArgsConstructor
@ToString
public class EmbeddedRetentionSelectionPolicy<T extends FileSystemDatasetVersion> implements VersionSelectionPolicy<T> {
  // The legacy policy all calls are forwarded to; injected via the lombok-generated constructor.
  private final RetentionPolicy<T> embeddedRetentionPolicy;

  // The wrapped policy's versionClass() has a wider bound; the cast narrows it to the
  // FileSystemDatasetVersion hierarchy this interface requires — presumably guaranteed by
  // how the wrapped policy is configured (TODO confirm at the call sites).
  @SuppressWarnings("unchecked")
  @Override
  public Class<? extends FileSystemDatasetVersion> versionClass() {
    return (Class<? extends FileSystemDatasetVersion>) this.embeddedRetentionPolicy.versionClass();
  }

  // "Selected" versions in the new API correspond to "deletable" versions in the deprecated one.
  @SuppressWarnings("deprecation")
  @Override
  public Collection<T> listSelectedVersions(List<T> allVersions) {
    return this.embeddedRetentionPolicy.listDeletableVersions(allVersions);
  }
}
| 2,677 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/policy/VersionSelectionPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.policy;
import java.util.Collection;
import java.util.List;
import org.apache.gobblin.data.management.version.DatasetVersion;
/**
 * Selection policy around versions of a dataset. Specifies which versions of a dataset will be selected.
 *
 * <p>Implementations in this package are typically constructed reflectively from a {@code Config}
 * or {@code Properties} object.
 */
public interface VersionSelectionPolicy<T extends DatasetVersion> {
  /**
   * Should return class of T.
   *
   * @return class of T.
   */
  public Class<? extends DatasetVersion> versionClass();
  /**
   * Logic to decide which dataset versions will be selected.
   *
   * @param allVersions List of all dataset versions in the file system, sorted from newest to oldest.
   * @return Collection of dataset versions that are selected.
   */
  public Collection<T> listSelectedVersions(List<T> allVersions);
}
| 2,678 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/policy/SelectAllPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.policy;
import java.util.Collection;
import java.util.List;
import java.util.Properties;
import lombok.ToString;
import org.apache.gobblin.data.management.version.FileSystemDatasetVersion;
/**
 * Implementation of {@link VersionSelectionPolicy} that selects all {@link FileSystemDatasetVersion}s.
 */
@ToString
public class SelectAllPolicy implements VersionSelectionPolicy<FileSystemDatasetVersion> {
  // Properties are accepted for reflective-construction compatibility but are not read.
  public SelectAllPolicy(Properties properties) {}

  @Override
  public Class<? extends FileSystemDatasetVersion> versionClass() {
    return FileSystemDatasetVersion.class;
  }

  // Trivially selects everything: returns the input list itself (no copy).
  @Override
  public Collection<FileSystemDatasetVersion> listSelectedVersions(List<FileSystemDatasetVersion> allVersions) {
    return allVersions;
  }
}
| 2,679 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/policy/CombineSelectionPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.policy;
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.Properties;
import java.util.Set;
import javax.annotation.Nullable;
import lombok.ToString;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Function;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.data.management.retention.policy.CombineRetentionPolicy;
import org.apache.gobblin.data.management.version.DatasetVersion;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
/**
 * Implementation of {@link org.apache.gobblin.data.management.policy.VersionSelectionPolicy} that allows combining different
 * policies through a union or intersect operation. It will combine the selected sets from each sub-policy using the
 * specified operation.
 *
 * <p>
 * For example, if there are five versions of a dataset, a, b, c, d, e, policy1 would select versions a, b, while
 * policy2 would select versions b,c, using {@link CombineSelectionPolicy} will select versions a, b, c if the
 * operation is UNION, or it will select only version b if the operation is INTERSECT.
 * </p>
 *
 * <p>
 * {@link CombineSelectionPolicy} expects the following configurations:
 * * selection.combine.policy.classes : list of the classes of the policies to combine.
 * * selection.combine.operation : operation used to combine selected sets. Can be UNION or INTERSECT.
 * Additionally, any configuration necessary for combined policies must be specified.
 * </p>
 */
@ToString
public class CombineSelectionPolicy implements VersionSelectionPolicy<DatasetVersion> {

  public static final String VERSION_SELECTION_POLICIES_PREFIX = "selection.combine.policy.classes";
  public static final String VERSION_SELECTION_COMBINE_OPERATION = "selection.combine.operation";

  /** How the per-policy selections are merged into the final result. */
  public enum CombineOperation {
    INTERSECT, UNION
  }

  private final List<VersionSelectionPolicy<DatasetVersion>> selectionPolicies;
  private final CombineOperation combineOperation;

  public CombineSelectionPolicy(Config config)
      throws IOException {
    this(config, new Properties());
  }

  public CombineSelectionPolicy(List<VersionSelectionPolicy<DatasetVersion>> selectionPolicies,
      CombineOperation combineOperation) {
    this.combineOperation = combineOperation;
    this.selectionPolicies = selectionPolicies;
  }

  /**
   * Builds the policy from config; {@code jobProps} is passed through to sub-policies that take
   * a {@link Properties} constructor.
   *
   * @throws IOException if the configured list of policies is empty
   * @throws IllegalArgumentException if the policy-class list is missing or a class cannot be instantiated
   */
  @SuppressWarnings("unchecked")
  public CombineSelectionPolicy(Config config, Properties jobProps)
      throws IOException {
    // Fixed error message: this precondition guards the policy-class list, not the combine
    // operation (the previous message claimed "Combine operation not specified.").
    Preconditions.checkArgument(config.hasPath(VERSION_SELECTION_POLICIES_PREFIX),
        "Selection policies (" + VERSION_SELECTION_POLICIES_PREFIX + ") not specified.");
    ImmutableList.Builder<VersionSelectionPolicy<DatasetVersion>> builder = ImmutableList.builder();
    for (String combineClassName : ConfigUtils.getStringList(config, VERSION_SELECTION_POLICIES_PREFIX)) {
      try {
        // Each sub-policy may expose either a Config or a Properties constructor; try both in order.
        builder.add((VersionSelectionPolicy<DatasetVersion>) GobblinConstructorUtils
            .invokeFirstConstructor(Class.forName(combineClassName), ImmutableList.<Object>of(config),
                ImmutableList.<Object>of(jobProps)));
      } catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException | InstantiationException | ClassNotFoundException e) {
        throw new IllegalArgumentException(e);
      }
    }
    this.selectionPolicies = builder.build();
    if (this.selectionPolicies.isEmpty()) {
      throw new IOException("No selection policies specified for " + CombineSelectionPolicy.class.getCanonicalName());
    }
    this.combineOperation =
        CombineOperation.valueOf(config.getString(VERSION_SELECTION_COMBINE_OPERATION).toUpperCase());
  }

  public CombineSelectionPolicy(Properties props)
      throws IOException {
    this(ConfigFactory.parseProperties(props), props);
  }

  /**
   * Returns the most specific common superclass for the {@link #versionClass} of each embedded policy.
   */
  @Override
  public Class<? extends DatasetVersion> versionClass() {
    if (this.selectionPolicies.size() == 1) {
      return this.selectionPolicies.get(0).versionClass();
    }
    Class<? extends DatasetVersion> klazz = this.selectionPolicies.get(0).versionClass();
    for (VersionSelectionPolicy<? extends DatasetVersion> policy : this.selectionPolicies) {
      klazz = commonSuperclass(klazz, policy.versionClass());
    }
    return klazz;
  }

  /** Applies each sub-policy to {@code allVersions} and merges the results per {@link #combineOperation}. */
  @Override
  public Collection<DatasetVersion> listSelectedVersions(final List<DatasetVersion> allVersions) {
    List<Set<DatasetVersion>> candidateSelectedVersions = Lists.newArrayList(Iterables
        .transform(this.selectionPolicies, new Function<VersionSelectionPolicy<DatasetVersion>, Set<DatasetVersion>>() {
          @Nullable
          @Override
          public Set<DatasetVersion> apply(VersionSelectionPolicy<DatasetVersion> input) {
            return Sets.newHashSet(input.listSelectedVersions(allVersions));
          }
        }));
    switch (this.combineOperation) {
      case INTERSECT:
        return intersectDatasetVersions(candidateSelectedVersions);
      case UNION:
        return unionDatasetVersions(candidateSelectedVersions);
      default:
        throw new RuntimeException("Combine operation " + this.combineOperation + " not recognized.");
    }
  }

  /**
   * Finds the most specific class that is a superclass of (or equal to) both arguments, falling
   * back to {@link DatasetVersion} if the walk escapes the DatasetVersion hierarchy.
   */
  @VisibleForTesting
  @SuppressWarnings("unchecked")
  public static Class<? extends DatasetVersion> commonSuperclass(Class<? extends DatasetVersion> classA,
      Class<? extends DatasetVersion> classB) {
    if (classA.isAssignableFrom(classB)) {
      // a is superclass of b, so return class of a
      return classA;
    }
    // a is not superclass of b: walk up a's ancestry until a superclass of b is found
    Class<?> klazz = classA;
    while (!klazz.isAssignableFrom(classB)) {
      klazz = klazz.getSuperclass();
    }
    if (DatasetVersion.class.isAssignableFrom(klazz)) {
      return (Class<? extends DatasetVersion>) klazz;
    }
    // this should never happen, but there for safety
    return DatasetVersion.class;
  }

  /** Intersection of all sets; empty input yields an empty set. */
  private static Set<DatasetVersion> intersectDatasetVersions(Collection<Set<DatasetVersion>> sets) {
    if (sets.isEmpty()) {
      return Sets.newHashSet();
    }
    Iterator<Set<DatasetVersion>> it = sets.iterator();
    Set<DatasetVersion> outputSet = it.next();
    while (it.hasNext()) {
      outputSet = Sets.intersection(outputSet, it.next());
    }
    return outputSet;
  }

  /** Union of all sets; empty input yields an empty set. */
  private static Set<DatasetVersion> unionDatasetVersions(Collection<Set<DatasetVersion>> sets) {
    if (sets.isEmpty()) {
      return Sets.newHashSet();
    }
    Iterator<Set<DatasetVersion>> it = sets.iterator();
    Set<DatasetVersion> outputSet = it.next();
    while (it.hasNext()) {
      outputSet = Sets.union(outputSet, it.next());
    }
    return outputSet;
  }
}
| 2,680 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/policy/NewestKSelectionPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.policy;
import java.util.Collection;
import java.util.List;
import java.util.Properties;
import lombok.Data;
import lombok.ToString;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.data.management.version.DatasetVersion;
/**
 * Selects the newest k versions of a dataset, or, when configured in exclude mode, every
 * version except the newest k.
 */
@ToString
public class NewestKSelectionPolicy<T extends DatasetVersion> implements VersionSelectionPolicy<T> {

  private static final Logger LOGGER = LoggerFactory.getLogger(NewestKSelectionPolicy.class);

  /**
   * The number of newest versions to select. Only one of
   * {@link #NEWEST_K_VERSIONS_SELECTED_KEY} and {@link #NEWEST_K_VERSIONS_NOTSELECTED_KEY} can
   * be specified. The default is {@link #NEWEST_K_VERSIONS_SELECTED_KEY} with a value of
   * {@link #VERSIONS_SELECTED_DEFAULT}. Valid values are in the range
   * [{@link #MIN_VERSIONS_ALLOWED}, {@link #MAX_VERSIONS_ALLOWED}].
   */
  public static final String NEWEST_K_VERSIONS_SELECTED_KEY = "selection.newestK.versionsSelected";

  /**
   * The number of newest versions to exclude from the result. Only one of
   * {@link #NEWEST_K_VERSIONS_SELECTED_KEY} and {@link #NEWEST_K_VERSIONS_NOTSELECTED_KEY} can
   * be specified. The default is {@link #NEWEST_K_VERSIONS_SELECTED_KEY} with a value of
   * {@link #VERSIONS_SELECTED_DEFAULT}. Valid values are in the range
   * [{@link #MIN_VERSIONS_ALLOWED}, {@link #MAX_VERSIONS_ALLOWED}].
   */
  public static final String NEWEST_K_VERSIONS_NOTSELECTED_KEY = "selection.newestK.versionsNotSelected";

  public static final Integer VERSIONS_SELECTED_DEFAULT = 2;
  public static final Integer MAX_VERSIONS_ALLOWED = 1000000;
  public static final Integer MIN_VERSIONS_ALLOWED = 1;

  /** Parsed configuration: how many versions, and whether they are selected or excluded. */
  @Data
  private static class Params {
    private final int versionsSelected;
    private final boolean excludeMode;

    Params(int versionsSelected, boolean excludeMode) {
      Preconditions.checkArgument(versionsSelected >= MIN_VERSIONS_ALLOWED && versionsSelected <= MAX_VERSIONS_ALLOWED);
      this.versionsSelected = versionsSelected;
      this.excludeMode = excludeMode;
    }

    /** Parses the policy parameters, rejecting configs that set both mutually-exclusive keys. */
    static Params createFromConfig(Config config) {
      if (config.hasPath(NEWEST_K_VERSIONS_SELECTED_KEY)) {
        if (config.hasPath(NEWEST_K_VERSIONS_NOTSELECTED_KEY)) {
          throw new RuntimeException("Only one of " + NEWEST_K_VERSIONS_SELECTED_KEY + " and "
              + NEWEST_K_VERSIONS_NOTSELECTED_KEY + " can be specified.");
        }
        return new Params(config.getInt(NEWEST_K_VERSIONS_SELECTED_KEY), false);
      } else if (config.hasPath(NEWEST_K_VERSIONS_NOTSELECTED_KEY)) {
        return new Params(config.getInt(NEWEST_K_VERSIONS_NOTSELECTED_KEY), true);
      } else {
        return new Params(VERSIONS_SELECTED_DEFAULT, false);
      }
    }

    static Params createFromProps(Properties props) {
      return createFromConfig(ConfigFactory.parseProperties(props));
    }
  }

  private final Params params;

  private NewestKSelectionPolicy(Params params) {
    this.params = params;
    // Bug fix: this ternary was previously inverted, logging "select" in exclude mode and
    // "exclude" in select mode.
    LOGGER.info(String.format("Will %s %d versions of each dataset.", (this.params.excludeMode ? "exclude" : "select"),
        this.params.versionsSelected));
  }

  public NewestKSelectionPolicy(int versionsRetained, boolean excludeMode) {
    this(new Params(versionsRetained, excludeMode));
  }

  public NewestKSelectionPolicy(Properties props) {
    this(Params.createFromProps(props));
  }

  public NewestKSelectionPolicy(Config config) {
    this(Params.createFromConfig(config));
  }

  @Override
  public Class<? extends DatasetVersion> versionClass() {
    return DatasetVersion.class;
  }

  /**
   * Returns the selected versions. Assumes {@code allVersions} is ordered newest first --
   * TODO(review): confirm the ordering contract against callers.
   */
  @Override
  public Collection<T> listSelectedVersions(List<T> allVersions) {
    if (this.isExcludeMode()) {
      // Skip the newest k versions and return the rest.
      return getBoundarySafeSublist(allVersions, this.getVersionsSelected(), allVersions.size());
    }
    // Return the newest k versions.
    return getBoundarySafeSublist(allVersions, 0, this.getVersionsSelected());
  }

  /** Like {@link List#subList(int, int)} but clamps both indices to the list size. */
  private List<T> getBoundarySafeSublist(List<T> l, int fromIndex, int toIndex) {
    fromIndex = Math.min(fromIndex, l.size());
    toIndex = Math.min(toIndex, l.size());
    return l.subList(fromIndex, toIndex);
  }

  @VisibleForTesting
  int getVersionsSelected() {
    return this.params.getVersionsSelected();
  }

  @VisibleForTesting
  boolean isExcludeMode() {
    return this.params.isExcludeMode();
  }
}
| 2,681 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/policy/SelectBeforeTimeBasedPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.policy;
import java.util.Properties;
import lombok.ToString;
import org.joda.time.Period;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.typesafe.config.Config;
import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.data.management.version.TimestampedDatasetVersion;
import org.apache.gobblin.util.ConfigUtils;
/**
 * Selects {@link TimestampedDatasetVersion}s older than the configured lookback time.
 */
@Alias("SelectBeforeTimeBasedPolicy")
@ToString(callSuper = true)
public class SelectBeforeTimeBasedPolicy extends SelectBetweenTimeBasedPolicy {

  private static final String TIME_BASED_SELECTION_LOOK_BACK_TIME_KEY = "selection.timeBased.lookbackTime";

  public SelectBeforeTimeBasedPolicy(Config conf) {
    // Only a minimum lookback is configured; the maximum bound is left absent so that
    // everything older than the lookback is selected.
    super(Optional.of(getMinLookbackTime(conf)), Optional.<Period>absent());
  }

  public SelectBeforeTimeBasedPolicy(Properties props) {
    super(ConfigUtils.propertiesToConfig(props));
  }

  /** Parses the mandatory lookback period from configuration, failing fast when it is missing. */
  private static Period getMinLookbackTime(Config conf) {
    Preconditions.checkArgument(conf.hasPath(TIME_BASED_SELECTION_LOOK_BACK_TIME_KEY),
        String.format("Required property %s is not specified", TIME_BASED_SELECTION_LOOK_BACK_TIME_KEY));
    String lookbackSpec = conf.getString(TIME_BASED_SELECTION_LOOK_BACK_TIME_KEY);
    return SelectBetweenTimeBasedPolicy.getLookBackPeriod(lookbackSpec);
  }
}
| 2,682 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/main/java/gobblin/data/management/conversion/hive/source/HiveWorkUnit.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package gobblin.data.management.conversion.hive.source;
import gobblin.source.workunit.WorkUnit;
/***
 * Shim layer for org.apache.gobblin.data.management.conversion.hive.source.HiveWorkUnit,
 * kept in the pre-relocation {@code gobblin.*} package so existing references keep compiling.
 */
public class HiveWorkUnit extends org.apache.gobblin.data.management.conversion.hive.source.HiveWorkUnit {

  /** Creates an empty work unit; delegates entirely to the relocated parent class. */
  public HiveWorkUnit() {
    super();
  }

  /** Wraps an existing {@link WorkUnit}; delegates entirely to the relocated parent class. */
  public HiveWorkUnit(WorkUnit workunit) {
    super(workunit);
  }
}
| 2,683 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/gobblin/data/management/copy/OwnerAndPermission.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package gobblin.data.management.copy;
import org.apache.hadoop.fs.permission.FsPermission;
/***
 * Shim layer for org.apache.gobblin.data.management.copy.OwnerAndPermission,
 * kept in the pre-relocation {@code gobblin.*} package so existing references keep compiling.
 */
public class OwnerAndPermission extends org.apache.gobblin.data.management.copy.OwnerAndPermission {

  /** Delegates entirely to the relocated parent class. */
  public OwnerAndPermission(String owner, String group, FsPermission fsPermission) {
    super(owner, group, fsPermission);
  }
}
| 2,684 |
0 | Create_ds/gobblin/gobblin-test-utils/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-test-utils/src/test/java/org/apache/gobblin/test/ErrorManagerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.test;
import java.util.Properties;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
/**
 * Unit tests for {@link ErrorManager}.
 */
public class ErrorManagerTest {

  @Test
  public void testErrorEvery() {
    Properties properties = new Properties();
    properties.setProperty(ErrorManager.ERROR_TYPE_CONFIGURATION_KEY, "nth");
    properties.setProperty(ErrorManager.FLAKY_ERROR_EVERY_CONFIGURATION_KEY, "5");
    Config config = ConfigFactory.parseProperties(properties);
    ErrorManager errorManager = new ErrorManager(config);
    // In "nth" mode with n=5: each cycle of five calls yields four non-errors, then one error.
    for (int cycle = 0; cycle < 5; ++cycle) {
      for (int i = 0; i < 4; ++i) {
        Assert.assertFalse(errorManager.nextError(null), "Failed on " + i);
      }
      Assert.assertTrue(errorManager.nextError(null), "Failed on the last one");
    }
  }

  @Test
  public void testErrorRegex() {
    Properties properties = new Properties();
    properties.setProperty(ErrorManager.ERROR_TYPE_CONFIGURATION_KEY, "regex");
    properties.setProperty(ErrorManager.FLAKY_ERROR_REGEX_PATTERN_KEY, ":index:0");
    Config config = ConfigFactory.parseProperties(properties);
    ErrorManager<String> errorManager = new ErrorManager<>(config);
    // Only records whose string form matches the configured pattern trigger an error.
    Assert.assertTrue(errorManager.nextError(":index:0:seq:1"));
    Assert.assertFalse(errorManager.nextError(":index:1:seq:1"));
  }
}
| 2,685 |
0 | Create_ds/gobblin/gobblin-test-utils/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-test-utils/src/test/java/org/apache/gobblin/test/RandomCredentialStoreTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.test;
import java.util.Map;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.collect.ImmutableMap;
import org.apache.gobblin.crypto.CredentialStore;
import org.apache.gobblin.test.crypto.TestEncryptionProvider;
import org.apache.gobblin.test.crypto.TestRandomCredentialStore;
/**
 * Unit tests for {@link TestRandomCredentialStore} as built via {@link TestEncryptionProvider}.
 */
public class RandomCredentialStoreTest {

  @Test
  public void testSuccess() {
    Map<String, Object> parameters = ImmutableMap.<String, Object>of(
        "keystore_type", TestRandomCredentialStore.TAG,
        "num_keys", "4");
    CredentialStore credentialStore = new TestEncryptionProvider().buildCredentialStore(parameters);
    Assert.assertNotNull(credentialStore);
    // Exactly as many keys as requested should have been generated.
    Assert.assertEquals(credentialStore.getAllEncodedKeys().size(), 4);
  }

  @Test
  public void testSeedsGiveRepeatableKeys() {
    Map<String, Object> parameters = ImmutableMap.<String, Object>of(
        "keystore_type", TestRandomCredentialStore.TAG,
        "num_keys", "1",
        "random_seed", "12345");
    // Known bytes produced by java.util.Random seeded with 12345.
    byte[] expectedKey = new byte[]{-42, 32, -97, 92, 49, -77, 97, -125, 34, -87, -40, -18, 120, 7, -56, -22};
    CredentialStore credentialStore = new TestEncryptionProvider().buildCredentialStore(parameters);
    Assert.assertNotNull(credentialStore);
    Assert.assertEquals(credentialStore.getAllEncodedKeys().size(), 1);
    Assert.assertEquals(credentialStore.getEncodedKey("0"), expectedKey);
  }
}
| 2,686 |
0 | Create_ds/gobblin/gobblin-test-utils/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-test-utils/src/main/java/org/apache/gobblin/test/TestUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.test;
import java.io.IOException;
import java.net.ServerSocket;
import java.util.ArrayList;
import java.util.Random;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;
import lombok.extern.slf4j.Slf4j;
@Slf4j
public class TestUtils {

  private static final Random rng = new Random();

  /** Lowercase ASCII alphabet backing {@link #generateRandomAlphaString(int)}. */
  private static final char[] alphas = new char[26];

  static {
    char ch = 'a';
    for (int i = 0; i < 26; i++) {
      alphas[i] = ch++;
    }
  }

  /**
   * Generates a random byte array whose length is uniformly drawn from [0, 200).
   */
  public static byte[] generateRandomBytes() {
    int length = rng.nextInt(200);
    return generateRandomBytes(length);
  }

  /**
   * Generates {@code numBytes} random bytes.
   */
  public static byte[] generateRandomBytes(int numBytes) {
    byte[] messageBytes = new byte[numBytes];
    rng.nextBytes(messageBytes);
    return messageBytes;
  }

  /** Returns a uniformly distributed random long. */
  public static Long generateRandomLong() {
    return rng.nextLong();
  }

  /**
   * Generates a random string of length {@code stringLength} drawn from the lowercase
   * ASCII alphabet.
   */
  public static String generateRandomAlphaString(int stringLength) {
    char[] newString = new char[stringLength];
    for (int i = 0; i < stringLength; ++i) {
      newString[i] = alphas[rng.nextInt(26)];
    }
    return new String(newString);
  }

  /**
   * TODO: Currently generates a static schema avro record.
   **/
  public static GenericRecord generateRandomAvroRecord() {
    ArrayList<Schema.Field> fields = new ArrayList<Schema.Field>();
    String fieldName = "field1";
    Schema fieldSchema = Schema.create(Schema.Type.STRING);
    String docString = "doc";
    fields.add(new Schema.Field(fieldName, fieldSchema, docString, null));
    Schema schema = Schema.createRecord("name", docString, "test", false);
    schema.setFields(fields);
    GenericData.Record record = new GenericData.Record(schema);
    record.put("field1", "foobar");
    return record;
  }

  /**
   * Returns a free port number on localhost.
   *
   * <p>Heavily inspired by org.eclipse.jdt.launching.SocketUtil (to avoid a dependency on JDT
   * just because of this), rewritten with try-with-resources so the socket is always closed
   * and the underlying {@link IOException} is preserved as the failure cause instead of being
   * silently swallowed.</p>
   *
   * @return a free port number on localhost
   * @throws IllegalStateException if unable to find a free port
   */
  public static synchronized int findFreePort() {
    try (ServerSocket socket = new ServerSocket(0)) {
      socket.setReuseAddress(true);
      return socket.getLocalPort();
    } catch (IOException e) {
      // Bug fix: the original discarded the IOException; keep it as the cause for debuggability.
      throw new IllegalStateException("Could not find a free TCP/IP port", e);
    }
  }
}
| 2,687 |
0 | Create_ds/gobblin/gobblin-test-utils/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-test-utils/src/main/java/org/apache/gobblin/test/NthTimingType.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.test;
/**
 * A {@link TimingType} where every n-th call reports a different latency than the default.
 */
public class NthTimingType implements TimingType {

  private final int n;
  private final long defaultTimeMillis;
  private final long nthTimeMillis;
  // Number of calls made so far; whenever it reaches a multiple of n, the special latency is used.
  private int callCount;

  public NthTimingType(int n, long defaultTimeMillis, long nthTimeMillis) {
    this.n = n;
    this.defaultTimeMillis = defaultTimeMillis;
    this.nthTimeMillis = nthTimeMillis;
    this.callCount = 0;
  }

  /** Returns {@code nthTimeMillis} on every n-th invocation and {@code defaultTimeMillis} otherwise. */
  @Override
  public long nextTimeMillis() {
    this.callCount++;
    return (this.callCount % this.n == 0) ? this.nthTimeMillis : this.defaultTimeMillis;
  }
}
| 2,688 |
0 | Create_ds/gobblin/gobblin-test-utils/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-test-utils/src/main/java/org/apache/gobblin/test/TimingManager.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.test;
/**
 * A class to manage timing based testing. Supports both sync and async latency decisions.
 */
public class TimingManager {

  // Fixed flag copied into every TimingResult produced by this manager.
  private final boolean isSync;
  // Strategy that decides the latency of each successive call.
  private final TimingType timingType;

  /**
   * @param isSync whether results should be flagged as synchronous
   * @param timingType strategy supplying the latency for each call
   */
  public TimingManager(boolean isSync, TimingType timingType) {
    this.isSync = isSync;
    this.timingType = timingType;
  }

  /**
   * Returns the timing decision for the next simulated call, pairing the fixed sync/async
   * flag with the next latency from the configured {@link TimingType}.
   */
  public TimingResult nextTime() {
    return new TimingResult(isSync, timingType.nextTimeMillis());
  }
}
| 2,689 |
0 | Create_ds/gobblin/gobblin-test-utils/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-test-utils/src/main/java/org/apache/gobblin/test/TimingType.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.test;
/**
 * An interface that provides the latency of the next call.
 */
public interface TimingType {

  /**
   * @return the latency, in milliseconds, to apply to the next call; implementations may
   *         return a different value on each invocation
   */
  public long nextTimeMillis();
}
| 2,690 |
0 | Create_ds/gobblin/gobblin-test-utils/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-test-utils/src/main/java/org/apache/gobblin/test/ErrorManager.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.test;
import java.util.Random;
import java.util.regex.Pattern;
import com.typesafe.config.Config;
import lombok.Builder;
import org.apache.gobblin.util.ConfigUtils;
/**
 * A class that can be configured to simulate errors: always, randomly, every n-th call,
 * or whenever a record matches a regular expression.
 */
public class ErrorManager<D> {

  /** Strategies for deciding which calls should fail. */
  public enum ErrorType {
    RANDOM,
    NTH,
    REGEX,
    ALL
  }

  public static final String ERROR_TYPE_CONFIGURATION_KEY = "flaky.errorType";
  static final String FLAKY_ERROR_EVERY_CONFIGURATION_KEY = "flaky.errorEvery";
  static final String FLAKY_ERROR_REGEX_PATTERN_KEY = "flaky.regexPattern";

  private static final int DEFAULT_N = 5;
  private static final ErrorType DEFAULT_ERROR_TYPE = ErrorType.RANDOM;

  private final ErrorType errorType;
  private final Random random = new Random();
  // Compiled pattern for REGEX mode; null when no pattern was configured.
  private final Pattern pattern;
  // For NTH mode: every interval-th call fails.
  private final int interval;
  // Calls observed so far (used by NTH mode only).
  private int callCount = 0;

  /** Reads the configured error type, falling back to {@link #DEFAULT_ERROR_TYPE} when unset. */
  private static ErrorType getType(Config config) {
    String configured = ConfigUtils.getString(config, ERROR_TYPE_CONFIGURATION_KEY, "");
    return configured.isEmpty() ? DEFAULT_ERROR_TYPE : ErrorType.valueOf(configured.toUpperCase());
  }

  private static int getNum(Config config) {
    return ConfigUtils.getInt(config, FLAKY_ERROR_EVERY_CONFIGURATION_KEY, DEFAULT_N);
  }

  private static String getPattern(Config config) {
    return ConfigUtils.getString(config, FLAKY_ERROR_REGEX_PATTERN_KEY, null);
  }

  public ErrorManager(Config config) {
    this(getType(config), getNum(config), getPattern(config));
  }

  @Builder
  public ErrorManager(ErrorType errorType, int errorEvery, String pattern) {
    this.errorType = errorType;
    this.interval = errorEvery;
    this.pattern = (pattern == null) ? null : Pattern.compile(pattern);
  }

  /**
   * Decides whether the next call should fail, according to the configured {@link ErrorType}.
   */
  public boolean nextError(D record) {
    switch (this.errorType) {
      case ALL:
        return true;
      case NTH:
        this.callCount++;
        return this.callCount % this.interval == 0;
      case RANDOM:
        return this.random.nextBoolean();
      case REGEX:
        return this.pattern.matcher(record.toString()).find();
      default:
        throw new IllegalStateException("Unexpected error type: " + this.errorType.toString());
    }
  }
}
| 2,691 |
0 | Create_ds/gobblin/gobblin-test-utils/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-test-utils/src/main/java/org/apache/gobblin/test/TimingResult.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.test;
/**
 * A class to hold the result of a latency decision in a testing context.
 */
public class TimingResult {

  // True if the simulated operation should complete synchronously.
  public final boolean isSync;
  // The latency value, in milliseconds, chosen for this operation.
  public final long timeValueMillis;

  /**
   * @param isSync whether the operation is synchronous
   * @param timeValueMillis the chosen latency in milliseconds
   */
  TimingResult(boolean isSync, long timeValueMillis) {
    this.isSync = isSync;
    this.timeValueMillis = timeValueMillis;
  }
}
| 2,692 |
0 | Create_ds/gobblin/gobblin-test-utils/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-test-utils/src/main/java/org/apache/gobblin/test/TestRecord.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.test;
import lombok.AllArgsConstructor;
import lombok.Getter;
import lombok.ToString;
/**
 * A Test record: lombok generates the getters, {@code toString()}, and the all-args constructor.
 */
@Getter
@ToString
@AllArgsConstructor
public class TestRecord {
  // Partition this record belongs to.
  private int partition;
  // Sequence number -- presumably per-partition and assigned by the producer; TODO confirm.
  private long sequence;
  // Opaque record payload.
  private String payload;
}
| 2,693 |
0 | Create_ds/gobblin/gobblin-test-utils/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-test-utils/src/main/java/org/apache/gobblin/test/ConstantTimingType.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.test;
/**
* Every call takes a certain amount of time.
*/
public class ConstantTimingType implements TimingType {
private long timeDurationMillis;
public ConstantTimingType(long timeDurationMillis) {
this.timeDurationMillis = timeDurationMillis;
}
public long nextTimeMillis() {
return this.timeDurationMillis;
}
}
| 2,694 |
0 | Create_ds/gobblin/gobblin-test-utils/src/main/java/org/apache/gobblin/test | Create_ds/gobblin/gobblin-test-utils/src/main/java/org/apache/gobblin/test/crypto/InsecureShiftCodec.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.test.crypto;
import java.io.FilterInputStream;
import java.io.FilterOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.Map;
import org.apache.gobblin.codec.StreamCodec;
/**
 * Simple encryption algorithm that just increments every byte sent
 * through it by 1 (mod 256); decoding shifts each byte back down.
 * Useful for unit tests or proof of concept, but is not actually secure.
 */
public class InsecureShiftCodec implements StreamCodec {

  // Tag identifying this codec in encryption configuration.
  public static final String TAG = "insecure_shift";

  public InsecureShiftCodec(Map<String, Object> parameters) {
    // InsecureShiftCodec doesn't care about parameters
  }

  /**
   * Wraps {@code origStream} so that every byte written is shifted up by one (mod 256).
   */
  @Override
  public OutputStream encodeOutputStream(OutputStream origStream) {
    return new FilterOutputStream(origStream) {
      @Override
      public void write(int b) throws IOException {
        // Shift the byte up by one; 255 wraps around to 0.
        out.write((b + 1) % 256);
      }

      @Override
      public void write(byte[] b) throws IOException {
        this.write(b, 0, b.length);
      }

      @Override
      public void write(byte[] b, int off, int len) throws IOException {
        // Route bulk writes through the single-byte write so every byte gets shifted.
        for (int i = off; i < off + len; i++) {
          this.write(b[i]);
        }
      }

      @Override
      public void close() throws IOException {
        out.close();
      }
    };
  }

  /**
   * Wraps {@code in} so that every byte read is shifted back down by one, undoing
   * the transformation applied by {@link #encodeOutputStream(OutputStream)}.
   */
  @Override
  public InputStream decodeInputStream(InputStream in) {
    return new FilterInputStream(in) {
      @Override
      public int read() throws IOException {
        int upstream = in.read();
        // 0 is the encoding of 255 (wraparound); a negative value (-1 == EOF) passes through.
        if (upstream == 0) {
          upstream = 255;
        } else if (upstream > 0) {
          upstream--;
        }
        return upstream;
      }

      @Override
      public int read(byte[] b) throws IOException {
        return read(b, 0, b.length);
      }

      @Override
      public int read(byte[] b, int off, int len) throws IOException {
        // Fill the buffer one decoded byte at a time, honoring the InputStream contract:
        // return -1 only if EOF is hit before any byte could be read.
        for (int i = 0; i < len; i++) {
          int result = read();
          if (result == -1) {
            return (i == 0) ? -1 : i;
          }
          b[off + i] = (byte) result;
        }
        return len;
      }
    };
  }

  @Override
  public String getTag() {
    return TAG;
  }
}
| 2,695 |
0 | Create_ds/gobblin/gobblin-test-utils/src/main/java/org/apache/gobblin/test | Create_ds/gobblin/gobblin-test-utils/src/main/java/org/apache/gobblin/test/crypto/TestRandomCredentialStore.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.test.crypto;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Random;
import org.apache.gobblin.crypto.CredentialStore;
/**
 * A {@link CredentialStore} for tests that generates pseudo-random 16-byte keys, keyed by
 * their index ("0", "1", ...). A fixed seed yields a repeatable key set.
 */
public class TestRandomCredentialStore implements CredentialStore {

  public static final String TAG = "testRandomCredentialStore";

  // final: the key map is populated once in the constructor and never reassigned.
  private final Map<String, byte[]> keys;

  /** Generates {@code numKeys} keys from a time-based (non-repeatable) seed. */
  public TestRandomCredentialStore(int numKeys) {
    this(numKeys, System.currentTimeMillis());
  }

  /**
   * @param numKeys number of 16-byte keys to generate
   * @param seed seed for {@link Random}; the same seed produces the same keys
   */
  public TestRandomCredentialStore(int numKeys, long seed) {
    this.keys = new HashMap<>();
    Random rand = new Random(seed);
    for (int i = 0; i < numKeys; i++) {
      byte[] keyBuf = new byte[16];
      rand.nextBytes(keyBuf);
      keys.put(String.valueOf(i), keyBuf);
    }
  }

  @Override
  public byte[] getEncodedKey(String id) {
    return keys.get(id);
  }

  /** Returns a read-only view; callers cannot mutate the store's keys. */
  @Override
  public Map<String, byte[]> getAllEncodedKeys() {
    return Collections.unmodifiableMap(keys);
  }
}
| 2,696 |
0 | Create_ds/gobblin/gobblin-test-utils/src/main/java/org/apache/gobblin/test | Create_ds/gobblin/gobblin-test-utils/src/main/java/org/apache/gobblin/test/crypto/TestEncryptionProvider.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.test.crypto;
import java.util.Map;
import org.apache.gobblin.codec.StreamCodec;
import org.apache.gobblin.crypto.CredentialStore;
import org.apache.gobblin.crypto.CredentialStoreProvider;
import org.apache.gobblin.crypto.EncryptionProvider;
public class TestEncryptionProvider implements CredentialStoreProvider, EncryptionProvider {
private static final String INSECURE_SHIFT_TAG = InsecureShiftCodec.TAG;
@Override
public CredentialStore buildCredentialStore(Map<String, Object> parameters) {
String csType = (String)parameters.get("keystore_type"); // Don't want to take compile-time dependency on gobblin-core for this constant
if (csType.equals(TestRandomCredentialStore.TAG)) {
int numKeys = Integer.parseInt((String)parameters.getOrDefault("num_keys", "1"));
String seedParam = (String)parameters.getOrDefault("random_seed", null);
long seed = System.currentTimeMillis();
if (seedParam != null) {
seed = Long.parseLong(seedParam);
}
return new TestRandomCredentialStore(numKeys, seed);
}
return null;
}
@Override
public StreamCodec buildStreamCryptoProvider(String algorithm, Map<String, Object> parameters) {
switch (algorithm) {
case INSECURE_SHIFT_TAG:
return new InsecureShiftCodec(parameters);
default:
return null;
}
}
}
| 2,697 |
0 | Create_ds/gobblin/gobblin-admin/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-admin/src/test/java/org/apache/gobblin/admin/AdminWebServerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.admin;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.http.HttpEntity;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.util.EntityUtils;
import org.testng.annotations.*;
import java.io.IOException;
import java.net.URI;
import java.util.Properties;
import static org.testng.AssertJUnit.*;
/**
 * Integration test for {@link AdminWebServer}: boots the server on a fixed local port and
 * verifies that the settings script and the index page are served with expected content.
 */
public class AdminWebServerTest {
  private AdminWebServer server;
  private final String portNumber = "54320";

  /** Starts the admin web server before the tests; fails the suite if startup throws. */
  @BeforeTest
  public void startServer() {
    Properties properties = new Properties();
    properties.put(ConfigurationKeys.ADMIN_SERVER_PORT_KEY, this.portNumber);
    this.server = new AdminWebServer(properties, URI.create("http://foobar:3333"));
    try {
      this.server.startUp();
    } catch (Exception e) {
      fail(String.format("Exception starting server: %s", e.toString()));
    }
  }

  /** Best-effort teardown; a failed shutdown should not fail the suite. */
  @AfterTest
  public void stopServer() {
    try {
      this.server.shutDown();
    } catch (Exception ignored) {
      // Intentionally swallowed: nothing useful to do if shutdown fails during teardown.
    }
  }

  @Test
  public void testGetSettingsJs() throws IOException {
    String bodyString = getUrlBody(String.format("http://localhost:%s/js/settings.js", this.portNumber));
    // settings.js should embed the execution-info server URI passed to the constructor.
    assertStringContains("http://foobar", bodyString);
    assertStringContains("3333", bodyString);
  }

  @Test
  public void testGetIndex() throws IOException {
    String bodyString = getUrlBody(String.format("http://localhost:%s/", this.portNumber));
    assertStringContains("JOB SUMMARY", bodyString);
  }

  /**
   * Issues a GET to {@code url}, asserts a 200 status, and returns the response body.
   * Uses try-with-resources so both the client and the response are closed
   * (the original code leaked the {@code CloseableHttpClient}).
   */
  private static String getUrlBody(String url) throws IOException {
    try (CloseableHttpClient client = HttpClients.createDefault();
        CloseableHttpResponse response = client.execute(new HttpGet(url))) {
      assertEquals(200, response.getStatusLine().getStatusCode());
      HttpEntity body = response.getEntity();
      return EntityUtils.toString(body);
    }
  }

  private static void assertStringContains(String expected, String container) {
    assertTrue(String.format("Expected %s to contain %s", container, expected), container.contains(expected));
  }
}
| 2,698 |
0 | Create_ds/gobblin/gobblin-admin/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-admin/src/main/java/org/apache/gobblin/admin/DefaultAdminWebServerFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.admin;
import java.net.URI;
import java.util.Properties;
import com.google.common.util.concurrent.Service;
import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.runtime.api.AdminWebServerFactory;
/**
 * Default {@link AdminWebServerFactory}: creates plain {@link AdminWebServer} instances.
 */
@Alias(value="default")
public class DefaultAdminWebServerFactory implements AdminWebServerFactory {

  /**
   * Builds the default admin web UI service.
   *
   * @param config server configuration properties
   * @param executionInfoServerURI URI of the execution-info REST server the UI talks to
   * @return a freshly constructed {@link AdminWebServer}
   */
  @Override
  public Service createInstance(Properties config, URI executionInfoServerURI) {
    AdminWebServer webServer = new AdminWebServer(config, executionInfoServerURI);
    return webServer;
  }
}
| 2,699 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.