index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/dataset | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/dataset/finder/DatasetFinder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.retention.dataset.finder;
import org.apache.gobblin.dataset.Dataset;
import org.apache.gobblin.dataset.DatasetsFinder;
/**
 * A marker sub-interface kept only for backwards compatibility with code that still references
 * the old {@code DatasetFinder} name; it adds nothing beyond {@link DatasetsFinder}.
 *
 * @deprecated use {@link DatasetsFinder} instead
 */
@Deprecated
public interface DatasetFinder<T extends Dataset> extends DatasetsFinder<T> {
}
| 2,400 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/dataset | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/dataset/finder/TimeBasedDatasetStoreDatasetFinder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.retention.dataset.finder;
import java.io.IOException;
import java.util.List;
import java.util.Properties;
import java.util.stream.Collectors;
import org.apache.gobblin.data.management.retention.dataset.CleanableDatasetStoreDataset;
import org.apache.gobblin.data.management.retention.dataset.TimeBasedDatasetStoreDataset;
import org.apache.gobblin.metastore.DatasetStoreDataset;
import org.apache.gobblin.metastore.DatasetStoreDatasetFinder;
import org.apache.hadoop.fs.FileSystem;
/**
 * A {@link DatasetStoreDatasetFinder} that wraps every dataset it finds in a
 * {@link TimeBasedDatasetStoreDataset} (a {@link CleanableDatasetStoreDataset}).
 */
public class TimeBasedDatasetStoreDatasetFinder extends DatasetStoreDatasetFinder {

  // Job properties forwarded to each TimeBasedDatasetStoreDataset so it can configure its
  // retention behavior. Assigned once in the constructor, hence final.
  private final Properties props;

  public TimeBasedDatasetStoreDatasetFinder(FileSystem fs, Properties props) throws IOException {
    super(fs, props);
    this.props = props;
  }

  /**
   * Finds datasets via the parent finder and wraps each one in a {@link TimeBasedDatasetStoreDataset}.
   *
   * @return the datasets located by {@link DatasetStoreDatasetFinder#findDatasets()}, each wrapped
   *         as a {@link TimeBasedDatasetStoreDataset}
   * @throws IOException if the underlying finder fails
   */
  @Override
  public List<DatasetStoreDataset> findDatasets() throws IOException {
    return super.findDatasets().stream()
        .map(dataset -> new TimeBasedDatasetStoreDataset(dataset.getKey(), dataset.getDatasetStateStoreMetadataEntries(), props))
        .collect(Collectors.toList());
  }
}
| 2,401 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/dataset | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/dataset/finder/CleanableHiveDatasetFinder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.retention.dataset.finder;
import java.io.IOException;
import java.util.Properties;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hive.metastore.api.Table;
import com.typesafe.config.Config;
import org.apache.gobblin.config.client.ConfigClient;
import org.apache.gobblin.data.management.copy.hive.HiveDatasetFinder;
import org.apache.gobblin.data.management.retention.dataset.CleanableHiveDataset;
import org.apache.gobblin.data.management.retention.dataset.ConfigurableCleanableDataset;
/**
 * A {@link HiveDatasetFinder} that produces {@link CleanableHiveDataset}s. Before delegating to
 * the parent constructor it defaults the dataset config prefix to the retention configuration
 * key when the caller has not supplied one.
 */
public class CleanableHiveDatasetFinder extends HiveDatasetFinder {

  public CleanableHiveDatasetFinder(FileSystem fs, Properties properties) throws IOException {
    super(fs, setConfigPrefix(properties));
  }

  public CleanableHiveDatasetFinder(FileSystem fs, Properties properties, ConfigClient configClient) throws IOException {
    super(fs, setConfigPrefix(properties), configClient);
  }

  /** Wraps the metastore {@code table} in a ql {@code Table} and builds a cleanable dataset from it. */
  protected CleanableHiveDataset createHiveDataset(Table table, Config datasetConfig) throws IOException {
    org.apache.hadoop.hive.ql.metadata.Table qlTable = new org.apache.hadoop.hive.ql.metadata.Table(table);
    return new CleanableHiveDataset(super.fs, super.clientPool, qlTable, super.properties, datasetConfig);
  }

  /**
   * Defaults {@code HIVE_DATASET_CONFIG_PREFIX_KEY} to the retention configuration key when absent.
   * NOTE: mutates and returns the caller's {@link Properties} instance.
   */
  private static Properties setConfigPrefix(Properties props) {
    if (props.containsKey(HIVE_DATASET_CONFIG_PREFIX_KEY)) {
      return props;
    }
    props.setProperty(HIVE_DATASET_CONFIG_PREFIX_KEY, ConfigurableCleanableDataset.RETENTION_CONFIGURATION_KEY);
    return props;
  }
}
| 2,402 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/source/DatasetCleanerSource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.retention.source;
import java.io.IOException;
import java.util.List;
import java.util.Properties;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.typesafe.config.Config;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.runtime.retention.DatasetCleanerTask;
import org.apache.gobblin.runtime.retention.DatasetCleanerTaskFactory;
import org.apache.gobblin.runtime.task.TaskUtils;
import org.apache.gobblin.source.Source;
import org.apache.gobblin.source.extractor.Extractor;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.util.ConfigUtils;
/**
 * A {@link Source} that emits one work unit per configured dataset-cleaning job; each work unit
 * is executed by a {@link DatasetCleanerTask} via {@link DatasetCleanerTaskFactory}.
 */
@Slf4j
public class DatasetCleanerSource implements Source<Object, Object> {

  public static final String DATASET_CLEANER_SOURCE_PREFIX = "datasetCleanerSource";

  /**
   * Holds a list of configuration names, each defining one cleaning job with its own scoped config.
   *
   * For example, the list "config1, config2" configures two jobs, scoped like:
   *   datasetCleanerSource.config1.state.store.db.table=state_table1
   *   datasetCleanerSource.config2.state.store.db.table=state_table2
   *
   * Configuration fallback order: scoped config, then config under datasetCleanerSource, then
   * general config. Make sure a scoped config name does not collide with valid configuration
   * prefixes.
   */
  public static final String DATASET_CLEANER_CONFIGURATIONS = DATASET_CLEANER_SOURCE_PREFIX + ".configurations";

  /**
   * Creates one work unit per configured job, or a single work unit when no configurations are set.
   *
   * @param state see {@link org.apache.gobblin.configuration.SourceState}
   * @return list of work units
   */
  @Override
  public List<WorkUnit> getWorkunits(SourceState state) {
    Config config = ConfigUtils.propertiesToConfig(state.getProperties());
    Config sourceConfig = ConfigUtils.getConfigOrEmpty(config, DATASET_CLEANER_SOURCE_PREFIX);

    List<String> configurationNames = ConfigUtils.getStringList(config, DATASET_CLEANER_CONFIGURATIONS);
    if (configurationNames.isEmpty()) {
      // Fall back to a single dummy configuration so at least one work unit is produced.
      configurationNames = ImmutableList.of("DummyConfig");
    }

    List<WorkUnit> workUnits = Lists.newArrayList();
    for (String configurationName : configurationNames) {
      workUnits.add(createWorkUnit(configurationName, sourceConfig, config));
    }
    return workUnits;
  }

  /**
   * Builds a single work unit whose config resolves in this order: the named scoped config, then
   * the source-level config, then the general job config.
   */
  private WorkUnit createWorkUnit(String configurationName, Config sourceConfig, Config config) {
    Config wuConfig = ConfigUtils.getConfigOrEmpty(sourceConfig, configurationName)
        .withFallback(sourceConfig)
        .withFallback(config);
    WorkUnit workUnit = WorkUnit.createEmpty();
    workUnit.setProps(ConfigUtils.configToProperties(wuConfig), new Properties());
    TaskUtils.setTaskFactoryClass(workUnit, DatasetCleanerTaskFactory.class);
    return workUnit;
  }

  /** This source produces no records itself; the task factory drives execution, so no extractor exists. */
  @Override
  public Extractor<Object, Object> getExtractor(WorkUnitState state) throws IOException {
    return null;
  }

  @Override
  public void shutdown(SourceState state) {
  }
}
| 2,403 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/action/MultiAccessControlAction.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.retention.action;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.fs.FileSystem;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigRenderOptions;
import org.apache.gobblin.data.management.policy.VersionSelectionPolicy;
import org.apache.gobblin.data.management.retention.dataset.ConfigurableCleanableDataset;
import org.apache.gobblin.data.management.version.DatasetVersion;
import org.apache.gobblin.util.ConfigUtils;
/**
 * A wrapper around {@link AccessControlAction} that delegates the {@link #execute(List)} call to
 * multiple embedded {@link AccessControlAction}s.
 * <p>
 * The embedded {@link AccessControlAction}s are listed under the <code>policies</code> key of the
 * <code>actionConfig</code>. When the {@link VersionSelectionPolicy}s of different embedded
 * {@link AccessControlAction}s overlap, the one listed last under <code>policies</code> wins.
 * </p>
 * Use {@link MultiAccessControlActionFactory} to create new {@link MultiAccessControlAction}s.
 */
public class MultiAccessControlAction extends RetentionAction {

  /** Config key listing the policy names; each name must have its own config subtree. */
  private static final String POLICIES_KEY = "policies";

  private final List<AccessControlAction> embeddedAccessControlActions;

  /**
   * Builds one embedded {@link AccessControlAction} per policy named under <code>policies</code>.
   * The expected shape of <code>actionConfig</code> is:
   * <pre>
   * {
   *   ## list all the policies, each policy should have a path in the config
   *   policies = [restricted, .....]
   *
   *   restricted {
   *     selection {
   *       policy.class=org.apache.gobblin.data.management.policy.SelectBeforeTimeBasedPolicy
   *       timeBased.lookbackTime = 7d
   *     }
   *     mode : 750
   *     owner : onr
   *     group : grp
   *   }
   * }
   * </pre>
   * @param actionConfig to use while creating a new {@link MultiAccessControlAction}
   * @param fs the filesystem the actions operate on
   */
  private MultiAccessControlAction(Config actionConfig, FileSystem fs, Config jobConfig) {
    super(actionConfig, fs, jobConfig);
    List<AccessControlAction> actions = Lists.newArrayList();
    for (String policy : ConfigUtils.getStringList(actionConfig, POLICIES_KEY)) {
      // Every policy named in the list must have a matching config subtree; fail fast otherwise.
      Preconditions.checkArgument(actionConfig.hasPath(policy), String.format(
          "Policy %s is specified at key %s but actionConfig does not have config for this policy."
              + "Complete actionConfig %s",
          policy, POLICIES_KEY, actionConfig.root().render(ConfigRenderOptions.concise())));
      actions.add(new AccessControlAction(actionConfig.getConfig(policy), fs, jobConfig));
    }
    this.embeddedAccessControlActions = actions;
  }

  /**
   * Calls {@link AccessControlAction#execute(List)} on each of the embedded
   * {@link AccessControlAction}s, in declaration order.
   *
   * {@inheritDoc}
   * @see org.apache.gobblin.data.management.retention.action.RetentionAction#execute(java.util.List)
   */
  @Override
  public void execute(List<DatasetVersion> allVersions) throws IOException {
    for (AccessControlAction action : this.embeddedAccessControlActions) {
      action.execute(allVersions);
    }
  }

  /**
   * A factory class to create {@link MultiAccessControlAction}s. Supports both the current
   * <code>accessControl</code> key and the legacy retention-prefixed key, preferring the legacy one.
   */
  public static class MultiAccessControlActionFactory implements RetentionActionFactory {

    private static String ACCESS_CONTROL_KEY = "accessControl";
    private static String LEGACY_ACCESS_CONTROL_KEY =
        ConfigurableCleanableDataset.RETENTION_CONFIGURATION_KEY + "." + ACCESS_CONTROL_KEY;

    @Override
    public MultiAccessControlAction createRetentionAction(Config config, FileSystem fs, Config jobConfig) {
      Preconditions.checkArgument(this.canCreateWithConfig(config),
          "Can not create MultiAccessControlAction with config " + config.root().render(ConfigRenderOptions.concise()));
      // Legacy key wins when both are present, matching historical behavior.
      String actionKey;
      if (config.hasPath(LEGACY_ACCESS_CONTROL_KEY)) {
        actionKey = LEGACY_ACCESS_CONTROL_KEY;
      } else if (config.hasPath(ACCESS_CONTROL_KEY)) {
        actionKey = ACCESS_CONTROL_KEY;
      } else {
        throw new IllegalStateException(
            "RetentionActionFactory.canCreateWithConfig returned true but could not create MultiAccessControlAction");
      }
      return new MultiAccessControlAction(config.getConfig(actionKey), fs, jobConfig);
    }

    @Override
    public boolean canCreateWithConfig(Config config) {
      return config.hasPath(LEGACY_ACCESS_CONTROL_KEY) || config.hasPath(ACCESS_CONTROL_KEY);
    }
  }
}
| 2,404 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/action/RetentionAction.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.retention.action;
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.util.List;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.fs.FileSystem;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.typesafe.config.Config;
import org.apache.gobblin.data.management.dataset.Dataset;
import org.apache.gobblin.data.management.policy.VersionSelectionPolicy;
import org.apache.gobblin.data.management.retention.dataset.ConfigurableCleanableDataset;
import org.apache.gobblin.data.management.retention.dataset.FsCleanableHelper;
import org.apache.gobblin.data.management.version.DatasetVersion;
import org.apache.gobblin.data.management.version.finder.VersionFinder;
import org.apache.gobblin.dataset.DatasetsFinder;
import org.apache.gobblin.util.ClassAliasResolver;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
/**
 * An abstraction to perform a retention action for a subset of {@link DatasetVersion}s.
 * A few kinds of actions are deletion, access control, encryption, archival etc.
 */
public abstract class RetentionAction {
  // Filesystem the action operates on.
  protected final FileSystem fs;
  // Resolves selection-policy class aliases/names to concrete VersionSelectionPolicy classes.
  @SuppressWarnings("rawtypes")
  protected final ClassAliasResolver<VersionSelectionPolicy> versionSelectionAliasResolver;
  // When true, actions only log what they would do instead of modifying the filesystem.
  protected final boolean isSimulateMode;

  /**
   * @param actionConfig action-specific configuration (subclasses read their own keys from it)
   * @param fs filesystem the action operates on
   * @param jobConfig job-level configuration; only the simulate flag is read here
   */
  public RetentionAction(Config actionConfig, FileSystem fs, Config jobConfig) {
    this.versionSelectionAliasResolver = new ClassAliasResolver<>(VersionSelectionPolicy.class);
    this.fs = fs;
    this.isSimulateMode = ConfigUtils.getBoolean(jobConfig, FsCleanableHelper.SIMULATE_KEY,
        Boolean.valueOf(FsCleanableHelper.SIMULATE_DEFAULT));
  }

  /**
   * Execute the action on all {@link DatasetVersion}s or a subset of {@link DatasetVersion}s. Each {@link Dataset}
   * uses the {@link VersionFinder} to find all the {@link DatasetVersion}s and calls this method to perform the necessary
   * action on those {@link DatasetVersion}s
   * <p>
   * <b>Note</b> Any kind of {@link VersionSelectionPolicy} has <b>NOT</b> been applied to the list of {@link DatasetVersion}s
   * being passed. It is the responsibility of the {@link RetentionAction} to filter the {@link DatasetVersion}s by
   * applying {@link VersionSelectionPolicy}s and then perform the action.
   * </p>
   * @param allVersions list of all {@link DatasetVersion}s found by the {@link DatasetsFinder}.
   */
  public abstract void execute(List<DatasetVersion> allVersions) throws IOException;

  /**
   * A factory to create new {@link RetentionAction}s
   */
  public interface RetentionActionFactory {
    /**
     * A factory method to create a new {@link RetentionAction} using a <code>config</code>. The {@link Dataset} always
     * calls {@link #canCreateWithConfig(Config)} before calling this method.
     *
     * @param config to use to create the {@link RetentionAction}
     * @param fs filesystem the action will operate on
     * @param jobConfig the job level config
     * @return A new {@link RetentionAction}
     */
    RetentionAction createRetentionAction(Config config, FileSystem fs, Config jobConfig);

    /**
     * Method to check if a {@link RetentionAction} can be created/instantiated with the <code>config</code>.
     * If the specific type of {@link RetentionAction} has been specified in the configuration the method returns
     * <code>true</code>.
     * If the method returns <code>true</code>, {@link #createRetentionAction(Config, FileSystem, Config)} can be
     * called to create this {@link RetentionAction}.
     *
     * @param config to use to create the {@link RetentionAction}
     * @return true if the specific type of {@link RetentionAction} has been specified in the configuration, false otherwise
     */
    boolean canCreateWithConfig(Config config);
  }

  /*
   * Since {@link VersionSelectionPolicy} does not have a factory to create new objects we need to use the legacy
   * pattern of creating new objects using GobblinConstructorUtils.
   */
  /**
   * Instantiates the {@link VersionSelectionPolicy} whose class name is found in
   * <code>selectionConfig</code> under the selection-policy key, trying constructors in order:
   * (Config), (Config, Properties), (Properties).
   *
   * @throws IllegalArgumentException wrapping any reflection failure (missing class, no matching
   *         constructor, inaccessible or failing constructor)
   */
  @SuppressWarnings("unchecked")
  protected VersionSelectionPolicy<DatasetVersion> createSelectionPolicy(Config selectionConfig, Config jobConfig) {
    try {
      // The configured key is the class-key constant with the global configuration prefix stripped,
      // because selectionConfig is already scoped under that prefix.
      String selectionPolicyKey =
          StringUtils.substringAfter(ConfigurableCleanableDataset.SELECTION_POLICY_CLASS_KEY,
              ConfigurableCleanableDataset.CONFIGURATION_KEY_PREFIX);
      Preconditions.checkArgument(selectionConfig.hasPath(selectionPolicyKey));
      String className = selectionConfig.getString(selectionPolicyKey);
      return (VersionSelectionPolicy<DatasetVersion>) GobblinConstructorUtils.invokeFirstConstructor(
          this.versionSelectionAliasResolver.resolveClass(className), ImmutableList.<Object> of(selectionConfig),
          ImmutableList.<Object> of(selectionConfig, ConfigUtils.configToProperties(jobConfig)),
          ImmutableList.<Object> of(ConfigUtils.configToProperties(jobConfig)));
    } catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException | InstantiationException
        | ClassNotFoundException e) {
      throw new IllegalArgumentException(e);
    }
  }
}
| 2,405 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/action/AccessControlAction.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.retention.action;
import java.io.IOException;
import java.util.List;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.typesafe.config.Config;
import org.apache.gobblin.data.management.policy.VersionSelectionPolicy;
import org.apache.gobblin.data.management.version.DatasetVersion;
import org.apache.gobblin.data.management.version.FileStatusAware;
import org.apache.gobblin.data.management.version.FileSystemDatasetVersion;
import org.apache.gobblin.util.ConfigUtils;
/**
 * A {@link RetentionAction} that is used to change the permissions/owner/group of a {@link FileSystemDatasetVersion}.
 */
@Slf4j
public class AccessControlAction extends RetentionAction {
  /**
   * Optional - The permission mode to set on selected versions either in octal or symbolic format. E.g 750
   */
  private static final String MODE_KEY = "mode";
  /**
   * Optional - The owner to set on selected versions
   */
  private static final String OWNER_KEY = "owner";
  /**
   * Optional - The group to set on selected versions
   */
  private static final String GROUP_KEY = "group";

  // Each of these is absent when the corresponding key is missing from the action config;
  // absent values are simply not applied.
  private final Optional<FsPermission> permission;
  private final Optional<String> owner;
  private final Optional<String> group;

  // Selects which of the passed DatasetVersions this action applies to.
  @VisibleForTesting
  @Getter
  private final VersionSelectionPolicy<DatasetVersion> selectionPolicy;

  @VisibleForTesting
  AccessControlAction(Config actionConfig, FileSystem fs, Config jobConfig) {
    super(actionConfig, fs, jobConfig);
    this.permission = actionConfig.hasPath(MODE_KEY) ? Optional.of(new FsPermission(actionConfig.getString(MODE_KEY))) : Optional
        .<FsPermission> absent();
    this.owner = Optional.fromNullable(ConfigUtils.getString(actionConfig, OWNER_KEY, null));
    this.group = Optional.fromNullable(ConfigUtils.getString(actionConfig, GROUP_KEY, null));
    this.selectionPolicy = createSelectionPolicy(actionConfig, jobConfig);
  }

  /**
   * Applies {@link #selectionPolicy} on <code>allVersions</code> and modifies permission/owner to the selected {@link DatasetVersion}s
   * where necessary.
   * <p>
   * This action only available for {@link FileSystemDatasetVersion}. It simply skips the operation if a different type
   * of {@link DatasetVersion} is passed.
   * </p>
   * {@inheritDoc}
   * @see org.apache.gobblin.data.management.retention.action.RetentionAction#execute(java.util.List)
   */
  @Override
  public void execute(List<DatasetVersion> allVersions) throws IOException {
    // Select version on which access control actions need to performed
    for (DatasetVersion datasetVersion : this.selectionPolicy.listSelectedVersions(allVersions)) {
      executeOnVersion(datasetVersion);
    }
  }

  // Applies the configured permission/owner/group changes to a single version; no-op for
  // versions that are not FileSystemDatasetVersions.
  private void executeOnVersion(DatasetVersion datasetVersion) throws IOException {
    // Perform action if it is a FileSystemDatasetVersion
    if (datasetVersion instanceof FileSystemDatasetVersion) {
      FileSystemDatasetVersion fsDatasetVersion = (FileSystemDatasetVersion) datasetVersion;
      // If the version is filestatus aware, use the filestatus to ignore permissions update when the path already has
      // the desired permissions
      if (datasetVersion instanceof FileStatusAware) {
        for (FileStatus fileStatus : ((FileStatusAware)datasetVersion).getFileStatuses()) {
          if (needsPermissionsUpdate(fileStatus) || needsOwnerUpdate(fileStatus) || needsGroupUpdate(fileStatus)) {
            updatePermissionsAndOwner(fileStatus.getPath());
          }
        }
      } else {
        // No FileStatus available, so update unconditionally on every path of the version.
        for (Path path : fsDatasetVersion.getPaths()) {
          updatePermissionsAndOwner(path);
        }
      }
    }
  }

  // True when a mode is configured and differs from the path's current permission.
  private boolean needsPermissionsUpdate(FileStatus fileStatus) {
    return this.permission.isPresent() && !this.permission.get().equals(fileStatus.getPermission());
  }

  // True when an owner is configured and differs from the path's current owner.
  private boolean needsOwnerUpdate(FileStatus fileStatus) {
    return this.owner.isPresent() && !StringUtils.equals(owner.get(), fileStatus.getOwner());
  }

  // True when a group is configured and differs from the path's current group.
  private boolean needsGroupUpdate(FileStatus fileStatus) {
    return this.group.isPresent() && !StringUtils.equals(group.get(), fileStatus.getGroup());
  }

  /**
   * Applies the configured permission, owner, and group (whichever are present) to
   * <code>path</code>, skipping paths that no longer exist. In simulate mode, the changes are
   * only logged, not applied.
   *
   * @throws IOException if the owner/group update fails
   * @throws RuntimeException if the permission update failed (after the owner/group update was
   *         still attempted)
   */
  private void updatePermissionsAndOwner(Path path) throws IOException {
    boolean atLeastOneOperationFailed = false;
    if (this.fs.exists(path)) {
      try {
        // Update permissions if set in config
        if (this.permission.isPresent()) {
          if (!this.isSimulateMode) {
            this.fs.setPermission(path, this.permission.get());
            log.debug("Set permissions for {} to {}", path, this.permission.get());
          } else {
            log.info("Simulating set permissions for {} to {}", path, this.permission.get());
          }
        }
      } catch (IOException e) {
        // Record the failure but keep going so the owner/group update below is still attempted.
        log.error(String.format("Setting permissions failed on %s", path), e);
        atLeastOneOperationFailed = true;
      }
      // Update owner and group if set in config
      // NOTE(review): unlike the permission update above, a failure here is NOT caught and folded
      // into atLeastOneOperationFailed — the IOException from setOwner propagates immediately.
      // Confirm whether this asymmetry is intentional.
      if (this.owner.isPresent() || this.group.isPresent()) {
        if (!this.isSimulateMode) {
          this.fs.setOwner(path, this.owner.orNull(), this.group.orNull());
          log.debug("Set owner and group for {} to {}:{}", path, this.owner.orNull(),
              this.group.orNull());
        } else {
          log.info("Simulating set owner and group for {} to {}:{}", path, this.owner.orNull(),
              this.group.orNull());
        }
      }
      if (atLeastOneOperationFailed) {
        throw new RuntimeException(String.format(
            "At least one failure happened while processing %s. Look for previous logs for failures", path));
      }
    }
  }
}
| 2,406 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/profile/MultiCleanableDatasetFinder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.retention.profile;
import com.google.common.base.Optional;
import org.apache.gobblin.data.management.retention.DatasetCleaner;
import java.net.URI;
import java.util.Properties;
import org.apache.gobblin.metrics.event.EventSubmitter;
import org.apache.hadoop.fs.FileSystem;
import com.typesafe.config.Config;
/**
 * A cleanable dataset finder that instantiates multiple DatasetFinders.
 * <p>
 * If {@link #DATASET_FINDER_CLASS_KEY} is set, a single datasetFinder is created.
 * Otherwise {@link #TAGS_TO_IMPORT_KEY} is used to find all the importedBy {@link URI}s from the
 * gobblin config store. The {@link Config} for each {@link URI} should have a
 * {@link #DATASET_FINDER_CLASS_KEY} set.
 * </p>
 */
public class MultiCleanableDatasetFinder extends MultiDatasetFinder {

  /**
   * Comma separated list of tags in the config store. Any dataset that imports this tag will be processed.
   */
  public static final String TAGS_TO_IMPORT_KEY = DatasetCleaner.CONFIGURATION_KEY_PREFIX + "tag";

  /**
   * Exact dataset finder class to use.
   */
  public static final String DATASET_FINDER_CLASS_KEY = DatasetCleaner.CONFIGURATION_KEY_PREFIX + "dataset.finder.class";

  /** Deprecated spelling of {@link #DATASET_FINDER_CLASS_KEY}, honored for legacy job configs. */
  public static final String DEPRECATED_DATASET_PROFILE_CLASS_KEY = DatasetCleaner.CONFIGURATION_KEY_PREFIX + "dataset.profile.class";

  public MultiCleanableDatasetFinder(FileSystem fs, Properties jobProps) {
    this(fs, jobProps, new EventSubmitter.Builder(Optional.absent(), "noMessage").build());
  }

  public MultiCleanableDatasetFinder(FileSystem fs, Properties jobProps, EventSubmitter eventSubmitter) {
    super(fs, jobProps, eventSubmitter);
  }

  @Override
  protected String datasetFinderClassKey() {
    // Prefer the deprecated key when present so legacy job configs keep working.
    return super.jobProps.containsKey(DEPRECATED_DATASET_PROFILE_CLASS_KEY)
        ? DEPRECATED_DATASET_PROFILE_CLASS_KEY
        : DATASET_FINDER_CLASS_KEY;
  }

  @Override
  protected String datasetFinderImportedByKey() {
    return TAGS_TO_IMPORT_KEY;
  }
}
| 2,407 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/profile/ModificationTimeDatasetProfile.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.retention.profile;
import java.io.IOException;
import java.util.Properties;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.gobblin.dataset.Dataset;
import org.apache.gobblin.data.management.retention.dataset.ModificationTimeDataset;
/**
 * Glob-based {@link org.apache.gobblin.dataset.DatasetsFinder} producing
 * {@link ModificationTimeDataset}s.
 *
 * <p>
 * Datasets matching 'gobblin.retention.dataset.pattern' are cleaned according to their
 * modification timestamps.
 * </p>
 */
public class ModificationTimeDatasetProfile extends ConfigurableGlobDatasetFinder<Dataset> {

  public ModificationTimeDatasetProfile(FileSystem fs, Properties props) {
    super(fs, props);
  }

  /** Wraps the directory at {@code path} in a {@link ModificationTimeDataset}. */
  @Override
  public Dataset datasetAtPath(Path path) throws IOException {
    return new ModificationTimeDataset(this.fs, this.props, path);
  }
}
| 2,408 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/profile/SnapshotDatasetProfile.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.retention.profile;
import java.io.IOException;
import java.util.Properties;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.gobblin.data.management.retention.dataset.SnapshotDataset;
import org.apache.gobblin.dataset.Dataset;
/**
 * Glob-based {@link org.apache.gobblin.dataset.DatasetsFinder} for snapshot datasets.
 *
 * <p>
 * A snapshot dataset is one where every version is a snapshot/full dump of the underlying
 * source (e.g. a database).
 * </p>
 */
public class SnapshotDatasetProfile extends ConfigurableGlobDatasetFinder<Dataset> {

  public SnapshotDatasetProfile(FileSystem fs, Properties props) {
    super(fs, props);
  }

  /** Wraps the directory at {@code path} in a {@link SnapshotDataset}. */
  @Override
  public Dataset datasetAtPath(Path path) throws IOException {
    return new SnapshotDataset(this.fs, this.props, path);
  }
}
| 2,409 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/profile/MultiDatasetFinder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.retention.profile;
import com.google.common.base.Optional;
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Collection;
import java.util.List;
import java.util.Properties;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.metrics.event.EventSubmitter;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import com.google.common.base.Splitter;
import com.google.common.base.Throwables;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.typesafe.config.Config;
import org.apache.gobblin.config.client.ConfigClient;
import org.apache.gobblin.config.client.ConfigClientCache;
import org.apache.gobblin.config.client.api.ConfigStoreFactoryDoesNotExistsException;
import org.apache.gobblin.config.client.api.VersionStabilityPolicy;
import org.apache.gobblin.config.store.api.ConfigStoreCreationException;
import org.apache.gobblin.config.store.api.VersionDoesNotExistException;
import org.apache.gobblin.dataset.Dataset;
import org.apache.gobblin.dataset.DatasetsFinder;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
/**
 * A DatasetFinder that instantiates multiple DatasetFinders. {@link #findDatasets()} returns the
 * union of the datasets found by each delegate finder.
 * <p>
 * Subclasses supply the property keys. If {@link #datasetFinderClassKey()} is set in jobProps, a
 * single datasetFinder is created. Otherwise {@link #datasetFinderImportedByKey()} is used to find
 * all the importedBy {@link URI}s from gobblin config management; the {@link Config} for each
 * {@link URI} must have a {@link #datasetFinderClassKey()} set.
 * </p>
 */
@Slf4j
public abstract class MultiDatasetFinder implements DatasetsFinder<Dataset> {

  // Splits the comma-separated tag list configured under datasetFinderImportedByKey().
  private static final Splitter TAGS_SPLITTER = Splitter.on(",").omitEmptyStrings().trimResults();

  /** @return the property key whose value names the single dataset-finder class to instantiate. */
  protected abstract String datasetFinderClassKey();

  /** @return the property key whose value lists config-store tags used for importedBy discovery. */
  protected abstract String datasetFinderImportedByKey();

  // Delegate finders; findDatasets() unions their results.
  List<DatasetsFinder<Dataset>> datasetFinders;

  protected final Properties jobProps;

  @SuppressWarnings({ "rawtypes", "unchecked" })
  public MultiDatasetFinder(FileSystem fs, Properties jobProps) {
    // Delegates with a placeholder event submitter (absent namespace, "noMessage").
    this(fs,jobProps,new EventSubmitter.Builder(Optional.absent(),"noMessage").build());
  }

  @SuppressWarnings({ "rawtypes", "unchecked" })
  public MultiDatasetFinder(FileSystem fs, Properties jobProps, EventSubmitter eventSubmitter) {
    this.jobProps = jobProps;
    try {
      this.datasetFinders = Lists.newArrayList();
      if (jobProps.containsKey(datasetFinderClassKey())) {
        // Case 1: an explicit finder class is configured -- instantiate exactly one.
        try {
          log.info(String.format("Instantiating datasetfinder %s ", jobProps.getProperty(datasetFinderClassKey())));
          this.datasetFinders.add((DatasetsFinder) GobblinConstructorUtils.invokeLongestConstructor(
              Class.forName(jobProps.getProperty(datasetFinderClassKey())), fs, jobProps, eventSubmitter));
        } catch (ReflectiveOperationException e) {
          log.error(
              String.format("Retention ignored could not instantiate datasetfinder %s.",
                  jobProps.getProperty(datasetFinderClassKey())), e);
          // Propagate so construction fails loudly instead of silently skipping retention.
          Throwables.propagate(e);
        }
      } else if (jobProps.containsKey(datasetFinderImportedByKey())) {
        // Case 2: discover finders via config management -- one per URI importing a listed tag.
        log.info("Instantiating dataset finders using tag " + jobProps.getProperty(datasetFinderImportedByKey()));
        ConfigClient client = ConfigClientCache.getClient(VersionStabilityPolicy.STRONG_LOCAL_STABILITY);
        Collection<URI> importedBys = Lists.newArrayList();
        for (String tag : TAGS_SPLITTER.split(jobProps.getProperty(datasetFinderImportedByKey()))) {
          log.info("Looking for datasets that import tag " + tag);
          importedBys.addAll(client.getImportedBy(new URI(tag), false));
        }
        for (URI importedBy : importedBys) {
          // Each importer's own config names the finder class to build for it.
          Config datasetClassConfig = client.getConfig(importedBy);
          try {
            // Constructor preference order: (fs, jobProps, config), then
            // (fs, jobProps, eventSubmitter), then (fs, jobProps).
            this.datasetFinders.add((DatasetsFinder) GobblinConstructorUtils.invokeFirstConstructor(
                Class.forName(datasetClassConfig.getString(datasetFinderClassKey())), ImmutableList.of(fs, jobProps,
                    datasetClassConfig), ImmutableList.of(fs, jobProps, eventSubmitter),
                ImmutableList.of(fs, jobProps)));
            log.info(String.format("Instantiated datasetfinder %s for %s.",
                datasetClassConfig.getString(datasetFinderClassKey()), importedBy));
          } catch (InstantiationException | IllegalAccessException | IllegalArgumentException
              | InvocationTargetException | NoSuchMethodException | SecurityException | ClassNotFoundException e) {
            log.error(String.format("Retention ignored for %s. Could not instantiate datasetfinder %s.", importedBy,
                datasetClassConfig.getString(datasetFinderClassKey())), e);
            Throwables.propagate(e);
          }
        }
      } else {
        // Case 3: neither key configured -- warn and leave the finder list empty.
        log.warn(String.format(
            "NO DATASET_FINDERS FOUND. Either specify dataset finder class at %s or specify the imported tags at %s",
            datasetFinderClassKey(), datasetFinderImportedByKey()));
      }
    } catch (IllegalArgumentException | VersionDoesNotExistException | ConfigStoreFactoryDoesNotExistsException
        | ConfigStoreCreationException | URISyntaxException e) {
      Throwables.propagate(e);
    }
  }

  /** Returns the union of datasets found by every delegate finder. */
  @Override
  public List<Dataset> findDatasets() throws IOException {
    List<Dataset> datasets = Lists.newArrayList();
    for (DatasetsFinder<Dataset> df : this.datasetFinders) {
      datasets.addAll(df.findDatasets());
    }
    return datasets;
  }

  /** Not supported: the delegate finders need not share a common root. */
  @Override
  public Path commonDatasetRoot() {
    throw new UnsupportedOperationException("There is no common dataset root for MultiDatasetFinder");
  }
}
| 2,410 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/profile/ConfigurableGlobDatasetFinder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.retention.profile;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.regex.Pattern;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.GlobPattern;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.gobblin.data.management.retention.DatasetCleaner;
import org.apache.gobblin.dataset.Dataset;
import org.apache.gobblin.dataset.DatasetsFinder;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.PathUtils;
/**
 * A configurable {@link DatasetsFinder} that looks for
 * {@link org.apache.gobblin.data.management.retention.dataset.CleanableDataset}s using a glob pattern.
 *
 * <p>
 * The glob is read from {@value #DATASET_FINDER_PATTERN_KEY} (falling back to the deprecated
 * {@value #DATASET_PATTERN_KEY}). Matches may be excluded by a regex blacklist
 * ({@value #DATASET_FINDER_BLACKLIST_KEY}, deprecated {@value #DATASET_BLACKLIST_KEY}) or a glob
 * blacklist ({@value #DATASET_FINDER_GLOB_BLACKLIST_KEY}).
 * </p>
 */
public abstract class ConfigurableGlobDatasetFinder<T extends Dataset> implements DatasetsFinder<T> {

  private static final Logger LOG = LoggerFactory.getLogger(ConfigurableGlobDatasetFinder.class);

  private static final String CONFIGURATION_KEY_PREFIX = "gobblin.";

  /** @deprecated use {@link #DATASET_FINDER_PATTERN_KEY} */
  @Deprecated
  public static final String DATASET_PATTERN_KEY = DatasetCleaner.CONFIGURATION_KEY_PREFIX + "dataset.pattern";

  /** @deprecated use {@link #DATASET_FINDER_BLACKLIST_KEY} */
  @Deprecated
  public static final String DATASET_BLACKLIST_KEY = DatasetCleaner.CONFIGURATION_KEY_PREFIX + "dataset.blacklist";

  public static final String DATASET_FINDER_PATTERN_KEY = CONFIGURATION_KEY_PREFIX + "dataset.pattern";
  public static final String DATASET_FINDER_BLACKLIST_KEY = CONFIGURATION_KEY_PREFIX + "dataset.blacklist";
  public static final String DATASET_FINDER_GLOB_BLACKLIST_KEY = CONFIGURATION_KEY_PREFIX + "dataset.glob.blacklist";

  // Absolute glob pattern locating dataset roots.
  protected final Path datasetPattern;
  // Regex blacklist applied to scheme/authority-stripped match paths.
  private final Optional<Pattern> blacklist;
  // Glob-style blacklist applied to the same stripped paths.
  private final Optional<Pattern> globPatternBlacklist;
  // Deepest non-glob ancestor of datasetPattern.
  private final Path commonRoot;
  protected final FileSystem fs;
  protected final Properties props;

  /** Maps each new-style key to its deprecated fallback key. */
  private static final Map<String, String> DEPRECATIONS = ImmutableMap.of(DATASET_FINDER_PATTERN_KEY,
      DATASET_PATTERN_KEY, DATASET_FINDER_BLACKLIST_KEY, DATASET_BLACKLIST_KEY);

  public ConfigurableGlobDatasetFinder(FileSystem fs, Properties jobProps, Config config) {
    // NOTE(review): requiredProperties() is overridable and called from this constructor, so
    // subclass implementations must not depend on subclass state (not yet initialized here).
    for (String property : requiredProperties()) {
      // Bug fix: only consult DEPRECATIONS when the property actually has a deprecated alias.
      // Previously a missing required property with no alias (e.g. one added by a subclass's
      // requiredProperties()) caused config.hasPath(null) to throw, masking the intended
      // "Missing required property" message.
      String deprecatedKey = DEPRECATIONS.get(property);
      Preconditions.checkArgument(
          config.hasPath(property) || (deprecatedKey != null && config.hasPath(deprecatedKey)),
          String.format("Missing required property %s", property));
    }

    // The deprecated blacklist key takes precedence for backwards compatibility.
    if (ConfigUtils.hasNonEmptyPath(config, DATASET_BLACKLIST_KEY)) {
      this.blacklist = Optional.of(Pattern.compile(config.getString(DATASET_BLACKLIST_KEY)));
    } else if (ConfigUtils.hasNonEmptyPath(config, DATASET_FINDER_BLACKLIST_KEY)) {
      this.blacklist = Optional.of(Pattern.compile(config.getString(DATASET_FINDER_BLACKLIST_KEY)));
    } else {
      this.blacklist = Optional.absent();
    }

    if (ConfigUtils.hasNonEmptyPath(config, DATASET_FINDER_GLOB_BLACKLIST_KEY)) {
      this.globPatternBlacklist = Optional.of(GlobPattern.compile(config.getString(DATASET_FINDER_GLOB_BLACKLIST_KEY)));
    } else {
      this.globPatternBlacklist = Optional.absent();
    }

    this.fs = fs;

    Path tmpDatasetPattern;
    if (config.hasPath(DATASET_FINDER_PATTERN_KEY)) {
      tmpDatasetPattern = new Path(config.getString(DATASET_FINDER_PATTERN_KEY));
    } else {
      tmpDatasetPattern = new Path(config.getString(DATASET_PATTERN_KEY));
    }
    // Resolve relative patterns against the file system's working directory.
    this.datasetPattern =
        tmpDatasetPattern.isAbsolute() ? tmpDatasetPattern : new Path(this.fs.getWorkingDirectory(), tmpDatasetPattern);
    this.commonRoot = PathUtils.deepestNonGlobPath(this.datasetPattern);
    this.props = jobProps;
  }

  public ConfigurableGlobDatasetFinder(FileSystem fs, Properties props) {
    this(fs, props, ConfigFactory.parseProperties(props));
  }

  /**
   * List of required properties for subclasses of this dataset. The constructor will check that the
   * input {@link java.util.Properties} contain all properties returned.
   * @return List of all required property keys in the constructor {@link java.util.Properties}.
   */
  public List<String> requiredProperties() {
    return Lists.newArrayList(DATASET_FINDER_PATTERN_KEY);
  }

  /**
   * Finds all directories satisfying the glob pattern and creates a dataset for each one using
   * {@link #datasetAtPath}, skipping any path matched by either blacklist.
   * @return List of datasets found in the file system.
   * @throws IOException on file system errors.
   */
  @Override
  public List<T> findDatasets() throws IOException {
    List<T> datasets = Lists.newArrayList();
    LOG.info("Finding datasets for pattern " + this.datasetPattern);
    FileStatus[] fileStatuses = this.getDatasetDirs();
    if (fileStatuses != null) {
      for (FileStatus fileStatus : fileStatuses) {
        // Blacklists match against paths with scheme/authority removed.
        Path pathToMatch = PathUtils.getPathWithoutSchemeAndAuthority(fileStatus.getPath());
        if (this.blacklist.isPresent() && this.blacklist.get().matcher(pathToMatch.toString()).find()) {
          continue;
        }
        if (this.globPatternBlacklist.isPresent()
            && this.globPatternBlacklist.get().matcher(pathToMatch.toString()).find()) {
          continue;
        }
        LOG.info("Found dataset at " + fileStatus.getPath());
        datasets.add(datasetAtPath(PathUtils.getPathWithoutSchemeAndAuthority(fileStatus.getPath())));
      }
    }
    return datasets;
  }

  /**
   * @return all the directories that satisfy the input glob pattern (may be null per
   *         {@link FileSystem#globStatus}).
   * @throws IOException on file system errors.
   */
  protected FileStatus[] getDatasetDirs() throws IOException {
    return this.fs.globStatus(this.datasetPattern);
  }

  /**
   * Returns the deepest non-glob ancestor of the dataset pattern.
   */
  @Override
  public Path commonDatasetRoot() {
    return this.commonRoot;
  }

  /**
   * Creates a dataset from a path.
   * @param path {@link org.apache.hadoop.fs.Path} where the dataset is located.
   * @return the dataset at that path.
   * @throws IOException on file system errors.
   */
  public abstract T datasetAtPath(Path path) throws IOException;
}
| 2,411 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/profile/ProxyableDatasetProfile.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.retention.profile;
import java.io.IOException;
import java.util.Properties;
import java.util.concurrent.ExecutionException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import com.google.common.base.Preconditions;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.data.management.retention.DatasetCleaner;
import org.apache.gobblin.dataset.Dataset;
import org.apache.gobblin.data.management.retention.dataset.ConfigurableCleanableDataset;
import org.apache.gobblin.data.management.retention.version.DatasetVersion;
import org.apache.gobblin.util.ProxiedFileSystemCache;
import org.apache.gobblin.util.RateControlledFileSystem;
/**
 * A {@link ConfigurableGlobDatasetFinder} that builds each
 * {@link ConfigurableCleanableDataset} on a {@link FileSystem} proxied as the owner of the
 * dataset's root path, optionally rate-controlled.
 */
public class ProxyableDatasetProfile extends ConfigurableGlobDatasetFinder<Dataset> {

  public ProxyableDatasetProfile(FileSystem fs, Properties props) {
    super(fs, props);
  }

  /** Builds a {@link ConfigurableCleanableDataset} using an owner-proxied file system. */
  @Override
  public Dataset datasetAtPath(Path path) throws IOException {
    return new ConfigurableCleanableDataset<DatasetVersion>(this.getFsForDataset(path), this.props, path);
  }

  /**
   * Returns a file system proxied (via keytab) as the owner of {@code path}; wraps it in a
   * {@link RateControlledFileSystem} when a calls-per-second limit is configured.
   * @throws IOException if the proxied file system cannot be obtained.
   */
  public FileSystem getFsForDataset(Path path) throws IOException {
    Preconditions.checkArgument(this.props.containsKey(ConfigurationKeys.SUPER_USER_NAME_TO_PROXY_AS_OTHERS));
    Preconditions.checkArgument(this.props.containsKey(ConfigurationKeys.SUPER_USER_KEY_TAB_LOCATION));
    FileSystem proxiedFileSystem = this.fs;
    try {
      proxiedFileSystem = ProxiedFileSystemCache.getProxiedFileSystemUsingKeytab(this.fs.getFileStatus(path).getOwner(),
          this.props.getProperty(ConfigurationKeys.SUPER_USER_NAME_TO_PROXY_AS_OTHERS),
          new Path(this.props.getProperty(ConfigurationKeys.SUPER_USER_KEY_TAB_LOCATION)), this.fs.getUri(),
          this.fs.getConf());
    } catch (ExecutionException e) {
      throw new IOException("Cannot get proxied filesystem at Path: " + path, e);
    }
    // Bug fix: Properties.contains(Object) is Hashtable.contains, which tests *values*; the
    // intent here is a key lookup, so use containsKey. Without this the rate limit below was
    // never applied even when configured.
    if (this.props.containsKey(DatasetCleaner.DATASET_CLEAN_HDFS_CALLS_PER_SECOND_LIMIT)) {
      return new RateControlledFileSystem(proxiedFileSystem,
          Long.parseLong(this.props.getProperty(DatasetCleaner.DATASET_CLEAN_HDFS_CALLS_PER_SECOND_LIMIT)));
    }
    return proxiedFileSystem;
  }
}
| 2,412 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/profile/TrackingDatasetProfile.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.retention.profile;
import java.io.IOException;
import java.util.List;
import java.util.Properties;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.gobblin.dataset.Dataset;
import org.apache.gobblin.data.management.retention.dataset.TrackingDataset;
import org.apache.gobblin.data.management.retention.version.finder.DateTimeDatasetVersionFinder;
/**
 * {@link org.apache.gobblin.dataset.DatasetsFinder} for tracking datasets.
 *
 * <p>
 * Tracking datasets are datasets where each data point represents a timestamped action, and the
 * records are organized in a time aware directory pattern (e.g. one directory per
 * minute / hour / day).
 * </p>
 */
// Idiom fix: parameterize the previously raw ConfigurableGlobDatasetFinder supertype.
public class TrackingDatasetProfile extends ConfigurableGlobDatasetFinder<Dataset> {

  public TrackingDatasetProfile(FileSystem fs, Properties props) {
    super(fs, props);
  }

  /** In addition to the glob pattern, the retention date-time pattern key must be configured. */
  @Override
  public List<String> requiredProperties() {
    List<String> requiredProperties = super.requiredProperties();
    requiredProperties.add(DateTimeDatasetVersionFinder.RETENTION_DATE_TIME_PATTERN_KEY);
    return requiredProperties;
  }

  /** Wraps the directory at {@code path} in a {@link TrackingDataset}. */
  @Override
  public Dataset datasetAtPath(Path path) throws IOException {
    return new TrackingDataset(this.fs, this.props, path);
  }
}
| 2,413 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/profile/GlobCleanableDatasetFinder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.retention.profile;
import java.io.IOException;
import java.util.Properties;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.slf4j.LoggerFactory;
import org.apache.gobblin.data.management.retention.dataset.ConfigurableCleanableDataset;
import org.apache.gobblin.data.management.version.FileSystemDatasetVersion;
/**
 * Finds retention datasets by glob: each matching path becomes a
 * {@link ConfigurableCleanableDataset}.
 */
public class GlobCleanableDatasetFinder
    extends ConfigurableGlobDatasetFinder<ConfigurableCleanableDataset<FileSystemDatasetVersion>> {

  public GlobCleanableDatasetFinder(FileSystem fs, Properties props) {
    super(fs, props);
  }

  /** Wraps {@code path} in a {@link ConfigurableCleanableDataset} with a class-scoped logger. */
  @Override
  public ConfigurableCleanableDataset<FileSystemDatasetVersion> datasetAtPath(Path path) throws IOException {
    return new ConfigurableCleanableDataset<>(
        this.fs, this.props, path, LoggerFactory.getLogger(ConfigurableCleanableDataset.class));
  }
}
| 2,414 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/profile/ConfigBasedCleanabledDatasetFinder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.retention.profile;
import com.google.common.base.Optional;
import java.io.IOException;
import java.net.URI;
import java.util.Collection;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.Callable;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import com.typesafe.config.Config;
import org.apache.gobblin.dataset.Dataset;
import org.apache.gobblin.data.management.copy.replication.ConfigBasedDatasetsFinder;
import org.apache.gobblin.data.management.retention.dataset.ConfigurableCleanableDataset;
import org.apache.gobblin.config.client.ConfigClient;
import org.apache.gobblin.configuration.ConfigurationKeys;
import lombok.extern.slf4j.Slf4j;
/**
 * Finds {@link ConfigurableCleanableDataset}s for retention jobs from dataset {@link Config}s in
 * a config store; each dataset's root comes from its config's {@value #DATASET_PATH} entry.
 */
@Slf4j
public class ConfigBasedCleanabledDatasetFinder extends ConfigBasedDatasetsFinder {

  public FileSystem fileSystem;

  public static final String DATASET_PATH = ConfigurationKeys.CONFIG_BASED_PREFIX + ".fullDatasetPath";

  public ConfigBasedCleanabledDatasetFinder(FileSystem fs, Properties jobProps) throws IOException {
    super(fs, jobProps);
    this.fileSystem = fs;
  }

  /** Returns a callable that resolves one config URI into a cleanable dataset and records it. */
  protected Callable<Void> findDatasetsCallable(final ConfigClient confClient, final URI u, final Properties p,
      Optional<List<String>> blacklistURNs, final Collection<Dataset> datasets) {
    return () -> {
      Config datasetConfig = confClient.getConfig(u);
      Path datasetRoot = new Path(datasetConfig.getString(DATASET_PATH));
      datasets.add(new ConfigurableCleanableDataset(fileSystem, p, datasetRoot, datasetConfig, log));
      return null;
    };
  }
}
| 2,415 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/profile/ManagedCleanableDatasetFinder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.retention.profile;
import java.io.IOException;
import java.net.URISyntaxException;
import java.util.Properties;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.slf4j.LoggerFactory;
import com.typesafe.config.Config;
import org.apache.gobblin.config.client.ConfigClient;
import org.apache.gobblin.config.client.ConfigClientCache;
import org.apache.gobblin.config.client.api.ConfigStoreFactoryDoesNotExistsException;
import org.apache.gobblin.config.client.api.VersionStabilityPolicy;
import org.apache.gobblin.config.store.api.ConfigStoreCreationException;
import org.apache.gobblin.config.store.api.VersionDoesNotExistException;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.data.management.retention.dataset.ConfigurableCleanableDataset;
import org.apache.gobblin.data.management.retention.dataset.FsCleanableHelper;
import org.apache.gobblin.data.management.version.FileSystemDatasetVersion;
/**
 * A {@link ConfigurableGlobDatasetFinder} backed by gobblin-config-management; per-dataset
 * {@link Config}s are resolved through a {@link ConfigClient}.
 */
public class ManagedCleanableDatasetFinder
    extends ConfigurableGlobDatasetFinder<ConfigurableCleanableDataset<FileSystemDatasetVersion>> {

  private final ConfigClient client;

  public ManagedCleanableDatasetFinder(FileSystem fs, Properties jobProps, Config config) {
    this(fs, jobProps, config, ConfigClientCache.getClient(VersionStabilityPolicy.STRONG_LOCAL_STABILITY));
  }

  public ManagedCleanableDatasetFinder(FileSystem fs, Properties jobProps, Config config, ConfigClient client) {
    super(fs, jobProps, config);
    this.client = client;
  }

  /** Builds a {@link ConfigurableCleanableDataset} whose config is fetched from the store. */
  @Override
  public ConfigurableCleanableDataset<FileSystemDatasetVersion> datasetAtPath(Path path) throws IOException {
    // Per-dataset properties: the job-wide ones plus this dataset's own root.
    Properties datasetProps = new Properties();
    datasetProps.putAll(this.props);
    datasetProps.setProperty(FsCleanableHelper.RETENTION_DATASET_ROOT, path.toString());
    // The dataset config is keyed by <store-uri><path>.
    String configUri = this.props.getProperty(ConfigurationKeys.CONFIG_MANAGEMENT_STORE_URI) + path;
    try {
      Config datasetConfig = this.client.getConfig(configUri);
      return new ConfigurableCleanableDataset<>(this.fs, datasetProps, path, datasetConfig,
          LoggerFactory.getLogger(ConfigurableCleanableDataset.class));
    } catch (VersionDoesNotExistException | ConfigStoreFactoryDoesNotExistsException | ConfigStoreCreationException
        | URISyntaxException e) {
      throw new IllegalArgumentException(e);
    }
  }
}
| 2,416 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/profile/ManagedIcebergCleanableDatasetFinder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.retention.profile;
import java.io.IOException;
import java.net.URISyntaxException;
import java.util.Properties;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.slf4j.LoggerFactory;
import com.typesafe.config.Config;
import org.apache.gobblin.config.client.ConfigClient;
import org.apache.gobblin.config.client.ConfigClientCache;
import org.apache.gobblin.config.client.api.ConfigStoreFactoryDoesNotExistsException;
import org.apache.gobblin.config.client.api.VersionStabilityPolicy;
import org.apache.gobblin.config.store.api.ConfigStoreCreationException;
import org.apache.gobblin.config.store.api.VersionDoesNotExistException;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.data.management.retention.dataset.CleanableIcebergDataset;
import org.apache.gobblin.data.management.retention.dataset.ConfigurableCleanableDataset;
import org.apache.gobblin.data.management.retention.dataset.FsCleanableHelper;
import org.apache.gobblin.data.management.version.FileSystemDatasetVersion;
/**
 * A {@link ManagedCleanableDatasetFinder} that materializes datasets as
 * {@link CleanableIcebergDataset}s, resolving each dataset's retention config from the
 * config store under the {@code /iceberg} prefix.
 */
public class ManagedIcebergCleanableDatasetFinder extends ManagedCleanableDatasetFinder {

  private static final String ICEBERG_CONFIG_PREFIX = "/iceberg";

  // Config-store client used to resolve per-dataset retention configuration.
  private final ConfigClient client;

  public ManagedIcebergCleanableDatasetFinder(FileSystem fs, Properties jobProps, Config config) {
    this(fs, jobProps, config, ConfigClientCache.getClient(VersionStabilityPolicy.STRONG_LOCAL_STABILITY));
  }

  public ManagedIcebergCleanableDatasetFinder(FileSystem fs, Properties jobProps, Config config, ConfigClient client) {
    super(fs, jobProps, config, client);
    this.client = client;
  }

  /**
   * Builds a {@link CleanableIcebergDataset} rooted at {@code path}, with its config fetched
   * from the config store at {@code <store-uri>/iceberg/<path>}.
   *
   * @throws IllegalArgumentException if the config for the dataset cannot be resolved.
   */
  @Override
  public ConfigurableCleanableDataset<FileSystemDatasetVersion> datasetAtPath(Path path) throws IOException {
    Properties datasetProps = new Properties();
    datasetProps.putAll(this.props);
    datasetProps.setProperty(FsCleanableHelper.RETENTION_DATASET_ROOT, path.toString());

    String configKeyUri =
        this.props.getProperty(ConfigurationKeys.CONFIG_MANAGEMENT_STORE_URI) + ICEBERG_CONFIG_PREFIX + path;
    try {
      Config datasetConfig = this.client.getConfig(configKeyUri);
      return new CleanableIcebergDataset<>(this.fs, datasetProps, path, datasetConfig,
          LoggerFactory.getLogger(CleanableIcebergDataset.class));
    } catch (VersionDoesNotExistException | ConfigStoreFactoryDoesNotExistsException
        | ConfigStoreCreationException | URISyntaxException e) {
      throw new IllegalArgumentException(e);
    }
  }
}
| 2,417 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/version/StringDatasetVersion.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.retention.version;
import java.util.Set;
import org.apache.hadoop.fs.Path;
/**
 * A dataset version identified by an arbitrary string.
 *
 * @deprecated
 * Use {@link org.apache.gobblin.data.management.version.StringDatasetVersion} directly; this shim
 * exists only to satisfy the deprecated
 * {@link org.apache.gobblin.data.management.retention.version.DatasetVersion} interface.
 */
@Deprecated
public class StringDatasetVersion extends org.apache.gobblin.data.management.version.StringDatasetVersion
    implements DatasetVersion {

  public StringDatasetVersion(String version, Path path) {
    super(version, path);
  }

  /** Copy-constructor wrapping the non-deprecated version type. */
  public StringDatasetVersion(org.apache.gobblin.data.management.version.StringDatasetVersion datasetVersion) {
    this(datasetVersion.getVersion(), datasetVersion.getPath());
  }

  /** Every path of this version must be deleted to remove it. */
  @Override
  public Set<Path> getPathsToDelete() {
    return getPaths();
  }
}
| 2,418 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/version/FileStatusDatasetVersion.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.retention.version;
import java.util.Set;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import com.google.common.collect.Sets;
import lombok.Data;
/**
 * A dataset version backed by a {@link FileStatus}, identified and ordered by the file's
 * {@link Path}.
 *
 * @deprecated
 * Extends the deprecated {@link StringDatasetVersion}; prefer
 * {@link org.apache.gobblin.data.management.version.FileStatusDatasetVersion}.
 */
@Data
@Deprecated
public class FileStatusDatasetVersion extends StringDatasetVersion {

  // The file status this version wraps; its path is the identity of the version.
  protected final FileStatus fileStatus;

  public FileStatusDatasetVersion(FileStatus fileStatus) {
    super(fileStatus.getPath().getName(), fileStatus.getPath());
    this.fileStatus = fileStatus;
  }

  /**
   * Orders versions by the underlying file {@link Path}.
   *
   * <p>Note: {@code other} must be a {@link FileStatusDatasetVersion}; any other type causes a
   * {@link ClassCastException}. {@link #equals(Object)} guards this with a class check.</p>
   */
  public int compareTo(DatasetVersion other) {
    FileStatusDatasetVersion otherAsFileStatus = (FileStatusDatasetVersion) other;
    return this.fileStatus.getPath().compareTo(otherAsFileStatus.getFileStatus().getPath());
  }

  @Override
  public boolean equals(Object obj) {
    return obj != null && this.getClass().equals(obj.getClass()) && compareTo((DatasetVersion) obj) == 0;
  }

  @Override
  public int hashCode() {
    // Hash the path — the sole value consulted by equals()/compareTo() — so the
    // equals/hashCode contract holds regardless of how FileStatus implements hashCode().
    return this.fileStatus.getPath().hashCode();
  }

  @Override
  public Set<Path> getPaths() {
    return Sets.newHashSet(this.fileStatus.getPath());
  }
}
| 2,419 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/version/DatasetVersion.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.retention.version;
import java.util.Set;
import org.apache.hadoop.fs.Path;
import org.apache.gobblin.data.management.version.FileSystemDatasetVersion;
/**
 * A {@link FileSystemDatasetVersion} that additionally knows the minimal set of paths that must
 * be removed to delete it.
 *
 * @deprecated
 * Extends {@link org.apache.gobblin.data.management.version.FileSystemDatasetVersion}; new code
 * should use that interface directly.
 */
@Deprecated
public interface DatasetVersion extends FileSystemDatasetVersion {

  /**
   * Get set of {@link org.apache.hadoop.fs.Path} that should be deleted to delete this dataset version.
   *
   * <p>
   * Each path will be deleted recursively, and the deletions will be done serially. As such, this set should be
   * the minimal set of {@link org.apache.hadoop.fs.Path} that can be deleted to remove the dataset version.
   * (For example, the parent directory of the files in the dataset, assuming all descendants of that
   * directory are files for this dataset version).
   * </p>
   *
   * @return Minimal set of {@link org.apache.hadoop.fs.Path} to delete in order to remove the dataset version.
   */
  public Set<Path> getPathsToDelete();
}
| 2,420 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/version/TimestampedDatasetVersion.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.retention.version;
import java.util.Collection;
import java.util.List;
import java.util.Set;
import org.apache.hadoop.fs.Path;
import org.joda.time.DateTime;
import com.google.common.collect.Lists;
/**
 * A dataset version identified by a timestamp.
 *
 * @deprecated
 * Use {@link org.apache.gobblin.data.management.version.TimestampedDatasetVersion} directly; this
 * shim exists only to satisfy the deprecated
 * {@link org.apache.gobblin.data.management.retention.version.DatasetVersion} interface.
 */
@Deprecated
public class TimestampedDatasetVersion
    extends org.apache.gobblin.data.management.version.TimestampedDatasetVersion implements DatasetVersion {

  public TimestampedDatasetVersion(DateTime version, Path path) {
    super(version, path);
  }

  /** Copy-constructor wrapping the non-deprecated version type. */
  public TimestampedDatasetVersion(org.apache.gobblin.data.management.version.TimestampedDatasetVersion datasetVersion) {
    this(datasetVersion.getVersion(), datasetVersion.getPath());
  }

  /** Every path of this version must be deleted to remove it. */
  @Override
  public Set<Path> getPathsToDelete() {
    return getPaths();
  }

  /**
   * Wraps each non-deprecated {@code TimestampedDatasetVersion} in this deprecated shim type.
   */
  public static Collection<TimestampedDatasetVersion> convertFromGeneralVersion(
      Collection<org.apache.gobblin.data.management.version.TimestampedDatasetVersion> realVersions) {
    List<TimestampedDatasetVersion> wrapped = Lists.newArrayListWithCapacity(realVersions.size());
    for (org.apache.gobblin.data.management.version.TimestampedDatasetVersion realVersion : realVersions) {
      wrapped.add(new TimestampedDatasetVersion(realVersion));
    }
    return wrapped;
  }
}
| 2,421 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/version/VersionCleaner.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.retention.version;
import java.io.IOException;
import com.google.common.base.Preconditions;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.data.management.retention.dataset.CleanableDataset;
import org.apache.gobblin.data.management.version.DatasetVersion;
/**
 * An abstraction for cleaning a {@link DatasetVersion} of a {@link CleanableDataset}.
 *
 * <p>
 * Subclasses implement the actual cleanup in {@link #clean()}, with optional hooks that run
 * before ({@link #preCleanAction()}) and after ({@link #postCleanAction()}) the cleanup.
 * </p>
 */
@Slf4j
public abstract class VersionCleaner {

  // The dataset that owns the version being cleaned.
  protected final CleanableDataset cleanableDataset;
  // The specific dataset version to clean.
  protected final DatasetVersion datasetVersion;

  /**
   * @param datasetVersion the version to clean; must not be null.
   * @param cleanableDataset the dataset owning the version; must not be null.
   */
  public VersionCleaner(DatasetVersion datasetVersion, CleanableDataset cleanableDataset) {
    Preconditions.checkNotNull(cleanableDataset);
    Preconditions.checkNotNull(datasetVersion);
    this.cleanableDataset = cleanableDataset;
    this.datasetVersion = datasetVersion;
  }

  /**
   * Action to perform before cleaning a {@link DatasetVersion} of a {@link CleanableDataset}.
   * @throws IOException
   */
  public abstract void preCleanAction() throws IOException;

  /**
   * Cleans the {@link DatasetVersion} of a {@link CleanableDataset}.
   * @throws IOException
   */
  public abstract void clean() throws IOException;

  /**
   * Action to perform after cleaning a {@link DatasetVersion} of a {@link CleanableDataset}.
   * @throws IOException
   */
  public abstract void postCleanAction() throws IOException;
}
| 2,422 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/version/HiveDatasetVersionCleaner.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.retention.version;
import java.io.IOException;
import java.util.HashSet;
import java.util.Set;
import lombok.extern.slf4j.Slf4j;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.thrift.TException;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.typesafe.config.Config;
import org.apache.gobblin.data.management.retention.dataset.CleanableDataset;
import org.apache.gobblin.data.management.retention.dataset.CleanableHiveDataset;
import org.apache.gobblin.data.management.version.DatasetVersion;
import org.apache.gobblin.data.management.version.HiveDatasetVersion;
import org.apache.gobblin.util.AutoReturnableObject;
import org.apache.gobblin.util.ConfigUtils;
/**
 * A {@link VersionCleaner} that drops a Hive partition (a {@link HiveDatasetVersion}) of a
 * {@link CleanableHiveDataset}, optionally deleting the underlying HDFS data, and — as a
 * post-clean step — optionally swapping-in a replacement partition from another table.
 */
@Slf4j
public class HiveDatasetVersionCleaner extends VersionCleaner {

  public static final String REPLACEMENT_HIVE_DB_NAME_KEY = "hive.replacementHiveDbName";
  public static final String REPLACEMENT_HIVE_TABLE_NAME_KEY = "hive.replacementHiveTableName";
  public static final String SHOULD_REPLACE_PARTITION_KEY = "hive.shouldReplacePartition";

  private final CleanableHiveDataset cleanableHiveDataset;
  private final HiveDatasetVersion hiveDatasetVersion;

  // Optional db / table from which a replacement partition is registered after cleanup.
  private final Optional<String> replacementDbName;
  private final Optional<String> replacementTableName;

  /**
   * @param datasetVersion must be a {@link HiveDatasetVersion}.
   * @param cleanableDataset must be a {@link CleanableHiveDataset}.
   * @throws IllegalArgumentException if either argument is of an unsupported type.
   */
  public HiveDatasetVersionCleaner(DatasetVersion datasetVersion, CleanableDataset cleanableDataset) {
    super(datasetVersion, cleanableDataset);

    Preconditions.checkArgument(cleanableDataset instanceof CleanableHiveDataset, String.format("%s only supports %s, "
        + "found: %s", this.getClass(), CleanableHiveDataset.class, cleanableDataset.getClass()));
    // Bug fix: the expected type in this message is HiveDatasetVersion (the version type being
    // checked), not the cleaner class itself.
    Preconditions.checkArgument(datasetVersion instanceof HiveDatasetVersion, String.format("%s only supports %s, "
        + "found: %s", this.getClass(), HiveDatasetVersion.class, datasetVersion.getClass()));

    this.cleanableHiveDataset = (CleanableHiveDataset) cleanableDataset;
    this.hiveDatasetVersion = (HiveDatasetVersion) datasetVersion;

    // For post cleanup activity:
    // Get db / table name from which partition has to be replaced-in for the target partition being deleted.
    this.replacementDbName = Optional.fromNullable(ConfigUtils.getString(cleanableHiveDataset.getDatasetConfig(), REPLACEMENT_HIVE_DB_NAME_KEY, null));
    this.replacementTableName = Optional.fromNullable(ConfigUtils.getString(cleanableHiveDataset.getDatasetConfig(), REPLACEMENT_HIVE_TABLE_NAME_KEY, null));
  }

  @Override
  public void preCleanAction() throws IOException {
    // no-op
  }

  /**
   * Deletes the partition's HDFS data (if configured), drops the Hive partition, and finally
   * removes any directories left empty by the data deletion.
   *
   * @throws IOException if either the metastore drop or the filesystem cleanup fails.
   */
  @Override
  public void clean() throws IOException {
    // Possible empty directories to clean for this partition (version)
    Set<Path> possiblyEmptyDirectories = new HashSet<>();

    try (AutoReturnableObject<IMetaStoreClient> client = cleanableHiveDataset.getClientPool().getClient()) {
      Partition partition = hiveDatasetVersion.getPartition();
      try {
        if (!cleanableHiveDataset.isSimulate()) {
          // As part of the cleanup process, we want to delete both: hive partition and underlying hdfs files
          // However, scenarios arise where hive partition is dropped, but hdfs files aren't, leading to dangling files
          // Thus, we reverse the order of cleaning up hdfs files first and then drop hive partition
          // In cases where HMS was unresponsive and hive partition couldn't be dropped
          // re-running hive retention would drop the partition with no hdfs files found to be deleted
          // or set the flag `isShouldDeleteData` to false
          if (cleanableHiveDataset.isShouldDeleteData()) {
            cleanableHiveDataset.getFsCleanableHelper().clean(hiveDatasetVersion, possiblyEmptyDirectories);
          }
          client.get().dropPartition(partition.getTable().getDbName(), partition.getTable().getTableName(), partition.getValues(), false);
          log.info("Successfully dropped partition " + partition.getCompleteName());
        } else {
          log.info("Simulating drop partition " + partition.getCompleteName());
        }
      } catch (TException | IOException e) {
        log.warn(String.format("Failed to completely delete partition %s.", partition.getCompleteName()), e);
        throw new IOException(e);
      }
    }
    try {
      cleanableHiveDataset.getFsCleanableHelper().cleanEmptyDirectories(possiblyEmptyDirectories, cleanableHiveDataset);
    } catch (IOException ex) {
      log.warn(String.format("Failed to delete at least one or more empty directories from total:{%s} with root path %s", possiblyEmptyDirectories.size(), cleanableHiveDataset.datasetRoot()), ex);
      throw new IOException(ex);
    }
  }

  /**
   * If configured via {@value #SHOULD_REPLACE_PARTITION_KEY}, registers a replacement partition
   * (copied from the configured replacement db/table) in place of the partition just dropped.
   *
   * @throws IOException if the replacement partition cannot be fetched or registered.
   */
  @Override
  public void postCleanAction() throws IOException {
    // As a post-cleanup activity, Hive dataset version cleaner supports swapping-in a different partition.
    // So, if configured, swap-in the other partition.
    boolean shouldReplacePartition = shouldReplacePartition(cleanableHiveDataset.getDatasetConfig(),
        hiveDatasetVersion.getPartition().getTable().getDbName(), hiveDatasetVersion.getPartition().getTable().getTableName(),
        this.replacementDbName, this.replacementTableName);

    // Replace the partition being dropped with a replacement partition from another table (if configured)
    // This is required for cases such as when we want to replace-in a different storage format partition in
    // .. a hybrid table. Eg. Replace ORC partition with Avro or vice-versa
    if (shouldReplacePartition) {
      try (AutoReturnableObject<IMetaStoreClient> client = cleanableHiveDataset.getClientPool().getClient()) {
        org.apache.hadoop.hive.metastore.api.Partition sourcePartition = client.get().getPartition(
            this.replacementDbName.get(),
            this.replacementTableName.get(),
            hiveDatasetVersion.getPartition().getValues());
        org.apache.hadoop.hive.metastore.api.Partition replacementPartition = new org.apache.hadoop.hive.metastore.api.Partition(
            hiveDatasetVersion.getPartition().getValues(),
            hiveDatasetVersion.getPartition().getTable().getDbName(),
            hiveDatasetVersion.getPartition().getTable().getTableName(),
            sourcePartition.getCreateTime(),
            sourcePartition.getLastAccessTime(),
            sourcePartition.getSd(),
            sourcePartition.getParameters());

        if (!cleanableHiveDataset.isSimulate()) {
          client.get().add_partition(replacementPartition);
          log.info("Successfully swapped partition " + replacementPartition);
        } else {
          log.info("Simulating swap partition " + replacementPartition);
        }
      } catch (TException e) {
        log.warn(String.format("Failed to swap-in replacement partition for partition being deleted: %s",
            hiveDatasetVersion.getPartition().getCompleteName()), e);
        throw new IOException(e);
      }
    }
  }

  /***
   * Determine if a partition should be replaced-in from another table for a partition being deleted from
   * the current table.
   *
   * @param config Config to get check if partition replacement is enabled.
   * @param replacedPartitionDbName Database name for the table from where partition is being deleted.
   * @param replacedPartitionTableName Table name from where partition is being deleted.
   * @param replacementPartitionDbName Database name from where the partition should be registered.
   * @param replacementPartitionTableName Table name from where the partition should be registered.
   * @return True if partition should be replaced-in from another table.
   */
  @VisibleForTesting
  protected static boolean shouldReplacePartition(Config config,
      String replacedPartitionDbName, String replacedPartitionTableName,
      Optional<String> replacementPartitionDbName, Optional<String> replacementPartitionTableName) {
    // If disabled explicitly, nothing else matters
    boolean shouldReplacePartition = ConfigUtils.getBoolean(config, SHOULD_REPLACE_PARTITION_KEY, false);

    // If any of the replacement DB name or replacement Table name is missing, then do not replace partition
    if (!replacementPartitionDbName.isPresent() || !replacementPartitionTableName.isPresent()) {
      return false;
    }
    // If not disabled explicitly, check if source db / table are same as the replacement partition's db / table
    // .. if so, do not try replacement.
    else {
      return shouldReplacePartition
          && !(replacedPartitionDbName.equalsIgnoreCase(replacementPartitionDbName.get()) &&
               replacedPartitionTableName.equalsIgnoreCase(replacementPartitionTableName.get()));
    }
  }
}
| 2,423 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/version | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/version/finder/VersionFinder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.retention.version.finder;
import org.apache.gobblin.data.management.retention.version.DatasetVersion;
/**
 * Finds {@link DatasetVersion}s of a dataset.
 *
 * @deprecated
 * Use {@link org.apache.gobblin.data.management.version.finder.VersionFinder} instead; this
 * subtype exists only to narrow the version type to the deprecated {@link DatasetVersion}.
 */
@Deprecated
public interface VersionFinder<T extends DatasetVersion> extends
    org.apache.gobblin.data.management.version.finder.VersionFinder<T> {
}
| 2,424 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/version | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/version/finder/GlobModTimeDatasetVersionFinder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.retention.version.finder;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import com.typesafe.config.Config;
import org.apache.gobblin.data.management.retention.version.DatasetVersion;
import org.apache.gobblin.data.management.retention.version.TimestampedDatasetVersion;
/**
 * Deprecated shim that delegates to the non-deprecated finder of the same name.
 *
 * @deprecated
 * See javadoc for {@link org.apache.gobblin.data.management.version.finder.GlobModTimeDatasetVersionFinder}.
 */
@Deprecated
public class GlobModTimeDatasetVersionFinder extends DatasetVersionFinder<TimestampedDatasetVersion> {

  private static final String VERSION_FINDER_GLOB_PATTERN_KEY = "gobblin.retention.version.finder.pattern";

  // Non-deprecated finder that all calls are delegated to.
  private final org.apache.gobblin.data.management.version.finder.GlobModTimeDatasetVersionFinder realVersionFinder;

  public GlobModTimeDatasetVersionFinder(FileSystem fs, Config config) {
    // Default to matching everything ("*") when no glob pattern is configured.
    this(fs, config.hasPath(VERSION_FINDER_GLOB_PATTERN_KEY)
        ? new Path(config.getString(VERSION_FINDER_GLOB_PATTERN_KEY))
        : new Path("*"));
  }

  public GlobModTimeDatasetVersionFinder(FileSystem fs, Path globPattern) {
    super(fs);
    this.realVersionFinder =
        new org.apache.gobblin.data.management.version.finder.GlobModTimeDatasetVersionFinder(fs, globPattern);
  }

  @Override
  public Class<? extends DatasetVersion> versionClass() {
    return TimestampedDatasetVersion.class;
  }

  @Override
  public Path globVersionPattern() {
    return this.realVersionFinder.globVersionPattern();
  }

  /** Delegates to the real finder, wrapping a non-null result in the deprecated shim type. */
  @Override
  public TimestampedDatasetVersion getDatasetVersion(Path pathRelativeToDatasetRoot, Path fullPath) {
    org.apache.gobblin.data.management.version.TimestampedDatasetVersion found =
        this.realVersionFinder.getDatasetVersion(pathRelativeToDatasetRoot, fullPath);
    return found == null ? null : new TimestampedDatasetVersion(found);
  }
}
| 2,425 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/version | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/version/finder/UnixTimestampVersionFinder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.retention.version.finder;
import org.apache.gobblin.data.management.retention.version.DatasetVersion;
import org.apache.gobblin.data.management.retention.version.TimestampedDatasetVersion;
import java.util.Properties;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
/**
 * Deprecated shim that delegates to the non-deprecated finder of the same name.
 *
 * @deprecated
 * See javadoc for {@link org.apache.gobblin.data.management.version.finder.UnixTimestampVersionFinder}.
 */
@Deprecated
public class UnixTimestampVersionFinder extends DatasetVersionFinder<TimestampedDatasetVersion> {

  // Non-deprecated finder that all calls are delegated to.
  private final org.apache.gobblin.data.management.version.finder.UnixTimestampVersionFinder realVersionFinder;

  public UnixTimestampVersionFinder(FileSystem fs, Properties props) {
    super(fs, props);
    this.realVersionFinder = new org.apache.gobblin.data.management.version.finder.UnixTimestampVersionFinder(
        fs, convertDeprecatedProperties(props));
  }

  @Override
  public Path globVersionPattern() {
    return this.realVersionFinder.globVersionPattern();
  }

  /** Delegates to the real finder, wrapping a non-null result in the deprecated shim type. */
  @Override
  public TimestampedDatasetVersion getDatasetVersion(Path pathRelativeToDatasetRoot, Path fullPath) {
    org.apache.gobblin.data.management.version.TimestampedDatasetVersion found =
        this.realVersionFinder.getDatasetVersion(pathRelativeToDatasetRoot, fullPath);
    return found == null ? null : new TimestampedDatasetVersion(found);
  }

  @Override
  public Class<? extends DatasetVersion> versionClass() {
    return TimestampedDatasetVersion.class;
  }

  /** Renames deprecated property keys to their non-deprecated equivalents. */
  private static Properties convertDeprecatedProperties(Properties props) {
    return WatermarkDatasetVersionFinder.convertDeprecatedProperties(props);
  }
}
| 2,426 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/version | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/version/finder/DateTimeDatasetVersionFinder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.retention.version.finder;
import java.util.Properties;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import com.typesafe.config.Config;
import org.apache.gobblin.data.management.retention.version.DatasetVersion;
import org.apache.gobblin.data.management.retention.version.TimestampedDatasetVersion;
/**
 * @deprecated
 * See javadoc for {@link org.apache.gobblin.data.management.version.finder.DateTimeDatasetVersionFinder}.
 */
@Deprecated
public class DateTimeDatasetVersionFinder extends DatasetVersionFinder<TimestampedDatasetVersion> {

  /** Non-deprecated finder that performs the actual version discovery; this class only adapts types. */
  private final org.apache.gobblin.data.management.version.finder.DateTimeDatasetVersionFinder realVersionFinder;

  /**
   * @deprecated use
   * {@link org.apache.gobblin.data.management.version.finder.DateTimeDatasetVersionFinder#DATE_TIME_PATTERN_KEY}
   * instead.
   */
  @Deprecated
  public static final String RETENTION_DATE_TIME_PATTERN_KEY = "gobblin.retention.datetime.pattern";
  /**
   * @deprecated use
   * {@link org.apache.gobblin.data.management.version.finder.DateTimeDatasetVersionFinder#DATE_TIME_PATTERN_TIMEZONE_KEY}
   * instead.
   */
  @Deprecated
  public static final String RETENTION_DATE_TIME_PATTERN_TIMEZONE_KEY = "gobblin.retention.datetime.pattern.timezone";

  public DateTimeDatasetVersionFinder(FileSystem fs, Properties props) {
    // convertDeprecatedProperties returns a defensive copy, so the caller's Properties
    // instance is never mutated; the super class and the delegate each get their own
    // converted view of the configuration.
    super(fs, convertDeprecatedProperties(props));
    this.realVersionFinder = new org.apache.gobblin.data.management.version.finder.DateTimeDatasetVersionFinder(fs,
        convertDeprecatedProperties(props));
  }

  /** @return the deprecated version class adapted by this finder. */
  @Override
  public Class<? extends DatasetVersion> versionClass() {
    return TimestampedDatasetVersion.class;
  }

  /** Delegates to the non-deprecated finder's glob pattern. */
  @Override
  public Path globVersionPattern() {
    return this.realVersionFinder.globVersionPattern();
  }

  /**
   * Delegates to the non-deprecated finder and wraps its result in the deprecated
   * {@link TimestampedDatasetVersion} type.
   *
   * @return the adapted version, or {@code null} if the delegate found none.
   */
  @Override
  public TimestampedDatasetVersion getDatasetVersion(Path pathRelativeToDatasetRoot, FileStatus versionFileStatus) {
    org.apache.gobblin.data.management.version.TimestampedDatasetVersion timestampedDatasetVersion =
        this.realVersionFinder.getDatasetVersion(pathRelativeToDatasetRoot, versionFileStatus);
    if (timestampedDatasetVersion != null) {
      return new TimestampedDatasetVersion(timestampedDatasetVersion);
    }
    return null;
  }

  // This method will never be called. It exists because the deprecated super class
  // org.apache.gobblin.data.management.retention.version.finder.DatasetVersionFinder requires it.
  // getDatasetVersion(Path pathRelativeToDatasetRoot, FileStatus versionFileStatus) will be called instead.
  @Override
  public TimestampedDatasetVersion getDatasetVersion(Path pathRelativeToDatasetRoot, Path fullPath) {
    throw new UnsupportedOperationException(
        "This method should not be called. getDatasetVersion(Path pathRelativeToDatasetRoot, FileStatus versionFileStatus) "
            + "should have been called instead");
  }

  /**
   * Translates the deprecated keys {@value #RETENTION_DATE_TIME_PATTERN_KEY} and
   * {@value #RETENTION_DATE_TIME_PATTERN_TIMEZONE_KEY} to their TypeSafe-compatible replacements.
   * The deprecated keys are not TypeSafe compatible: {@value #RETENTION_DATE_TIME_PATTERN_TIMEZONE_KEY}
   * overwrites {@value #RETENTION_DATE_TIME_PATTERN_KEY} when converted from props to {@link Config}.
   *
   * @return a new {@link Properties} instance; the input is left unmodified (the previous
   *         implementation mutated the caller's object in place).
   */
  private static Properties convertDeprecatedProperties(Properties props) {
    // Copy via stringPropertyNames()/getProperty() so that values supplied through the
    // Properties defaults chain are captured as well.
    Properties converted = new Properties();
    for (String key : props.stringPropertyNames()) {
      converted.setProperty(key, props.getProperty(key));
    }
    if (converted.containsKey(RETENTION_DATE_TIME_PATTERN_KEY)) {
      converted.setProperty(org.apache.gobblin.data.management.version.finder.DateTimeDatasetVersionFinder.DATE_TIME_PATTERN_KEY, converted.getProperty(RETENTION_DATE_TIME_PATTERN_KEY));
      converted.remove(RETENTION_DATE_TIME_PATTERN_KEY);
    }
    if (converted.containsKey(RETENTION_DATE_TIME_PATTERN_TIMEZONE_KEY)) {
      converted.setProperty(org.apache.gobblin.data.management.version.finder.DateTimeDatasetVersionFinder.DATE_TIME_PATTERN_TIMEZONE_KEY, converted.getProperty(RETENTION_DATE_TIME_PATTERN_TIMEZONE_KEY));
      converted.remove(RETENTION_DATE_TIME_PATTERN_TIMEZONE_KEY);
    }
    return converted;
  }
}
| 2,427 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/version | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/version/finder/SingleVersionFinder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.retention.version.finder;
import java.io.IOException;
import java.util.Collection;
import java.util.Properties;
import org.apache.hadoop.fs.FileSystem;
import com.google.common.collect.Lists;
import lombok.Getter;
import org.apache.gobblin.data.management.retention.version.DatasetVersion;
import org.apache.gobblin.data.management.retention.version.FileStatusDatasetVersion;
import org.apache.gobblin.dataset.Dataset;
import org.apache.gobblin.dataset.FileSystemDataset;
/**
 * @deprecated
 * See javadoc for {@link org.apache.gobblin.data.management.version.finder.SingleVersionFinder}.
 */
@Deprecated
public class SingleVersionFinder implements VersionFinder<FileStatusDatasetVersion> {

  @Getter
  private FileSystem fs;

  /** The {@link Properties} argument is unused; it exists to satisfy the reflective construction contract. */
  public SingleVersionFinder(FileSystem fs, Properties props) {
    this.fs = fs;
  }

  @Override
  public Class<? extends DatasetVersion> versionClass() {
    return FileStatusDatasetVersion.class;
  }

  /**
   * Treats the whole dataset root as a single version.
   *
   * @return a mutable single-element list holding the version for the dataset root's {@code FileStatus}.
   */
  @Override
  public Collection<FileStatusDatasetVersion> findDatasetVersions(Dataset dataset) throws IOException {
    FileStatusDatasetVersion rootVersion =
        new FileStatusDatasetVersion(this.fs.getFileStatus(((FileSystemDataset) dataset).datasetRoot()));
    return Lists.newArrayList(rootVersion);
  }
}
| 2,428 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/version | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/version/finder/FileLevelTimestampVersionFinder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.retention.version.finder;
import java.io.IOException;
import java.util.Collection;
import java.util.Properties;
import org.apache.hadoop.fs.FileSystem;
import org.apache.gobblin.data.management.retention.version.TimestampedDatasetVersion;
import org.apache.gobblin.data.management.retention.version.DatasetVersion;
import org.apache.gobblin.dataset.Dataset;
/**
 * @deprecated
 * See javadoc for {@link org.apache.gobblin.data.management.version.finder.FileLevelTimestampVersionFinder}.
 */
@Deprecated
public class FileLevelTimestampVersionFinder implements VersionFinder<TimestampedDatasetVersion> {

  /** Non-deprecated finder that performs the actual version discovery. */
  private final org.apache.gobblin.data.management.version.finder.FileLevelTimestampVersionFinder realVersionFinder;

  public FileLevelTimestampVersionFinder(FileSystem fs, Properties props) {
    this.realVersionFinder =
        new org.apache.gobblin.data.management.version.finder.FileLevelTimestampVersionFinder(fs, props);
  }

  @Override
  public Class<? extends DatasetVersion> versionClass() {
    return TimestampedDatasetVersion.class;
  }

  /** Delegates discovery to the new finder, then adapts its versions to the deprecated type. */
  @Override
  public Collection<TimestampedDatasetVersion> findDatasetVersions(Dataset dataset) throws IOException {
    return TimestampedDatasetVersion.convertFromGeneralVersion(this.realVersionFinder.findDatasetVersions(dataset));
  }
}
| 2,429 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/version | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/version/finder/WatermarkDatasetVersionFinder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.retention.version.finder;
import java.util.Properties;
import lombok.extern.slf4j.Slf4j;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.gobblin.data.management.retention.version.DatasetVersion;
import org.apache.gobblin.data.management.retention.version.StringDatasetVersion;
/**
 * @deprecated
 * See javadoc for {@link org.apache.gobblin.data.management.version.finder.WatermarkDatasetVersionFinder}.
 */
@Slf4j
@Deprecated
public class WatermarkDatasetVersionFinder extends DatasetVersionFinder<StringDatasetVersion> {

  /** Non-deprecated finder that performs the actual version discovery. */
  private final org.apache.gobblin.data.management.version.finder.WatermarkDatasetVersionFinder realVersionFinder;

  public static final String DEPRECATED_WATERMARK_REGEX_KEY = "gobblin.retention.watermark.regex";

  public WatermarkDatasetVersionFinder(FileSystem fs, Properties props) {
    // Note: the super class receives the properties before key conversion; only the
    // delegate sees the translated keys. Preserved as-is for compatibility.
    super(fs, props);
    this.realVersionFinder =
        new org.apache.gobblin.data.management.version.finder.WatermarkDatasetVersionFinder(fs, convertDeprecatedProperties(props));
  }

  @Override
  public Class<? extends DatasetVersion> versionClass() {
    return StringDatasetVersion.class;
  }

  @Override
  public Path globVersionPattern() {
    return this.realVersionFinder.globVersionPattern();
  }

  /**
   * Delegates to the non-deprecated finder and wraps the result in the deprecated
   * {@link StringDatasetVersion} type; returns {@code null} when the delegate finds none.
   */
  @Override
  public StringDatasetVersion getDatasetVersion(Path pathRelativeToDatasetRoot, Path fullPath) {
    org.apache.gobblin.data.management.version.StringDatasetVersion realVersion =
        this.realVersionFinder.getDatasetVersion(pathRelativeToDatasetRoot, fullPath);
    return realVersion == null ? null : new StringDatasetVersion(realVersion);
  }

  /**
   * Replaces the deprecated key {@value #DEPRECATED_WATERMARK_REGEX_KEY} with its current
   * equivalent. The input {@link Properties} is modified in place and returned.
   */
  public static Properties convertDeprecatedProperties(Properties props) {
    if (props.containsKey(DEPRECATED_WATERMARK_REGEX_KEY)) {
      log.info(String.format("Found deprecated key %s. Replacing it with %s", DEPRECATED_WATERMARK_REGEX_KEY,
          org.apache.gobblin.data.management.version.finder.WatermarkDatasetVersionFinder.WATERMARK_REGEX_KEY));
      props.setProperty(org.apache.gobblin.data.management.version.finder.WatermarkDatasetVersionFinder.WATERMARK_REGEX_KEY,
          props.getProperty(DEPRECATED_WATERMARK_REGEX_KEY));
      props.remove(DEPRECATED_WATERMARK_REGEX_KEY);
    }
    return props;
  }
}
| 2,430 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/version | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/version/finder/ModDateTimeDatasetVersionFinder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.retention.version.finder;
import java.io.IOException;
import java.util.Collection;
import java.util.Properties;
import org.apache.hadoop.fs.FileSystem;
import org.apache.gobblin.data.management.retention.version.TimestampedDatasetVersion;
import org.apache.gobblin.data.management.retention.version.DatasetVersion;
import org.apache.gobblin.dataset.Dataset;
/**
 * @deprecated
 * See javadoc for {@link org.apache.gobblin.data.management.version.finder.ModDateTimeDatasetVersionFinder}.
 */
@Deprecated
public class ModDateTimeDatasetVersionFinder implements VersionFinder<TimestampedDatasetVersion> {

  /** Non-deprecated finder that performs the actual version discovery. */
  private final org.apache.gobblin.data.management.version.finder.ModDateTimeDatasetVersionFinder realVersionFinder;

  public ModDateTimeDatasetVersionFinder(FileSystem fs, Properties props) {
    this.realVersionFinder =
        new org.apache.gobblin.data.management.version.finder.ModDateTimeDatasetVersionFinder(fs, props);
  }

  @Override
  public Class<? extends DatasetVersion> versionClass() {
    return TimestampedDatasetVersion.class;
  }

  /** Delegates discovery to the new finder, then adapts its versions to the deprecated type. */
  @Override
  public Collection<TimestampedDatasetVersion> findDatasetVersions(Dataset dataset) throws IOException {
    return TimestampedDatasetVersion.convertFromGeneralVersion(this.realVersionFinder.findDatasetVersions(dataset));
  }
}
| 2,431 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/version | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/version/finder/DatasetVersionFinder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.retention.version.finder;
import java.util.Properties;
import org.apache.hadoop.fs.FileSystem;
import org.apache.gobblin.data.management.retention.version.DatasetVersion;
/**
 * @deprecated
 * See {@inheritDoc}.
 */
@Deprecated
public abstract class DatasetVersionFinder<T extends DatasetVersion> extends
    org.apache.gobblin.data.management.version.finder.DatasetVersionFinder<T> implements VersionFinder<T> {

  /** Builds a finder over {@code fs} configured from {@code props}. */
  public DatasetVersionFinder(FileSystem fs, Properties props) {
    super(fs, props);
  }

  /** Builds a finder over {@code fs} with an empty configuration. */
  public DatasetVersionFinder(FileSystem fs) {
    this(fs, new Properties());
  }
}
| 2,432 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/policy/DeleteAllRetentionPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.retention.policy;
import java.util.Collection;
import java.util.List;
import java.util.Properties;
import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.data.management.retention.version.DatasetVersion;
/**
 * A {@link RetentionPolicy} under which every {@link DatasetVersion} is deletable.
 */
@Alias("DeleteAll")
public class DeleteAllRetentionPolicy implements RetentionPolicy<DatasetVersion> {

  /** The {@link Properties} argument is ignored; this policy has no tunable behavior. */
  public DeleteAllRetentionPolicy(Properties properties) {}

  @Override
  public Class<? extends DatasetVersion> versionClass() {
    return DatasetVersion.class;
  }

  /** @return the input list itself — every version is deletable. */
  @Override
  public Collection<DatasetVersion> listDeletableVersions(List<DatasetVersion> allVersions) {
    return allVersions;
  }
}
| 2,433 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/policy/RawDatasetRetentionPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.retention.policy;
import java.io.IOException;
import java.util.Collection;
import java.util.List;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import com.google.common.base.Optional;
import com.google.common.base.Predicate;
import com.google.common.collect.Collections2;
import com.google.common.collect.Lists;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.data.management.version.FileSystemDatasetVersion;
import org.apache.gobblin.util.FileListUtils;
/**
 * An abstract {@link RetentionPolicy} for {@link org.apache.gobblin.data.management.retention.dataset.RawDataset}.
 *
 * This class embeds another {@link RetentionPolicy}. In {@link #listDeletableVersions(List)} it applies the
 * embedded {@link RetentionPolicy}'s predicate, as well as {@link #listQualifiedRawFileSystemDatasetVersions(Collection)}.
 */
@Alpha
public abstract class RawDatasetRetentionPolicy implements RetentionPolicy<FileSystemDatasetVersion> {

  private final FileSystem fs;
  private final Class<? extends FileSystemDatasetVersion> versionClass;
  private final RetentionPolicy<FileSystemDatasetVersion> embeddedRetentionPolicy;

  public RawDatasetRetentionPolicy(FileSystem fs, Class<? extends FileSystemDatasetVersion> versionClass,
      RetentionPolicy<FileSystemDatasetVersion> retentionPolicy) {
    this.fs = fs;
    this.versionClass = versionClass;
    this.embeddedRetentionPolicy = retentionPolicy;
  }

  @Override
  public Class<? extends FileSystemDatasetVersion> versionClass() {
    return this.versionClass;
  }

  /**
   * First asks the embedded policy for its deletable versions, then further restricts the
   * result to versions qualified for raw-dataset deletion.
   */
  @Override
  public Collection<FileSystemDatasetVersion> listDeletableVersions(List<FileSystemDatasetVersion> allVersions) {
    return listQualifiedRawFileSystemDatasetVersions(this.embeddedRetentionPolicy.listDeletableVersions(allVersions));
  }

  /**
   * A raw dataset version is qualified to be deleted, iff the corresponding refined paths exist, and the latest
   * mod time of all files is in the raw dataset is earlier than the latest mod time of all files in the refined paths.
   */
  protected Collection<FileSystemDatasetVersion> listQualifiedRawFileSystemDatasetVersions(Collection<FileSystemDatasetVersion> allVersions) {
    List<FileSystemDatasetVersion> qualified = Lists.newArrayList();
    for (FileSystemDatasetVersion candidate : allVersions) {
      if (isQualifiedForDeletion(candidate)) {
        qualified.add(candidate);
      }
    }
    return qualified;
  }

  /** Checks whether {@code version}'s refined counterpart is at least as fresh as the raw data. */
  private boolean isQualifiedForDeletion(FileSystemDatasetVersion version) {
    Iterable<Path> refinedDatasetPaths = getRefinedDatasetPaths(version);
    try {
      Optional<Long> latestRawDatasetModTime = getLatestModTime(version.getPaths());
      Optional<Long> latestRefinedDatasetModTime = getLatestModTime(refinedDatasetPaths);
      return latestRawDatasetModTime.isPresent() && latestRefinedDatasetModTime.isPresent()
          && latestRawDatasetModTime.get() <= latestRefinedDatasetModTime.get();
    } catch (IOException e) {
      // Deliberately unchecked: this is invoked from a filtering context that cannot throw IOException.
      throw new RuntimeException("Failed to get modification time", e);
    }
  }

  /**
   * @return the newest modification time among all most-nested files under {@code paths},
   *         or {@link Optional#absent()} when no file exists.
   */
  private Optional<Long> getLatestModTime(Iterable<Path> paths) throws IOException {
    long newest = Long.MIN_VALUE;
    for (FileStatus status : FileListUtils.listMostNestedPathRecursively(this.fs, paths)) {
      newest = Math.max(newest, status.getModificationTime());
    }
    return newest == Long.MIN_VALUE ? Optional.<Long> absent() : Optional.of(newest);
  }

  /**
   * Get the corresponding refined paths for a raw dataset version. For example, a raw dataset version
   * can be a file containing un-deduplicated records, whose corresponding refined dataset path is a file
   * containing the corresponding deduplicated records.
   */
  protected abstract Iterable<Path> getRefinedDatasetPaths(FileSystemDatasetVersion version);
}
| 2,434 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/policy/TimeBasedRetentionPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.retention.policy;
import java.util.Collection;
import java.util.List;
import java.util.Properties;
import org.joda.time.DateTime;
import org.joda.time.Duration;
import org.joda.time.format.ISOPeriodFormat;
import com.google.common.base.Preconditions;
import com.google.common.base.Predicate;
import com.google.common.collect.Collections2;
import com.google.common.collect.Lists;
import com.typesafe.config.Config;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.data.management.retention.DatasetCleaner;
import org.apache.gobblin.data.management.version.DatasetVersion;
import org.apache.gobblin.data.management.version.TimestampedDatasetVersion;
import org.apache.gobblin.util.ConfigUtils;
/**
 * Retain dataset versions newer than now - {@link #retention}; anything older is deletable.
 */
@Slf4j
@Alias("TimeBased")
public class TimeBasedRetentionPolicy implements RetentionPolicy<TimestampedDatasetVersion> {

  public static final String RETENTION_MINUTES_KEY = DatasetCleaner.CONFIGURATION_KEY_PREFIX + "minutes.retained";
  // ISO8601 Standard PyYmMwWdDThHmMsS
  public static final String RETENTION_TIMEBASED_DURATION_KEY =
      DatasetCleaner.CONFIGURATION_KEY_PREFIX + "timebased.duration";

  /** How far back from "now" versions are retained. */
  private final Duration retention;

  public TimeBasedRetentionPolicy(Properties props) {
    this(ConfigUtils.propertiesToConfig(props));
  }

  /**
   * Creates a new {@link TimeBasedRetentionPolicy} using {@link #RETENTION_TIMEBASED_DURATION_KEY} in the
   * <code>config</code>
   * <ul> Some Example values for {@link #RETENTION_TIMEBASED_DURATION_KEY} are
   * <li> P20D = 20 Days
   * <li> P20H = 20 Hours
   * <li> P2Y = 2 Years
   * <li> P2Y3M = 2 Years and 3 Months
   * <li> PT23M = 23 Minutes (Note this is different from P23M which is 23 Months)
   * </ul>
   *
   * @param config that holds retention duration in ISO8061 format at key {@link #RETENTION_TIMEBASED_DURATION_KEY}.
   */
  public TimeBasedRetentionPolicy(Config config) {
    this.retention = getDuration(config);
    log.info(String.format("%s will delete dataset versions older than %s.", TimeBasedRetentionPolicy.class.getName(),
        this.retention.toString()));
  }

  public TimeBasedRetentionPolicy(String duration) {
    this.retention = parseDuration(duration);
    log.info(String
        .format("%s will delete dataset versions older than %s.", TimeBasedRetentionPolicy.class.getName(), duration));
  }

  @Override
  public Class<? extends DatasetVersion> versionClass() {
    return TimestampedDatasetVersion.class;
  }

  /** @return all versions whose timestamp plus the retention duration lies in the past. */
  @Override
  public Collection<TimestampedDatasetVersion> listDeletableVersions(List<TimestampedDatasetVersion> allVersions) {
    List<TimestampedDatasetVersion> deletable = Lists.newArrayList();
    for (TimestampedDatasetVersion candidate : allVersions) {
      if (candidate.getDateTime().plus(this.retention).isBeforeNow()) {
        deletable.add(candidate);
      }
    }
    return deletable;
  }

  /**
   * Since months and years can have arbitrary days, joda time does not allow conversion of a period string containing
   * months or years to a duration. Hence we calculate the duration using 1970 01:01:00:00:00 UTC as a reference time.
   *
   * <p>
   * <code>
   * (1970 01:01:00:00:00 + P2Y) - 1970 01:01:00:00:00 = Duration for 2 years
   * </code>
   * </p>
   *
   * @param periodString
   * @return duration for this period.
   */
  private static Duration parseDuration(String periodString) {
    DateTime epoch = new DateTime(0);
    DateTime shifted = epoch.plus(ISOPeriodFormat.standard().parsePeriod(periodString));
    return new Duration(epoch, shifted);
  }

  /** Resolves the retention duration from config, preferring the ISO-8601 key over the minutes key. */
  private static Duration getDuration(Config config) {
    boolean hasIsoDuration = config.hasPath(RETENTION_TIMEBASED_DURATION_KEY);
    Preconditions.checkArgument(hasIsoDuration || config.hasPath(RETENTION_MINUTES_KEY),
        String.format("Either %s or %s needs to be set", RETENTION_TIMEBASED_DURATION_KEY, RETENTION_MINUTES_KEY));
    return hasIsoDuration
        ? parseDuration(config.getString(RETENTION_TIMEBASED_DURATION_KEY))
        : Duration.standardMinutes(Long.parseLong(config.getString(RETENTION_MINUTES_KEY)));
  }
}
| 2,435 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/policy/RetentionPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.retention.policy;
import java.util.Collection;
import java.util.List;
import org.apache.gobblin.data.management.version.DatasetVersion;
/**
 * Retention policy around versions of a dataset. Specifies which versions of a dataset should be deleted by
 * {@link org.apache.gobblin.data.management.retention.DatasetCleaner}.
 * @param <T> {@link org.apache.gobblin.data.management.retention.version.DatasetVersion} accepted by this policy.
 */
public interface RetentionPolicy<T extends DatasetVersion> {
  /**
   * Should return class of T.
   * @return class of T.
   */
  public Class<? extends DatasetVersion> versionClass();
  /**
   * @deprecated use {@link org.apache.gobblin.data.management.policy.VersionSelectionPolicy#listSelectedVersions} instead.
   * Logic to decide which dataset versions should be deleted. Only datasets returned will be deleted from filesystem.
   *
   * @param allVersions List of all dataset versions in the file system,
   * sorted from newest to oldest.
   * @return Collection of dataset versions that should be deleted.
   */
  @Deprecated
  public Collection<T> listDeletableVersions(List<T> allVersions);
}
| 2,436 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/policy/NewestKRetentionPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.retention.policy;
import java.util.Collection;
import java.util.List;
import java.util.Properties;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Lists;
import com.typesafe.config.Config;
import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.data.management.retention.DatasetCleaner;
import org.apache.gobblin.data.management.version.DatasetVersion;
/**
* Retains the newest k versions of the dataset.
*/
@Alias("NewestK")
public class NewestKRetentionPolicy<T extends DatasetVersion> implements RetentionPolicy<T> {
private static final Logger LOGGER = LoggerFactory.getLogger(NewestKRetentionPolicy.class);
/**
* @deprecated use {@link #NEWEST_K_VERSIONS_RETAINED_KEY}
*/
@Deprecated
public static final String VERSIONS_RETAINED_KEY = DatasetCleaner.CONFIGURATION_KEY_PREFIX +
"versions.retained";
public static final String NEWEST_K_VERSIONS_RETAINED_KEY = DatasetCleaner.CONFIGURATION_KEY_PREFIX +
"newestK.versions.retained";
public static final String VERSIONS_RETAINED_DEFAULT = Integer.toString(2);
private final int versionsRetained;
public NewestKRetentionPolicy(int versionsRetained) {
this.versionsRetained = versionsRetained;
LOGGER.info(String.format("%s will retain %d versions of each dataset.",
NewestKRetentionPolicy.class.getName(), this.versionsRetained));
}
public NewestKRetentionPolicy(Properties props) {
if (props.containsKey(VERSIONS_RETAINED_KEY)) {
this.versionsRetained = Integer.parseInt(props.getProperty(VERSIONS_RETAINED_KEY));
} else if (props.containsKey(NEWEST_K_VERSIONS_RETAINED_KEY)) {
this.versionsRetained = Integer.parseInt(props.getProperty(NEWEST_K_VERSIONS_RETAINED_KEY));
} else {
this.versionsRetained = Integer.parseInt(VERSIONS_RETAINED_DEFAULT);
}
}
public NewestKRetentionPolicy(Config config) {
this(Integer.parseInt(config.getString(NEWEST_K_VERSIONS_RETAINED_KEY)));
}
@Override
public Class<? extends DatasetVersion> versionClass() {
return DatasetVersion.class;
}
@Override
public Collection<T> listDeletableVersions(List<T> allVersions) {
int newerVersions = 0;
List<T> deletableVersions = Lists.newArrayList();
for(T datasetVersion : allVersions) {
if(newerVersions >= this.versionsRetained) {
deletableVersions.add(datasetVersion);
}
newerVersions++;
}
return deletableVersions;
}
}
| 2,437 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/policy/CombineRetentionPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.retention.policy;
import java.io.IOException;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.Properties;
import java.util.Set;
import org.apache.commons.lang3.reflect.ConstructorUtils;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Function;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Splitter;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import javax.annotation.Nullable;
import org.apache.gobblin.data.management.retention.DatasetCleaner;
import org.apache.gobblin.data.management.version.DatasetVersion;
import org.apache.gobblin.util.ClassAliasResolver;
import org.apache.gobblin.util.PropertiesUtils;
/**
* Implementation of {@link org.apache.gobblin.data.management.retention.policy.RetentionPolicy} that allows combining different
* policies through a union or intersect operation. It will combine the delete sets from each sub-policy using the
* specified operation.
*
* <p>
* For example, if there are five versions of a dataset, a, b, c, d, e, policy1 would delete versions a, b, while
* policy2 would delete versions b,c, using {@link CombineRetentionPolicy} will delete versions a, b, c if the
* operation is UNION, or it will delete only version b if the operation is INTERSECT.
* </p>
*
* <p>
* {@link CombineRetentionPolicy} expects the following configurations:
* * gobblin.retention.combine.retention.policy.class.* : specifies the classes of the policies to combine. * can be
* any value, and each such configuration defines only one class.
* * gobblin.retention.combine.retention.policy.delete.sets.combine.operation : operation used to combine delete
* sets. Can be UNION or INTERSECT.
* Additionally, any configuration necessary for combined policies must be specified.
* </p>
*/
public class CombineRetentionPolicy<T extends DatasetVersion> implements RetentionPolicy<T> {

  public static final String COMBINE_RETENTION_POLICIES =
      DatasetCleaner.CONFIGURATION_KEY_PREFIX + "combine.retention.policy.classes";

  /**
   * @deprecated Use {@link #COMBINE_RETENTION_POLICIES} instead.
   */
  @Deprecated
  public static final String RETENTION_POLICIES_PREFIX =
      DatasetCleaner.CONFIGURATION_KEY_PREFIX + "combine.retention.policy.class.";

  public static final String DELETE_SETS_COMBINE_OPERATION =
      DatasetCleaner.CONFIGURATION_KEY_PREFIX + "combine.retention.policy.delete.sets.combine.operation";

  private static final Splitter COMMA_BASED_SPLITTER = Splitter.on(",").omitEmptyStrings().trimResults();

  /** Set operation used to merge the delete sets produced by the combined policies. */
  public enum DeletableCombineOperation {
    INTERSECT,
    UNION
  }

  private final List<RetentionPolicy<T>> retentionPolicies;
  private final DeletableCombineOperation combineOperation;

  public CombineRetentionPolicy(List<RetentionPolicy<T>> retentionPolicies,
      DeletableCombineOperation combineOperation) {
    this.combineOperation = combineOperation;
    this.retentionPolicies = retentionPolicies;
  }

  /**
   * Builds the policy from job {@link Properties}. Requires {@link #DELETE_SETS_COMBINE_OPERATION}
   * and at least one combined policy class (see {@link #findRetentionPolicies(Properties)}).
   *
   * @throws IOException if no retention policies are configured
   * @throws IllegalArgumentException if a configured policy cannot be instantiated or the combine
   *         operation key is missing
   */
  public CombineRetentionPolicy(Properties props) throws IOException {
    Preconditions.checkArgument(props.containsKey(DELETE_SETS_COMBINE_OPERATION), "Combine operation not specified.");

    this.retentionPolicies = findRetentionPolicies(props);
    if (this.retentionPolicies.isEmpty()) {
      throw new IOException("No retention policies specified for " + CombineRetentionPolicy.class.getCanonicalName());
    }

    this.combineOperation =
        DeletableCombineOperation.valueOf(props.getProperty(DELETE_SETS_COMBINE_OPERATION).toUpperCase());
  }

  /**
   * Instantiates the policies to combine. Class names are read from the comma-separated
   * {@link #COMBINE_RETENTION_POLICIES} key if present, otherwise (for backwards compatibility)
   * from every key prefixed with {@link #RETENTION_POLICIES_PREFIX}. Each class must expose a
   * constructor accepting a {@link Properties} object.
   */
  @SuppressWarnings("unchecked")
  private List<RetentionPolicy<T>> findRetentionPolicies(Properties props) {
    List<String> retentionPolicyClasses;
    ImmutableList.Builder<RetentionPolicy<T>> builder = ImmutableList.builder();
    ClassAliasResolver<?> aliasResolver = new ClassAliasResolver<>(RetentionPolicy.class);

    if (props.containsKey(COMBINE_RETENTION_POLICIES)) {
      retentionPolicyClasses = COMMA_BASED_SPLITTER.splitToList(props.getProperty(COMBINE_RETENTION_POLICIES));
    } else {
      retentionPolicyClasses = PropertiesUtils.getValuesAsList(props, Optional.of(RETENTION_POLICIES_PREFIX));
    }

    for (String retentionPolicyClass : retentionPolicyClasses) {
      try {
        builder.add((RetentionPolicy<T>) ConstructorUtils.invokeConstructor(
            Class.forName(aliasResolver.resolve(retentionPolicyClass)), props));
      } catch (ReflectiveOperationException e) {
        throw new IllegalArgumentException(e);
      }
    }
    return builder.build();
  }

  /**
   * Returns the most specific common superclass for the {@link #versionClass} of each embedded policy.
   */
  @SuppressWarnings("unchecked")
  @Override
  public Class<T> versionClass() {
    if (this.retentionPolicies.size() == 1) {
      return (Class<T>) this.retentionPolicies.get(0).versionClass();
    }

    Class<T> klazz = (Class<T>) this.retentionPolicies.get(0).versionClass();
    for (RetentionPolicy<T> policy : this.retentionPolicies) {
      klazz = commonSuperclass(klazz, (Class<T>) policy.versionClass());
    }
    return klazz;
  }

  /**
   * Collects the delete set of every embedded policy and merges them according to the configured
   * {@link DeletableCombineOperation}.
   */
  @Override
  public Collection<T> listDeletableVersions(final List<T> allVersions) {

    List<Set<T>> candidateDeletableVersions =
        Lists.newArrayList(Iterables.transform(this.retentionPolicies, new Function<RetentionPolicy<T>, Set<T>>() {
          @SuppressWarnings("deprecation")
          @Nullable
          @Override
          public Set<T> apply(RetentionPolicy<T> input) {
            return Sets.newHashSet(input.listDeletableVersions(allVersions));
          }
        }));

    switch (this.combineOperation) {
      case INTERSECT:
        return intersectDatasetVersions(candidateDeletableVersions);
      case UNION:
        return unionDatasetVersions(candidateDeletableVersions);
      default:
        throw new RuntimeException("Combine operation " + this.combineOperation + " not recognized.");
    }
  }

  /**
   * Finds the closest common ancestor (in the class hierarchy) of {@code classA} and {@code classB}.
   * Falls back to {@link DatasetVersion} if the walk ever escapes the DatasetVersion hierarchy.
   */
  @VisibleForTesting
  @SuppressWarnings("unchecked")
  public Class<T> commonSuperclass(Class<T> classA, Class<T> classB) {

    if (classA.isAssignableFrom(classB)) {
      // a is superclass of b, so return class of a
      return classA;
    }

    // a is not superclass of b. Either b is superclass of a, or they are not in same branch.
    // Find the closest superclass of a that is also a superclass of b.
    Class<?> klazz = classA;
    while (!klazz.isAssignableFrom(classB)) {
      klazz = klazz.getSuperclass();
    }
    if (DatasetVersion.class.isAssignableFrom(klazz)) {
      return (Class<T>) klazz;
    }

    // This should never happen, but is there for safety.
    return (Class<T>) DatasetVersion.class;
  }

  /** Materialized intersection of all sets; an empty input yields an empty set. */
  private Set<T> intersectDatasetVersions(Collection<Set<T>> sets) {
    if (sets.isEmpty()) {
      return Sets.newHashSet();
    }
    Iterator<Set<T>> it = sets.iterator();
    // Copy the first set, then retain only elements present in every other set.
    // (Avoids chaining Sets.intersection views, whose lookups degrade as sets are added.)
    Set<T> outputSet = Sets.newHashSet(it.next());
    while (it.hasNext()) {
      outputSet.retainAll(it.next());
    }
    return outputSet;
  }

  /** Materialized union of all sets; an empty input yields an empty set. */
  private Set<T> unionDatasetVersions(Collection<Set<T>> sets) {
    Set<T> outputSet = Sets.newHashSet();
    for (Set<T> set : sets) {
      outputSet.addAll(set);
    }
    return outputSet;
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.retention.policy;
import java.util.Collection;
import java.util.List;
import java.util.Properties;
import com.google.common.collect.Lists;
import com.typesafe.config.Config;
import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.data.management.retention.version.DatasetVersion;
/**
* A {@link RetentionPolicy} that does not delete any versions. Basically a pass through dummy policy.
*/
@Alias("dummy")
public class DeleteNothingRetentionPolicy implements RetentionPolicy<DatasetVersion> {

  // No configuration is read; this policy never deletes anything.
  public DeleteNothingRetentionPolicy(Properties properties) {}

  public DeleteNothingRetentionPolicy(Config conf) {}

  @Override
  public Class<? extends DatasetVersion> versionClass() {
    // Accepts any version type, since versions are never inspected.
    return DatasetVersion.class;
  }

  @Override
  public Collection<DatasetVersion> listDeletableVersions(List<DatasetVersion> allVersions) {
    // Nothing is ever deletable; return a fresh, empty (mutable) list.
    return Lists.newArrayList();
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.retention.policy;
import java.lang.reflect.InvocationTargetException;
import java.util.Collection;
import java.util.List;
import java.util.Properties;
import com.google.common.base.Predicate;
import com.google.common.base.Predicates;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.data.management.retention.version.DatasetVersion;
/**
* Implementation of {@link RetentionPolicy} that marks a {@link DatasetVersion} for deletion if it does not pass a
* specified {@link Predicate}. The {@link Predicate} class is determined by the key
* {@link #RETENTION_POLICY_PREDICATE_CLASS}.
*/
@Alias("PredicateRetention")
public class PredicateRetentionPolicy implements RetentionPolicy<DatasetVersion> {

  // Predicate that decides which versions to KEEP; versions failing it are marked deletable.
  private final Predicate<DatasetVersion> predicate;

  private static final String RETENTION_POLICY_PREDICATE_CLASS = "org.apache.gobblin.retention.retention.policy.predicate.class";

  /**
   * Instantiates the {@link Predicate} class named by {@link #RETENTION_POLICY_PREDICATE_CLASS}
   * reflectively, via its {@code (Properties)} constructor, passing the same {@code props} through.
   *
   * @throws ClassNotFoundException if the configured class is not on the classpath
   * @throws NoSuchMethodException if the class has no {@code (Properties)} constructor
   */
  @SuppressWarnings("unchecked")
  public PredicateRetentionPolicy(Properties props) throws InstantiationException, IllegalAccessException,
      ClassNotFoundException, IllegalArgumentException, SecurityException, InvocationTargetException,
      NoSuchMethodException {
    this.predicate =
        (Predicate<DatasetVersion>) Class.forName(props.getProperty(RETENTION_POLICY_PREDICATE_CLASS))
            .getConstructor(Properties.class).newInstance(props);
  }

  @Override
  public Class<? extends DatasetVersion> versionClass() {
    // The predicate may inspect any version type, so accept the base type.
    return DatasetVersion.class;
  }

  @Override
  public Collection<DatasetVersion> listDeletableVersions(List<DatasetVersion> allVersions) {
    // Deletable versions are exactly those that FAIL the predicate.
    return Lists.newArrayList(Iterables.filter(allVersions, Predicates.not(this.predicate)));
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.retention.policy.predicates;
import java.util.Properties;
import java.util.regex.Pattern;
import com.google.common.base.Preconditions;
import com.google.common.base.Predicate;
import org.apache.gobblin.data.management.retention.DatasetCleaner;
import org.apache.gobblin.data.management.retention.version.StringDatasetVersion;
/**
* {@link com.google.common.base.Predicate} for {@link org.apache.gobblin.data.management.retention.policy.PredicateRetentionPolicy}
* that passes versions matching a user supplied regular expression. (i.e. versions matching the regex will not be
* deleted).
*/
public class WhitelistPredicate implements Predicate<StringDatasetVersion> {

  public static final String WHITELIST_PATTERN_KEY =
      DatasetCleaner.CONFIGURATION_KEY_PREFIX + "retention.whitelist.pattern";

  // Versions matching this pattern pass the predicate (i.e. are retained).
  private final Pattern whitelist;

  /**
   * @param properties must contain a valid regular expression under {@link #WHITELIST_PATTERN_KEY}
   * @throws IllegalArgumentException if the key is missing
   */
  public WhitelistPredicate(Properties properties) {
    // Include the key in the failure message so misconfiguration is diagnosable.
    Preconditions.checkArgument(properties.containsKey(WHITELIST_PATTERN_KEY),
        "Missing required property " + WHITELIST_PATTERN_KEY);
    this.whitelist = Pattern.compile(properties.getProperty(WHITELIST_PATTERN_KEY));
  }

  /**
   * Returns true (version retained) if the whitelist pattern is found anywhere in the version
   * string — this is {@link java.util.regex.Matcher#find()}, not a full-string match.
   */
  @Override
  public boolean apply(StringDatasetVersion input) {
    return this.whitelist.matcher(input.getVersion()).find();
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.dataset;
/**
* {@inheritDoc}
*/
public interface Dataset extends org.apache.gobblin.dataset.Dataset {
  // Legacy marker interface kept for backwards compatibility; new code should use
  // org.apache.gobblin.dataset.Dataset directly.
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.dataset;
import org.apache.hadoop.fs.Path;
import org.apache.gobblin.dataset.FileSystemDataset;
/**
* A basic implementation of {@link FileSystemDataset}. It can represent a virtual
* file system dataset which doesn't have a physical file/folder
*/
public class SimpleFileSystemDataset implements FileSystemDataset {
private final Path path;
private final boolean _isVirtual;
public SimpleFileSystemDataset(Path path) {
this(path, false);
}
public SimpleFileSystemDataset(Path path, boolean isVirtual) {
this.path = path;
_isVirtual = isVirtual;
}
@Override
public Path datasetRoot() {
return path;
}
@Override
public String datasetURN() {
return path.toString();
}
@Override
public boolean isVirtual() {
return _isVirtual;
}
} | 2,443 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.dataset;
import java.io.IOException;
import java.io.Serializable;
import java.util.Comparator;
import java.util.Map;
import java.util.TreeMap;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import com.google.common.collect.Maps;
import lombok.AllArgsConstructor;
import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.data.management.copy.AllEqualComparator;
import org.apache.gobblin.dataset.Dataset;
import org.apache.gobblin.util.request_allocation.Requestor;
import org.apache.gobblin.util.request_allocation.SimpleHierarchicalPrioritizer;
/**
* A simple type of {@link SimpleHierarchicalPrioritizer} which prioritize {@link Dataset} based on their tier name.
*
* 1-1-1 mapping between {@link Dataset} - {@link SimpleDatasetRequest} - {@link SimpleDatasetRequestor}
*
* {@link org.apache.gobblin.util.request_allocation.HierarchicalAllocator} will use {@link TierComparator} from this class
* to shuffle {@link SimpleDatasetRequestor}s so that high priority tiers will appear in front of low priority tiers.
*
* Usage:
* {@link #TIER_KEY}.<tier-number>=<whitelist-blacklist-pattern>
* Example:
* {@link #TIER_KEY}.0 = pattern_0
* {@link #TIER_KEY}.1 = pattern_1
*/
@Alias("TieredDatasets")
public class SimpleDatasetHierarchicalPrioritizer extends SimpleHierarchicalPrioritizer<SimpleDatasetRequest>
    implements Serializable {

  public static final String CONFIGURATION_PREFIX = "gobblin.prioritizer.datasetTiering";
  public static final String TIER_KEY = CONFIGURATION_PREFIX + ".tier";

  // Matches configuration keys of the form "<TIER_KEY>.<tier-number>".
  private static final Pattern TIER_PATTERN = Pattern.compile(TIER_KEY + "\\.([0-9]+)");

  public SimpleDatasetHierarchicalPrioritizer(State state) throws IOException {
    super(createRequestorComparator(state), new AllEqualComparator());
  }

  /**
   * Builds a comparator ordering requestors by the lowest-numbered tier whose pattern matches the
   * dataset URN; datasets matching no configured tier sort last.
   */
  public static Comparator<Requestor<SimpleDatasetRequest>> createRequestorComparator(State state) throws IOException {
    TreeMap<Integer, Pattern> tiers = Maps.newTreeMap();
    for (Map.Entry<Object, Object> entry : state.getProperties().entrySet()) {
      // Only String->String properties can define tiers; skip anything else.
      if (!(entry.getKey() instanceof String) || !(entry.getValue() instanceof String)) {
        continue;
      }
      Matcher tierKeyMatcher = TIER_PATTERN.matcher((String) entry.getKey());
      if (tierKeyMatcher.matches()) {
        int tierNumber = Integer.parseInt(tierKeyMatcher.group(1));
        tiers.put(tierNumber, Pattern.compile((String) entry.getValue()));
      }
    }
    return new TierComparator(tiers);
  }

  @AllArgsConstructor
  private static class TierComparator implements Comparator<Requestor<SimpleDatasetRequest>>, Serializable {
    private final TreeMap<Integer, Pattern> tiersMap;

    @Override
    public int compare(Requestor<SimpleDatasetRequest> o1, Requestor<SimpleDatasetRequest> o2) {
      return Integer.compare(findTier(o1), findTier(o2));
    }

    // Lowest tier number whose pattern is found in the dataset URN; MAX_VALUE when none match.
    private int findTier(Requestor<SimpleDatasetRequest> requestor) {
      Dataset dataset = ((SimpleDatasetRequestor) requestor).getDataset();
      for (Map.Entry<Integer, Pattern> tierEntry : tiersMap.entrySet()) {
        if (tierEntry.getValue().matcher(dataset.datasetURN()).find()) {
          return tierEntry.getKey();
        }
      }
      return Integer.MAX_VALUE;
    }
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.dataset;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Properties;
import java.util.stream.Collectors;
import org.apache.commons.lang3.reflect.ConstructorUtils;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import com.google.common.annotations.VisibleForTesting;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.dataset.Dataset;
import org.apache.gobblin.dataset.DatasetsFinder;
import org.apache.gobblin.util.PropertiesUtils;
import org.apache.gobblin.util.function.CheckedExceptionPredicate;
/**
* A decorator for filtering datasets after a {@link DatasetsFinder} finds a {@link List} of {@link Dataset}s
*/
@Slf4j
public class DatasetsFinderFilteringDecorator<T extends Dataset> implements DatasetsFinder<T> {
  private static final String PREFIX = "filtering.datasets.finder.";
  // Class name of the wrapped DatasetsFinder.
  public static final String DATASET_CLASS = PREFIX + "class";
  // Comma-separated predicate class names a dataset must ALL satisfy to pass.
  public static final String ALLOWED = PREFIX + "allowed.predicates";
  // Comma-separated predicate class names a dataset must satisfy NONE of to pass.
  public static final String DENIED = PREFIX + "denied.predicates";

  // Underlying finder whose results are filtered.
  protected DatasetsFinder<T> datasetFinder;
  protected List<CheckedExceptionPredicate<T,IOException>> allowDatasetPredicates;
  protected List<CheckedExceptionPredicate<T,IOException>> denyDatasetPredicates;

  /**
   * Builds the decorator from configuration: instantiates the wrapped finder (named by
   * {@link #DATASET_CLASS}, defaulting to {@code DefaultFileSystemGlobFinder}) and the
   * allow/deny predicate lists.
   */
  public DatasetsFinderFilteringDecorator(FileSystem fs, Properties properties) throws IOException {
    this.datasetFinder = DatasetUtils.instantiateDatasetFinder(
        DATASET_CLASS, properties, fs, DefaultFileSystemGlobFinder.class.getName());
    this.allowDatasetPredicates = instantiatePredicates(ALLOWED, properties);
    this.denyDatasetPredicates = instantiatePredicates(DENIED, properties);
  }

  @VisibleForTesting
  DatasetsFinderFilteringDecorator(
      DatasetsFinder<T> datasetsFinder,
      List<CheckedExceptionPredicate<T,IOException>> allowDatasetPredicates,
      List<CheckedExceptionPredicate<T,IOException>> denyDatasetPredicates) {
    this.datasetFinder = datasetsFinder;
    this.allowDatasetPredicates = allowDatasetPredicates;
    this.denyDatasetPredicates = denyDatasetPredicates;
  }

  /**
   * Finds datasets via the wrapped finder and keeps only those that satisfy every allow-predicate
   * and no deny-predicate.
   *
   * @throws IOException if a predicate throws while evaluating a dataset
   */
  @Override
  public List<T> findDatasets() throws IOException {
    List<T> datasets = datasetFinder.findDatasets();
    log.info("Found {} datasets", datasets.size());
    List<T> allowedDatasets = Collections.emptyList();
    try {
      // wrapToTunneled presumably converts a checked IOException thrown inside the stream into the
      // unchecked WrappedIOException caught below — TODO confirm against CheckedExceptionPredicate.
      allowedDatasets = datasets.stream()
          .filter(dataset -> allowDatasetPredicates.stream()
              .map(CheckedExceptionPredicate::wrapToTunneled)
              .allMatch(p -> p.test(dataset)))
          .filter(dataset -> denyDatasetPredicates.stream()
              .map(CheckedExceptionPredicate::wrapToTunneled)
              .noneMatch(predicate -> predicate.test(dataset)))
          .collect(Collectors.toList());
    } catch (CheckedExceptionPredicate.WrappedIOException wrappedIOException) {
      // NOTE(review): if rethrowWrapped() ever returned normally, this method would silently
      // return the empty list initialized above — verify it always throws.
      wrappedIOException.rethrowWrapped();
    }
    log.info("Allowed {}/{} datasets", allowedDatasets.size() ,datasets.size());
    return allowedDatasets;
  }

  @Override
  public Path commonDatasetRoot() {
    // Delegates directly; filtering does not change the common root.
    return datasetFinder.commonDatasetRoot();
  }

  /**
   * Reflectively instantiates the predicate classes listed (comma-separated) under {@code key},
   * each via a {@code (Properties)} constructor.
   *
   * @throws IOException if any class cannot be found or constructed
   */
  private List<CheckedExceptionPredicate<T,IOException>> instantiatePredicates(String key, Properties props)
      throws IOException {
    List<CheckedExceptionPredicate<T,IOException>> predicates = new ArrayList<>();
    try {
      for (String className : PropertiesUtils.getPropAsList(props, key)) {
        predicates.add((CheckedExceptionPredicate<T, IOException>)
            ConstructorUtils.invokeConstructor(Class.forName(className), props));
      }
      return predicates;
    } catch (ReflectiveOperationException e) {
      throw new IOException(e);
    }
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.dataset;
import java.io.IOException;
import java.util.Collection;
import java.util.List;
import java.util.Properties;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import com.google.common.collect.Lists;
import org.apache.gobblin.dataset.IterableDatasetFinder;
import org.apache.gobblin.dataset.IterableDatasetFinderImpl;
import org.apache.gobblin.data.management.copy.CopyableFile;
import org.apache.gobblin.data.management.copy.CopyableFileFilter;
import org.apache.gobblin.dataset.DatasetsFinder;
import org.apache.gobblin.util.PropertiesUtils;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
/**
* Utilities for datasets.
*/
public class DatasetUtils {

  public static final String CONFIGURATION_KEY_PREFIX = "gobblin.dataset.";
  public static final String DATASET_PROFILE_CLASS_KEY = CONFIGURATION_KEY_PREFIX + "profile.class";
  public static final String PATH_FILTER_KEY = CONFIGURATION_KEY_PREFIX + "path.filter.class";
  private static final String COPYABLE_FILE_FILTER_KEY = CONFIGURATION_KEY_PREFIX + "copyable.file.filter.class";

  /** Fallback {@link PathFilter} that accepts every path. */
  private static final PathFilter ACCEPT_ALL_PATH_FILTER = new PathFilter() {
    @Override
    public boolean accept(Path path) {
      return true;
    }
  };

  /** Fallback {@link CopyableFileFilter} that passes every file through unchanged. */
  private static final CopyableFileFilter ACCEPT_ALL_COPYABLE_FILE_FILTER = new CopyableFileFilter() {
    @Override
    public Collection<CopyableFile> filter(FileSystem sourceFs, FileSystem targetFs,
        Collection<CopyableFile> copyableFiles) {
      return copyableFiles;
    }
  };

  // Utility class: static methods only, not meant to be instantiated.
  private DatasetUtils() {
  }

  /**
   * Instantiate a {@link DatasetsFinder}. The class of the {@link DatasetsFinder} is read from property
   * {@link #DATASET_PROFILE_CLASS_KEY}.
   *
   * @param props Properties used for building {@link DatasetsFinder}.
   * @param fs {@link FileSystem} where datasets are located.
   * @return A new instance of {@link DatasetsFinder}.
   * @throws IOException if the configured class cannot be found or constructed
   */
  @SuppressWarnings("unchecked")
  public static <T extends org.apache.gobblin.dataset.Dataset> DatasetsFinder<T> instantiateDatasetFinder(
      Properties props, FileSystem fs, String default_class, Object... additionalArgs)
      throws IOException {
    return instantiateDatasetFinder(DATASET_PROFILE_CLASS_KEY, props, fs, default_class, additionalArgs);
  }

  /**
   * Instantiate a {@link DatasetsFinder} whose class name is read from {@code classKey} (falling
   * back to {@code default_class}), invoking the longest constructor matching
   * {@code (fs, props, additionalArgs...)}.
   */
  @SuppressWarnings("unchecked")
  public static <T extends org.apache.gobblin.dataset.Dataset> DatasetsFinder<T> instantiateDatasetFinder(
      String classKey, Properties props, FileSystem fs, String default_class, Object... additionalArgs)
      throws IOException {
    String className = default_class;
    if (props.containsKey(classKey)) {
      className = props.getProperty(classKey);
    }
    try {
      Class<?> datasetFinderClass = Class.forName(className);
      List<Object> args = Lists.newArrayList(fs, props);
      if (additionalArgs != null) {
        args.addAll(Lists.newArrayList(additionalArgs));
      }
      return (DatasetsFinder<T>) GobblinConstructorUtils.invokeLongestConstructor(datasetFinderClass, args.toArray());
    } catch (ReflectiveOperationException exception) {
      throw new IOException(exception);
    }
  }

  /**
   * Same as {@link #instantiateDatasetFinder(Properties, FileSystem, String, Object...)}, but always
   * returns an {@link IterableDatasetFinder}, wrapping non-iterable finders as needed.
   */
  public static <T extends org.apache.gobblin.dataset.Dataset> IterableDatasetFinder<T> instantiateIterableDatasetFinder(
      Properties props, FileSystem fs, String default_class, Object... additionalArgs) throws IOException {
    DatasetsFinder<T> datasetsFinder = instantiateDatasetFinder(props, fs, default_class, additionalArgs);
    return datasetsFinder instanceof IterableDatasetFinder ? (IterableDatasetFinder<T>) datasetsFinder
        : new IterableDatasetFinderImpl<>(datasetsFinder);
  }

  /**
   * Instantiate a {@link PathFilter} from the class name at key {@link #PATH_FILTER_KEY} in props passed. If key
   * {@link #PATH_FILTER_KEY} is not set, a default {@link #ACCEPT_ALL_PATH_FILTER} is returned.
   *
   * @param props that contain path filter classname at {@link #PATH_FILTER_KEY}
   * @return a new instance of {@link PathFilter}. If no key is found, returns {@link #ACCEPT_ALL_PATH_FILTER}
   */
  public static PathFilter instantiatePathFilter(Properties props) {
    if (!props.containsKey(PATH_FILTER_KEY)) {
      return ACCEPT_ALL_PATH_FILTER;
    }

    try {
      Class<?> pathFilterClass = Class.forName(props.getProperty(PATH_FILTER_KEY));
      // The filter is constructed with the subset of properties under CONFIGURATION_KEY_PREFIX,
      // with the prefix stripped.
      return (PathFilter) GobblinConstructorUtils.invokeLongestConstructor(pathFilterClass,
          PropertiesUtils.extractPropertiesWithPrefixAfterRemovingPrefix(props, CONFIGURATION_KEY_PREFIX));
    } catch (ReflectiveOperationException exception) {
      throw new RuntimeException(exception);
    }
  }

  /**
   * Instantiate a {@link CopyableFileFilter} from the class name at key {@link #COPYABLE_FILE_FILTER_KEY} in props
   * passed. If key {@link #COPYABLE_FILE_FILTER_KEY} is not set, a default {@link #ACCEPT_ALL_COPYABLE_FILE_FILTER} is
   * returned.
   *
   * @param props that contain the filter classname at {@link #COPYABLE_FILE_FILTER_KEY}
   * @return a new instance of {@link CopyableFileFilter}. If no key is found, returns
   *         {@link #ACCEPT_ALL_COPYABLE_FILE_FILTER}
   */
  public static CopyableFileFilter instantiateCopyableFileFilter(Properties props, Object... additionalArgs) {
    if (!props.containsKey(COPYABLE_FILE_FILTER_KEY)) {
      return ACCEPT_ALL_COPYABLE_FILE_FILTER;
    }

    try {
      Class<?> copyableFileFilterClass = Class.forName(props.getProperty(COPYABLE_FILE_FILTER_KEY));
      return (CopyableFileFilter) GobblinConstructorUtils
          .invokeLongestConstructor(copyableFileFilterClass, additionalArgs);
    } catch (ReflectiveOperationException exception) {
      throw new RuntimeException(exception);
    }
  }
}
| 2,446 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/dataset/DummyDataset.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.dataset;
import lombok.RequiredArgsConstructor;
import java.io.IOException;
import java.util.Collection;
import org.apache.gobblin.dataset.FileSystemDataset;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import com.google.common.collect.ImmutableList;
import org.apache.gobblin.data.management.copy.CopyConfiguration;
import org.apache.gobblin.data.management.copy.CopyableDataset;
import org.apache.gobblin.data.management.copy.CopyEntity;
import org.apache.gobblin.data.management.retention.dataset.CleanableDataset;
/**
 * A no-op dataset: it cleans nothing and produces no copyable files, but still reports a
 * dataset root and a URN derived from that root.
 */
@RequiredArgsConstructor
public class DummyDataset implements CopyableDataset, CleanableDataset, FileSystemDataset {

  private final Path datasetRoot;

  /** No-op: a dummy dataset has nothing to clean. */
  @Override
  public void clean() throws IOException {
  }

  /** A dummy dataset never yields any copy entities. */
  @Override
  public Collection<? extends CopyEntity> getCopyableFiles(FileSystem targetFs, CopyConfiguration configuration)
      throws IOException {
    return ImmutableList.of();
  }

  @Override
  public Path datasetRoot() {
    return this.datasetRoot;
  }

  /** The URN is simply the string form of the dataset root path. */
  @Override
  public String datasetURN() {
    return this.datasetRoot.toString();
  }
}
| 2,447 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/dataset/SimpleDatasetRequestor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.dataset;
import java.io.IOException;
import java.util.Comparator;
import java.util.Iterator;
import com.google.common.collect.Iterators;
import lombok.AllArgsConstructor;
import lombok.Getter;
import org.apache.gobblin.dataset.Dataset;
import org.apache.gobblin.util.request_allocation.PushDownRequestor;
/**
 * A trivial {@link org.apache.gobblin.util.request_allocation.Requestor} that always emits exactly one
 * {@link SimpleDatasetRequest} wrapping its {@link Dataset}.
 */
@AllArgsConstructor
public class SimpleDatasetRequestor implements PushDownRequestor<SimpleDatasetRequest> {
  @Getter
  private Dataset dataset;

  @Override
  public Iterator<SimpleDatasetRequest> getRequests(Comparator<SimpleDatasetRequest> prioritizer)
      throws IOException {
    // Prioritization is irrelevant for a single-element request stream.
    return singleRequest();
  }

  @Override
  public Iterator<SimpleDatasetRequest> iterator() {
    return singleRequest();
  }

  /** Builds a fresh one-element iterator over a request for this requestor's dataset. */
  private Iterator<SimpleDatasetRequest> singleRequest() {
    return Iterators.singletonIterator(new SimpleDatasetRequest(this.dataset, this));
  }
}
| 2,448 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/dataset/TimePartitionGlobFinder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.dataset;
import java.io.IOException;
import java.time.Duration;
import java.time.ZoneId;
import java.time.ZonedDateTime;
import java.time.format.DateTimeFormatter;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Properties;
import java.util.Set;
import java.util.regex.Pattern;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.data.management.retention.profile.ConfigurableGlobDatasetFinder;
import org.apache.gobblin.dataset.DatasetsFinder;
import org.apache.gobblin.dataset.FileSystemDataset;
import org.apache.gobblin.time.TimeIterator;
import org.apache.gobblin.util.PathUtils;
/**
 * A {@link TimePartitionGlobFinder} finds all dataset time partitions within the time window
 * [current time - lookback time, current time]. It derives an efficient dataset partition glob pattern
 * from the time window and a supported {@value #TIME_FORMAT}.
 *
 * <p> If {@value #ENABLE_VIRTUAL_PARTITION} is set, it will create virtual {@link SimpleFileSystemDataset}
 * instances for partitions inside the time window that don't physically exist.
 */
@Slf4j
public class TimePartitionGlobFinder implements DatasetsFinder<FileSystemDataset> {
  private static final String CONF_PREFIX = "timePartitionGlobFinder.";

  // Literal prefix (if any) that precedes the time component in partition directory names.
  public static final String PARTITION_PREFIX = CONF_PREFIX + "partitionPrefix";
  // Time format of partition directories; must match SUPPORTED_TIME_FORMAT.
  public static final String TIME_FORMAT = CONF_PREFIX + "timeFormat";
  // When true, missing partitions in the window are reported as virtual datasets instead of being dropped.
  public static final String ENABLE_VIRTUAL_PARTITION = CONF_PREFIX + "enableVirtualPartition";
  /**
   * Options are enumerated in {@link org.apache.gobblin.time.TimeIterator.Granularity}
   */
  public static final String GRANULARITY = CONF_PREFIX + "granularity";
  public static final String TIME_ZONE = CONF_PREFIX + "timeZone";
  // ISO-8601 duration string (parsed by java.time.Duration) defining the lookback window.
  public static final String LOOKBACK_SPEC = CONF_PREFIX + "lookbackSpec";

  private static final String DEFAULT_TIME_ZONE = "America/Los_Angeles";
  // Only slash-separated (yyyy/MM/...) or dash-separated (yyyy-MM-...) formats are supported.
  private static final Pattern SUPPORTED_TIME_FORMAT = Pattern.compile("(yyyy/MM(/.*)*)|(yyyy-MM(-.*)*)");

  private final String datasetPattern;          // original dataset glob from configuration
  private final String datasetPartitionPattern; // derived glob matching partitions in the time window
  private final String partitionPrefix;
  private final DateTimeFormatter timeFormatter;
  private final boolean enableVirtualPartition;
  private final ZonedDateTime startTime;        // window start: endTime minus lookback
  private final ZonedDateTime endTime;          // window end: "current" time
  private final TimeIterator.Granularity granularity;
  private final Properties props;
  private final FileSystem fs;

  public TimePartitionGlobFinder(FileSystem fs, Properties properties) {
    this(fs, properties,
        ZonedDateTime.now(ZoneId.of(properties.getProperty(TIME_ZONE, DEFAULT_TIME_ZONE))));
  }

  // Package-private constructor taking an explicit "now" so tests can pin the time window.
  @VisibleForTesting
  TimePartitionGlobFinder(FileSystem fs, Properties properties, ZonedDateTime curTime) {
    datasetPattern = properties.getProperty(ConfigurableGlobDatasetFinder.DATASET_FINDER_PATTERN_KEY);
    Path datasetPath = new Path(datasetPattern);
    partitionPrefix = properties.getProperty(PARTITION_PREFIX, "");
    String timeFormat = properties.getProperty(TIME_FORMAT).trim();
    Preconditions.checkState(isTimeFormatSupported(timeFormat),
        String.format("Unsupported time format %s, expecting %s", timeFormat, SUPPORTED_TIME_FORMAT));
    timeFormatter = DateTimeFormatter.ofPattern(timeFormat);
    endTime = curTime;
    Duration lookback = Duration.parse(properties.getProperty(LOOKBACK_SPEC));
    startTime = endTime.minus(lookback);
    granularity = TimeIterator.Granularity.valueOf(properties.getProperty(GRANULARITY).toUpperCase());
    // Narrow the partition glob to the years/months covered by [startTime, endTime] to limit listing cost.
    datasetPartitionPattern = new Path(datasetPath,
        partitionPrefix + derivePartitionPattern(startTime, endTime, timeFormat)).toString();
    log.info("Dataset partition pattern is {}", datasetPartitionPattern);
    enableVirtualPartition = Boolean.valueOf(properties.getProperty(ENABLE_VIRTUAL_PARTITION, "false"));
    props = properties;
    this.fs = fs;
  }

  /**
   * The finder supports time format matching {@link #SUPPORTED_TIME_FORMAT}
   */
  @VisibleForTesting
  static boolean isTimeFormatSupported(String timeFormat) {
    return SUPPORTED_TIME_FORMAT.matcher(timeFormat).matches();
  }

  /**
   * Derive partition glob pattern from time format. It tries its best to provide
   * a fine pattern by refining year and month options from reasoning about
   * start time, end time and {@link #SUPPORTED_TIME_FORMAT}.
   *
   * <p>The result is a glob alternation such as {@code {2020,2021}/{11,12,01}/*} (slash formats)
   * or {@code {2020}-{01,02}*} (dash formats). The glob may over-match; callers intersect the
   * matches with the exact computed partitions (see {@code doFindDatasets}).
   */
  @VisibleForTesting
  static String derivePartitionPattern(ZonedDateTime start,
      ZonedDateTime end, String timeFormat) {
    // Refine year options: every year from start.year to end.year inclusive.
    int startYear = start.getYear();
    int endYear = end.getYear();
    StringBuilder yearOptions = new StringBuilder("{" + startYear);
    appendOptions(yearOptions, startYear + 1, endYear);
    yearOptions.append("}");
    // Get month options
    StringBuilder monthOptions = buildMonthOptions(start, end);
    StringBuilder pattern = new StringBuilder(yearOptions);
    if (timeFormat.contains("-")) {
      pattern.append("-");
      pattern.append(monthOptions);
      // A concrete month list needs a trailing wildcard to cover any finer components (e.g. -dd);
      // the "*" month case already ends with a wildcard.
      if (!monthOptions.toString().equals("*")) {
        pattern.append("*");
      }
    } else {
      pattern.append("/");
      pattern.append(monthOptions);
      String[] parts = timeFormat.split("/");
      // We already processed year and month components; add one wildcard level per remaining component.
      for (int i = 2; i < parts.length; i++) {
        pattern.append("/*");
      }
    }
    return pattern.toString();
  }

  /**
   * Refine month options: a zero-padded alternation of the months spanned by [start, end],
   * or "*" when the window covers a full year or more.
   */
  private static StringBuilder buildMonthOptions(ZonedDateTime start,
      ZonedDateTime end) {
    int startMonth = start.getMonthValue();
    int endMonth = end.getMonthValue();
    int yearDiff = end.getYear() - start.getYear();
    if ( yearDiff > 1 || (yearDiff == 1 && endMonth >= startMonth)) {
      // All 12 months
      return new StringBuilder("*");
    }
    // Append start month
    StringBuilder monthOptions = new StringBuilder("{");
    if (startMonth < 10) {
      monthOptions.append("0");
    }
    monthOptions.append(startMonth);
    if (endMonth >= startMonth) {
      appendOptions(monthOptions, startMonth + 1, endMonth);
    } else {
      // Window crosses a year boundary:
      // from [startMonth + 1, 12] of start year
      appendOptions(monthOptions, startMonth + 1, 12);
      // from [1, endMonth] of current year
      appendOptions(monthOptions, 1, endMonth);
    }
    monthOptions.append("}");
    return monthOptions;
  }

  // Appends ",start,...,end" to the builder, zero-padding single-digit values.
  private static void appendOptions(StringBuilder stringBuilder, int start, int end) {
    for (int i = start; i <= end; i++) {
      stringBuilder.append(",");
      if (i < 10) {
        stringBuilder.append("0");
      }
      stringBuilder.append(i);
    }
  }

  /**
   * Finds all time partitions (physical, plus virtual ones when enabled) of the configured datasets
   * within the lookback window.
   */
  @Override
  public List<FileSystemDataset> findDatasets()
      throws IOException {
    try {
      return doFindDatasets();
    } finally {
      // Recover ConfigurableGlobDatasetFinder.DATASET_FINDER_PATTERN_KEY config, which
      // findDatasets(String) temporarily overwrites in the shared Properties object.
      this.props.setProperty(ConfigurableGlobDatasetFinder.DATASET_FINDER_PATTERN_KEY, datasetPattern);
    }
  }

  private List<FileSystemDataset> doFindDatasets() throws IOException {
    // Find datasets
    List<FileSystemDataset> datasets = findDatasets(datasetPattern);
    // Compute partitions in theory based on startTime and endTime
    Set<String> computedPartitions = new HashSet<>();
    datasets.forEach(dataset -> computedPartitions.addAll(computePartitions(dataset)));
    // This is the final result
    List<FileSystemDataset> resultPartitions = new ArrayList<>(computedPartitions.size());
    // Find all physical dataset time partitions; the glob may over-match, so intersect with
    // the computed partition set and remove each match so only missing partitions remain.
    List<FileSystemDataset> actualPartitions = findDatasets(datasetPartitionPattern);
    String pathStr;
    for (FileSystemDataset physicalPartition : actualPartitions) {
      pathStr = physicalPartition.datasetRoot().toString();
      if (computedPartitions.contains(pathStr)) {
        resultPartitions.add(physicalPartition);
        computedPartitions.remove(pathStr);
      }
    }
    // Create virtual ones for the expected partitions with no physical directory.
    if (enableVirtualPartition) {
      computedPartitions.forEach(partition -> {
        log.info("Creating virtual partition {}", partition);
        resultPartitions.add(new SimpleFileSystemDataset(new Path(partition), true));
      });
    } else {
      log.info("Will not create virtual partitions");
    }
    return resultPartitions;
  }

  // Enumerates the expected partition paths of a dataset: one per granularity step in [startTime, endTime].
  private Collection<String> computePartitions(FileSystemDataset dataset) {
    List<String> partitions = new ArrayList<>();
    TimeIterator iterator = new TimeIterator(startTime, endTime, granularity);
    while (iterator.hasNext()) {
      partitions.add(new Path(dataset.datasetRoot(),
          partitionPrefix + timeFormatter.format(iterator.next())).toString());
    }
    return partitions;
  }

  // Runs a glob-based find for the given pattern by temporarily overwriting the pattern key in props.
  // Callers must restore the key afterwards (see findDatasets()).
  private List<FileSystemDataset> findDatasets(String pattern)
      throws IOException {
    this.props.setProperty(ConfigurableGlobDatasetFinder.DATASET_FINDER_PATTERN_KEY, pattern);
    DefaultFileSystemGlobFinder datasetFinder = new DefaultFileSystemGlobFinder(this.fs, this.props);
    return datasetFinder.findDatasets();
  }

  @Override
  public Path commonDatasetRoot() {
    return PathUtils.deepestNonGlobPath(new Path(this.datasetPattern));
  }
}
| 2,449 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/dataset/SimpleDatasetRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.dataset;
import com.typesafe.config.Config;
import lombok.AllArgsConstructor;
import lombok.Getter;
import org.apache.gobblin.dataset.Dataset;
import org.apache.gobblin.util.request_allocation.Request;
import org.apache.gobblin.util.request_allocation.Requestor;
import org.apache.gobblin.util.request_allocation.ResourceEstimator;
import org.apache.gobblin.util.request_allocation.ResourcePool;
import org.apache.gobblin.util.request_allocation.ResourceRequirement;
/**
 * A simple {@link Request} which represents a single {@link Dataset}.
 */
@AllArgsConstructor
public class SimpleDatasetRequest implements Request<SimpleDatasetRequest> {

  /** The single resource dimension used by {@link SimpleDatasetCountEstimator}: a plain dataset count. */
  public static final String SIMPLE_DATASET_COUNT_DIMENSION = "count";

  @Getter
  Dataset dataset;
  SimpleDatasetRequestor requestor;

  @Override
  public Requestor<SimpleDatasetRequest> getRequestor() {
    return requestor;
  }

  /**
   * A simple {@link ResourceEstimator} which counts {@link SimpleDatasetRequest}s as the only dimension:
   * every request costs exactly one unit of {@link #SIMPLE_DATASET_COUNT_DIMENSION}.
   */
  public static class SimpleDatasetCountEstimator implements ResourceEstimator<SimpleDatasetRequest> {
    static class Factory implements ResourceEstimator.Factory<SimpleDatasetRequest> {
      @Override
      public ResourceEstimator<SimpleDatasetRequest> create(Config config) {
        return new SimpleDatasetCountEstimator();
      }
    }

    // @Override added for consistency with Factory.create: this implements ResourceEstimator's method.
    @Override
    public ResourceRequirement estimateRequirement(SimpleDatasetRequest request, ResourcePool pool) {
      // Flat cost of 1 per request, independent of the dataset's actual size.
      return new ResourceRequirement.Builder(pool).setRequirement(SIMPLE_DATASET_COUNT_DIMENSION, 1).build();
    }
  }

  @Override
  public String toString() {
    return dataset.toString();
  }
}
| 2,450 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/dataset/DefaultFileSystemGlobFinder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.dataset;
import org.apache.gobblin.data.management.retention.profile.ConfigurableGlobDatasetFinder;
import org.apache.gobblin.dataset.FileSystemDataset;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import java.io.IOException;
import java.util.Properties;
/**
 * A subclass of {@link ConfigurableGlobDatasetFinder} which finds all the {@link FileSystemDataset}s
 * that match a given glob pattern.
 */
public class DefaultFileSystemGlobFinder extends ConfigurableGlobDatasetFinder<FileSystemDataset> {
  public DefaultFileSystemGlobFinder(FileSystem fs, Properties properties) throws IOException {
    super(fs, properties);
  }

  // Wraps each glob match in a lightweight SimpleFileSystemDataset rooted at the matched path.
  // NOTE(review): presumably overrides a hook declared in ConfigurableGlobDatasetFinder — confirm and add @Override.
  public FileSystemDataset datasetAtPath(final Path path) throws IOException {
    return new SimpleFileSystemDataset(path);
  }
}
| 2,451 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/source/DatasetFinderSource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.source;
import java.io.IOException;
import java.util.List;
import java.util.Objects;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.data.management.dataset.DatasetUtils;
import org.apache.gobblin.dataset.Dataset;
import org.apache.gobblin.dataset.IterableDatasetFinder;
import org.apache.gobblin.dataset.PartitionableDataset;
import org.apache.gobblin.source.WorkUnitStreamSource;
import org.apache.gobblin.source.workunit.BasicWorkUnitStream;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.source.workunit.WorkUnitStream;
import org.apache.gobblin.util.HadoopUtils;
import lombok.AllArgsConstructor;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
/**
 * An abstract source that uses a {@link org.apache.gobblin.dataset.DatasetsFinder} to find {@link Dataset}s and creates a
 * work unit for each one.
 *
 * <p>When {@code drilldownIntoPartitions} is set, each partition of a {@link PartitionableDataset} becomes its
 * own work unit; non-partitionable datasets still yield one work unit each.
 */
@Slf4j
public abstract class DatasetFinderSource<S, D> implements WorkUnitStreamSource<S, D> {
  protected final boolean drilldownIntoPartitions;

  /**
   * @param drilldownIntoPartitions if set to true, will process each partition of a {@link PartitionableDataset} as a
   *                                separate work unit.
   */
  public DatasetFinderSource(boolean drilldownIntoPartitions) {
    this.drilldownIntoPartitions = drilldownIntoPartitions;
  }

  /**
   * @return the {@link WorkUnit} for the input dataset, or null to skip it.
   */
  protected abstract WorkUnit workUnitForDataset(Dataset dataset);

  /**
   * @return the {@link WorkUnit} for the input partition, or null to skip it.
   */
  protected abstract WorkUnit workUnitForDatasetPartition(PartitionableDataset.DatasetPartition partition);

  @Override
  public List<WorkUnit> getWorkunits(SourceState state) {
    try {
      return createWorkUnitStream(state).collect(Collectors.toList());
    } catch (IOException ioe) {
      throw new RuntimeException(ioe);
    }
  }

  @Override
  public WorkUnitStream getWorkunitStream(SourceState state) {
    try {
      return new BasicWorkUnitStream.Builder(createWorkUnitStream(state).iterator()).build();
    } catch (IOException ioe) {
      throw new RuntimeException(ioe);
    }
  }

  /**
   * Can be overriden to specify a non-pluggable {@link org.apache.gobblin.dataset.DatasetsFinder}.
   * @throws IOException if the finder cannot be instantiated
   */
  protected IterableDatasetFinder createDatasetsFinder(SourceState state) throws IOException {
    return DatasetUtils.instantiateIterableDatasetFinder(state.getProperties(),
        HadoopUtils.getSourceFileSystem(state), null);
  }

  /**
   * Lazily maps the finder's dataset stream to work units. Null work units (datasets/partitions the
   * concrete source chooses to skip) are filtered out.
   */
  private Stream<WorkUnit> createWorkUnitStream(SourceState state) throws IOException {
    IterableDatasetFinder datasetsFinder = createDatasetsFinder(state);
    Stream<Dataset> datasetStream = datasetsFinder.getDatasetsStream(0, null);

    if (this.drilldownIntoPartitions) {
      return datasetStream.flatMap(dataset -> {
        if (dataset instanceof PartitionableDataset) {
          try {
            return (Stream<PartitionableDataset.DatasetPartition>) ((PartitionableDataset) dataset).getPartitions(0,
                null);
          } catch (IOException ioe) {
            // Fix: log the exception itself, not just a message — a bare message made
            // partition-listing failures impossible to diagnose.
            log.error("Failed to get partitions for dataset " + dataset.getUrn(), ioe);
            return Stream.empty();
          }
        } else {
          // Wrap non-partitionable datasets so downstream code can treat everything as a partition.
          return Stream.of(new DatasetWrapper(dataset));
        }
      }).map(this::workUnitForPartitionInternal).filter(Objects::nonNull);
    } else {
      return datasetStream.map(this::workUnitForDataset).filter(Objects::nonNull);
    }
  }

  /** Routes wrapped whole datasets to {@link #workUnitForDataset} and real partitions to {@link #workUnitForDatasetPartition}. */
  private WorkUnit workUnitForPartitionInternal(PartitionableDataset.DatasetPartition partition) {
    if (partition instanceof DatasetWrapper) {
      return workUnitForDataset(((DatasetWrapper) partition).dataset);
    } else {
      return workUnitForDatasetPartition(partition);
    }
  }

  /**
   * A wrapper around a {@link org.apache.gobblin.dataset.PartitionableDataset.DatasetPartition} that makes it look
   * like a {@link Dataset} for slightly easier to understand code.
   */
  @AllArgsConstructor
  protected static class DatasetWrapper implements PartitionableDataset.DatasetPartition {
    @Getter
    private final Dataset dataset;

    @Override
    public String getUrn() {
      return this.dataset.datasetURN();
    }
  }
}
| 2,452 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/source/LoopingDatasetFinderSource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.source;
import java.io.IOException;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Optional;
import java.util.Spliterator;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.collect.AbstractIterator;
import com.google.common.collect.Iterators;
import com.google.common.collect.Lists;
import com.google.common.collect.PeekingIterator;
import javax.annotation.Nullable;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.dataset.Dataset;
import org.apache.gobblin.dataset.IterableDatasetFinder;
import org.apache.gobblin.dataset.PartitionableDataset;
import org.apache.gobblin.dataset.URNIdentified;
import org.apache.gobblin.dataset.comparators.URNLexicographicalComparator;
import org.apache.gobblin.runtime.task.NoopTask;
import org.apache.gobblin.source.workunit.BasicWorkUnitStream;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.source.workunit.WorkUnitStream;
/**
* A source that processes datasets generated by a {@link org.apache.gobblin.dataset.DatasetsFinder}, processing a few of
* them each run, and continuing from where it left off in the next run. When it is done processing all the datasets, it
* starts over from the beginning. The datasets are processed in lexicographical order based on URN.
*
* TODO: handle retries
*/
@Slf4j
public abstract class LoopingDatasetFinderSource<S, D> extends DatasetFinderSource<S, D> {
public static final String MAX_WORK_UNITS_PER_RUN_KEY =
"gobblin.source.loopingDatasetFinderSource.maxWorkUnitsPerRun";
public static final int MAX_WORK_UNITS_PER_RUN = 10;
public static final String DATASET_PARTITION_DELIMITER = "@";
protected static final String DATASET_URN = "gobblin.source.loopingDatasetFinderSource.datasetUrn";
protected static final String PARTITION_URN = "gobblin.source.loopingDatasetFinderSource.partitionUrn";
protected static final String END_OF_DATASETS_KEY = "gobblin.source.loopingDatasetFinderSource.endOfDatasets";
protected static final String GLOBAL_WATERMARK_DATASET_KEY =
"gobblin.source.loopingDatasetFinderSource.globalWatermarkDataset";
private final URNLexicographicalComparator lexicographicalComparator = new URNLexicographicalComparator();
protected boolean isDatasetStateStoreEnabled;
  /**
   * @param drilldownIntoPartitions if set to true, will process each partition of a {@link PartitionableDataset} as a
   *                                separate work unit.
   */
  public LoopingDatasetFinderSource(boolean drilldownIntoPartitions) {
    super(drilldownIntoPartitions);
  }
@Override
public List<WorkUnit> getWorkunits(SourceState state) {
return Lists.newArrayList(getWorkunitStream(state).getMaterializedWorkUnitCollection());
}
@Override
public WorkUnitStream getWorkunitStream(SourceState state) {
return this.getWorkunitStream(state,false);
}
public WorkUnitStream getWorkunitStream(SourceState state, boolean isDatasetStateStoreEnabled) {
this.isDatasetStateStoreEnabled = isDatasetStateStoreEnabled;
try {
int maximumWorkUnits = state.getPropAsInt(MAX_WORK_UNITS_PER_RUN_KEY, MAX_WORK_UNITS_PER_RUN);
Preconditions.checkArgument(maximumWorkUnits > 0, "Max work units must be greater than 0!");
List<WorkUnitState> previousWorkUnitStates = (this.isDatasetStateStoreEnabled) ? state
.getPreviousWorkUnitStates(ConfigurationKeys.GLOBAL_WATERMARK_DATASET_URN)
: state.getPreviousWorkUnitStates();
Optional<WorkUnitState> maxWorkUnit = Optional.empty();
for (WorkUnitState workUnitState : previousWorkUnitStates) {
if (workUnitState.getPropAsBoolean(GLOBAL_WATERMARK_DATASET_KEY, false)) {
maxWorkUnit = Optional.of(workUnitState);
break;
}
}
IterableDatasetFinder datasetsFinder = createDatasetsFinder(state);
Stream<Dataset> datasetStream =
datasetsFinder.getDatasetsStream(Spliterator.SORTED, this.lexicographicalComparator);
datasetStream = sortStreamLexicographically(datasetStream);
String previousDatasetUrnWatermark = null;
String previousPartitionUrnWatermark = null;
if (maxWorkUnit.isPresent() && !maxWorkUnit.get().getPropAsBoolean(END_OF_DATASETS_KEY, false)) {
previousDatasetUrnWatermark = maxWorkUnit.get().getProp(DATASET_URN);
previousPartitionUrnWatermark = maxWorkUnit.get().getProp(PARTITION_URN);
}
return new BasicWorkUnitStream.Builder(getWorkUnitIterator(datasetStream.iterator(), previousDatasetUrnWatermark,
previousPartitionUrnWatermark, maximumWorkUnits)).setFiniteStream(true).build();
} catch (IOException ioe) {
throw new RuntimeException(ioe);
}
}
  /**
   * A factory for the {@link WorkUnit} iterator backing the stream returned by
   * {@link #getWorkunitStream(SourceState, boolean)}. Subclasses may override to supply a different
   * iterator implementation.
   *
   * @param datasetIterator datasets sorted lexicographically by URN
   * @param previousDatasetUrnWatermark URN of the last dataset processed by the previous run, or null
   * @param previousPartitionUrnWatermark URN of the last partition processed within that dataset, or null
   * @param maximumWorkUnits cap on the number of real work units emitted this run
   * @throws IOException if iterator construction fails
   */
  protected Iterator<WorkUnit> getWorkUnitIterator(Iterator<Dataset> datasetIterator, String previousDatasetUrnWatermark,
      @Nullable String previousPartitionUrnWatermark, int maximumWorkUnits) throws IOException {
    return new DeepIterator(datasetIterator, previousDatasetUrnWatermark, previousPartitionUrnWatermark,
        maximumWorkUnits);
  }
/**
* A deep iterator that advances input streams until the correct position, then possibly iterates over partitions
* of {@link PartitionableDataset}s.
*/
protected class DeepIterator extends AbstractIterator<WorkUnit> {
protected final Iterator<Dataset> baseIterator;
protected final int maxWorkUnits;
protected int generatedWorkUnits = 0;
protected Dataset previousDataset;
private Iterator<PartitionableDataset.DatasetPartition> currentPartitionIterator;
private PartitionableDataset.DatasetPartition previousPartition;
    /**
     * @param baseIterator datasets sorted lexicographically by URN
     * @param previousDatasetUrnWatermark URN of the last dataset processed by the previous run, or null
     * @param previousPartitionUrnWatermark URN of the last partition processed within that dataset, or null
     * @param maxWorkUnits cap on the number of real work units to emit this run
     */
    public DeepIterator(Iterator<Dataset> baseIterator, String previousDatasetUrnWatermark,
        String previousPartitionUrnWatermark, int maxWorkUnits)
        throws IOException {
      this.maxWorkUnits = maxWorkUnits;
      this.baseIterator = baseIterator;
      // Fast-forward past all datasets already processed. If the watermark dataset itself is found
      // and is partitionable, also fast-forward past its already-processed partitions.
      Dataset equalDataset =
          advanceUntilLargerThan(Iterators.peekingIterator(this.baseIterator), previousDatasetUrnWatermark);
      if (drilldownIntoPartitions && equalDataset != null && equalDataset instanceof PartitionableDataset) {
        this.currentPartitionIterator = getPartitionIterator((PartitionableDataset) equalDataset);
        advanceUntilLargerThan(Iterators.peekingIterator(this.currentPartitionIterator), previousPartitionUrnWatermark);
      } else {
        this.currentPartitionIterator = Collections.emptyIterator();
      }
    }
    /**
     * Advance an iterator until the next value is larger than the reference.
     * @return the last value polled if it is equal to reference, or null otherwise.
     */
    @Nullable
    private <T extends URNIdentified> T advanceUntilLargerThan(PeekingIterator<T> it, String reference) {
      if (reference == null) {
        // No watermark: leave the iterator untouched.
        return null;
      }
      int comparisonResult = -1;
      // Consume every element whose URN is strictly smaller than the reference.
      while (it.hasNext() && (comparisonResult = lexicographicalComparator.compare(it.peek(), reference)) < 0) {
        it.next();
      }
      // comparisonResult == 0 means the loop stopped on an exact match (hasNext() was true at that
      // point), so next() is safe; otherwise the iterator is exhausted or sits on a larger element.
      return comparisonResult == 0 ? it.next() : null;
    }
private Iterator<PartitionableDataset.DatasetPartition> getPartitionIterator(PartitionableDataset dataset) {
try {
return this.currentPartitionIterator = sortStreamLexicographically(
dataset.getPartitions(Spliterator.SORTED, LoopingDatasetFinderSource.this.lexicographicalComparator))
.iterator();
} catch (IOException ioe) {
log.error("Failed to get partitions for dataset " + dataset.getUrn());
return Collections.emptyIterator();
}
}
@Override
protected WorkUnit computeNext() {
if (this.generatedWorkUnits == this.maxWorkUnits) {
/**
* Add a special noop workunit to the end of the stream. This workunit contains the Dataset/Partition
* URN of the "last" dataset/partition (in lexicographic order). This is useful to
* efficiently determine the next dataset/partition to process in the subsequent run.
*/
this.generatedWorkUnits++;
return generateNoopWorkUnit();
} else if (this.generatedWorkUnits > this.maxWorkUnits) {
return endOfData();
}
WorkUnit resultWU = doComputeNext();
if (resultWU == null) {
resultWU = generateNoopWorkUnit();
this.generatedWorkUnits = Integer.MAX_VALUE;
resultWU.setProp(END_OF_DATASETS_KEY, true);
}
return resultWU;
}
/**
* A extensible method that generate a workunit based on the Iterator generated from {@link #getWorkUnitIterator}.
* It interacts with {@link #baseIterator} and {@link #currentPartitionIterator} to know the very next
* dataset/partition to be converted into a workunit.
*/
protected WorkUnit doComputeNext() {
while (this.baseIterator.hasNext() || this.currentPartitionIterator.hasNext()) {
if (this.currentPartitionIterator != null && this.currentPartitionIterator.hasNext()) {
PartitionableDataset.DatasetPartition partition = this.currentPartitionIterator.next();
WorkUnit workUnit = workUnitForDatasetPartition(partition);
if (workUnit == null) {
continue;
}
addDatasetInfoToWorkUnit(workUnit, partition.getDataset());
addPartitionInfoToWorkUnit(workUnit, partition);
this.previousDataset = partition.getDataset();
this.previousPartition = partition;
this.generatedWorkUnits++;
return workUnit;
}
Dataset dataset = this.baseIterator.next();
if (drilldownIntoPartitions && dataset instanceof PartitionableDataset) {
this.currentPartitionIterator = getPartitionIterator((PartitionableDataset) dataset);
} else {
WorkUnit workUnit = workUnitForDataset(dataset);
if (workUnit == null) {
continue;
}
addDatasetInfoToWorkUnit(workUnit, dataset);
this.previousDataset = dataset;
this.generatedWorkUnits++;
return workUnit;
}
}
return null;
}
/**
* It is not necessary the case that each workunit is corresponding to a single {@link Dataset},
* thus we make this method extensible.
*/
protected void addDatasetInfoToWorkUnit(WorkUnit workUnit, Dataset dataset) {
if (isDatasetStateStoreEnabled) {
workUnit.setProp(ConfigurationKeys.DATASET_URN_KEY, dataset.getUrn());
}
}
private void addPartitionInfoToWorkUnit(WorkUnit workUnit, PartitionableDataset.DatasetPartition partition) {
if (isDatasetStateStoreEnabled) {
workUnit.setProp(ConfigurationKeys.DATASET_URN_KEY,
Joiner.on(DATASET_PARTITION_DELIMITER).join(partition.getDataset().getUrn(), partition.getUrn()));
}
}
private WorkUnit generateNoopWorkUnit() {
WorkUnit workUnit = NoopTask.noopWorkunit();
workUnit.setProp(GLOBAL_WATERMARK_DATASET_KEY, true);
if (previousDataset != null) {
workUnit.setProp(DATASET_URN, previousDataset.getUrn());
}
if (drilldownIntoPartitions && this.previousPartition != null) {
workUnit.setProp(PARTITION_URN, previousPartition.getUrn());
}
if (isDatasetStateStoreEnabled) {
workUnit.setProp(ConfigurationKeys.DATASET_URN_KEY, ConfigurationKeys.GLOBAL_WATERMARK_DATASET_URN);
}
return workUnit;
}
}
/**
* Sort input stream lexicographically. Noop if the input stream is already sorted.
*/
/**
 * Sort input stream lexicographically. Noop if the input stream is already sorted
 * by {@code lexicographicalComparator}.
 *
 * @param inputStream stream to sort.
 * @return a stream sorted by {@code lexicographicalComparator}.
 */
private <T extends URNIdentified> Stream<T> sortStreamLexicographically(Stream<T> inputStream) {
  Spliterator<T> spliterator = inputStream.spliterator();
  // Note: a SORTED spliterator whose elements are sorted by natural ordering reports a *null*
  // comparator, so invoke equals() on our (never-null) comparator to avoid an NPE in that case.
  if (spliterator.hasCharacteristics(Spliterator.SORTED)
      && this.lexicographicalComparator.equals(spliterator.getComparator())) {
    return StreamSupport.stream(spliterator, false);
  }
  return StreamSupport.stream(spliterator, false).sorted(this.lexicographicalComparator);
}
}
| 2,453 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/trash/TestTrash.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.trash;
import java.io.IOException;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import com.google.common.collect.Lists;
import lombok.Data;
import lombok.Getter;
/**
* Implementation of {@link ProxiedTrash} to use for testing. All operations in this implementation are noop, but user
* can get all delete operations executed using {@link #getDeleteOperations}. This implementation does not use the
* file system at all, so user can use a minimally mocked file system.
*
* <p>
* This class optionally support simulating file system delay with an internal clock. The clock does not advance
* by itself, allowing programmers fine testing over a file system with delay.
* </p>
*/
public class TestTrash extends MockTrash {

  // Property key controlling how many simulated clock ticks each trash operation is delayed.
  private static final String DELAY_TICKS_KEY = "gobblin.trash.test.delays.ticks";

  /**
   * Creates {@link java.util.Properties} that will generate a {@link org.apache.gobblin.data.management.trash.TestTrash} when
   * using {@link org.apache.gobblin.data.management.trash.TrashFactory}.
   */
  public static Properties propertiesForTestTrash() {
    Properties properties = new Properties();
    properties.setProperty(TrashFactory.TRASH_TEST, Boolean.toString(true));
    return properties;
  }

  /**
   * Mutates properties so that creating a TestTrash with this properties object will simulate delay in the
   * filesystem.
   *
   * <p>
   * When simulating delay, any operation related to the filesystem will initially block indefinitely. The test
   * trash uses an internal clock that must be advanced by the user (it does not advance by itself).
   * Operations are blocked for a specified number of ticks in the clock. To tick, the user must call the
   * {@link #tick} method.
   * </p>
   *
   * <p>
   * For example, if delay is 2:
   * * User calls testTrash.moveToTrash(new Path("/")) -> call blocks indefinitely, nothing added to delete operations
   *   list.
   * * User calls testTrash.tick() -> call still blocked.
   * * User calls testTrash.tick() -> moveToTrash call returns, operation added to delete operations list.
   * </p>
   *
   * @param properties {@link Properties} used for building a test trash.
   * @param delay All calls to {@link TestTrash} involving file system will simulate a delay of this many ticks.
   */
  public static void simulateDelay(Properties properties, long delay) {
    properties.setProperty(DELAY_TICKS_KEY, Long.toString(delay));
  }

  /**
   * Abstraction for a delete operation. Stores deleted {@link org.apache.hadoop.fs.Path} and user proxied for the
   * deletion. When calling {@link #moveToTrash}, {@link #user} is set to null.
   */
  @Data
  public static class DeleteOperation {
    private final Path path;
    private final String user;
  }

  // All delete operations performed so far. NOTE(review): exposed via the getter without
  // synchronization; callers should only inspect it when no operations are in flight.
  @Getter
  private final List<DeleteOperation> deleteOperations;
  // User this trash was created for; recorded on moveToTrashAsOwner operations.
  private final String user;
  // Number of ticks each operation is delayed (0 = operations complete immediately).
  private final long delay;
  // Current value of the simulated clock; advanced only by tick().
  private long clockState;
  // Guards clockState and the two conditions below.
  private final Lock lock;
  // Signaled by tick() when the clock advances; awaited by delayed operations.
  private final Condition clockStateUpdated;
  // Signaled by operations when they have acknowledged a tick; awaited by tick().
  private final Condition signalReceived;
  // Number of operations that have acknowledged the current tick.
  private final AtomicLong callsReceivedSignal;
  // Number of operations currently blocked waiting for a tick.
  private final AtomicLong operationsWaiting;
  // Total number of trash operations ever requested (including ones still blocked).
  private final AtomicLong operationsReceived;
  // Whether the originating properties requested simulate mode.
  @Getter
  private final boolean simulate;
  // Whether the originating properties requested skip-trash mode.
  @Getter
  private final boolean skipTrash;

  /**
   * Builds a test trash. The supplied properties are augmented (see {@link #propertiesForConstruction})
   * with a noop snapshot cleanup policy and a fixed trash location so no real file system is touched.
   */
  public TestTrash(FileSystem fs, Properties props, String user) throws IOException {
    super(fs, propertiesForConstruction(props), user);
    this.user = user;
    this.deleteOperations = Lists.newArrayList();
    this.simulate =
        props.containsKey(TrashFactory.SIMULATE) && Boolean.parseBoolean(props.getProperty(TrashFactory.SIMULATE));
    this.skipTrash =
        props.containsKey(TrashFactory.SKIP_TRASH) && Boolean.parseBoolean(props.getProperty(TrashFactory.SKIP_TRASH));
    this.operationsReceived = new AtomicLong();
    this.lock = new ReentrantLock();
    this.clockStateUpdated = this.lock.newCondition();
    this.signalReceived = this.lock.newCondition();
    this.clockState = 0;
    this.operationsWaiting = new AtomicLong();
    this.callsReceivedSignal = new AtomicLong();
    if (props.containsKey(DELAY_TICKS_KEY)) {
      this.delay = Long.parseLong(props.getProperty(DELAY_TICKS_KEY));
    } else {
      this.delay = 0;
    }
  }

  /** Records the delete (user = null) instead of touching the file system; always reports success. */
  @Override
  public boolean moveToTrash(Path path) throws IOException {
    this.operationsReceived.incrementAndGet();
    addDeleteOperation(new DeleteOperation(path, null));
    return true;
  }

  /** Records the delete together with the proxied user; always reports success. */
  @Override
  public boolean moveToTrashAsUser(Path path, String user) throws IOException {
    this.operationsReceived.incrementAndGet();
    addDeleteOperation(new DeleteOperation(path, user));
    return true;
  }

  /** Equivalent to {@link #moveToTrashAsUser} with the user this trash was constructed for. */
  @Override
  public boolean moveToTrashAsOwner(Path path) throws IOException {
    return moveToTrashAsUser(path, this.user);
  }

  /** @return total number of trash operations requested, including operations still blocked on the clock. */
  public long getOperationsReceived() {
    return this.operationsReceived.get();
  }

  /** @return number of operations currently blocked waiting for a {@link #tick}. */
  public long getOperationsWaiting() {
    return this.operationsWaiting.get();
  }

  /**
   * Advance the internal clock by one tick. The call will block until all appropriate threads finish adding their
   * {@link DeleteOperation}s to the list.
   */
  public void tick() {
    this.lock.lock();
    try {
      // Advance clock
      this.clockState++;
      // Acquire lock, register how many threads are waiting for signal
      long callsAwaitingSignalOld = this.operationsWaiting.get();
      this.callsReceivedSignal.set(0);
      this.operationsWaiting.set(0);
      // Send signal
      this.clockStateUpdated.signalAll();
      while (this.callsReceivedSignal.get() < callsAwaitingSignalOld) {
        // this will release the lock, and it will periodically compare the number of threads that were awaiting
        // signal against the number of threads that have already received the signal. Therefore, this statement
        // will block until all threads have acked signal.
        this.signalReceived.await();
      }
    } catch (InterruptedException ie) {
      // Interrupted
    } finally {
      this.lock.unlock();
    }
  }

  /**
   * Blocks the calling thread for {@link #delay} ticks of the simulated clock, then appends the
   * operation to {@link #deleteOperations}. With delay 0 the operation is recorded immediately.
   */
  private void addDeleteOperation(DeleteOperation dop) {
    // Acquire lock
    this.lock.lock();
    // Figure out when the operation can return
    long executeAt = this.clockState + this.delay;
    boolean firstLoop = true;
    try {
      // If delay is 0, this continues immediately.
      while (this.clockState < executeAt) {
        // If this is not the first loop, it means we have received a signal from tick, but still not at
        // appropriate clock state. Ack the receive (this is done here because if it is ready to "delete", it should
        // only ack after actually adding the DeleteOperation to the list).
        if (!firstLoop) {
          this.callsReceivedSignal.incrementAndGet();
          this.signalReceived.signalAll();
        }
        firstLoop = false;
        // Add itself to the list of calls awaiting signal
        this.operationsWaiting.incrementAndGet();
        // Await for signal that the clock has been updated
        this.clockStateUpdated.await();
      }
      // Perform "delete" operation, i.e. add DeleteOperation to list
      this.deleteOperations.add(dop);
      // Ack receipt of signal
      this.callsReceivedSignal.incrementAndGet();
      this.signalReceived.signal();
    } catch (InterruptedException ie) {
      // Interrupted
    } finally {
      this.lock.unlock();
    }
  }

  // Copies the user's properties and overrides the snapshot cleanup policy and trash location
  // so the superclass constructor never performs real file-system work.
  private static Properties propertiesForConstruction(Properties properties) {
    Properties newProperties = new Properties();
    newProperties.putAll(properties);
    newProperties.setProperty(Trash.SNAPSHOT_CLEANUP_POLICY_CLASS_KEY,
        NoopSnapshotCleanupPolicy.class.getCanonicalName());
    newProperties.setProperty(Trash.TRASH_LOCATION_KEY, "/test/path");
    return newProperties;
  }
}
| 2,454 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/trash/AsyncTrash.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.trash;
import org.apache.gobblin.util.Decorator;
import org.apache.gobblin.util.ExecutorsUtils;
import org.apache.gobblin.util.executors.ScalingThreadPoolExecutor;
import java.io.Closeable;
import java.io.IOException;
import java.util.Properties;
import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Optional;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
/**
* Implementation of {@link Trash} that deletes files asynchronously and in parallel.
*
* <p>
* This implementation is not built through {@link TrashFactory} because coder must be aware that the trash
* implementation is asynchronous. However, internally it uses {@link TrashFactory} to instantiate the trash
* implementation that will actually perform the deletes. This class acts as a {@link Decorator} of the
* inner trash.
* </p>
*
* <p>
* Trash methods will always return true, regardless of success of the actual trash operation. However, additional
* methods are provided to get a future for the operation.
* </p>
*/
public class AsyncTrash implements GobblinProxiedTrash, Closeable, Decorator {

  /** Property key for the maximum number of threads used for parallel deletes. */
  public static final String MAX_DELETING_THREADS_KEY = "gobblin.trash.async.max.deleting.threads";
  public static final int DEFAULT_MAX_DELETING_THREADS = 100;

  private static final Logger LOGGER = LoggerFactory.getLogger(AsyncTrash.class);

  /** Trash implementation that actually performs the (synchronous) deletes. */
  private final ProxiedTrash innerTrash;
  /** Scaling thread pool on which delete operations are scheduled. */
  private final ListeningExecutorService executor;

  public AsyncTrash(FileSystem fs, Properties properties) throws IOException {
    this(fs, properties, UserGroupInformation.getCurrentUser().getShortUserName());
  }

  /**
   * @param fs {@link FileSystem} where the trash is located.
   * @param properties configuration; see {@link #MAX_DELETING_THREADS_KEY}.
   * @param user user the inner {@link ProxiedTrash} is created for.
   */
  public AsyncTrash(FileSystem fs, Properties properties, String user) throws IOException {
    int maxDeletingThreads = DEFAULT_MAX_DELETING_THREADS;
    if (properties.containsKey(MAX_DELETING_THREADS_KEY)) {
      maxDeletingThreads = Integer.parseInt(properties.getProperty(MAX_DELETING_THREADS_KEY));
    }
    this.innerTrash = TrashFactory.createProxiedTrash(fs, properties, user);
    // Exiting executor so pending deletes do not prevent JVM shutdown indefinitely.
    this.executor = ExecutorsUtils.loggingDecorator(
        MoreExecutors.getExitingExecutorService(ScalingThreadPoolExecutor.newScalingThreadPool(0, maxDeletingThreads,
            100, ExecutorsUtils.newThreadFactory(Optional.of(LOGGER), Optional.of("Async-trash-delete-pool-%d")))));
  }

  /** Schedules the delete asynchronously. Always returns true regardless of the eventual outcome. */
  @Override
  public boolean moveToTrashAsUser(Path path, String user) throws IOException {
    moveToTrashAsUserFuture(path, user);
    return true;
  }

  /**
   * Schedules a {@link ProxiedTrash#moveToTrashAsUser} and returns a future for this operation.
   * @param path {@link Path} to delete.
   * @param user User to execute the operation as.
   * @return true if operation succeeded.
   */
  public ListenableFuture<Boolean> moveToTrashAsUserFuture(final Path path, final String user) {
    return this.executor.submit(new Callable<Boolean>() {
      @Override
      public Boolean call() throws IOException {
        return AsyncTrash.this.innerTrash.moveToTrashAsUser(path, user);
      }
    });
  }

  /** Schedules the delete asynchronously. Always returns true regardless of the eventual outcome. */
  public boolean moveToTrashAsOwner(Path path) {
    moveToTrashAsOwnerFuture(path);
    return true;
  }

  /**
   * Schedules a {@link ProxiedTrash#moveToTrashAsOwner} and returns a future for this operation.
   * @param path {@link Path} to delete.
   * @return true if operation succeeded.
   */
  public ListenableFuture<Boolean> moveToTrashAsOwnerFuture(final Path path) {
    return this.executor.submit(new Callable<Boolean>() {
      @Override
      public Boolean call() throws IOException {
        return AsyncTrash.this.innerTrash.moveToTrashAsOwner(path);
      }
    });
  }

  /** Schedules the delete asynchronously. Always returns true regardless of the eventual outcome. */
  @Override
  public boolean moveToTrash(Path path) throws IOException {
    moveToTrashFuture(path);
    return true;
  }

  /**
   * Schedules a {@link ProxiedTrash#moveToTrash} and returns a future for this operation.
   * @param path {@link Path} to delete.
   * @return true if operation succeeded.
   */
  public ListenableFuture<Boolean> moveToTrashFuture(final Path path) {
    return this.executor.submit(new Callable<Boolean>() {
      @Override
      public Boolean call() throws IOException {
        return AsyncTrash.this.innerTrash.moveToTrash(path);
      }
    });
  }

  @Override
  public Object getDecoratedObject() {
    return this.innerTrash;
  }

  /**
   * Stops accepting new deletes and waits up to 5 hours for pending deletes to finish.
   * Fix over previous version: the result of awaitTermination is checked (a timeout now forces
   * shutdown and is logged), and an interrupt re-asserts the thread's interrupt status instead
   * of being silently swallowed.
   */
  @Override
  public void close() throws IOException {
    try {
      this.executor.shutdown();
      if (!this.executor.awaitTermination(5, TimeUnit.HOURS)) {
        LOGGER.warn("Timed out waiting for pending trash operations to complete; forcing shutdown.");
        this.executor.shutdownNow();
      }
    } catch (InterruptedException ie) {
      this.executor.shutdownNow();
      // Restore the interrupt status so callers can observe the interruption.
      Thread.currentThread().interrupt();
    }
  }
}
| 2,455 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/trash/Trash.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.trash;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Properties;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.UserGroupInformation;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.gobblin.util.PathUtils;
/**
* Flexible implementation of Trash similar to Hadoop trash. Allows for injecting cleanup policies for snapshots.
*/
public class Trash implements GobblinTrash {
private static final Logger LOG = LoggerFactory.getLogger(Trash.class);
private static final FsPermission PERM = new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE);
private static final FsPermission ALL_PERM = new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL);
public static final String TRASH_CLASS_KEY = "trash.class";
/**
* Location of trash directory in file system. The location can include a token $USER that will be automatically
* replaced by the name of the active user.
*/
public static final String TRASH_LOCATION_KEY = "gobblin.trash.location";
public static final String SNAPSHOT_CLEANUP_POLICY_CLASS_KEY = "gobblin.trash.snapshot.cleanup.policy.class";
public static final String TRASH_SNAPSHOT_PREFIX = "_TRASH_SNAPSHOT_";
public static final String TRASH_IDENTIFIER_FILE = "_THIS_IS_TRASH_DIRECTORY";
public static final String DEFAULT_TRASH_DIRECTORY = "_GOBBLIN_TRASH";
public static final DateTimeFormatter TRASH_SNAPSHOT_NAME_FORMATTER =
DateTimeFormat.forPattern(String.format("'%s'yyyyMMddHHmmss", TRASH_SNAPSHOT_PREFIX)).withZone(DateTimeZone.UTC);
public static final PathFilter TRASH_SNAPSHOT_PATH_FILTER = new PathFilter() {
@Override
public boolean accept(Path path) {
return !path.getName().equals(TRASH_IDENTIFIER_FILE) && path.getName().startsWith(TRASH_SNAPSHOT_PREFIX);
}
};
public static final PathFilter TRASH_NOT_SNAPSHOT_PATH_FILTER = new PathFilter() {
@Override
public boolean accept(Path path) {
return !path.getName().equals(TRASH_IDENTIFIER_FILE) && !path.getName().startsWith(TRASH_SNAPSHOT_PREFIX);
}
};
/**
* Get trash location.
* @return {@link org.apache.hadoop.fs.Path} for trash directory.
* @throws IOException
*/
public Path getTrashLocation() throws IOException {
return this.trashLocation;
}
/**
* Create location of Trash directory. Parsed from props at key {@link #TRASH_LOCATION_KEY}, defaulting to
* /home/directory/_GOBBLIN_TRASH.
* @param fs {@link org.apache.hadoop.fs.FileSystem} where trash should be found.
* @param props {@link java.util.Properties} containing trash configuration.
* @param user If the trash location contains the token $USER, the token will be replaced by the value of user.
* @return {@link org.apache.hadoop.fs.Path} for trash directory.
* @throws java.io.IOException
*/
protected Path createTrashLocation(FileSystem fs, Properties props, String user) throws IOException {
Path trashLocation;
if (props.containsKey(TRASH_LOCATION_KEY)) {
trashLocation = new Path(props.getProperty(TRASH_LOCATION_KEY).replaceAll("\\$USER", user));
} else {
trashLocation = new Path(fs.getHomeDirectory(), DEFAULT_TRASH_DIRECTORY);
LOG.info("Using default trash location at " + trashLocation);
}
if (!trashLocation.isAbsolute()) {
throw new IllegalArgumentException("Trash location must be absolute. Found " + trashLocation.toString());
}
Path qualifiedTrashLocation = fs.makeQualified(trashLocation);
ensureTrashLocationExists(fs, qualifiedTrashLocation);
return qualifiedTrashLocation;
}
protected void ensureTrashLocationExists(FileSystem fs, Path trashLocation) throws IOException {
if (fs.exists(trashLocation)) {
if (!fs.isDirectory(trashLocation)) {
throw new IOException(String.format("Trash location %s is not a directory.", trashLocation));
}
if (!fs.exists(new Path(trashLocation, TRASH_IDENTIFIER_FILE))) {
// If trash identifier file is not present, directory might have been created by user.
// Add trash identifier file only if directory is empty.
if (fs.listStatus(trashLocation).length > 0) {
throw new IOException(String.format("Trash directory %s exists, but it does not look like a trash directory. "
+ "File: %s missing and directory is not empty.", trashLocation, TRASH_IDENTIFIER_FILE));
} else if (!fs.createNewFile(new Path(trashLocation, TRASH_IDENTIFIER_FILE))) {
throw new IOException(String.format("Failed to create file %s in existing trash directory %s.",
TRASH_IDENTIFIER_FILE, trashLocation));
}
}
} else if (!(safeFsMkdir(fs, trashLocation.getParent(), ALL_PERM) && safeFsMkdir(fs, trashLocation, PERM)
&& fs.createNewFile(new Path(trashLocation, TRASH_IDENTIFIER_FILE)))) {
// Failed to create directory or create trash identifier file.
throw new IOException("Failed to create trash directory at " + trashLocation.toString());
}
}
protected final FileSystem fs;
private final Path trashLocation;
private final SnapshotCleanupPolicy snapshotCleanupPolicy;
/**
* @deprecated Use {@link org.apache.gobblin.data.management.trash.TrashFactory}.
*/
@Deprecated
public Trash(FileSystem fs) throws IOException {
this(fs, new Properties());
}
/**
* @deprecated Use {@link org.apache.gobblin.data.management.trash.TrashFactory}.
*/
@Deprecated
public Trash(FileSystem fs, Properties props) throws IOException {
this(fs, props, UserGroupInformation.getCurrentUser().getUserName());
}
protected Trash(FileSystem fs, Properties props, String user) throws IOException {
this.fs = fs;
this.trashLocation = createTrashLocation(fs, props, user);
try {
Class<?> snapshotCleanupPolicyClass = Class.forName(props.getProperty(SNAPSHOT_CLEANUP_POLICY_CLASS_KEY,
TimeBasedSnapshotCleanupPolicy.class.getCanonicalName()));
this.snapshotCleanupPolicy =
(SnapshotCleanupPolicy) snapshotCleanupPolicyClass.getConstructor(Properties.class).newInstance(props);
} catch (Exception exception) {
throw new IllegalArgumentException("Could not create snapshot cleanup policy with class " + props
.getProperty(SNAPSHOT_CLEANUP_POLICY_CLASS_KEY, TimeBasedSnapshotCleanupPolicy.class.getCanonicalName()),
exception);
}
}
public static Trash getTrash(FileSystem fs, Properties props, String user) throws IOException {
if (props.contains(TRASH_CLASS_KEY)) {
return GobblinConstructorUtils.invokeConstructor(Trash.class, props.getProperty(TRASH_CLASS_KEY), fs, props, user);
} else {
return new Trash(fs, props, user);
}
}
/**
* Move a path to trash. The absolute path of the input path will be replicated under the trash directory.
* @param path {@link org.apache.hadoop.fs.FileSystem} path to move to trash.
* @return true if move to trash was done successfully.
* @throws IOException
*/
@Override
public boolean moveToTrash(Path path) throws IOException {
Path fullyResolvedPath = path.isAbsolute() ? path : new Path(this.fs.getWorkingDirectory(), path);
Path targetPathInTrash = PathUtils.mergePaths(this.trashLocation, fullyResolvedPath);
if (!this.fs.exists(targetPathInTrash.getParent())) {
this.fs.mkdirs(targetPathInTrash.getParent());
} else if (this.fs.exists(targetPathInTrash)) {
targetPathInTrash = targetPathInTrash.suffix("_" + System.currentTimeMillis());
}
return this.fs.rename(fullyResolvedPath, targetPathInTrash);
}
/**
* Moves all current contents of trash directory into a snapshot directory with current timestamp.
* @throws IOException
*/
public void createTrashSnapshot() throws IOException {
FileStatus[] pathsInTrash = this.fs.listStatus(this.trashLocation, TRASH_NOT_SNAPSHOT_PATH_FILTER);
if (pathsInTrash.length <= 0) {
LOG.info("Nothing in trash. Will not create snapshot.");
return;
}
Path snapshotDir = new Path(this.trashLocation, new DateTime().toString(TRASH_SNAPSHOT_NAME_FORMATTER));
if (this.fs.exists(snapshotDir)) {
throw new IOException("New snapshot directory " + snapshotDir.toString() + " already exists.");
}
if (!safeFsMkdir(fs, snapshotDir, PERM)) {
throw new IOException("Failed to create new snapshot directory at " + snapshotDir.toString());
}
LOG.info(String.format("Moving %d paths in Trash directory to newly created snapshot at %s.", pathsInTrash.length,
snapshotDir.toString()));
int pathsFailedToMove = 0;
for (FileStatus fileStatus : pathsInTrash) {
Path pathRelativeToTrash = PathUtils.relativizePath(fileStatus.getPath(), this.trashLocation);
Path targetPath = new Path(snapshotDir, pathRelativeToTrash);
boolean movedThisPath = true;
try {
movedThisPath = this.fs.rename(fileStatus.getPath(), targetPath);
} catch (IOException exception) {
LOG.error("Failed to move path " + fileStatus.getPath().toString() + " to snapshot.", exception);
pathsFailedToMove += 1;
continue;
}
if (!movedThisPath) {
LOG.error("Failed to move path " + fileStatus.getPath().toString() + " to snapshot.");
pathsFailedToMove += 1;
}
}
if (pathsFailedToMove > 0) {
LOG.error(
String.format("Failed to move %d paths to the snapshot at %s.", pathsFailedToMove, snapshotDir.toString()));
}
}
/**
* For each existing trash snapshot, uses a {@link org.apache.gobblin.data.management.trash.SnapshotCleanupPolicy} to determine whether
* the snapshot should be deleted. If so, delete it permanently.
*
* <p>
* Each existing snapshot will be passed to {@link org.apache.gobblin.data.management.trash.SnapshotCleanupPolicy#shouldDeleteSnapshot}
* from oldest to newest, and will be deleted if the method returns true.
* </p>
*
* @throws IOException
*/
public void purgeTrashSnapshots() throws IOException {
List<FileStatus> snapshotsInTrash =
Arrays.asList(this.fs.listStatus(this.trashLocation, TRASH_SNAPSHOT_PATH_FILTER));
Collections.sort(snapshotsInTrash, new Comparator<FileStatus>() {
@Override
public int compare(FileStatus o1, FileStatus o2) {
return TRASH_SNAPSHOT_NAME_FORMATTER.parseDateTime(o1.getPath().getName())
.compareTo(TRASH_SNAPSHOT_NAME_FORMATTER.parseDateTime(o2.getPath().getName()));
}
});
int totalSnapshots = snapshotsInTrash.size();
int snapshotsDeleted = 0;
for (FileStatus snapshot : snapshotsInTrash) {
if (this.snapshotCleanupPolicy.shouldDeleteSnapshot(snapshot, this)) {
try {
boolean successfullyDeleted = this.fs.delete(snapshot.getPath(), true);
if (successfullyDeleted) {
snapshotsDeleted++;
} else {
LOG.error("Failed to delete snapshot " + snapshot.getPath());
}
} catch (IOException exception) {
LOG.error("Failed to delete snapshot " + snapshot.getPath(), exception);
}
}
}
LOG.info(String.format("Deleted %d out of %d existing snapshots.", snapshotsDeleted, totalSnapshots));
}
/**
* Safe creation of trash folder to ensure thread-safe.
* @throws IOException
*/
private boolean safeFsMkdir(FileSystem fs, Path f, FsPermission permission) throws IOException {
try {
return fs.mkdirs(f, permission);
} catch (IOException e) {
// To handle the case when trash folder is created by other threads
// The case is rare and we don't put synchronized keywords for performance consideration.
if (!fs.exists(f)) {
throw new IOException("Failed to create trash folder while it is still not existed yet.");
} else {
LOG.debug("Target folder %s has been created by other threads.", f.toString());
return true;
}
}
}
}
| 2,456 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/trash/TrashFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.trash;
import java.io.IOException;
import java.util.Properties;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.security.UserGroupInformation;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Factory for creating {@link org.apache.gobblin.data.management.trash.Trash} instance. Will automatically use
* {@link org.apache.gobblin.data.management.trash.TestTrash} if {@link #TRASH_TEST} is true,
* {@link org.apache.gobblin.data.management.trash.MockTrash} if {@link #SIMULATE} is true,
* and {@link org.apache.gobblin.data.management.trash.ImmediateDeletionTrash} if {@link #SKIP_TRASH} is true.
* Otherwise, it will use {@link org.apache.gobblin.data.management.trash.ProxiedTrash} or {@link org.apache.gobblin.data.management.trash.Trash}.
*/
public class TrashFactory {

  private static final Logger LOG = LoggerFactory.getLogger(TrashFactory.class);

  /** If set to true, a {@link TestTrash} is created; nothing is actually deleted. */
  public static final String TRASH_TEST = "gobblin.trash.test";
  /** If set to true, a {@link MockTrash} is created; deletions are only logged. */
  public static final String SIMULATE = "gobblin.trash.simulate";
  /** If set to true, an {@link ImmediateDeletionTrash} is created; files are deleted immediately. */
  public static final String SKIP_TRASH = "gobblin.trash.skip.trash";

  public static Trash createTrash(FileSystem fs) throws IOException {
    return createTrash(fs, new Properties());
  }

  public static Trash createTrash(FileSystem fs, Properties props) throws IOException {
    return createTrash(fs, props, UserGroupInformation.getCurrentUser().getShortUserName());
  }

  /**
   * Creates a {@link org.apache.gobblin.data.management.trash.Trash} instance.
   * @param fs {@link org.apache.hadoop.fs.FileSystem} where trash is located.
   * @param props {@link java.util.Properties} used to generate trash.
   * @param user $USER tokens in the trash path will be replaced by this string.
   * @return instance of {@link org.apache.gobblin.data.management.trash.Trash}.
   * @throws IOException if the trash cannot be created.
   */
  public static Trash createTrash(FileSystem fs, Properties props, String user)
      throws IOException {
    Trash trash = createTestMockOrImmediateDeletionTrash(fs, props, user);
    if (null != trash) {
      return trash;
    }
    // No test/simulate/skip override requested: create a real trash.
    return Trash.getTrash(fs, props, user);
  }

  public static ProxiedTrash createProxiedTrash(FileSystem fs) throws IOException {
    return createProxiedTrash(fs, new Properties());
  }

  public static ProxiedTrash createProxiedTrash(FileSystem fs, Properties props) throws IOException {
    return createProxiedTrash(fs, props, UserGroupInformation.getCurrentUser().getShortUserName());
  }

  /**
   * Creates a {@link org.apache.gobblin.data.management.trash.ProxiedTrash} instance.
   * @param fs {@link org.apache.hadoop.fs.FileSystem} where trash is located.
   * @param props {@link java.util.Properties} used to generate trash.
   * @param user $USER tokens in the trash path will be replaced by this string.
   * @return instance of {@link org.apache.gobblin.data.management.trash.ProxiedTrash}.
   * @throws IOException if the trash cannot be created.
   */
  public static ProxiedTrash createProxiedTrash(FileSystem fs, Properties props, String user)
      throws IOException {
    ProxiedTrash trash = createTestMockOrImmediateDeletionTrash(fs, props, user);
    if (null != trash) {
      return trash;
    }
    return ProxiedTrash.getProxiedTrash(fs, props, user);
  }

  /**
   * Creates a {@link TestTrash}, {@link MockTrash} or {@link ImmediateDeletionTrash} according to the properties set.
   * @param fs file system object
   * @param props properties
   * @param user user to create trash as
   * @return {@link TestTrash}, {@link MockTrash} or {@link ImmediateDeletionTrash}, or null if none of these trashes
   *         are requested
   * @throws IOException if the trash cannot be created.
   */
  private static ProxiedTrash createTestMockOrImmediateDeletionTrash(FileSystem fs, Properties props, String user)
      throws IOException {
    if (isEnabled(props, TRASH_TEST)) {
      LOG.info("Creating a test trash. Nothing will actually be deleted.");
      return new TestTrash(fs, props, user);
    }
    if (isEnabled(props, SIMULATE)) {
      LOG.info("Creating a simulate trash. Nothing will actually be deleted.");
      return new MockTrash(fs, props, user);
    }
    if (isEnabled(props, SKIP_TRASH)) {
      LOG.info("Creating an immediate deletion trash. Files will be deleted immediately instead of moved to trash.");
      return new ImmediateDeletionTrash(fs, props, user);
    }
    return null;
  }

  /** Returns true iff {@code key} is present in {@code props} and parses as boolean true. */
  private static boolean isEnabled(Properties props, String key) {
    // Boolean.parseBoolean(null) returns false, so a separate containsKey guard is unnecessary.
    return Boolean.parseBoolean(props.getProperty(key));
  }
}
| 2,457 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/trash/GobblinProxiedTrash.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.trash;
import java.io.IOException;
import org.apache.hadoop.fs.Path;
/**
* Interface for proxy enabled trash.
*/
public interface GobblinProxiedTrash extends GobblinTrash {
  /**
   * Move the path to trash as the specified user instead of the current user.
   *
   * <p>Implementations are expected to perform the move with {@code user}'s permissions
   * (e.g. via a proxied {@link org.apache.hadoop.fs.FileSystem}) into that user's trash location.
   *
   * @param path {@link org.apache.hadoop.fs.Path} to move.
   * @param user user to move the path as.
   * @return true if the move succeeded.
   * @throws IOException if the move fails.
   */
  public boolean moveToTrashAsUser(Path path, final String user) throws IOException;
}
| 2,458 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/trash/TimeBasedSnapshotCleanupPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.trash;
import java.util.Properties;
import org.apache.hadoop.fs.FileStatus;
import org.joda.time.DateTime;
/**
* Policy that deletes snapshots if they are older than {@link #SNAPSHOT_RETENTION_POLICY_MINUTES_KEY} minutes.
*/
public class TimeBasedSnapshotCleanupPolicy implements SnapshotCleanupPolicy {

  /** Property key configuring how many minutes a trash snapshot is retained before deletion. */
  public static final String SNAPSHOT_RETENTION_POLICY_MINUTES_KEY = "gobblin.trash.snapshot.retention.minutes";
  /** Default retention of one day. */
  public static final int SNAPSHOT_RETENTION_POLICY_MINUTES_DEFAULT = 1440; // one day

  private final int retentionMinutes;

  public TimeBasedSnapshotCleanupPolicy(Properties props) {
    String configuredMinutes = props.getProperty(SNAPSHOT_RETENTION_POLICY_MINUTES_KEY,
        Integer.toString(SNAPSHOT_RETENTION_POLICY_MINUTES_DEFAULT));
    this.retentionMinutes = Integer.parseInt(configuredMinutes);
  }

  /**
   * A snapshot is deleted once its age exceeds the configured retention.
   * The creation time is parsed from the snapshot directory name.
   */
  @Override
  public boolean shouldDeleteSnapshot(FileStatus snapshot, Trash trash) {
    DateTime createdAt = Trash.TRASH_SNAPSHOT_NAME_FORMATTER.parseDateTime(snapshot.getPath().getName());
    DateTime expiresAt = createdAt.plusMinutes(this.retentionMinutes);
    return expiresAt.isBeforeNow();
  }
}
| 2,459 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/trash/ProxiedTrash.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.trash;
import java.io.IOException;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.collect.Lists;
import org.apache.gobblin.util.ProxiedFileSystemCache;
/**
* An implementation of {@link org.apache.gobblin.data.management.trash.Trash} that allows deleting files as different users.
* Uses {@link org.apache.gobblin.util.ProxiedFileSystemCache} to proxy as different users.
*/
public class ProxiedTrash extends Trash implements GobblinProxiedTrash {

  /** Lazily-created per-user {@link Trash} instances backed by proxied file systems. */
  private final Cache<String, Trash> trashCache = CacheBuilder.newBuilder().maximumSize(100).build();
  private final Properties properties;

  public ProxiedTrash(FileSystem fs, Properties props, String user) throws IOException {
    super(fs, props, user);
    this.properties = props;
  }

  /**
   * Creates a {@link ProxiedTrash} instance, instantiating the class named by {@link #TRASH_CLASS_KEY}
   * when that property is present.
   */
  public static ProxiedTrash getProxiedTrash(FileSystem fs, Properties props, String user) throws IOException {
    if (props.containsKey(TRASH_CLASS_KEY)) {
      return GobblinConstructorUtils.invokeConstructor(ProxiedTrash.class, props.getProperty(TRASH_CLASS_KEY), fs,
          props, user);
    } else {
      return new ProxiedTrash(fs, props, user);
    }
  }

  /**
   * Move the path to trash as specified user.
   * @param path {@link org.apache.hadoop.fs.Path} to move.
   * @param user User to move the path as.
   * @return true if the move succeeded.
   * @throws IOException
   */
  @Override
  public boolean moveToTrashAsUser(Path path, final String user) throws IOException {
    return getUserTrash(user).moveToTrash(path);
  }

  /**
   * Move the path to trash as the owner of the path.
   * @param path {@link org.apache.hadoop.fs.Path} to move.
   * @return true if the move succeeded.
   * @throws IOException
   */
  public boolean moveToTrashAsOwner(Path path) throws IOException {
    String owner = this.fs.getFileStatus(path).getOwner();
    return moveToTrashAsUser(path, owner);
  }

  /**
   * Create a trash snapshot as the specified user.
   * @param user user to proxy.
   * @throws IOException
   */
  public void createTrashSnapshotAsUser(String user) throws IOException {
    getUserTrash(user).createTrashSnapshot();
  }

  /**
   * Purge trash snapshots as the specified user.
   * @param user user to proxy.
   * @throws IOException
   */
  public void purgeTrashSnapshotsAsUser(String user) throws IOException {
    getUserTrash(user).purgeTrashSnapshots();
  }

  /**
   * Create trash snapshots for all users with trash directories. These users are determined by listing all directories
   * in the file system matching the trash pattern given by {@link #TRASH_LOCATION_KEY}.
   * @throws IOException
   */
  public void createTrashSnapshotsForAllUsers() throws IOException {
    for (String user : getAllUsersWithTrash()) {
      createTrashSnapshotAsUser(user);
    }
  }

  /**
   * Purge trash snapshots for all users with trash directories. These users are determined by listing all directories
   * in the file system matching the trash pattern given by {@link #TRASH_LOCATION_KEY}.
   * @throws IOException
   */
  public void purgeTrashSnapshotsForAllUsers() throws IOException {
    for (String user : getAllUsersWithTrash()) {
      purgeTrashSnapshotsAsUser(user);
    }
  }

  /**
   * Find all users with trash directories by listing all directories in
   * the file system matching the trash pattern given by {@link #TRASH_LOCATION_KEY}.
   * @return List of users with trash directory.
   * @throws IOException
   */
  protected List<String> getAllUsersWithTrash() throws IOException {
    Path trashLocationGlob = new Path(this.properties.getProperty(TRASH_LOCATION_KEY).replaceAll("\\$USER", "*"));
    // Capture the entire user-name path segment. The previous pattern "([^/])" matched exactly one
    // character, so any user name longer than one character was silently skipped.
    Pattern userPattern =
        Pattern.compile(this.properties.getProperty(TRASH_LOCATION_KEY).replaceAll("\\$USER", "([^/]+)"));
    List<String> users = Lists.newArrayList();
    for (FileStatus fileStatus : this.fs.globStatus(trashLocationGlob)) {
      Matcher matcher = userPattern.matcher(fileStatus.getPath().toString());
      if (matcher.find()) {
        users.add(matcher.group(1));
      }
    }
    return users;
  }

  /**
   * Get {@link org.apache.gobblin.data.management.trash.Trash} instance for the specified user.
   * @param user user for whom {@link org.apache.gobblin.data.management.trash.Trash} should be generated.
   * @return {@link org.apache.gobblin.data.management.trash.Trash} as generated by proxied user.
   * @throws IOException
   */
  protected Trash getUserTrash(final String user) throws IOException {
    if (UserGroupInformation.getCurrentUser().getShortUserName().equals(user)) {
      // Already the requested user: no proxying needed.
      return this;
    }
    try {
      return this.trashCache.get(user, new Callable<Trash>() {
        @Override
        public Trash call() throws Exception {
          return createNewTrashForUser(ProxiedTrash.this.fs, ProxiedTrash.this.properties, user);
        }
      });
    } catch (ExecutionException ee) {
      // Preserve the underlying cause so the real failure is debuggable.
      throw new IOException("Failed to get trash for user " + user, ee);
    }
  }

  /** Creates a {@link Trash} backed by a file system proxied as {@code user}. */
  protected Trash createNewTrashForUser(FileSystem fs, Properties properties, String user) throws IOException {
    return new Trash(
        ProxiedFileSystemCache.fromProperties().referenceFS(fs).properties(properties).userNameToProxyAs(user).build(),
        properties, user);
  }
}
| 2,460 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/trash/NoopSnapshotCleanupPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.trash;
import java.util.Properties;
import org.apache.hadoop.fs.FileStatus;
/**
* Noop implementation of {@link org.apache.gobblin.data.management.trash.SnapshotCleanupPolicy}.
*/
public class NoopSnapshotCleanupPolicy implements SnapshotCleanupPolicy {

  /** Properties are accepted only for constructor-signature compatibility; none are used. */
  public NoopSnapshotCleanupPolicy(Properties props) {}

  /** Always returns false: under this policy snapshots are never deleted. */
  @Override
  public boolean shouldDeleteSnapshot(FileStatus snapshot, Trash trash) {
    return false;
  }
}
| 2,461 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/trash/ImmediateDeletionTrash.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.trash;
import java.io.IOException;
import java.util.Properties;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.gobblin.util.ProxiedFileSystemCache;
/**
* {@link org.apache.gobblin.data.management.trash.ProxiedTrash} implementation that immediately deletes
* {@link org.apache.hadoop.fs.Path}s instead of moving them to trash.
*/
public class ImmediateDeletionTrash extends ProxiedTrash {

  public ImmediateDeletionTrash(FileSystem fs, Properties props, String user)
      throws IOException {
    super(fs, props, user);
  }

  /** Recursively deletes {@code path} right away instead of moving it into a trash directory. */
  @Override
  public boolean moveToTrash(Path path)
      throws IOException {
    return this.fs.delete(path, true);
  }

  /** No-op: deletion is immediate, so no trash location is ever needed. */
  @Override
  protected void ensureTrashLocationExists(FileSystem fs, Path trashLocation)
      throws IOException {
    // Intentionally empty.
  }

  /** Per-user trash is also an {@link ImmediateDeletionTrash}, backed by a proxied file system. */
  @Override
  protected Trash createNewTrashForUser(FileSystem fs, Properties properties, String user)
      throws IOException {
    FileSystem proxiedFs =
        ProxiedFileSystemCache.fromProperties().userNameToProxyAs(user).properties(properties).referenceFS(fs).build();
    return new ImmediateDeletionTrash(proxiedFs, properties, user);
  }
}
| 2,462 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/trash/GobblinTrash.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.trash;
import java.io.IOException;
import org.apache.hadoop.fs.Path;
/**
* Interface for Trash.
*/
public interface GobblinTrash {
  /**
   * Move the input path to trash.
   *
   * @param path {@link Path} to move to trash.
   * @return true if move succeeded.
   * @throws IOException if the path could not be moved.
   */
  public boolean moveToTrash(Path path) throws IOException;
}
| 2,463 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/trash/SnapshotCleanupPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.trash;
import org.apache.hadoop.fs.FileStatus;
/**
* Policy for determining whether a {@link org.apache.gobblin.data.management.trash.Trash} snapshot should be deleted.
*/
public interface SnapshotCleanupPolicy {
  /**
   * Decide whether a trash snapshot should be permanently deleted from the file system.
   *
   * <p>
   * This method will be called for all snapshots in the trash directory, in order from oldest to newest.
   * Returning true authorizes permanent, unrecoverable deletion of the snapshot.
   * </p>
   *
   * @param snapshot {@link org.apache.hadoop.fs.FileStatus} of candidate snapshot for deletion.
   * @param trash {@link org.apache.gobblin.data.management.trash.Trash} object that called this method.
   * @return true if the snapshot should be deleted permanently.
   */
  boolean shouldDeleteSnapshot(FileStatus snapshot, Trash trash);
}
| 2,464 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/trash/MockTrash.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.trash;
import java.io.IOException;
import java.util.List;
import java.util.Properties;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Lists;
/**
* Mock version of {@link org.apache.gobblin.data.management.trash.ProxiedTrash} for simulating deletions. Can also be used as
* a mock for {@link org.apache.gobblin.data.management.trash.Trash}.
*/
public class MockTrash extends ProxiedTrash {

  private static final Logger LOG = LoggerFactory.getLogger(MockTrash.class);

  // NOTE: a previous createTrashLocation override that only delegated to super was removed;
  // the inherited implementation is used directly, which is behaviorally identical.

  public MockTrash(FileSystem fs, Properties props, String user)
      throws IOException {
    super(fs, props, user);
  }

  /** Logs the would-be deletion and reports success without touching the file system. */
  @Override
  public boolean moveToTrash(Path path)
      throws IOException {
    LOG.info("Simulating move to trash: " + path);
    return true;
  }

  /** Snapshots are not meaningful when nothing is ever moved to trash. */
  @Override
  public void createTrashSnapshot()
      throws IOException {
    throw new UnsupportedOperationException("Not supported for " + MockTrash.class);
  }

  /** Snapshots are not meaningful when nothing is ever moved to trash. */
  @Override
  public void purgeTrashSnapshots()
      throws IOException {
    throw new UnsupportedOperationException("Not supported for " + MockTrash.class);
  }

  /** Reports no users, turning bulk snapshot operations into no-ops. */
  @Override
  protected List<String> getAllUsersWithTrash()
      throws IOException {
    return Lists.newArrayList();
  }

  /** No-op: the trash location is never written to in simulation mode. */
  @Override
  protected void ensureTrashLocationExists(FileSystem fs, Path trashLocation)
      throws IOException {
    // Do nothing
  }

  /** Proxying is unnecessary when deletion is simulated; reuse this instance for every user. */
  @Override
  protected Trash getUserTrash(String user)
      throws IOException {
    return this;
  }
}
| 2,465 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/version/FileStatusAware.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.version;
import java.util.Set;
import org.apache.hadoop.fs.FileStatus;
/**
* A {@link FileSystemDatasetVersion} that is aware {@link FileStatus}s or its paths
*/
public interface FileStatusAware {
  /**
   * Get the set of {@link FileStatus}s that are included in this dataset version, or the {@link FileStatus}
   * of the dataset version itself (in which case the set has exactly one element).
   *
   * @return the {@link FileStatus}s backing this dataset version.
   */
  public Set<FileStatus> getFileStatuses();
}
| 2,466 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/version/TimestampedDatasetStateStoreVersion.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.version;
import org.apache.gobblin.metastore.metadata.DatasetStateStoreEntryManager;
import org.joda.time.DateTime;
import lombok.Getter;
/**
* {@link TimestampedDatasetVersion} that has a {@link DatasetStateStoreEntryManager}
*/
@Getter
public class TimestampedDatasetStateStoreVersion extends TimestampedDatasetVersion implements DatasetStateStoreVersion {

  private final DatasetStateStoreEntryManager entry;

  public TimestampedDatasetStateStoreVersion(DatasetStateStoreEntryManager entry) {
    // A state-store version has no backing path; only the entry's timestamp identifies it.
    super(new DateTime(entry.getTimestamp()), null);
    this.entry = entry;
  }

  /** Orders versions by timestamp only. */
  @Override
  public int compareTo(FileSystemDatasetVersion other) {
    TimestampedDatasetVersion that = (TimestampedDatasetVersion) other;
    if (this.version.equals(that.version)) {
      return 0;
    }
    return this.version.compareTo(that.version);
  }

  /** Two versions are equal when both the state-store entry and the superclass state agree. */
  @Override
  public boolean equals(Object obj) {
    if (this == obj) {
      return true;
    }
    if (obj == null || getClass() != obj.getClass()) {
      return false;
    }
    TimestampedDatasetStateStoreVersion that = (TimestampedDatasetStateStoreVersion) obj;
    return this.entry.equals(that.getEntry()) && super.equals(obj);
  }

  /** Combines the timestamp hash with the entry hash, mirroring {@link #equals(Object)}. */
  @Override
  public int hashCode() {
    int hash = this.version.hashCode();
    return 31 * hash + (this.entry != null ? this.entry.hashCode() : 0);
  }
}
| 2,467 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/version/StringDatasetVersion.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.version;
import java.util.Set;
import org.apache.hadoop.fs.Path;
import com.google.common.collect.Sets;
import lombok.Getter;
/**
* Dataset version using {@link java.lang.String} as version.
*/
@Getter
public class StringDatasetVersion implements FileSystemDatasetVersion {

  protected final String version;
  protected final Path path;

  public StringDatasetVersion(String version, Path path) {
    this.version = version;
    this.path = path;
  }

  /** Orders primarily by version string; the path breaks ties between equal versions. */
  @Override
  public int compareTo(FileSystemDatasetVersion other) {
    StringDatasetVersion that = (StringDatasetVersion) other;
    int byVersion = this.version.compareTo(that.version);
    if (byVersion != 0) {
      return byVersion;
    }
    return this.path.compareTo(that.path);
  }

  /** Equal iff same concrete class and {@link #compareTo(FileSystemDatasetVersion)} returns 0. */
  @Override
  public boolean equals(Object obj) {
    if (obj == null || !this.getClass().equals(obj.getClass())) {
      return false;
    }
    return compareTo((StringDatasetVersion) obj) == 0;
  }

  @Override
  public int hashCode() {
    // Consistent with equals: both version and path participate.
    return this.path.hashCode() + this.version.hashCode();
  }

  @Override
  public String toString() {
    return this.version;
  }

  @Override
  public Set<Path> getPaths() {
    return Sets.newHashSet(this.path);
  }
}
| 2,468 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/version/HiveDatasetVersion.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.version;
import org.apache.hadoop.hive.ql.metadata.Partition;
/**
* Represents a {@link DatasetVersion} for hive partition
*/
public interface HiveDatasetVersion extends FileSystemDatasetVersion {
  /**
   * The Hive partition associated with this {@link DatasetVersion}.
   *
   * @return the {@link Partition} backing this dataset version.
   */
  public Partition getPartition();
}
| 2,469 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/version/FileSystemDatasetVersion.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.version;
import java.util.Set;
import org.apache.hadoop.fs.Path;
/**
* Wrapper around {@link java.lang.Comparable} for dataset versions.
*/
public interface FileSystemDatasetVersion extends DatasetVersion, Comparable<FileSystemDatasetVersion> {
  /**
   * Get the set of {@link org.apache.hadoop.fs.Path}s that are included in this dataset version, or the path
   * of the dataset version itself (in which case the set has exactly one element).
   *
   * @return the {@link Path}s backing this dataset version.
   */
  public Set<Path> getPaths();
}
| 2,470 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/version/FileStatusDatasetVersion.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.version;
import java.util.Set;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import com.google.common.collect.Sets;
import lombok.Data;
/**
 * Implementation of {@link org.apache.gobblin.data.management.version.DatasetVersion} that uses a single path per
 * version and stores the {@link org.apache.hadoop.fs.FileStatus} of that path.
 */
@Data
public class FileStatusDatasetVersion extends StringDatasetVersion {

  /** The {@link FileStatus} of this version's single path. */
  protected final FileStatus fileStatus;

  public FileStatusDatasetVersion(FileStatus fileStatus) {
    // The path's name doubles as the string version; the path itself is the version's location.
    super(fileStatus.getPath().getName(), fileStatus.getPath());
    this.fileStatus = fileStatus;
  }

  /** Orders versions by path only; other {@link FileStatus} fields do not participate. */
  @Override
  public int compareTo(FileSystemDatasetVersion other) {
    FileStatusDatasetVersion otherAsFileStatus = (FileStatusDatasetVersion) other;
    return this.fileStatus.getPath().compareTo(otherAsFileStatus.getFileStatus().getPath());
  }

  /** Equal iff the other object is of the exact same class and has an equal path. */
  @Override
  public boolean equals(Object obj) {
    return obj != null && this.getClass().equals(obj.getClass()) && compareTo((FileSystemDatasetVersion) obj) == 0;
  }

  /**
   * Hash only the path so the hash is consistent with {@link #equals(Object)}, which compares
   * paths exclusively. Delegating to {@link FileStatus#hashCode()} would tie the hash to whatever
   * fields a given Hadoop version chooses to include, risking a broken equals/hashCode contract.
   */
  @Override
  public int hashCode() {
    return this.fileStatus.getPath().hashCode();
  }

  @Override
  public Set<Path> getPaths() {
    return Sets.newHashSet(this.fileStatus.getPath());
  }
}
| 2,471 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/version/FileStatusTimestampedDatasetVersion.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.version;
import java.util.Set;
import lombok.EqualsAndHashCode;
import org.apache.hadoop.fs.FileStatus;
import org.joda.time.DateTime;
import com.google.common.collect.Sets;
/**
 * A {@link TimestampedDatasetVersion} that additionally exposes the {@link FileStatus} of its
 * single path.
 */
@EqualsAndHashCode(callSuper = true)
public class FileStatusTimestampedDatasetVersion extends TimestampedDatasetVersion implements FileStatusAware {

  /** File status of this version's one and only path. */
  private final FileStatus fileStatus;

  public FileStatusTimestampedDatasetVersion(DateTime version, FileStatus fileStatus) {
    super(version, fileStatus.getPath());
    this.fileStatus = fileStatus;
  }

  /** Returns a mutable set holding the single {@link FileStatus} backing this version. */
  @Override
  public Set<FileStatus> getFileStatuses() {
    Set<FileStatus> statuses = Sets.newHashSet();
    statuses.add(this.fileStatus);
    return statuses;
  }
}
| 2,472 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/version/DatasetVersion.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.version;
/**
 * A single manageable unit (version) of a dataset. Implementations identify a version by a
 * timestamp, a string, a state store entry, etc.
 */
public interface DatasetVersion {
  /**
   * Get the version representation.
   *
   * @return an object identifying this version; its concrete type depends on the implementation
   */
  public Object getVersion();
}
| 2,473 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/version/TimestampedDatasetVersion.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.version;
import java.util.Set;
import org.apache.hadoop.fs.Path;
import org.joda.time.DateTime;
import org.joda.time.format.DateTimeFormat;
import com.google.common.collect.Sets;
import lombok.Getter;
/**
 * {@link org.apache.gobblin.data.management.version.DatasetVersion} identified by a timestamp.
 * Versions order primarily by timestamp and secondarily by path.
 */
@Getter
public class TimestampedDatasetVersion implements FileSystemDatasetVersion {

  /** Timestamp identifying this version. */
  protected final DateTime version;
  /** The single file system path this version consists of. */
  protected final Path path;

  public TimestampedDatasetVersion(DateTime version, Path path) {
    this.version = version;
    this.path = path;
  }

  /** The timestamp of this version (alias for the generated {@code getVersion()}). */
  public DateTime getDateTime() {
    return this.version;
  }

  @Override
  public int compareTo(FileSystemDatasetVersion other) {
    TimestampedDatasetVersion that = (TimestampedDatasetVersion) other;
    // DateTime.equals is stricter than compareTo (it also checks chronology), so the tie-break on
    // path only happens for fully equal timestamps, matching the original semantics.
    if (this.version.equals(that.version)) {
      return this.path.compareTo(that.path);
    }
    return this.version.compareTo(that.version);
  }

  @Override
  public boolean equals(Object obj) {
    if (!(obj instanceof TimestampedDatasetVersion)) {
      return false;
    }
    return compareTo((TimestampedDatasetVersion) obj) == 0;
  }

  @Override
  public int hashCode() {
    return this.version.hashCode() + this.path.hashCode();
  }

  @Override
  public String toString() {
    return "Version " + this.version.toString(DateTimeFormat.shortDateTime()) + " at path " + this.path;
  }

  @Override
  public Set<Path> getPaths() {
    return Sets.newHashSet(this.path);
  }
}
| 2,474 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/version/DatasetStateStoreVersion.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.version;
import org.apache.gobblin.metastore.metadata.DatasetStateStoreEntryManager;
/**
 * A {@link DatasetVersion} backed by an entry in a dataset state store, exposing its
 * {@link DatasetStateStoreEntryManager}.
 */
public interface DatasetStateStoreVersion extends DatasetVersion {
  /** The state store entry manager backing this version. */
  DatasetStateStoreEntryManager getEntry();
}
| 2,475 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/version/TimestampedHiveDatasetVersion.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.version;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.joda.time.DateTime;
/**
 * A {@link HiveDatasetVersion} where the version is a timestamp associated with the {@link Partition}. Usually this
 * is the create time or modification time.
 */
public class TimestampedHiveDatasetVersion extends TimestampedDatasetVersion implements HiveDatasetVersion {

  /** The Hive partition this version represents. */
  private final Partition partition;

  public TimestampedHiveDatasetVersion(DateTime version, Partition partition) {
    // The version's path is the partition's data location on the file system.
    super(version, partition.getDataLocation());
    this.partition = partition;
  }

  @Override
  public Partition getPartition() {
    return this.partition;
  }

  /**
   * Equal iff the other object is also a {@link TimestampedHiveDatasetVersion} whose timestamp and
   * path compare equal (the fields examined by {@link TimestampedDatasetVersion#compareTo}).
   * {@code super.equals} already performs the compareTo check, so no extra comparison is needed.
   */
  @Override
  public boolean equals(Object obj) {
    return obj instanceof TimestampedHiveDatasetVersion && super.equals(obj);
  }

  /**
   * Hash only the fields that {@link #equals(Object)} examines (timestamp and path). Including
   * {@link #partition} in the hash would allow two equal versions to produce different hash codes,
   * violating the {@link Object#hashCode()} contract.
   */
  @Override
  public int hashCode() {
    return super.hashCode();
  }
}
| 2,476 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/version | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/version/finder/TimestampedDatasetStateStoreVersionFinder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.version.finder;
import java.io.IOException;
import java.util.Collection;
import java.util.List;
import org.apache.gobblin.data.management.version.FileSystemDatasetVersion;
import org.apache.gobblin.data.management.version.TimestampedDatasetStateStoreVersion;
import org.apache.gobblin.dataset.Dataset;
import org.apache.gobblin.metastore.DatasetStoreDataset;
import org.apache.gobblin.metastore.metadata.DatasetStateStoreEntryManager;
import com.google.common.collect.Lists;
/**
 * A {@link VersionFinder} that produces one {@link TimestampedDatasetStateStoreVersion} per state
 * store metadata entry of a {@link DatasetStoreDataset}.
 */
public class TimestampedDatasetStateStoreVersionFinder implements VersionFinder<TimestampedDatasetStateStoreVersion> {

  @Override
  public Class<? extends FileSystemDatasetVersion> versionClass() {
    return TimestampedDatasetStateStoreVersion.class;
  }

  @Override
  public Collection<TimestampedDatasetStateStoreVersion> findDatasetVersions(Dataset dataset) throws IOException {
    DatasetStoreDataset storeDataset = (DatasetStoreDataset) dataset;
    List<TimestampedDatasetStateStoreVersion> result = Lists.newArrayList();
    // Each state store metadata entry of this dataset becomes one timestamped version.
    for (DatasetStateStoreEntryManager entryManager : storeDataset.getDatasetStateStoreMetadataEntries()) {
      result.add(new TimestampedDatasetStateStoreVersion(entryManager));
    }
    return result;
  }
}
| 2,477 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/version | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/version/finder/VersionFinder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.version.finder;
import java.io.IOException;
import java.util.Collection;
import org.apache.gobblin.dataset.Dataset;
import org.apache.gobblin.data.management.version.DatasetVersion;
/**
 * Finds dataset versions.
 *
 * @param <T> Type of {@link DatasetVersion} expected from this class.
 */
public interface VersionFinder<T extends DatasetVersion> {
  /**
   * The concrete {@link DatasetVersion} type produced by this finder (should return the class of T).
   */
  public abstract Class<? extends DatasetVersion> versionClass();
  /**
   * Find dataset versions for a {@link Dataset}. Each dataset version represents a single manageable unit in the dataset.
   *
   * @param dataset the dataset containing all versions.
   * @return Collection of {@link DatasetVersion} for each dataset version found.
   * @throws IOException if versions cannot be listed, e.g. due to a file system or metastore failure
   */
  public Collection<T> findDatasetVersions(Dataset dataset) throws IOException;
}
| 2,478 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/version | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/version/finder/GlobModTimeDatasetVersionFinder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.version.finder;
import java.io.IOException;
import java.util.Properties;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.joda.time.DateTime;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.data.management.version.FileSystemDatasetVersion;
import org.apache.gobblin.data.management.version.TimestampedDatasetVersion;
/**
 * Finds {@link FileSystemDatasetVersion}s using a glob pattern. Uses modification time as the version.
 */
public class GlobModTimeDatasetVersionFinder extends DatasetVersionFinder<TimestampedDatasetVersion> {

  private static final String VERSION_FINDER_GLOB_PATTERN_KEY = "version.globPattern";

  /** Glob pattern, relative to the dataset root, matching version paths. */
  private final Path globPattern;

  public GlobModTimeDatasetVersionFinder(FileSystem fs, Config config) {
    this(fs, globPatternFrom(config));
  }

  public GlobModTimeDatasetVersionFinder(FileSystem fs, Properties props) {
    this(fs, ConfigFactory.parseProperties(props));
  }

  public GlobModTimeDatasetVersionFinder(FileSystem fs, Path globPattern) {
    super(fs);
    this.globPattern = globPattern;
  }

  /** Reads the glob pattern from config, defaulting to "*" (all direct children). */
  private static Path globPatternFrom(Config config) {
    if (config.hasPath(VERSION_FINDER_GLOB_PATTERN_KEY)) {
      return new Path(config.getString(VERSION_FINDER_GLOB_PATTERN_KEY));
    }
    return new Path("*");
  }

  @Override
  public Class<? extends FileSystemDatasetVersion> versionClass() {
    return TimestampedDatasetVersion.class;
  }

  @Override
  public Path globVersionPattern() {
    return this.globPattern;
  }

  @Override
  public TimestampedDatasetVersion getDatasetVersion(Path pathRelativeToDatasetRoot, Path fullPath) {
    try {
      long modTime = this.fs.getFileStatus(fullPath).getModificationTime();
      return new TimestampedDatasetVersion(new DateTime(modTime), fullPath);
    } catch (IOException e) {
      // Path unreadable or gone between globbing and stat: treat as "no version" for this path.
      return null;
    }
  }
}
| 2,479 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/version | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/version/finder/DatePartitionHiveVersionFinder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.version.finder;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.joda.time.DateTimeZone;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import com.google.common.base.Predicate;
import com.google.common.collect.Iterables;
import com.typesafe.config.Config;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.data.management.version.TimestampedHiveDatasetVersion;
import org.apache.gobblin.util.ConfigUtils;
/**
 * A Hive Partition finder where the version is the partition value.
 * <p>
 * The hive table needs to be date partitioned by prop value {@value #PARTITION_KEY_NAME_KEY}. The value of this key must be
 * a date pattern as per prop value {@value #PARTITION_VALUE_DATE_TIME_PATTERN_KEY}.
 * </p>
 * <p>
 * E.g if the hive partition is datepartition=2016-01-10-22/field1=f1Value.
 * The {@value #PARTITION_KEY_NAME_KEY}=datepartiton and {@value #PARTITION_VALUE_DATE_TIME_PATTERN_KEY}=yyyy-MM-dd-HH
 * </p>
 */
public class DatePartitionHiveVersionFinder extends AbstractHiveDatasetVersionFinder {

  // Config keys and defaults controlling how the date is parsed out of the partition value.
  public static final String PARTITION_VALUE_DATE_TIME_PATTERN_KEY = "hive.partition.value.datetime.pattern";
  public static final String DEFAULT_PARTITION_VALUE_DATE_TIME_PATTERN = "yyyy-MM-dd-HH";
  public static final String PARTITION_VALUE_DATE_TIME_TIMEZONE_KEY = "hive.partition.value.datetime.timezone";
  public static final String DEFAULT_PARTITION_VALUE_DATE_TIME_TIMEZONE = ConfigurationKeys.PST_TIMEZONE_NAME;
  public static final String PARTITION_KEY_NAME_KEY = "hive.partition.key.name";
  public static final String DEFAULT_PARTITION_KEY_NAME = "datepartition";

  // Joda formatter built from the configured pattern and timezone; DateTimeFormatter is thread-safe.
  protected final DateTimeFormatter formatter;
  // Name of the table partition key whose value carries the date (matched case-insensitively).
  private final String partitionKeyName;
  // Selects the FieldSchema whose name equals partitionKeyName, ignoring case.
  private final Predicate<FieldSchema> partitionKeyNamePredicate;
  // Raw date-time pattern string; its length also decides how many characters of the partition value to parse.
  private final String pattern;

  public DatePartitionHiveVersionFinder(FileSystem fs, Config config) {
    this.pattern =
        ConfigUtils.getString(config, PARTITION_VALUE_DATE_TIME_PATTERN_KEY, DEFAULT_PARTITION_VALUE_DATE_TIME_PATTERN);
    // Timezone from config if present, otherwise the default (PST).
    if (config.hasPath(PARTITION_VALUE_DATE_TIME_TIMEZONE_KEY)) {
      this.formatter = DateTimeFormat.forPattern(pattern)
          .withZone(DateTimeZone.forID(config.getString(PARTITION_VALUE_DATE_TIME_TIMEZONE_KEY)));
    } else {
      this.formatter =
          DateTimeFormat.forPattern(pattern).withZone(DateTimeZone.forID(DEFAULT_PARTITION_VALUE_DATE_TIME_TIMEZONE));
    }
    this.partitionKeyName = ConfigUtils.getString(config, PARTITION_KEY_NAME_KEY, DEFAULT_PARTITION_KEY_NAME);
    this.partitionKeyNamePredicate = new Predicate<FieldSchema>() {
      @Override
      public boolean apply(FieldSchema input) {
        return StringUtils.equalsIgnoreCase(input.getName(), DatePartitionHiveVersionFinder.this.partitionKeyName);
      }
    };
  }

  /**
   * Create a {@link TimestampedHiveDatasetVersion} from a {@link Partition}. The hive table is expected
   * to be date partitioned by {@link #partitionKeyName}. The partition value format must be {@link #pattern}
   *
   * @throws IllegalArgumentException when {@link #partitionKeyName} is not found in the table's partition keys
   * @throws IllegalArgumentException when a value can not be found for {@link #partitionKeyName} in the <code>partition</code>
   * @throws IllegalArgumentException if the partition value can not be parsed with {@link #pattern}
   * {@inheritDoc}
   */
  @Override
  protected TimestampedHiveDatasetVersion getDatasetVersion(Partition partition) {
    // Locate the position of the date partition key among the table's partition columns; partition
    // values are positional, so the same index is used to read the value below.
    int index = Iterables.indexOf(partition.getTable().getPartitionKeys(), this.partitionKeyNamePredicate);
    if (index == -1) {
      throw new IllegalArgumentException(String
          .format("Failed to find partition key %s in the table %s", this.partitionKeyName,
              partition.getTable().getCompleteName()));
    }
    if (index >= partition.getValues().size()) {
      throw new IllegalArgumentException(String
          .format("Failed to find partition value for key %s in the partition %s", this.partitionKeyName,
              partition.getName()));
    }
    // Only the first pattern.length() characters of the trimmed value are parsed. NOTE(review):
    // this assumes the formatted date occupies exactly as many characters as the pattern string
    // (true for e.g. yyyy-MM-dd-HH) — confirm for patterns whose output length differs.
    return new TimestampedHiveDatasetVersion(
        this.formatter.parseDateTime(partition.getValues().get(index).trim().substring(0, this.pattern.length())),
        partition);
  }
}
| 2,480 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/version | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/version/finder/HdfsModifiedTimeHiveVersionFinder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.version.finder;
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.joda.time.DateTime;
import com.google.common.base.Preconditions;
import com.typesafe.config.Config;
import org.apache.gobblin.data.management.version.TimestampedHiveDatasetVersion;
/**
 * A Hive partition version finder where the version is the HDFS modification time of the
 * partition's data location.
 */
public class HdfsModifiedTimeHiveVersionFinder extends AbstractHiveDatasetVersionFinder {

  private final FileSystem fs;

  public HdfsModifiedTimeHiveVersionFinder(FileSystem fs, Config config) {
    // Config is accepted for constructor-signature compatibility but is not currently used.
    this.fs = fs;
  }

  /**
   * Create a {@link TimestampedHiveDatasetVersion} from a {@link Partition} based on the modified time of the
   * underlying hdfs data location.
   *
   * @throws IllegalArgumentException when the partition is null
   * @throws IllegalArgumentException when the data location of the partition is null
   * @throws IllegalArgumentException when the data location of the partition doesn't exist
   * {@inheritDoc}
   */
  @Override
  protected TimestampedHiveDatasetVersion getDatasetVersion(Partition partition) {
    // Validate inputs before doing any file system I/O.
    Preconditions.checkArgument(partition != null, "Partition cannot be null");
    Path dataLocation = partition.getDataLocation();
    Preconditions
        .checkArgument(dataLocation != null, "Data location is null for partition " + partition.getCompleteName());
    try {
      boolean exists = this.fs.exists(dataLocation);
      Preconditions.checkArgument(exists, "Data location doesn't exist for partition " + partition.getCompleteName());
      long modificationTS = this.fs.getFileStatus(dataLocation).getModificationTime();
      return new TimestampedHiveDatasetVersion(new DateTime(modificationTS), partition);
    } catch (IOException e) {
      // The overridden method does not declare IOException, so wrap file system failures unchecked.
      throw new RuntimeException(e);
    }
  }
}
| 2,481 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/version | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/version/finder/UnixTimestampVersionFinder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.version.finder;
import org.apache.gobblin.data.management.version.FileSystemDatasetVersion;
import org.apache.gobblin.data.management.version.StringDatasetVersion;
import org.apache.gobblin.data.management.version.TimestampedDatasetVersion;
import java.util.Properties;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.joda.time.DateTime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Implementation of {@link VersionFinder} that generates {@link TimestampedDatasetVersion} from a unix timestamp
 * in the name of each version path.
 *
 * <p>
 * The timestamp will be determined using a regex specified in the configuration key
 * gobblin.retention.watermakr.regex . This class will attempt to interpret the 1st capture group in the regex as
 * a unix timestamp. If no regex is provided, then the class will attempt to interpret the entire name of the
 * version path as a unix timestamp.
 * </p>
 */
public class UnixTimestampVersionFinder extends DatasetVersionFinder<TimestampedDatasetVersion> {

  private static final Logger LOGGER = LoggerFactory.getLogger(UnixTimestampVersionFinder.class);

  /** Delegate that extracts the raw version string (via the configured regex) from each path. */
  private final WatermarkDatasetVersionFinder embeddedFinder;

  public UnixTimestampVersionFinder(FileSystem fs, Properties props) {
    super(fs, props);
    this.embeddedFinder = new WatermarkDatasetVersionFinder(fs, props);
  }

  @Override
  public Class<? extends FileSystemDatasetVersion> versionClass() {
    return TimestampedDatasetVersion.class;
  }

  @Override
  public Path globVersionPattern() {
    return this.embeddedFinder.globVersionPattern();
  }

  @Override
  public TimestampedDatasetVersion getDatasetVersion(Path pathRelativeToDatasetRoot, Path fullPath) {
    StringDatasetVersion version = this.embeddedFinder.getDatasetVersion(pathRelativeToDatasetRoot, fullPath);
    if (version == null) {
      // This means that the embedded finder could not parse a version.
      return null;
    }
    try {
      // Primitive long avoids needless boxing; parse failures are handled below.
      long timestamp = Long.parseLong(version.getVersion());
      return new TimestampedDatasetVersion(new DateTime(timestamp), fullPath);
    } catch (NumberFormatException nfe) {
      // Must be caught before IllegalArgumentException, which is its superclass.
      LOGGER.warn("Could not parse long from dataset version {}. Skipping.", pathRelativeToDatasetRoot);
      return null;
    } catch (IllegalArgumentException iae) {
      // Presumably thrown during DateTime construction for out-of-range values — TODO confirm.
      LOGGER.warn("Could not parse unix datetime for dataset version {}. Skipping.", pathRelativeToDatasetRoot);
      return null;
    }
  }
}
| 2,482 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/version | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/version/finder/AbstractHiveDatasetVersionFinder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.version.finder;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import lombok.extern.slf4j.Slf4j;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.ql.metadata.Partition;
import com.google.common.base.Function;
import com.google.common.base.Optional;
import com.google.common.base.Predicates;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import org.apache.gobblin.data.management.copy.hive.HiveDataset;
import org.apache.gobblin.data.management.copy.hive.HiveUtils;
import org.apache.gobblin.data.management.version.DatasetVersion;
import org.apache.gobblin.data.management.version.HiveDatasetVersion;
import org.apache.gobblin.dataset.Dataset;
import org.apache.gobblin.util.AutoReturnableObject;
/**
* An abstract {@link VersionFinder} to create {@link HiveDatasetVersion}s for all {@link Partition}s of a {@link HiveDataset}.
* Calls {@link #getDatasetVersion(Partition)} for every {@link Partition} found.
*/
@Slf4j
public abstract class AbstractHiveDatasetVersionFinder implements VersionFinder<HiveDatasetVersion> {
/**
 * {@inheritDoc}
 *
 * @return {@link HiveDatasetVersion}, the version type produced by this finder
 */
@Override
public Class<? extends DatasetVersion> versionClass() {
  return HiveDatasetVersion.class;
}
/**
* Create {@link HiveDatasetVersion}s for all {@link Partition}s of a {@link HiveDataset}.
* Calls {@link #getDatasetVersion(Partition)} for every {@link Partition} found.
* <p>
* Note: If an exception occurs while processing a partition, that partition will be ignored in the returned collection
* Also note that if the dataset passed is a view type, we will return an empty list even if the underlying table is
* partitioned.
* </p>
*
* @throws IllegalArgumentException if <code>dataset</code> is not a {@link HiveDataset}. Or if {@link HiveDataset#getTable()}
* is not partitioned.
*/
@Override
public Collection<HiveDatasetVersion> findDatasetVersions(Dataset dataset) throws IOException {
if (!(dataset instanceof HiveDataset)) {
throw new IllegalArgumentException("HiveDatasetVersionFinder is only compatible with HiveDataset");
}
final HiveDataset hiveDataset = (HiveDataset) dataset;
if (!hiveDataset.getTable().isPartitioned()) {
if (hiveDataset.getTable().getTableType() == TableType.VIRTUAL_VIEW) {
log.warn("Skipping processing a view type dataset: ", ((HiveDataset) dataset).getTable().getTableName());
return Collections.emptyList();
} else {
throw new IllegalArgumentException("HiveDatasetVersionFinder is only compatible with partitioned hive tables. "
+ "This is a snapshot hive table.");
}
}
try (AutoReturnableObject<IMetaStoreClient> client = hiveDataset.getClientPool().getClient()) {
List<Partition> partitions = HiveUtils.getPartitions(client.get(), hiveDataset.getTable(), Optional.<String> absent());
return Lists.newArrayList(Iterables.filter(Iterables.transform(partitions, new Function<Partition, HiveDatasetVersion>() {
@Override
public HiveDatasetVersion apply(Partition partition) {
try {
return getDatasetVersion(partition);
} catch (Throwable e) {
log.warn(String.format("Failed to get DatasetVersion %s. Skipping.", partition.getCompleteName()), e);
return null;
}
}
}), Predicates.notNull()));
}
}
/**
*
* Create a {@link HiveDatasetVersion} for the {@link Partition}
* @param partition for which a {@link HiveDatasetVersion} is created
*/
protected abstract HiveDatasetVersion getDatasetVersion(Partition partition);
}
| 2,483 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/version | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/version/finder/DateTimeDatasetVersionFinder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.version.finder;
import java.util.Properties;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.joda.time.DateTimeZone;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Preconditions;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.data.management.version.FileStatusTimestampedDatasetVersion;
import org.apache.gobblin.data.management.version.FileSystemDatasetVersion;
import org.apache.gobblin.data.management.version.TimestampedDatasetVersion;
/**
* {@link org.apache.gobblin.data.management.version.finder.DatasetVersionFinder} for datasets based on path timestamps.
* Uses a datetime pattern to find dataset versions from the dataset path
* and parse the {@link org.joda.time.DateTime} representing the version.
*/
public class DateTimeDatasetVersionFinder extends AbstractDatasetVersionFinder<TimestampedDatasetVersion> {

  private static final Logger LOGGER = LoggerFactory.getLogger(DateTimeDatasetVersionFinder.class);

  /**
   * Date pattern of the partition. E.g. yyyy/MM/dd/hh/mm or yyyy/MM/dd
   */
  public static final String DATE_TIME_PATTERN_KEY = "version.datetime.pattern";

  /**
   * Time zone to be used E.g. UTC
   */
  public static final String DATE_TIME_PATTERN_TIMEZONE_KEY = "version.datetime.timezone";

  /**
   * By default the globPattern is obtained by replacing all non-slash characters in datetime pattern by *.
   * E.g. yyyy/MM/dd/hh/mm -> *&#47;*&#47;*&#47;*&#47;*.
   * If this key is set, we use this globPattern to search for versions.
   * NOTE(review): the constant name mentions "TIMEZONE" but the key is a glob pattern
   * ("version.globPattern"); the name cannot be changed without breaking external references.
   */
  public static final String OPTIONAL_GLOB_PATTERN_TIMEZONE_KEY = "version.globPattern";

  public static final String DEFAULT_DATE_TIME_PATTERN_TIMEZONE = ConfigurationKeys.PST_TIMEZONE_NAME;

  /** Glob used to enumerate candidate version directories under the dataset root. */
  private final Path globPattern;
  /** Formatter used both to parse version paths and (by subclasses) to print instants. */
  protected final DateTimeFormatter formatter;
  /** The raw datetime pattern; its length is used to locate the date suffix of a version path. */
  private final String datePartitionPattern;

  /**
   * @param fs file system containing the datasets (unused here, passed to the superclass).
   * @param config must contain {@value #DATE_TIME_PATTERN_KEY}; may contain
   *        {@value #DATE_TIME_PATTERN_TIMEZONE_KEY} and {@value #OPTIONAL_GLOB_PATTERN_TIMEZONE_KEY}.
   */
  public DateTimeDatasetVersionFinder(FileSystem fs, Config config) {
    super(fs);
    Preconditions.checkArgument(config.hasPath(DATE_TIME_PATTERN_KEY), "Missing required property " + DATE_TIME_PATTERN_KEY);
    String pattern = config.getString(DATE_TIME_PATTERN_KEY);
    if (config.hasPath(OPTIONAL_GLOB_PATTERN_TIMEZONE_KEY)) {
      this.globPattern = new Path(config.getString(OPTIONAL_GLOB_PATTERN_TIMEZONE_KEY));
    } else {
      // Derive a glob from the datetime pattern by replacing every non-slash run with "*".
      this.globPattern = new Path(pattern.replaceAll("[^/]+", "*"));
    }
    // Typo fix ("patthern") and parameterized logging instead of eager String.format.
    LOGGER.debug("Setting timezone for pattern: {}. By default it is {}", pattern, DEFAULT_DATE_TIME_PATTERN_TIMEZONE);
    String timeZoneId = config.hasPath(DATE_TIME_PATTERN_TIMEZONE_KEY)
        ? config.getString(DATE_TIME_PATTERN_TIMEZONE_KEY)
        : DEFAULT_DATE_TIME_PATTERN_TIMEZONE;
    this.formatter = DateTimeFormat.forPattern(pattern).withZone(DateTimeZone.forID(timeZoneId));
    this.datePartitionPattern = pattern;
  }

  public DateTimeDatasetVersionFinder(FileSystem fs, Properties props) {
    this(fs, ConfigFactory.parseProperties(props));
  }

  @Override
  public Class<? extends FileSystemDatasetVersion> versionClass() {
    return TimestampedDatasetVersion.class;
  }

  /**
   * Obtained by replacing all non-slash characters in datetime pattern by *.
   * E.g. yyyy/MM/dd/hh/mm -> *&#47;*&#47;*&#47;*&#47;*
   * Or glob pattern at {@value #OPTIONAL_GLOB_PATTERN_TIMEZONE_KEY} if set.
   */
  @Override
  public Path globVersionPattern() {
    return this.globPattern;
  }

  /**
   * Parse {@link org.joda.time.DateTime} from {@link org.apache.hadoop.fs.Path} using datetime pattern.
   * Returns null (version ignored) when the path suffix does not match the pattern.
   */
  @Override
  public TimestampedDatasetVersion getDatasetVersion(Path pathRelativeToDatasetRoot, FileStatus versionFileStatus) {
    String dateTimeString = null;
    try {
      // pathRelativeToDatasetRoot can be daily/2016/03/02 or 2016/03/02. In either case we need to pick 2016/03/02
      // as the version, i.e. the trailing substring whose length equals the datetime pattern's length.
      dateTimeString =
          StringUtils.substring(pathRelativeToDatasetRoot.toString(), pathRelativeToDatasetRoot.toString().length()
              - this.datePartitionPattern.length());
      return new FileStatusTimestampedDatasetVersion(this.formatter.parseDateTime(dateTimeString), versionFileStatus);
    } catch (IllegalArgumentException exception) {
      LOGGER.warn("Candidate dataset version with pathRelativeToDatasetRoot: {} has inferred dataTimeString:{}. "
          + "It does not match expected datetime pattern {}. Ignoring.", pathRelativeToDatasetRoot, dateTimeString,
          this.datePartitionPattern);
      return null;
    }
  }
}
| 2,484 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/version | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/version/finder/SingleVersionFinder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.version.finder;
import java.io.IOException;
import java.util.Collection;
import java.util.Properties;
import com.google.common.collect.Lists;
import com.typesafe.config.Config;
import org.apache.hadoop.fs.FileSystem;
import lombok.Getter;
import org.apache.gobblin.dataset.Dataset;
import org.apache.gobblin.dataset.FileSystemDataset;
import org.apache.gobblin.data.management.version.FileSystemDatasetVersion;
import org.apache.gobblin.data.management.version.FileStatusDatasetVersion;
import org.apache.gobblin.data.management.version.StringDatasetVersion;
/**
* Implementation of {@link VersionFinder} that uses a {@link StringDatasetVersion} and simply creates a single
* {@link StringDatasetVersion} for the given {@link FileSystemDataset}.
*/
public class SingleVersionFinder implements VersionFinder<FileStatusDatasetVersion> {

  @Getter
  private FileSystem fs;

  public SingleVersionFinder(FileSystem fs, Properties props) {
    this.fs = fs;
  }

  public SingleVersionFinder(FileSystem fs, Config config) {
    this.fs = fs;
  }

  @Override
  public Class<? extends FileSystemDatasetVersion> versionClass() {
    return StringDatasetVersion.class;
  }

  /**
   * Returns a single-element collection holding a {@link FileStatusDatasetVersion} for the
   * dataset's root directory itself — the whole dataset is treated as one version.
   */
  @Override
  public Collection<FileStatusDatasetVersion> findDatasetVersions(Dataset dataset) throws IOException {
    FileSystemDataset fsDataset = (FileSystemDataset) dataset;
    return Lists.newArrayList(new FileStatusDatasetVersion(this.fs.getFileStatus(fsDataset.datasetRoot())));
  }
}
| 2,485 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/version | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/version/finder/FileLevelTimestampVersionFinder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.version.finder;
import java.io.IOException;
import java.util.Collection;
import java.util.List;
import java.util.Properties;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.joda.time.DateTime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Lists;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.data.management.version.FileSystemDatasetVersion;
import org.apache.gobblin.data.management.version.TimestampedDatasetVersion;
import org.apache.gobblin.dataset.Dataset;
import org.apache.gobblin.dataset.FileSystemDataset;
import org.apache.gobblin.util.FileListUtils;
/**
* {@link org.apache.gobblin.data.management.version.finder.VersionFinder} that uses the most nested file,
* or directory if no file exists, level modifiedTimestamp under the datasetRoot path to find
* {@link org.apache.gobblin.data.management.version.FileSystemDatasetVersion}s, and represents each version as
* {@link org.apache.gobblin.data.management.version.TimestampedDatasetVersion} using the file level path
* and modifiedTimestamp.
*/
public class FileLevelTimestampVersionFinder implements VersionFinder<TimestampedDatasetVersion> {

  private static final Logger LOGGER = LoggerFactory.getLogger(FileLevelTimestampVersionFinder.class);

  private final FileSystem fs;

  public FileLevelTimestampVersionFinder(FileSystem fs, Properties props) {
    this(fs, ConfigFactory.parseProperties(props));
  }

  public FileLevelTimestampVersionFinder(FileSystem fs, Config config) {
    this.fs = fs;
  }

  @Override
  public Class<? extends FileSystemDatasetVersion> versionClass() {
    return TimestampedDatasetVersion.class;
  }

  /**
   * Lists the most-nested paths under the dataset root and creates one
   * {@link TimestampedDatasetVersion} per path, keyed by its modification time.
   * On listing failure, logs a warning and returns an empty list (best-effort).
   */
  @Override
  public Collection<TimestampedDatasetVersion> findDatasetVersions(Dataset dataset) {
    FileSystemDataset fsDataset = (FileSystemDataset) dataset;
    try {
      List<TimestampedDatasetVersion> timestampedVersions = Lists.newArrayList();
      for (FileStatus fileStatus : FileListUtils.listMostNestedPathRecursively(this.fs, fsDataset.datasetRoot())) {
        timestampedVersions.add(new TimestampedDatasetVersion(new DateTime(fileStatus.getModificationTime()),
            fileStatus.getPath()));
      }
      return timestampedVersions;
    } catch (IOException e) {
      // Bug fix: the original dropped the exception; include it so the stack trace is logged.
      LOGGER.warn("Failed to get ModifiedTimeStamp for candidate dataset version at " + fsDataset.datasetRoot()
          + ". Ignoring.", e);
      return Lists.newArrayList();
    }
  }
}
| 2,486 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/version | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/version/finder/WatermarkDatasetVersionFinder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.version.finder;
import org.apache.gobblin.data.management.version.FileSystemDatasetVersion;
import org.apache.gobblin.data.management.version.StringDatasetVersion;
import java.util.Properties;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import javax.annotation.Nullable;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Function;
import com.google.common.base.Optional;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
/**
* Finds watermarked dataset versions as direct subdirectories of the dataset directory. The watermark is assumed
* to be part of the subdirectory name. By default, the watermark is the subdirectory name itself, but a regular
* expression can be provided to extract the watermark from the name. The watermarks will be sorted by String
* sorting.
*
* <p>
* For example, snapshots of a database can be named by the unix timestamp when the snapshot was dumped:
* /path/to/snapshots/1436223009-snapshot
* /path/to/snapshots/1436234210-snapshot
* In this case the versions are 1436223009-snapshot, 1436234210-snapshot. Since the watermark is at the
* beginning of the name, the natural string ordering is good enough to sort the snapshots, so no regexp is
* required to extract the actual watermark.
* </p>
*/
public class WatermarkDatasetVersionFinder extends DatasetVersionFinder<StringDatasetVersion> {

  public static final Logger LOGGER = LoggerFactory.getLogger(WatermarkDatasetVersionFinder.class);

  public static final String WATERMARK_REGEX_KEY = "version.watermark.regex";

  /**
   * Optional regex whose first capture group extracts the watermark from the version
   * subdirectory name. Absent => the subdirectory name itself is the watermark.
   * Made final (it is assigned exactly once in the constructor).
   */
  private final Optional<Pattern> pattern;

  public WatermarkDatasetVersionFinder(FileSystem fs, Properties props) {
    this(fs, ConfigFactory.parseProperties(props));
  }

  public WatermarkDatasetVersionFinder(FileSystem fs, Config config) {
    super(fs);
    // Simplified: compile the pattern directly instead of routing through a Guava Function.
    if (config.hasPath(WATERMARK_REGEX_KEY)) {
      this.pattern = Optional.of(Pattern.compile(config.getString(WATERMARK_REGEX_KEY)));
    } else {
      this.pattern = Optional.absent();
    }
  }

  @Override
  public Class<? extends FileSystemDatasetVersion> versionClass() {
    return StringDatasetVersion.class;
  }

  /** Versions are direct subdirectories of the dataset root. */
  @Override
  public Path globVersionPattern() {
    return new Path("*");
  }

  /**
   * Builds a {@link StringDatasetVersion} whose version string is either the first capture
   * group of the configured regex, or the subdirectory name when no regex is configured.
   * Returns null (version ignored) when the regex does not match or has no capture group.
   */
  @Override
  public StringDatasetVersion getDatasetVersion(Path pathRelativeToDatasetRoot, Path fullPath) {
    if (this.pattern.isPresent()) {
      Matcher matcher = this.pattern.get().matcher(pathRelativeToDatasetRoot.getName());
      if (!matcher.find() || matcher.groupCount() < 1) {
        LOGGER.warn("Candidate dataset version at " + pathRelativeToDatasetRoot
            + " does not match expected pattern. Ignoring.");
        return null;
      }
      return new StringDatasetVersion(matcher.group(1), fullPath);
    }
    return new StringDatasetVersion(pathRelativeToDatasetRoot.getName(), fullPath);
  }
}
| 2,487 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/version | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/version/finder/LookbackDateTimeDatasetVersionFinder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.version.finder;
import java.io.IOException;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.joda.time.DateTime;
import org.joda.time.Duration;
import org.joda.time.Instant;
import org.joda.time.Period;
import org.joda.time.format.PeriodFormatter;
import org.joda.time.format.PeriodFormatterBuilder;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.typesafe.config.Config;
import org.apache.gobblin.data.management.version.TimestampedDatasetVersion;
import org.apache.gobblin.dataset.Dataset;
import org.apache.gobblin.dataset.FileSystemDataset;
import org.apache.gobblin.util.ConfigUtils;
/**
* {@link DatasetVersionFinder} that constructs {@link TimestampedDatasetVersion}s without actually checking for existence
* of the version path. The version path is constructed by appending the version partition pattern to the dataset root.
* The versions are found by looking back a specific period of time and finding unique date partitions between that
* time and the current time. Lookback is supported to hourly granularity.
*/
public class LookbackDateTimeDatasetVersionFinder extends DateTimeDatasetVersionFinder {

  public static final String VERSION_PATH_PREFIX = "version.path.prefix";
  public static final String VERSION_LOOKBACK_PERIOD = "version.lookback.period";

  /** Granularity at which candidate versions are generated (one hour). */
  private final Duration stepDuration;
  /** How far back from {@link #endTime} versions are generated. */
  private final Period lookbackPeriod;
  /** Optional path segment inserted between the dataset root and the datetime partition. */
  private final String pathPrefix;
  /** Upper bound of the generated version range (defaults to construction time). */
  private final Instant endTime;

  public LookbackDateTimeDatasetVersionFinder(FileSystem fs, Config config) {
    this(fs, config, Instant.now());
  }

  @VisibleForTesting
  public LookbackDateTimeDatasetVersionFinder(FileSystem fs, Config config, Instant endTime) {
    super(fs, config);
    Preconditions.checkArgument(config.hasPath(VERSION_LOOKBACK_PERIOD), "Missing required property " + VERSION_LOOKBACK_PERIOD);
    // Accepts lookback strings such as "1y2M3d4h".
    PeriodFormatter lookbackFormat = new PeriodFormatterBuilder()
        .appendYears().appendSuffix("y")
        .appendMonths().appendSuffix("M")
        .appendDays().appendSuffix("d")
        .appendHours().appendSuffix("h")
        .toFormatter();
    this.stepDuration = Duration.standardHours(1);
    this.pathPrefix = ConfigUtils.getString(config, VERSION_PATH_PREFIX, "");
    this.lookbackPeriod = lookbackFormat.parsePeriod(config.getString(VERSION_LOOKBACK_PERIOD));
    this.endTime = endTime;
  }

  /**
   * Constructs {@link TimestampedDatasetVersion}s for every unique datetime partition between
   * (endTime - lookback) and endTime, stepping one hour at a time, WITHOUT checking that the
   * version paths exist. Duplicate partitions (when the pattern is coarser than hourly) collapse
   * in the returned {@link Set}.
   */
  @Override
  public Collection<TimestampedDatasetVersion> findDatasetVersions(Dataset dataset) throws IOException {
    FileSystemDataset fsDataset = (FileSystemDataset) dataset;
    Set<TimestampedDatasetVersion> versions = new HashSet<>();
    Instant cursor = this.endTime.minus(this.lookbackPeriod.toStandardDuration());
    while (!cursor.isAfter(this.endTime)) {
      // Print-then-parse truncates the instant to the formatter's datetime pattern granularity.
      String truncated = this.formatter.print(cursor);
      DateTime versionTime = this.formatter.parseDateTime(truncated);
      Path versionPath = new Path(fsDataset.datasetRoot(), new Path(this.pathPrefix, truncated));
      versions.add(new TimestampedDatasetVersion(versionTime, versionPath));
      cursor = cursor.plus(this.stepDuration);
    }
    return versions;
  }
}
| 2,488 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/version | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/version/finder/AbstractDatasetVersionFinder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.version.finder;
import java.io.IOException;
import java.util.Collection;
import java.util.List;
import java.util.Properties;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import com.google.common.collect.Lists;
import org.apache.gobblin.dataset.Dataset;
import org.apache.gobblin.dataset.FileSystemDataset;
import org.apache.gobblin.data.management.version.FileSystemDatasetVersion;
import org.apache.gobblin.util.PathUtils;
/**
* Class to find {@link FileSystemDataset} versions in the file system.
*
* Concrete subclasses should implement a ({@link org.apache.hadoop.fs.FileSystem}, {@link java.util.Properties})
* constructor to be instantiated.
*
* Provides a callback {@link AbstractDatasetVersionFinder#getDatasetVersion(Path, FileStatus)} which subclasses need to
* implement.
*
* @param <T> Type of {@link org.apache.gobblin.data.management.version.FileSystemDatasetVersion} expected from this class.
*/
public abstract class AbstractDatasetVersionFinder<T extends FileSystemDatasetVersion> implements VersionFinder<T> {

  protected FileSystem fs;

  public AbstractDatasetVersionFinder(FileSystem fs, Properties props) {
    this.fs = fs;
  }

  public AbstractDatasetVersionFinder(FileSystem fs) {
    this(fs, new Properties());
  }

  /**
   * Find dataset versions in the input {@link org.apache.hadoop.fs.Path}. Dataset versions are subdirectories of the
   * input {@link org.apache.hadoop.fs.Path} representing a single manageable unit in the dataset.
   * See {@link org.apache.gobblin.data.management.retention.DatasetCleaner} for more information.
   *
   * @param dataset {@link org.apache.hadoop.fs.Path} to directory containing all versions of a dataset.
   * @return collection of {@link org.apache.gobblin.data.management.version.DatasetVersion}s, one per matching path
   *         for which {@link #getDatasetVersion(Path, FileStatus)} returns non-null.
   * @throws IOException on file system errors while globbing.
   */
  @Override
  public Collection<T> findDatasetVersions(Dataset dataset) throws IOException {
    FileSystemDataset fsDataset = (FileSystemDataset) dataset;
    Path versionGlobStatus = new Path(fsDataset.datasetRoot(), globVersionPattern());
    FileStatus[] dataSetVersionPaths = this.fs.globStatus(versionGlobStatus);
    // Bug fix: globStatus returns null (not an empty array) when the pattern has no glob characters
    // and the path does not exist; the original code would NPE in the loop below.
    if (dataSetVersionPaths == null) {
      return Lists.newArrayList();
    }
    List<T> dataSetVersions = Lists.newArrayList();
    for (FileStatus dataSetVersionPath : dataSetVersionPaths) {
      T datasetVersion =
          getDatasetVersion(PathUtils.relativizePath(dataSetVersionPath.getPath(), fsDataset.datasetRoot()),
              dataSetVersionPath);
      // A null return from the callback means "not a valid version; skip it".
      if (datasetVersion != null) {
        dataSetVersions.add(datasetVersion);
      }
    }
    return dataSetVersions;
  }

  /**
   * Should return class of T.
   */
  @Override
  public abstract Class<? extends FileSystemDatasetVersion> versionClass();

  /**
   * Glob pattern relative to the root of the dataset used to find {@link org.apache.hadoop.fs.FileStatus} for each
   * dataset version.
   * @return glob pattern relative to dataset root.
   */
  public abstract Path globVersionPattern();

  /**
   * Create a {@link org.apache.gobblin.data.management.version.DatasetVersion} with <code>versionFileStatus</code> and a path
   * relative to the dataset.
   * @param pathRelativeToDatasetRoot {@link org.apache.hadoop.fs.Path} of dataset version relative to dataset root.
   * @param versionFileStatus {@link FileStatus} of the dataset version.
   * @return {@link org.apache.gobblin.data.management.version.DatasetVersion} for that {@link FileStatus}, or null to skip.
   */
  public abstract T getDatasetVersion(Path pathRelativeToDatasetRoot, FileStatus versionFileStatus);
}
| 2,489 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/version | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/version/finder/ModDateTimeDatasetVersionFinder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.version.finder;
import java.io.IOException;
import java.util.Collection;
import java.util.Properties;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.joda.time.DateTime;
import com.google.common.collect.Lists;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.data.management.version.FileSystemDatasetVersion;
import org.apache.gobblin.data.management.version.TimestampedDatasetVersion;
import org.apache.gobblin.dataset.Dataset;
import org.apache.gobblin.dataset.FileSystemDataset;
/**
* {@link VersionFinder} for datasets based on modification timestamps.
*/
public class ModDateTimeDatasetVersionFinder implements VersionFinder<TimestampedDatasetVersion> {

  private final FileSystem fs;

  public ModDateTimeDatasetVersionFinder(FileSystem fs, Properties props) {
    this(fs, ConfigFactory.parseProperties(props));
  }

  public ModDateTimeDatasetVersionFinder(FileSystem fs, Config conf) {
    this.fs = fs;
  }

  @Override
  public Class<? extends FileSystemDatasetVersion> versionClass() {
    return TimestampedDatasetVersion.class;
  }

  /**
   * Treats the dataset root itself as the single version, timestamped with the root
   * directory's modification time.
   */
  @Override
  public Collection<TimestampedDatasetVersion> findDatasetVersions(Dataset dataset) throws IOException {
    FileSystemDataset fsDataset = (FileSystemDataset) dataset;
    FileStatus rootStatus = this.fs.getFileStatus(fsDataset.datasetRoot());
    DateTime modificationTime = new DateTime(rootStatus.getModificationTime());
    return Lists.newArrayList(new TimestampedDatasetVersion(modificationTime, fsDataset.datasetRoot()));
  }
}
| 2,490 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/version | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/version/finder/DatasetVersionFinder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.version.finder;
import java.util.Properties;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.gobblin.data.management.version.FileSystemDatasetVersion;
import org.apache.gobblin.dataset.FileSystemDataset;
/**
* Class to find {@link FileSystemDataset} versions in the file system.
*
* Concrete subclasses should implement a ({@link org.apache.hadoop.fs.FileSystem}, {@link java.util.Properties})
* constructor to be instantiated.
*
* Provides a callback with just the path of the version {@link DatasetVersionFinder#getDatasetVersion(Path, Path)}.
* Use {@link AbstractDatasetVersionFinder#getDatasetVersion(Path, FileStatus)} if you need a callback with {@link FileStatus}
* of the version.
*
* @param <T> Type of {@link org.apache.gobblin.data.management.version.FileSystemDatasetVersion} expected from this class.
*/
public abstract class DatasetVersionFinder<T extends FileSystemDatasetVersion> extends AbstractDatasetVersionFinder<T>
    implements VersionFinder<T> {

  /**
   * @param fs {@link FileSystem} where dataset versions are located (stored by the superclass).
   * @param props configuration properties, forwarded to the superclass.
   */
  public DatasetVersionFinder(FileSystem fs, Properties props) {
    super(fs, props);
  }

  /** Convenience constructor using empty {@link Properties}. */
  public DatasetVersionFinder(FileSystem fs) {
    this(fs, new Properties());
  }

  /**
   * Adapts the {@link FileStatus}-based callback of {@link AbstractDatasetVersionFinder} to the
   * path-only callback {@link #getDatasetVersion(Path, Path)} that subclasses implement.
   */
  @Override
  public T getDatasetVersion(Path pathRelativeToDatasetRoot, FileStatus versionFileStatus) {
    return getDatasetVersion(pathRelativeToDatasetRoot, versionFileStatus.getPath());
  }

  /**
   * Parse {@link org.apache.gobblin.data.management.version.DatasetVersion} from the path of a dataset version.
   * @param pathRelativeToDatasetRoot {@link org.apache.hadoop.fs.Path} of dataset version relative to dataset root.
   * @param fullPath full {@link org.apache.hadoop.fs.Path} of the dataset version.
   * @return {@link org.apache.gobblin.data.management.version.DatasetVersion} for that path.
   */
  public abstract T getDatasetVersion(Path pathRelativeToDatasetRoot, Path fullPath);
}
| 2,491 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/converter/AbstractAvroToOrcConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.converter;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Random;
import org.apache.avro.Schema;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.thrift.TException;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Splitter;
import com.google.common.base.Throwables;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.Converter;
import org.apache.gobblin.converter.DataConversionException;
import org.apache.gobblin.converter.SingleRecordIterable;
import org.apache.gobblin.data.management.conversion.hive.dataset.ConvertibleHiveDataset;
import org.apache.gobblin.data.management.conversion.hive.dataset.ConvertibleHiveDataset.ConversionConfig;
import org.apache.gobblin.data.management.conversion.hive.entities.HiveProcessingEntity;
import org.apache.gobblin.data.management.conversion.hive.entities.QueryBasedHiveConversionEntity;
import org.apache.gobblin.data.management.conversion.hive.entities.QueryBasedHivePublishEntity;
import org.apache.gobblin.data.management.conversion.hive.events.EventWorkunitUtils;
import org.apache.gobblin.data.management.conversion.hive.query.HiveAvroORCQueryGenerator;
import org.apache.gobblin.data.management.conversion.hive.task.HiveConverterUtils;
import org.apache.gobblin.data.management.copy.hive.HiveDatasetFinder;
import org.apache.gobblin.data.management.copy.hive.WhitelistBlacklist;
import org.apache.gobblin.hive.HiveMetastoreClientPool;
import org.apache.gobblin.metrics.event.sla.SlaEventKeys;
import org.apache.gobblin.util.AutoReturnableObject;
import org.apache.gobblin.util.HadoopUtils;
import static org.apache.gobblin.data.management.conversion.hive.task.HiveConverterUtils.getOutputDataLocation;
/**
 * Builds the Hive avro to ORC conversion query. The record type for this converter is {@link QueryBasedHiveConversionEntity}. A {@link QueryBasedHiveConversionEntity}
 * can be a hive table or a hive partition.
 * <p>
 * Concrete subclasses define the semantics of Avro to ORC conversion for a specific ORC format by providing {@link ConversionConfig}s.
 * </p>
 */
@Slf4j
public abstract class AbstractAvroToOrcConverter extends Converter<Schema, Schema, QueryBasedHiveConversionEntity, QueryBasedHiveConversionEntity> {
  /***
   * Subdirectory within destination ORC table directory to publish data
   */
  // NOTE(review): this constant appears unused within this class — the output subdirectory is
  // obtained via the statically imported HiveConverterUtils.getOutputDataLocation; confirm before removing.
  private static final String PUBLISHED_TABLE_SUBDIRECTORY = "final";
  public static final String OUTPUT_AVRO_SCHEMA_KEY = "output.avro.schema";
  private static final String ORC_FORMAT = "orc";
  /**
   * Hive runtime property key names for tracking
   */
  private static final String GOBBLIN_DATASET_URN_KEY = "gobblin.datasetUrn";
  private static final String GOBBLIN_PARTITION_NAME_KEY = "gobblin.partitionName";
  private static final String GOBBLIN_WORKUNIT_CREATE_TIME_KEY = "gobblin.workunitCreateTime";
  /***
   * Separators used by Hive
   */
  private static final String HIVE_PARTITIONS_INFO = "/";
  private static final String HIVE_PARTITIONS_TYPE = ":";
  // Filesystem used for all directory creation / permission / ownership operations below.
  protected final FileSystem fs;
  /**
   * Supported destination ORC formats
   */
  protected enum OrcFormats {
    FLATTENED_ORC("flattenedOrc"),
    NESTED_ORC("nestedOrc");
    private final String configPrefix;
    OrcFormats(String configPrefix) {
      this.configPrefix = configPrefix;
    }
    public String getConfigPrefix() {
      return this.configPrefix;
    }
  }
  /**
   * list of partitions that a partition has replaced. E.g. list of hourly partitons for a daily partition
   */
  public static final String REPLACED_PARTITIONS_HIVE_METASTORE_KEY = "gobblin.replaced.partitions";
  /**
   * The dataset being converted.
   */
  // Set at the start of each convertRecord call from the incoming conversion entity.
  protected ConvertibleHiveDataset hiveDataset;
  /**
   * If the property is set to true then in the destination dir permissions, group won't be explicitly set.
   */
  public static final String HIVE_DATASET_DESTINATION_SKIP_SETGROUP = "hive.dataset.destination.skip.setGroup";
  public static final boolean DEFAULT_HIVE_DATASET_DESTINATION_SKIP_SETGROUP = false;
  public static final String HIVE_DATASET_DESTINATION_GROUP_NAME = "hive.dataset.destination.groupName";
  public static final String HIVE_DATASET_STAGING_GROUP_NAME = "hive.dataset.staging.groupName";
  /**
   * If set to true, a set format DDL will be separate from add partition DDL
   */
  public static final String HIVE_CONVERSION_SETSERDETOAVROEXPLICITELY = "hive.conversion.setSerdeToAvroExplicitly";
  public static final boolean DEFAULT_HIVE_CONVERSION_SETSERDETOAVROEXPLICITELY = true;
  /***
   * Global Hive conversion view registration whitelist / blacklist key
   */
  public static final String HIVE_CONVERSION_VIEW_REGISTRATION_WHITELIST = "hive.conversion.view.registration.whitelist";
  public static final String HIVE_CONVERSION_VIEW_REGISTRATION_BLACKLIST = "hive.conversion.view.registration.blacklist";
  /**
   * Subclasses can convert the {@link Schema} if required.
   *
   * {@inheritDoc}
   * @see org.apache.gobblin.converter.Converter#convertSchema(java.lang.Object, org.apache.gobblin.configuration.WorkUnitState)
   */
  @Override
  public abstract Schema convertSchema(Schema inputSchema, WorkUnitState workUnit);
  /**
   * <p>
   * This method is called by {@link AbstractAvroToOrcConverter#convertRecord(Schema, QueryBasedHiveConversionEntity, WorkUnitState)} before building the
   * conversion query. Subclasses can find out if conversion is enabled for their format by calling
   * {@link ConvertibleHiveDataset#getConversionConfigForFormat(String)} on the <code>hiveDataset</code>.<br>
   * Available ORC formats are defined by the enum {@link OrcFormats}
   * </p>
   * <p>
   * If this method returns false, no Avro to to ORC conversion queries will be built for the ORC format.
   * </p>
   * @return true if conversion is required. false otherwise
   */
  protected abstract boolean hasConversionConfig();
  /**
   * Get the {@link ConversionConfig} required for building the Avro to ORC conversion query
   * @return Conversion config
   */
  protected abstract ConversionConfig getConversionConfig();
  /**
   * Creates the converter with a {@link FileSystem} built from a fresh Hadoop configuration.
   * Any {@link IOException} during filesystem creation is rethrown as a {@link RuntimeException}
   * because the converter cannot function without a filesystem handle.
   */
  public AbstractAvroToOrcConverter() {
    try {
      this.fs = FileSystem.get(HadoopUtils.newConfiguration());
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }
  /**
   * Populate the avro to orc conversion queries. The Queries will be added to {@link QueryBasedHiveConversionEntity#getQueries()}
   *
   * @param outputAvroSchema schema of the destination (possibly converted by the subclass); must not be null.
   * @param conversionEntity the table or partition being converted; queries are accumulated on it.
   * @param workUnit work unit state carrying runtime configuration; also receives serialized publish commands.
   */
  @Override
  public Iterable<QueryBasedHiveConversionEntity> convertRecord(Schema outputAvroSchema, QueryBasedHiveConversionEntity conversionEntity, WorkUnitState workUnit)
      throws DataConversionException {
    Preconditions.checkNotNull(outputAvroSchema, "Avro schema must not be null");
    Preconditions.checkNotNull(conversionEntity, "Conversion entity must not be null");
    Preconditions.checkNotNull(workUnit, "Workunit state must not be null");
    Preconditions.checkNotNull(conversionEntity.getTable(), "Hive table within conversion entity must not be null");
    EventWorkunitUtils.setBeginDDLBuildTimeMetadata(workUnit, System.currentTimeMillis());
    this.hiveDataset = conversionEntity.getConvertibleHiveDataset();
    // Subclass has no config for its format: pass the entity through with no queries added.
    if (!hasConversionConfig()) {
      return new SingleRecordIterable<>(conversionEntity);
    }
    // Avro table name and location
    String avroTableName = conversionEntity.getTable().getTableName();
    // ORC table name and location
    String orcTableName = getConversionConfig().getDestinationTableName();
    String orcStagingTableName = getOrcStagingTableName(getConversionConfig().getDestinationStagingTableName());
    String orcTableDatabase = getConversionConfig().getDestinationDbName();
    String orcDataLocation = getOrcDataLocation();
    String orcStagingDataLocation = getOrcStagingDataLocation(orcStagingTableName);
    boolean isEvolutionEnabled = getConversionConfig().isEvolutionEnabled();
    boolean isCasePreserved = getConversionConfig().isCasePreserved();
    Pair<Optional<Table>, Optional<List<Partition>>> destinationMeta = HiveConverterUtils.getDestinationTableMeta(orcTableDatabase,
        orcTableName, workUnit.getProperties());
    Optional<Table> destinationTableMeta = destinationMeta.getLeft();
    // Optional
    // View registration blacklist / whitelist
    Optional<WhitelistBlacklist> optionalViewRegistrationWhiteBlacklist = getViewWhiteBackListFromWorkUnit(workUnit);
    // wrapperViewName : If specified view with 'wrapperViewName' is created if not already exists
    // over destination table
    // isUpdateViewAlwaysEnabled: If false 'wrapperViewName' is only updated when schema evolves; if true
    // 'wrapperViewName' is always updated (everytime publish happens)
    Optional<String> wrapperViewName = Optional.<String>absent();
    if (optionalViewRegistrationWhiteBlacklist.isPresent()) {
      wrapperViewName = optionalViewRegistrationWhiteBlacklist.get().acceptTable(orcTableDatabase, orcTableName)
          ? getConversionConfig().getDestinationViewName() : wrapperViewName;
    } else {
      wrapperViewName = getConversionConfig().getDestinationViewName();
    }
    boolean shouldUpdateView = getConversionConfig().isUpdateViewAlwaysEnabled();
    // Other properties
    Optional<List<String>> clusterBy =
        getConversionConfig().getClusterBy().isEmpty()
            ? Optional.<List<String>> absent()
            : Optional.of(getConversionConfig().getClusterBy());
    Optional<Integer> numBuckets = getConversionConfig().getNumBuckets();
    Optional<Integer> rowLimit = getConversionConfig().getRowLimit();
    Properties tableProperties = getConversionConfig().getDestinationTableProperties();
    // Partition dir hint helps create different directory for hourly and daily partition with same timestamp, such as:
    // .. daily_2016-01-01-00 and hourly_2016-01-01-00
    // This helps existing hourly data from not being deleted at the time of roll up, and so Hive queries in flight
    // .. do not fail
    List<String> sourceDataPathIdentifier = getConversionConfig().getSourceDataPathIdentifier();
    // Populate optional partition info
    Map<String, String> partitionsDDLInfo = Maps.newHashMap();
    Map<String, String> partitionsDMLInfo = Maps.newHashMap();
    HiveConverterUtils.populatePartitionInfo(conversionEntity, partitionsDDLInfo, partitionsDMLInfo);
    /*
     * Create ORC data location with the same permissions as Avro data
     *
     * Note that hive can also automatically create the non-existing directories but it does not
     * seem to create it with the desired permissions.
     * According to hive docs permissions for newly created directories/files can be controlled using uMask like,
     *
     * SET hive.warehouse.subdir.inherit.perms=false;
     * SET fs.permissions.umask-mode=022;
     * Upon testing, this did not work
     */
    try {
      FileStatus sourceDataFileStatus = this.fs.getFileStatus(conversionEntity.getTable().getDataLocation());
      FsPermission sourceDataPermission = sourceDataFileStatus.getPermission();
      if (!this.fs.mkdirs(new Path(getConversionConfig().getDestinationDataPath()), sourceDataPermission)) {
        throw new RuntimeException(String.format("Failed to create path %s with permissions %s", new Path(
            getConversionConfig().getDestinationDataPath()), sourceDataPermission));
      } else {
        // mkdirs applies the umask, so re-apply the source permission explicitly.
        this.fs.setPermission(new Path(getConversionConfig().getDestinationDataPath()), sourceDataPermission);
        // Explicitly set group name for destination location if specified otherwise preserve source group name
        String destinationGroupName;
        if (workUnit.contains(HIVE_DATASET_DESTINATION_GROUP_NAME)) {
          destinationGroupName = workUnit.getProp(HIVE_DATASET_DESTINATION_GROUP_NAME);
        } else {
          destinationGroupName = sourceDataFileStatus.getGroup();
        }
        if (!workUnit.getPropAsBoolean(HIVE_DATASET_DESTINATION_SKIP_SETGROUP,
            DEFAULT_HIVE_DATASET_DESTINATION_SKIP_SETGROUP)) {
          this.fs.setOwner(new Path(getConversionConfig().getDestinationDataPath()), null, destinationGroupName);
        }
        log.info(String.format("Created %s with permissions %s and group %s", new Path(getConversionConfig()
            .getDestinationDataPath()), sourceDataPermission, sourceDataFileStatus.getGroup()));
        // Explicitly set group name for staging directory if specified
        if (workUnit.contains(HIVE_DATASET_STAGING_GROUP_NAME)) {
          String stagingGroupName = workUnit.getProp(HIVE_DATASET_STAGING_GROUP_NAME);
          log.info("Setting staging directory group name as " + stagingGroupName);
          this.fs.mkdirs(new Path(getOrcStagingDataLocation(orcStagingTableName)));
          this.fs.setOwner(new Path(getOrcStagingDataLocation(orcStagingTableName)), null, stagingGroupName);
          // Staging directory will be renamed to getOrcDataLocation() and hence it's group name should match
          // with the group name of the staging directory
          this.fs.mkdirs(new Path(getOrcDataLocation()));
          this.fs.setOwner(new Path(getOrcDataLocation()), null, stagingGroupName);
        }
      }
    } catch (IOException e) {
      Throwables.propagate(e);
    }
    // Set hive runtime properties from conversion config
    for (Map.Entry<Object, Object> entry : getConversionConfig().getHiveRuntimeProperties().entrySet()) {
      conversionEntity.getQueries().add(String.format("SET %s=%s", entry.getKey(), entry.getValue()));
    }
    // Set hive runtime properties for tracking
    conversionEntity.getQueries().add(String.format("SET %s=%s", GOBBLIN_DATASET_URN_KEY,
        conversionEntity.getTable().getCompleteName()));
    if (conversionEntity.getPartition().isPresent()) {
      conversionEntity.getQueries().add(String.format("SET %s=%s", GOBBLIN_PARTITION_NAME_KEY,
          conversionEntity.getPartition().get().getCompleteName()));
    }
    conversionEntity.getQueries().add(String
        .format("SET %s=%s", GOBBLIN_WORKUNIT_CREATE_TIME_KEY,
            workUnit.getWorkunit().getProp(SlaEventKeys.ORIGIN_TS_IN_MILLI_SECS_KEY)));
    workUnit.setProp(OUTPUT_AVRO_SCHEMA_KEY, outputAvroSchema.toString());
    // Create DDL statement for table
    Map<String, String> hiveColumns = new LinkedHashMap<>();
    String createStagingTableDDL =
        HiveAvroORCQueryGenerator.generateCreateTableDDL(outputAvroSchema,
            orcStagingTableName,
            orcStagingDataLocation,
            Optional.of(orcTableDatabase),
            Optional.of(partitionsDDLInfo),
            clusterBy,
            Optional.<Map<String, HiveAvroORCQueryGenerator.COLUMN_SORT_ORDER>>absent(),
            numBuckets,
            Optional.<String>absent(),
            Optional.<String>absent(),
            Optional.<String>absent(),
            tableProperties,
            isEvolutionEnabled,
            isCasePreserved,
            destinationTableMeta,
            hiveColumns);
    conversionEntity.getQueries().add(createStagingTableDDL);
    log.debug("Create staging table DDL: " + createStagingTableDDL);
    // Create DDL statement for partition
    String orcStagingDataPartitionDirName = HiveConverterUtils.getStagingDataPartitionDirName(conversionEntity, sourceDataPathIdentifier);
    String orcStagingDataPartitionLocation = orcStagingDataLocation + Path.SEPARATOR + orcStagingDataPartitionDirName;
    if (partitionsDMLInfo.size() > 0) {
      List<String> createStagingPartitionDDL =
          HiveAvroORCQueryGenerator.generateCreatePartitionDDL(orcTableDatabase,
              orcStagingTableName,
              orcStagingDataPartitionLocation,
              partitionsDMLInfo);
      conversionEntity.getQueries().addAll(createStagingPartitionDDL);
      log.debug("Create staging partition DDL: " + createStagingPartitionDDL);
    }
    // Create DML statement
    String insertInORCStagingTableDML =
        HiveAvroORCQueryGenerator
            .generateTableMappingDML(conversionEntity.getHiveTable().getAvroSchema(),
                outputAvroSchema,
                avroTableName,
                orcStagingTableName,
                Optional.of(conversionEntity.getTable().getDbName()),
                Optional.of(orcTableDatabase),
                Optional.of(partitionsDMLInfo),
                Optional.<Boolean>absent(),
                Optional.<Boolean>absent(),
                isEvolutionEnabled,
                destinationTableMeta,
                rowLimit);
    conversionEntity.getQueries().add(insertInORCStagingTableDML);
    log.debug("Conversion staging DML: " + insertInORCStagingTableDML);
    // TODO: Split this method into two (conversion and publish)
    // Addition to WUS for Staging publish:
    // A. Evolution turned on:
    //    1. If table does not exists: simply create it (now it should exist)
    //    2. If table exists:
    //      2.1 Evolve table (alter table)
    //      2.2 If snapshot table:
    //          2.2.1 Delete data in final table directory
    //          2.2.2 Move data from staging to final table directory
    //          2.2.3 Drop this staging table and delete directories
    //      2.3 If partitioned table, move partitions from staging to final table; for all partitions:
    //          2.3.1 Drop if exists partition in final table
    //          2.3.2 Move partition directory
    //          2.3.3 Create partition with location
    //          2.3.4 Drop this staging table and delete directories
    // B. Evolution turned off:
    //    1. If table does not exists: simply create it (now it should exist)
    //    2. If table exists:
    //      2.1 Do not evolve table
    //      2.2 If snapshot table:
    //          2.2.1 Delete data in final table directory
    //          2.2.2 Move data from staging to final table directory
    //          2.2.3 Drop this staging table and delete directories
    //      2.3 If partitioned table, move partitions from staging to final table; for all partitions:
    //          2.3.1 Drop if exists partition in final table
    //          2.3.2 Move partition directory
    //          2.3.3 Create partition with location
    //          2.3.4 Drop this staging table and delete directories
    // Note: The queries below also serve as compatibility check module before conversion, an incompatible
    //       .. schema throws a Runtime exeption, hence preventing further execution
    QueryBasedHivePublishEntity publishEntity = new QueryBasedHivePublishEntity();
    List<String> publishQueries = publishEntity.getPublishQueries();
    Map<String, String> publishDirectories = publishEntity.getPublishDirectories();
    List<String> cleanupQueries = publishEntity.getCleanupQueries();
    List<String> cleanupDirectories = publishEntity.getCleanupDirectories();
    // Step:
    // A.1, B.1: If table does not exists, simply create it
    if (!destinationTableMeta.isPresent()) {
      // NOTE(review): the two boolean flags below are passed as (isCasePreserved, isEvolutionEnabled),
      // while the staging-table DDL call above passes (isEvolutionEnabled, isCasePreserved) in the same
      // positions. Confirm against HiveAvroORCQueryGenerator.generateCreateTableDDL's parameter order —
      // one of the two calls may be swapping these flags.
      String createTargetTableDDL =
          HiveAvroORCQueryGenerator.generateCreateTableDDL(outputAvroSchema,
              orcTableName,
              orcDataLocation,
              Optional.of(orcTableDatabase),
              Optional.of(partitionsDDLInfo),
              clusterBy,
              Optional.<Map<String, HiveAvroORCQueryGenerator.COLUMN_SORT_ORDER>>absent(),
              numBuckets,
              Optional.<String>absent(),
              Optional.<String>absent(),
              Optional.<String>absent(),
              tableProperties,
              isCasePreserved,
              isEvolutionEnabled,
              destinationTableMeta,
              new HashMap<String, String>());
      publishQueries.add(createTargetTableDDL);
      log.debug("Create final table DDL: " + createTargetTableDDL);
    }
    // Step:
    // A.2.1: If table pre-exists (destinationTableMeta would be present), evolve table and update table properties
    // B.2.1: No-op
    List<String> evolutionDDLs = HiveAvroORCQueryGenerator.generateEvolutionDDL(orcStagingTableName,
        orcTableName,
        Optional.of(orcTableDatabase),
        Optional.of(orcTableDatabase),
        outputAvroSchema,
        isEvolutionEnabled,
        hiveColumns,
        destinationTableMeta,
        tableProperties);
    log.debug("Evolve final table DDLs: " + evolutionDDLs);
    EventWorkunitUtils.setEvolutionMetadata(workUnit, evolutionDDLs);
    // View (if present) must be updated if evolution happens
    shouldUpdateView |= evolutionDDLs.size() > 0;
    publishQueries.addAll(evolutionDDLs);
    if (partitionsDDLInfo.size() == 0) {
      // Step:
      // A.2.2, B.2.2: Snapshot table
      // Step:
      // A.2.2.1, B.2.2.1: Delete data in final table directory
      // A.2.2.2, B.2.2.2: Move data from staging to final table directory
      log.info("Snapshot directory to move: " + orcStagingDataLocation + " to: " + orcDataLocation);
      publishDirectories.put(orcStagingDataLocation, orcDataLocation);
      // Step:
      // A.2.2.3, B.2.2.3: Drop this staging table and delete directories
      String dropStagingTableDDL = HiveAvroORCQueryGenerator.generateDropTableDDL(orcTableDatabase, orcStagingTableName);
      log.debug("Drop staging table DDL: " + dropStagingTableDDL);
      cleanupQueries.add(dropStagingTableDDL);
      // Delete: orcStagingDataLocation
      log.info("Staging table directory to delete: " + orcStagingDataLocation);
      cleanupDirectories.add(orcStagingDataLocation);
    } else {
      // Step:
      // A.2.3, B.2.3: If partitioned table, move partitions from staging to final table; for all partitions:
      // Step:
      // A.2.3.2, B.2.3.2: Move partition directory
      // Move: orcStagingDataPartitionLocation to: orcFinalDataPartitionLocation
      String orcFinalDataPartitionLocation = orcDataLocation + Path.SEPARATOR + orcStagingDataPartitionDirName;
      Optional<Path> destPartitionLocation = getDestinationPartitionLocation(destinationTableMeta, workUnit,
          conversionEntity.getPartition().get().getName());
      orcFinalDataPartitionLocation =
          HiveConverterUtils.updatePartitionLocation(orcFinalDataPartitionLocation, workUnit, destPartitionLocation);
      log.info(
          "Partition directory to move: " + orcStagingDataPartitionLocation + " to: " + orcFinalDataPartitionLocation);
      publishDirectories.put(orcStagingDataPartitionLocation, orcFinalDataPartitionLocation);
      // Step:
      // A.2.3.1, B.2.3.1: Drop if exists partition in final table
      // Step:
      // If destination partition already exists, alter the partition location
      // A.2.3.3, B.2.3.3: Create partition with location (and update storage format if not in ORC already)
      List<String> dropPartitionsDDL =
          HiveAvroORCQueryGenerator.generateDropPartitionsDDL(orcTableDatabase,
              orcTableName,
              partitionsDMLInfo);
      log.debug("Drop partitions if exist in final table: " + dropPartitionsDDL);
      publishQueries.addAll(dropPartitionsDDL);
      if (workUnit.getPropAsBoolean(HIVE_CONVERSION_SETSERDETOAVROEXPLICITELY,
          DEFAULT_HIVE_CONVERSION_SETSERDETOAVROEXPLICITELY)) {
        List<String> createFinalPartitionDDL =
            HiveAvroORCQueryGenerator.generateCreatePartitionDDL(orcTableDatabase,
                orcTableName,
                orcFinalDataPartitionLocation,
                partitionsDMLInfo,
                Optional.<String>absent());
        log.debug("Create final partition DDL: " + createFinalPartitionDDL);
        publishQueries.addAll(createFinalPartitionDDL);
        // Updating storage format non-transactionally is a stop gap measure until Hive supports transactionally update
        // .. storage format in ADD PARITTION command (today it only supports specifying location)
        List<String> updatePartitionStorageFormatDDL =
            HiveAvroORCQueryGenerator.generateAlterTableOrPartitionStorageFormatDDL(orcTableDatabase,
                orcTableName,
                Optional.of(partitionsDMLInfo),
                ORC_FORMAT);
        log.debug("Update final partition storage format to ORC (if not already in ORC)");
        publishQueries.addAll(updatePartitionStorageFormatDDL);
      } else {
        List<String> createFinalPartitionDDL =
            HiveAvroORCQueryGenerator.generateCreatePartitionDDL(orcTableDatabase,
                orcTableName,
                orcFinalDataPartitionLocation,
                partitionsDMLInfo,
                Optional.fromNullable(ORC_FORMAT));
        log.debug("Create final partition DDL: " + createFinalPartitionDDL);
        publishQueries.addAll(createFinalPartitionDDL);
      }
      // Step:
      // A.2.3.4, B.2.3.4: Drop this staging table and delete directories
      String dropStagingTableDDL = HiveAvroORCQueryGenerator.generateDropTableDDL(orcTableDatabase, orcStagingTableName);
      log.debug("Drop staging table DDL: " + dropStagingTableDDL);
      cleanupQueries.add(dropStagingTableDDL);
      // Delete: orcStagingDataLocation
      log.info("Staging table directory to delete: " + orcStagingDataLocation);
      cleanupDirectories.add(orcStagingDataLocation);
    }
    /*
     * Drop the replaced partitions if any. This is required in case the partition being converted is derived from
     * several other partitions. E.g. Daily partition is a replacement of hourly partitions of the same day. When daily
     * partition is converted to ORC all it's hourly ORC partitions need to be dropped.
     */
    publishQueries.addAll(HiveAvroORCQueryGenerator.generateDropPartitionsDDL(orcTableDatabase,
        orcTableName,
        getDropPartitionsDDLInfo(conversionEntity)));
    /*
     * Create or update view over the ORC table if specified in the config (ie. wrapper view name is present in config)
     */
    if (wrapperViewName.isPresent()) {
      String viewName = wrapperViewName.get();
      List<String> createOrUpdateViewDDLs = HiveAvroORCQueryGenerator.generateCreateOrUpdateViewDDL(orcTableDatabase,
          orcTableName, orcTableDatabase, viewName, shouldUpdateView);
      log.debug("Create or update View DDLs: " + createOrUpdateViewDDLs);
      publishQueries.addAll(createOrUpdateViewDDLs);
    }
    HiveAvroORCQueryGenerator.serializePublishCommands(workUnit, publishEntity);
    log.debug("Publish partition entity: " + publishEntity);
    log.debug("Conversion Query " + conversionEntity.getQueries());
    EventWorkunitUtils.setEndDDLBuildTimeMetadata(workUnit, System.currentTimeMillis());
    return new SingleRecordIterable<>(conversionEntity);
  }
  /***
   * Get Hive view registration whitelist blacklist from Workunit state
   * @param workUnit Workunit containing view whitelist blacklist property
   * @return Optional WhitelistBlacklist if Workunit contains it
   */
  @VisibleForTesting
  public static Optional<WhitelistBlacklist> getViewWhiteBackListFromWorkUnit(WorkUnitState workUnit) {
    Optional<WhitelistBlacklist> optionalViewWhiteBlacklist = Optional.absent();
    if (workUnit == null) {
      return optionalViewWhiteBlacklist;
    }
    if (workUnit.contains(HIVE_CONVERSION_VIEW_REGISTRATION_WHITELIST)
        || workUnit.contains(HIVE_CONVERSION_VIEW_REGISTRATION_BLACKLIST)) {
      String viewWhiteList = workUnit.getProp(HIVE_CONVERSION_VIEW_REGISTRATION_WHITELIST, StringUtils.EMPTY);
      String viewBlackList = workUnit.getProp(HIVE_CONVERSION_VIEW_REGISTRATION_BLACKLIST, StringUtils.EMPTY);
      try {
        optionalViewWhiteBlacklist = Optional.of(new WhitelistBlacklist(viewWhiteList, viewBlackList));
      } catch (IOException e) {
        Throwables.propagate(e);
      }
    }
    return optionalViewWhiteBlacklist;
  }
  /***
   * Get the staging table name for current converter. Each converter creates its own staging table.
   * @param stagingTableNamePrefix for the staging table for this converter.
   * @return Staging table name.
   */
  private String getOrcStagingTableName(String stagingTableNamePrefix) {
    // Qualifier is current millis plus one random digit; collisions are possible within the same
    // millisecond (only 10 random values) — presumably acceptable for transient staging table names.
    int randomNumber = new Random().nextInt(10);
    String uniqueStagingTableQualifier = String.format("%s%s", System.currentTimeMillis(), randomNumber);
    return stagingTableNamePrefix + "_" + uniqueStagingTableQualifier;
  }
  /***
   * Get the ORC final table location of format: <ORC final table location>/final
   * @return ORC final table location.
   */
  private String getOrcDataLocation() {
    String orcDataLocation = getConversionConfig().getDestinationDataPath();
    return getConversionConfig().getDataDstPathUseSubdir() ? getOutputDataLocation(orcDataLocation)
        : orcDataLocation;
  }
  /***
   * Get the ORC staging table location of format: <ORC final table location>/<ORC staging table name>
   * @param orcStagingTableName ORC staging table name.
   * @return ORC staging table location.
   */
  private String getOrcStagingDataLocation(String orcStagingTableName) {
    String orcDataLocation = getConversionConfig().getDestinationDataPath();
    return orcDataLocation + Path.SEPARATOR + orcStagingTableName;
  }
  /**
   * Convenience overload: extracts the partition (if any) from the conversion entity and delegates to
   * {@link #getDropPartitionsDDLInfo(Partition)}. Returns an empty list for unpartitioned entities.
   */
  @VisibleForTesting
  public static List<Map<String, String>> getDropPartitionsDDLInfo(HiveProcessingEntity conversionEntity) {
    if (!conversionEntity.getPartition().isPresent()) {
      return Collections.emptyList();
    }
    return getDropPartitionsDDLInfo(conversionEntity.getPartition().get());
  }
  /**
   * Parse the {@link #REPLACED_PARTITIONS_HIVE_METASTORE_KEY} from partition parameters to returns DDLs for all the partitions to be
   * dropped.
   *
   * @return A {@link List} of partitions to be dropped. Each element of the list is a {@link Map} which maps a partition's
   * key and value.
   *
   */
  public static List<Map<String, String>> getDropPartitionsDDLInfo(Partition hivePartition) {
    List<Map<String, String>> replacedPartitionsDDLInfo = Lists.newArrayList();
    List<FieldSchema> partitionKeys = hivePartition.getTable().getPartitionKeys();
    if (StringUtils.isNotBlank(hivePartition.getParameters().get(REPLACED_PARTITIONS_HIVE_METASTORE_KEY))) {
      // Partitions are separated by "|"
      for (String partitionsInfoString : Splitter.on("|").omitEmptyStrings().split(hivePartition.getParameters().get(REPLACED_PARTITIONS_HIVE_METASTORE_KEY))) {
        // Values for a partition are separated by ","
        List<String> partitionValues = Splitter.on(",").omitEmptyStrings().trimResults().splitToList(partitionsInfoString);
        // Do not drop the partition being processed. Sometimes a partition may have replaced another partition of the same values.
        if (!partitionValues.equals(hivePartition.getValues())) {
          ImmutableMap.Builder<String, String> partitionDDLInfoMap = ImmutableMap.builder();
          for (int i = 0; i < partitionKeys.size(); i++) {
            partitionDDLInfoMap.put(partitionKeys.get(i).getName(), partitionValues.get(i));
          }
          replacedPartitionsDDLInfo.add(partitionDDLInfoMap.build());
        }
      }
    }
    return replacedPartitionsDDLInfo;
  }
  /**
   * Look up the current data location of the named partition on the destination table via the Hive metastore.
   *
   * @param table destination table metadata; absent short-circuits to {@link Optional#absent()}.
   * @param state work unit state used to build the metastore client pool.
   * @param partitionName name of the partition to look up.
   * @return the partition's data location, or absent if the table or partition does not exist.
   * @throws DataConversionException on metastore / filesystem errors other than a missing partition.
   */
  private Optional<Path> getDestinationPartitionLocation(Optional<Table> table, WorkUnitState state,
      String partitionName)
      throws DataConversionException {
    Optional<org.apache.hadoop.hive.metastore.api.Partition> partitionOptional =
        Optional.<org.apache.hadoop.hive.metastore.api.Partition>absent();
    if (!table.isPresent()) {
      return Optional.<Path>absent();
    }
    try {
      HiveMetastoreClientPool pool = HiveMetastoreClientPool.get(state.getJobState().getProperties(),
          Optional.fromNullable(state.getJobState().getProp(HiveDatasetFinder.HIVE_METASTORE_URI_KEY)));
      try (AutoReturnableObject<IMetaStoreClient> client = pool.getClient()) {
        partitionOptional =
            Optional.of(client.get().getPartition(table.get().getDbName(), table.get().getTableName(), partitionName));
      } catch (NoSuchObjectException e) {
        // Missing partition is an expected case, not an error.
        return Optional.<Path>absent();
      }
      if (partitionOptional.isPresent()) {
        org.apache.hadoop.hive.ql.metadata.Table qlTable = new org.apache.hadoop.hive.ql.metadata.Table(table.get());
        org.apache.hadoop.hive.ql.metadata.Partition qlPartition =
            new org.apache.hadoop.hive.ql.metadata.Partition(qlTable, partitionOptional.get());
        return Optional.of(qlPartition.getDataLocation());
      }
    } catch (IOException | TException | HiveException e) {
      throw new DataConversionException(
          String.format("Could not fetch destination table %s.%s metadata", table.get().getDbName(),
              table.get().getTableName()), e);
    }
    return Optional.<Path>absent();
  }
}
| 2,492 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/converter/HiveAvroToNestedOrcConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.converter;
import org.apache.avro.Schema;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.data.management.conversion.hive.dataset.ConvertibleHiveDataset.ConversionConfig;
/**
 * An Avro to ORC converter for avro to nested ORC. {@link OrcFormats#NESTED_ORC}
 */
public class HiveAvroToNestedOrcConverter extends AbstractAvroToOrcConverter {

  /**
   * Passes the input {@link Schema} through untouched: a nested ORC table mirrors the source
   * avro structure, so no schema transformation is required.
   *
   * {@inheritDoc}
   * @see org.apache.gobblin.data.management.conversion.hive.converter.AbstractAvroToOrcConverter#convertSchema(org.apache.avro.Schema, org.apache.gobblin.configuration.WorkUnitState)
   */
  @Override
  public Schema convertSchema(Schema inputSchema, WorkUnitState workUnit) {
    return inputSchema;
  }

  /**
   * Reports whether a nested-ORC conversion configuration was supplied for this dataset.
   *
   * {@inheritDoc}
   * @see org.apache.gobblin.data.management.conversion.hive.converter.AbstractAvroToOrcConverter#hasConversionConfig()
   */
  @Override
  protected boolean hasConversionConfig() {
    String configPrefix = OrcFormats.NESTED_ORC.getConfigPrefix();
    return hiveDataset.getConversionConfigForFormat(configPrefix).isPresent();
  }

  /**
   * Returns the nested-ORC {@link ConversionConfig}. Callers are expected to check
   * {@link #hasConversionConfig()} first, as this unconditionally unwraps the optional.
   */
  @Override
  protected ConversionConfig getConversionConfig() {
    String configPrefix = OrcFormats.NESTED_ORC.getConfigPrefix();
    return hiveDataset.getConversionConfigForFormat(configPrefix).get();
  }
}
| 2,493 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/converter/HiveAvroToFlattenedOrcConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.converter;
import org.apache.avro.Schema;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.data.management.conversion.hive.dataset.ConvertibleHiveDataset.ConversionConfig;
import org.apache.gobblin.util.AvroFlattener;
/**
 * An Avro to ORC converter for avro to flattened ORC. {@link OrcFormats#FLATTENED_ORC}
 */
public class HiveAvroToFlattenedOrcConverter extends AbstractAvroToOrcConverter {

  // Fix: declared final — the flattener is initialized once and never reassigned, so the
  // previous mutable static was an accident waiting to happen (and flagged by linters).
  private static final AvroFlattener AVRO_FLATTENER = new AvroFlattener();

  /**
   * Flatten the <code>inputSchema</code> using {@link AvroFlattener}.
   * {@inheritDoc}
   * @see org.apache.gobblin.data.management.conversion.hive.converter.AbstractAvroToOrcConverter#convertSchema(org.apache.avro.Schema, org.apache.gobblin.configuration.WorkUnitState)
   */
  @Override
  public Schema convertSchema(Schema inputSchema, WorkUnitState workUnit) {
    // Second argument false matches the original call; its exact semantics are defined by
    // AvroFlattener.flatten — NOTE(review): confirm against AvroFlattener docs if changing.
    return AVRO_FLATTENER.flatten(inputSchema, false);
  }

  /**
   * Return true if flattened orc configurations are available. False otherwise
   * {@inheritDoc}
   * @see org.apache.gobblin.data.management.conversion.hive.converter.AbstractAvroToOrcConverter#hasConversionConfig()
   */
  @Override
  protected boolean hasConversionConfig() {
    return super.hiveDataset.getConversionConfigForFormat(OrcFormats.FLATTENED_ORC.getConfigPrefix()).isPresent();
  }

  /**
   * Returns the flattened-ORC {@link ConversionConfig}; callers should first check
   * {@link #hasConversionConfig()} since the optional is unwrapped unconditionally.
   */
  @Override
  protected ConversionConfig getConversionConfig() {
    return super.hiveDataset.getConversionConfigForFormat(OrcFormats.FLATTENED_ORC.getConfigPrefix()).get();
  }
}
| 2,494 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/publisher/HiveConvertPublisher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.publisher;
import java.io.IOException;
import java.net.URI;
import java.sql.SQLException;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.regex.Pattern;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.thrift.TException;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Function;
import com.google.common.base.Optional;
import com.google.common.base.Predicate;
import com.google.common.base.Splitter;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.common.collect.Ordering;
import com.google.common.collect.Sets;
import javax.annotation.Nonnull;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.configuration.WorkUnitState.WorkingState;
import org.apache.gobblin.data.management.conversion.hive.avro.AvroSchemaManager;
import org.apache.gobblin.data.management.conversion.hive.dataset.ConvertibleHiveDataset;
import org.apache.gobblin.data.management.conversion.hive.entities.QueryBasedHivePublishEntity;
import org.apache.gobblin.data.management.conversion.hive.events.EventConstants;
import org.apache.gobblin.data.management.conversion.hive.events.EventWorkunitUtils;
import org.apache.gobblin.data.management.conversion.hive.query.HiveAvroORCQueryGenerator;
import org.apache.gobblin.data.management.conversion.hive.source.HiveSource;
import org.apache.gobblin.data.management.conversion.hive.source.HiveWorkUnit;
import org.apache.gobblin.data.management.conversion.hive.utils.LineageUtils;
import org.apache.gobblin.data.management.conversion.hive.watermarker.HiveSourceWatermarker;
import org.apache.gobblin.data.management.conversion.hive.watermarker.HiveSourceWatermarkerFactory;
import org.apache.gobblin.data.management.conversion.hive.watermarker.PartitionLevelWatermarker;
import org.apache.gobblin.data.management.copy.hive.HiveDatasetFinder;
import org.apache.gobblin.dataset.DatasetDescriptor;
import org.apache.gobblin.hive.HiveMetastoreClientPool;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.event.EventSubmitter;
import org.apache.gobblin.metrics.event.lineage.LineageInfo;
import org.apache.gobblin.metrics.event.sla.SlaEventSubmitter;
import org.apache.gobblin.publisher.DataPublisher;
import org.apache.gobblin.util.AutoReturnableObject;
import org.apache.gobblin.util.HadoopUtils;
import org.apache.gobblin.util.HiveJdbcConnector;
import org.apache.gobblin.util.WriterUtils;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
/**
* A simple {@link DataPublisher} updates the watermark and working state
*/
@Slf4j
public class HiveConvertPublisher extends DataPublisher {
private final AvroSchemaManager avroSchemaManager;
private final HiveJdbcConnector hiveJdbcConnector;
private MetricContext metricContext;
private EventSubmitter eventSubmitter;
private final FileSystem fs;
private final HiveSourceWatermarker watermarker;
private final HiveMetastoreClientPool pool;
private final Optional<LineageInfo> lineageInfo;
public static final String PARTITION_PARAMETERS_WHITELIST = "hive.conversion.partitionParameters.whitelist";
public static final String PARTITION_PARAMETERS_BLACKLIST = "hive.conversion.partitionParameters.blacklist";
public static final String COMPLETE_SOURCE_PARTITION_NAME = "completeSourcePartitionName";
public static final String COMPLETE_DEST_PARTITION_NAME = "completeDestPartitionName";
private static final Splitter COMMA_SPLITTER = Splitter.on(",").omitEmptyStrings().trimResults();
private static final Splitter At_SPLITTER = Splitter.on("@").omitEmptyStrings().trimResults();
public HiveConvertPublisher(State state) throws IOException {
super(state);
this.avroSchemaManager = new AvroSchemaManager(FileSystem.get(HadoopUtils.newConfiguration()), state);
this.metricContext = Instrumented.getMetricContext(state, HiveConvertPublisher.class);
this.eventSubmitter = new EventSubmitter.Builder(this.metricContext, EventConstants.CONVERSION_NAMESPACE).build();
// Extract LineageInfo from state
if (state instanceof SourceState) {
lineageInfo = LineageInfo.getLineageInfo(((SourceState) state).getBroker());
} else if (state instanceof WorkUnitState) {
lineageInfo = LineageInfo.getLineageInfo(((WorkUnitState) state).getTaskBrokerNullable());
} else {
lineageInfo = Optional.absent();
}
Configuration conf = new Configuration();
Optional<String> uri = Optional.fromNullable(this.state.getProp(ConfigurationKeys.WRITER_FILE_SYSTEM_URI));
if (uri.isPresent()) {
this.fs = FileSystem.get(URI.create(uri.get()), conf);
} else {
this.fs = FileSystem.get(conf);
}
try {
this.hiveJdbcConnector = HiveJdbcConnector.newConnectorWithProps(state.getProperties());
} catch (SQLException e) {
throw new RuntimeException(e);
}
this.watermarker =
GobblinConstructorUtils.invokeConstructor(
HiveSourceWatermarkerFactory.class, state.getProp(HiveSource.HIVE_SOURCE_WATERMARKER_FACTORY_CLASS_KEY,
HiveSource.DEFAULT_HIVE_SOURCE_WATERMARKER_FACTORY_CLASS)).createFromState(state);
this.pool = HiveMetastoreClientPool.get(state.getProperties(),
Optional.fromNullable(state.getProperties().getProperty(HiveDatasetFinder.HIVE_METASTORE_URI_KEY)));
}
@Override
public void initialize() throws IOException {
}
@Override
public void publishData(Collection<? extends WorkUnitState> states) throws IOException {
Set<String> cleanUpQueries = Sets.newLinkedHashSet();
Set<String> publishQueries = Sets.newLinkedHashSet();
List<String> directoriesToDelete = Lists.newArrayList();
try {
if (Iterables.tryFind(states, UNSUCCESSFUL_WORKUNIT).isPresent()) {
/////////////////////////////////////////
// Prepare cleanup and ignore publish
/////////////////////////////////////////
for (WorkUnitState wus : states) {
QueryBasedHivePublishEntity publishEntity = HiveAvroORCQueryGenerator.deserializePublishCommands(wus);
// Add cleanup commands - to be executed later
if (publishEntity.getCleanupQueries() != null) {
cleanUpQueries.addAll(publishEntity.getCleanupQueries());
}
if (publishEntity.getCleanupDirectories() != null) {
directoriesToDelete.addAll(publishEntity.getCleanupDirectories());
}
EventWorkunitUtils.setBeginPublishDDLExecuteTimeMetadata(wus, System.currentTimeMillis());
wus.setWorkingState(WorkingState.FAILED);
if (!wus.getPropAsBoolean(PartitionLevelWatermarker.IS_WATERMARK_WORKUNIT_KEY)) {
try {
new SlaEventSubmitter(eventSubmitter, EventConstants.CONVERSION_FAILED_EVENT, wus.getProperties()).submit();
} catch (Exception e) {
log.error("Failed while emitting SLA event, but ignoring and moving forward to curate " + "all clean up comamnds", e);
}
}
}
} else {
/////////////////////////////////////////
// Prepare publish and cleanup commands
/////////////////////////////////////////
for (WorkUnitState wus : PARTITION_PUBLISH_ORDERING.sortedCopy(states)) {
QueryBasedHivePublishEntity publishEntity = HiveAvroORCQueryGenerator.deserializePublishCommands(wus);
// Add cleanup commands - to be executed later
if (publishEntity.getCleanupQueries() != null) {
cleanUpQueries.addAll(publishEntity.getCleanupQueries());
}
if (publishEntity.getCleanupDirectories() != null) {
directoriesToDelete.addAll(publishEntity.getCleanupDirectories());
}
if (publishEntity.getPublishDirectories() != null) {
// Publish snapshot / partition directories
Map<String, String> publishDirectories = publishEntity.getPublishDirectories();
for (Map.Entry<String, String> publishDir : publishDirectories.entrySet()) {
moveDirectory(publishDir.getKey(), publishDir.getValue());
}
}
if (publishEntity.getPublishQueries() != null) {
publishQueries.addAll(publishEntity.getPublishQueries());
}
}
/////////////////////////////////////////
// Core publish
/////////////////////////////////////////
// Update publish start timestamp on all workunits
for (WorkUnitState wus : PARTITION_PUBLISH_ORDERING.sortedCopy(states)) {
if (HiveAvroORCQueryGenerator.deserializePublishCommands(wus).getPublishQueries() != null) {
EventWorkunitUtils.setBeginPublishDDLExecuteTimeMetadata(wus, System.currentTimeMillis());
}
}
// Actual publish: Register snapshot / partition
executeQueries(Lists.newArrayList(publishQueries));
// Update publish completion timestamp on all workunits
for (WorkUnitState wus : PARTITION_PUBLISH_ORDERING.sortedCopy(states)) {
if (HiveAvroORCQueryGenerator.deserializePublishCommands(wus).getPublishQueries() != null) {
EventWorkunitUtils.setEndPublishDDLExecuteTimeMetadata(wus, System.currentTimeMillis());
}
wus.setWorkingState(WorkingState.COMMITTED);
this.watermarker.setActualHighWatermark(wus);
// Emit an SLA event for conversion successful
if (!wus.getPropAsBoolean(PartitionLevelWatermarker.IS_WATERMARK_WORKUNIT_KEY)) {
EventWorkunitUtils.setIsFirstPublishMetadata(wus);
try {
new SlaEventSubmitter(eventSubmitter, EventConstants.CONVERSION_SUCCESSFUL_SLA_EVENT, wus.getProperties())
.submit();
} catch (Exception e) {
log.error("Failed while emitting SLA event, but ignoring and moving forward to curate " + "all clean up commands", e);
}
if (LineageUtils.shouldSetLineageInfo(wus)) {
setDestLineageInfo(wus, this.lineageInfo);
}
}
}
}
} finally {
/////////////////////////////////////////
// Preserving partition params
/////////////////////////////////////////
preservePartitionParams(states);
/////////////////////////////////////////
// Post publish cleanup
/////////////////////////////////////////
// Execute cleanup commands
try {
executeQueries(Lists.newArrayList(cleanUpQueries));
} catch (Exception e) {
log.error("Failed to cleanup staging entities in Hive metastore.", e);
}
try {
deleteDirectories(directoriesToDelete);
} catch (Exception e) {
log.error("Failed to cleanup staging directories.", e);
}
}
}
@VisibleForTesting
public static void setDestLineageInfo(WorkUnitState wus, Optional<LineageInfo> lineageInfo) {
HiveWorkUnit hiveWorkUnit = new HiveWorkUnit(wus.getWorkunit());
ConvertibleHiveDataset convertibleHiveDataset = (ConvertibleHiveDataset) hiveWorkUnit.getHiveDataset();
List<DatasetDescriptor> destDatasets = convertibleHiveDataset.getDestDatasets();
for (int i = 0; i < destDatasets.size(); i++) {
if (lineageInfo.isPresent()) {
lineageInfo.get().putDestination(destDatasets.get(i), i + 1, wus);
}
}
}
@VisibleForTesting
public void preservePartitionParams(Collection<? extends WorkUnitState> states) {
for (WorkUnitState wus : states) {
if (wus.getWorkingState() != WorkingState.COMMITTED) {
continue;
}
if (!wus.contains(COMPLETE_SOURCE_PARTITION_NAME)) {
continue;
}
if (!wus.contains(COMPLETE_DEST_PARTITION_NAME)) {
continue;
}
if (!(wus.contains(PARTITION_PARAMETERS_WHITELIST) || wus.contains(PARTITION_PARAMETERS_BLACKLIST))) {
continue;
}
List<String> whitelist = COMMA_SPLITTER.splitToList(wus.getProp(PARTITION_PARAMETERS_WHITELIST, StringUtils.EMPTY));
List<String> blacklist = COMMA_SPLITTER.splitToList(wus.getProp(PARTITION_PARAMETERS_BLACKLIST, StringUtils.EMPTY));
String completeSourcePartitionName = wus.getProp(COMPLETE_SOURCE_PARTITION_NAME);
String completeDestPartitionName = wus.getProp(COMPLETE_DEST_PARTITION_NAME);
if (!copyPartitionParams(completeSourcePartitionName, completeDestPartitionName, whitelist, blacklist)) {
log.warn("Unable to copy partition parameters from " + completeSourcePartitionName + " to "
+ completeDestPartitionName);
}
}
}
/**
* Method to copy partition parameters from source partition to destination partition
* @param completeSourcePartitionName dbName@tableName@partitionName
* @param completeDestPartitionName dbName@tableName@partitionName
*/
@VisibleForTesting
public boolean copyPartitionParams(String completeSourcePartitionName, String completeDestPartitionName,
List<String> whitelist, List<String> blacklist) {
Optional<Partition> sourcePartitionOptional = getPartitionObject(completeSourcePartitionName);
Optional<Partition> destPartitionOptional = getPartitionObject(completeDestPartitionName);
if ((!sourcePartitionOptional.isPresent()) || (!destPartitionOptional.isPresent())) {
return false;
}
Map<String, String> sourceParams = sourcePartitionOptional.get().getParameters();
Map<String, String> destParams = destPartitionOptional.get().getParameters();
for (Map.Entry<String, String> param : sourceParams.entrySet()) {
if (!matched(whitelist, blacklist, param.getKey())) {
continue;
}
destParams.put(param.getKey(), param.getValue());
}
destPartitionOptional.get().setParameters(destParams);
if (!dropPartition(completeDestPartitionName)) {
return false;
}
if (!addPartition(destPartitionOptional.get(), completeDestPartitionName)) {
return false;
}
return true;
}
@VisibleForTesting
public boolean dropPartition(String completePartitionName) {
List<String> partitionList = At_SPLITTER.splitToList(completePartitionName);
if (partitionList.size() != 3) {
log.warn("Invalid partition name " + completePartitionName);
return false;
}
try (AutoReturnableObject<IMetaStoreClient> client = pool.getClient()) {
client.get().dropPartition(partitionList.get(0), partitionList.get(1), partitionList.get(2), false);
return true;
} catch (IOException | TException e) {
log.warn("Unable to drop Partition " + completePartitionName);
}
return false;
}
@VisibleForTesting
public boolean addPartition(Partition destPartition, String completePartitionName) {
try (AutoReturnableObject<IMetaStoreClient> client = pool.getClient()) {
client.get().add_partition(destPartition);
return true;
} catch (IOException | TException e) {
log.warn("Unable to add Partition " + completePartitionName);
}
return false;
}
@VisibleForTesting
public Optional<Partition> getPartitionObject(String completePartitionName) {
try (AutoReturnableObject<IMetaStoreClient> client = pool.getClient()) {
List<String> partitionList = At_SPLITTER.splitToList(completePartitionName);
if (partitionList.size() != 3) {
log.warn("Invalid partition name " + completePartitionName);
return Optional.<Partition>absent();
}
Partition sourcePartition =
client.get().getPartition(partitionList.get(0), partitionList.get(1), partitionList.get(2));
return Optional.fromNullable(sourcePartition);
} catch (IOException | TException e) {
log.warn("Unable to get partition object from metastore for partition " + completePartitionName);
}
return Optional.<Partition>absent();
}
@VisibleForTesting
private boolean matched(List<String> whitelist, List<String> blacklist, String key) {
for (String patternStr : blacklist) {
if (Pattern.matches(getRegexPatternString(patternStr), key)) {
return false;
}
}
for (String patternStr : whitelist) {
if (Pattern.matches(getRegexPatternString(patternStr), key)) {
return true;
}
}
return false;
}
@VisibleForTesting
private String getRegexPatternString(String patternStr) {
patternStr = patternStr.replace("*", ".*");
StringBuilder builder = new StringBuilder();
builder.append("\\b").append(patternStr).append("\\b");
return patternStr;
}
private void moveDirectory(String sourceDir, String targetDir) throws IOException {
// If targetDir exists, delete it
if (this.fs.exists(new Path(targetDir))) {
deleteDirectory(targetDir);
}
// Create parent directories of targetDir
WriterUtils.mkdirsWithRecursivePermission(this.fs, new Path(targetDir).getParent(),
FsPermission.getCachePoolDefault());
// Move directory
log.info("Moving directory: " + sourceDir + " to: " + targetDir);
if (!this.fs.rename(new Path(sourceDir), new Path(targetDir))) {
throw new IOException(String.format("Unable to move %s to %s", sourceDir, targetDir));
}
}
private void deleteDirectories(List<String> directoriesToDelete) throws IOException {
for (String directory : directoriesToDelete) {
deleteDirectory(directory);
}
}
private void deleteDirectory(String dirToDelete) throws IOException {
if (StringUtils.isBlank(dirToDelete)) {
return;
}
log.info("Going to delete existing partition data: " + dirToDelete);
this.fs.delete(new Path(dirToDelete), true);
}
private void executeQueries(List<String> queries) {
if (null == queries || queries.size() == 0) {
return;
}
try {
this.hiveJdbcConnector.executeStatements(queries.toArray(new String[queries.size()]));
} catch (SQLException e) {
throw new RuntimeException(e);
}
}
@Override
public void publishMetadata(Collection<? extends WorkUnitState> states) throws IOException {
}
@Override
public void close() throws IOException {
this.avroSchemaManager.cleanupTempSchemas();
this.hiveJdbcConnector.close();
}
private static final Predicate<WorkUnitState> UNSUCCESSFUL_WORKUNIT = new Predicate<WorkUnitState>() {
@Override
public boolean apply(WorkUnitState input) {
return null == input || !WorkingState.SUCCESSFUL.equals(input.getWorkingState());
}
};
/**
* Publish workunits in lexicographic order of partition names.
* If a workunit is a noop workunit then {@link HiveWorkUnit#getPartitionName()}
* would be absent. This can happen while using {@link PartitionLevelWatermarker} where it creates a dummy workunit
* for all watermarks. It is safe to always publish this dummy workunit at the end as we do not want to update the
* ActualHighWatermark till all other partitions are successfully published. Hence we use nullsLast ordering.
*/
private static final Ordering<WorkUnitState> PARTITION_PUBLISH_ORDERING = Ordering.natural().nullsLast()
.onResultOf(new Function<WorkUnitState, String>() {
public String apply(@Nonnull WorkUnitState wus) {
return new HiveWorkUnit(wus.getWorkunit()).getPartitionName().orNull();
}
});
}
| 2,495 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/dataset/ConvertibleHiveDataset.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.dataset;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import lombok.Getter;
import lombok.ToString;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.dataset.DatasetConstants;
import org.apache.gobblin.dataset.DatasetDescriptor;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.metadata.Table;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.typesafe.config.Config;
import org.apache.gobblin.data.management.conversion.hive.entities.StageableTableMetadata;
import org.apache.gobblin.data.management.copy.hive.HiveDataset;
import org.apache.gobblin.data.management.copy.hive.HiveDatasetFinder;
import org.apache.gobblin.hive.HiveMetastoreClientPool;
import org.apache.gobblin.util.ConfigUtils;
import static org.apache.gobblin.data.management.conversion.hive.task.HiveConverterUtils.getOutputDataLocation;
/**
 * <p>
 * A {@link HiveDataset} that can be converted from one source format to several destination formats.
 * This class holds the {@link ConversionConfig}s required for conversion into each
 * destination format. The {@link ConversionConfig} for a destination format can be accessed by calling {@link #getConversionConfigForFormat(String)}.
 * </p>
 *
 * <p>
 * <b>Instantiation</b>
 * <ul>
 * <li> The constructor takes in a dataset {@link Config} which MUST have a comma separated list of destination formats at key,
 * {@value #DESTINATION_CONVERSION_FORMATS_KEY}
 * <li> Conversion configuration for a format can be set by using this destination format as prefix.
 * <li> E.g. If {@value #DESTINATION_CONVERSION_FORMATS_KEY}=flattenedOrc,nestedOrc.<br>
 * The destination table name for flattened ORC is set at flattenedOrc.tableName<br>
 * And the destination table name for nested ORC is set at nestedOrc.tableName
 * </ul>
 * </p>
 * @see ConversionConfig
 */
@ToString
@Slf4j
public class ConvertibleHiveDataset extends HiveDataset {

  public static final String DESTINATION_CONVERSION_FORMATS_KEY = "destinationFormats";

  // Destination formats, e.g. {"flattenedOrc", "nestedOrc"}
  @Getter
  private final Set<String> destFormats;

  // Mapping for destination format to it's Conversion config; only formats with a config
  // block present in the dataset config appear here (see constructor).
  private final Map<String, ConversionConfig> destConversionConfigs;

  // Source Dataset Descriptor (computed once in the constructor, used for lineage)
  @Getter
  private final DatasetDescriptor sourceDataset;

  // List of destination Dataset Descriptor, one per configured destination format (for lineage)
  @Getter
  private final List<DatasetDescriptor> destDatasets;

  /**
   * <ul>
   * <li> The constructor takes in a dataset {@link Config} which MUST have a comma separated list of destination formats at key,
   * {@value #DESTINATION_CONVERSION_FORMATS_KEY}
   * <li> Conversion configuration for a format can be set by using destination format as prefix.
   * <li> E.g. If {@value #DESTINATION_CONVERSION_FORMATS_KEY}=flattenedOrc,nestedOrc.<br>
   * The destination table name for flattened ORC is set at flattenedOrc.tableName<br>
   * And the destination table name for nested ORC is set at nestedOrc.tableName
   * </ul>
   * @param fs filesystem on which the dataset resides
   * @param clientPool Hive metastore client pool
   * @param table the source Hive table to be converted
   * @param jobProps job-level properties passed through to {@link HiveDataset}
   * @param config dataset config; must contain {@value #DESTINATION_CONVERSION_FORMATS_KEY}
   * @throws IllegalArgumentException if no destination format is configured for the dataset
   */
  public ConvertibleHiveDataset(FileSystem fs, HiveMetastoreClientPool clientPool, Table table, Properties jobProps, Config config) {
    super(fs, clientPool, table, jobProps, config);

    Preconditions.checkArgument(config.hasPath(DESTINATION_CONVERSION_FORMATS_KEY), String.format(
        "At least one destination format should be specified at %s.%s. If you do not intend to convert dataset %s set %s.%s to true",
        super.properties.getProperty(HiveDatasetFinder.HIVE_DATASET_CONFIG_PREFIX_KEY, ""),
        DESTINATION_CONVERSION_FORMATS_KEY,
        table.getCompleteName(),
        super.properties.getProperty(HiveDatasetFinder.HIVE_DATASET_CONFIG_PREFIX_KEY, ""),
        HiveDatasetFinder.HIVE_DATASET_IS_BLACKLISTED_KEY));

    // value for DESTINATION_CONVERSION_FORMATS_KEY can be a TypeSafe list or a comma separated list of string
    this.destFormats = Sets.newHashSet(ConfigUtils.getStringList(this.datasetConfig, DESTINATION_CONVERSION_FORMATS_KEY));

    // For each format create ConversionConfig and store it in a Map<format,conversionConfig>
    this.destConversionConfigs = Maps.newHashMap();

    for (String format : this.destFormats) {
      if (this.datasetConfig.hasPath(format)) {
        log.debug("Found destination format: " + format);
        this.destConversionConfigs.put(format, new ConversionConfig(this.datasetConfig.getConfig(format), table, format));
      }
    }
    this.sourceDataset = createSourceDataset();
    this.destDatasets = createDestDatasets();
  }

  /**
   * Builds a {@link DatasetDescriptor} per configured destination format, carrying the
   * destination table name and data location (optionally the "final" sub-directory when
   * {@code dataDstPathUseSubdir} is set). Formats without a conversion config are skipped.
   */
  private List<DatasetDescriptor> createDestDatasets() {
    List<DatasetDescriptor> destDatasets = new ArrayList<>();
    for (String format : getDestFormats()) {
      Optional<ConversionConfig> conversionConfigForFormat = getConversionConfigForFormat(format);
      if (!conversionConfigForFormat.isPresent()) {
        continue;
      }
      String destTable = conversionConfigForFormat.get().getDestinationDbName() + "." + conversionConfigForFormat.get()
          .getDestinationTableName();
      DatasetDescriptor dest = new DatasetDescriptor(DatasetConstants.PLATFORM_HIVE, destTable);
      String destLocation = conversionConfigForFormat.get().getDataDstPathUseSubdir()
          ? getOutputDataLocation(conversionConfigForFormat.get().getDestinationDataPath())
          : conversionConfigForFormat.get().getDestinationDataPath();
      // Destinations share the source's filesystem scheme (see createSourceDataset()).
      dest.addMetadata(DatasetConstants.FS_SCHEME, getSourceDataset().getMetadata().get(DatasetConstants.FS_SCHEME));
      dest.addMetadata(DatasetConstants.FS_LOCATION, destLocation);
      destDatasets.add(dest);
    }
    return destDatasets;
  }

  /**
   * Builds the source {@link DatasetDescriptor} from the table's data location, recording the
   * filesystem scheme and the scheme/authority-stripped path as metadata.
   *
   * @throws RuntimeException wrapping any {@link IOException} from filesystem resolution
   */
  private DatasetDescriptor createSourceDataset() {
    try {
      String sourceTable = getTable().getDbName() + "." + getTable().getTableName();
      DatasetDescriptor source = new DatasetDescriptor(DatasetConstants.PLATFORM_HIVE, sourceTable);
      Path sourcePath = getTable().getDataLocation();
      log.info(String.format("[%s]Source path %s being used in conversion", this.getClass().getName(), sourcePath));
      String sourceLocation = Path.getPathWithoutSchemeAndAuthority(sourcePath).toString();
      FileSystem sourceFs = sourcePath.getFileSystem(new Configuration());
      source.addMetadata(DatasetConstants.FS_SCHEME, sourceFs.getScheme());
      source.addMetadata(DatasetConstants.FS_LOCATION, sourceLocation);
      return source;
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }

  /**
   * Return the {@link ConversionConfig} for a destination format if available. If not return {@link Optional#absent()}
   * @param format for which {@link ConversionConfig} needs to be returned
   */
  public Optional<ConversionConfig> getConversionConfigForFormat(String format) {
    return Optional.fromNullable(this.destConversionConfigs.get(format));
  }

  /**
   * The Conversion configuration for converting from source format to each destination format.
   * <p>
   * <b>Required properties</b>
   * <ul>
   * <li>{@value #DESTINATION_DB_KEY}
   * <li>{@value #DESTINATION_TABLE_KEY}
   * <li>{@value #DESTINATION_DATA_PATH_KEY}
   * </ul>
   * <b>Optional properties</b>
   * <ul>
   * <li>{@value #CLUSTER_BY_KEY}
   * <li>{@value #NUM_BUCKETS_KEY}
   * <li>{@value #HIVE_RUNTIME_PROPERTIES_LIST_KEY} can be used to provide a list of hive properties to be set before
   * conversion. The value should can be an array of keys and values or a comma separated string of keys and values.
   * E.g. [key1,value1,key2,value2] or key1,value1,key2,value2
   * <li>{@value #DESTINATION_TABLE_PROPERTIES_LIST_KEY} can be used to provide a list of table properties to be set
   * on the destination table. The value should can be an array of keys and values or a comma separated string of keys and values.
   * E.g. [key1,value1,key2,value2] or key1,value1,key2,value2
   * </ul>
   * <p>
   */
  @Getter
  @ToString
  public static class ConversionConfig extends StageableTableMetadata {
    public static final String DESTINATION_VIEW_KEY = "destination.viewName";
    public static final String UPDATE_VIEW_ALWAYS_ENABLED = "updateViewAlways.enabled";

    // The destination format key this config was built for (e.g. "flattenedOrc", "nestedOrc")
    private final String destinationFormat;
    // destinationViewName : If specified view with 'destinationViewName' is created if not already exists over destinationTableName
    private final Optional<String> destinationViewName;
    // updateViewAlwaysEnabled: If false 'destinationViewName' is only updated when schema evolves; if true 'destinationViewName'
    // ... is always updated (everytime publish happens). Defaults to true.
    private final boolean updateViewAlwaysEnabled;

    private ConversionConfig(Config config, Table table, String destinationFormat) {
      super(config, table);

      // Required
      this.destinationFormat = destinationFormat;

      // Optional
      this.destinationViewName = Optional.fromNullable(resolveTemplate(ConfigUtils.getString(config, DESTINATION_VIEW_KEY, null), table));
      this.updateViewAlwaysEnabled = ConfigUtils.getBoolean(config, UPDATE_VIEW_ALWAYS_ENABLED, true);
    }

    /** Equality also requires {@link StageableTableMetadata#equals(Object)} on the superclass state. */
    @Override
    public boolean equals(Object o) {
      if (this == o) {
        return true;
      }
      if (o == null || getClass() != o.getClass()) {
        return false;
      }
      if (!super.equals(o)) {
        return false;
      }

      ConversionConfig that = (ConversionConfig) o;

      if (isUpdateViewAlwaysEnabled() != that.isUpdateViewAlwaysEnabled()) {
        return false;
      }
      if (!getDestinationFormat().equals(that.getDestinationFormat())) {
        return false;
      }
      return getDestinationViewName().equals(that.getDestinationViewName());
    }

    /** Kept consistent with {@link #equals(Object)}: same fields, combined with the superclass hash. */
    @Override
    public int hashCode() {
      int result = super.hashCode();
      result = 31 * result + getDestinationFormat().hashCode();
      result = 31 * result + getDestinationViewName().hashCode();
      result = 31 * result + (isUpdateViewAlwaysEnabled() ? 1 : 0);
      return result;
    }
  }
}
| 2,496 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/dataset/ConvertibleHiveDatasetFinder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.dataset;
import java.io.IOException;
import java.util.Properties;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hive.metastore.api.Table;
import com.typesafe.config.Config;
import org.apache.gobblin.data.management.copy.hive.HiveDatasetFinder;
import org.apache.gobblin.metrics.event.EventSubmitter;
/**
* A {@link HiveDatasetFinder} to create {@link ConvertibleHiveDataset}s
*/
public class ConvertibleHiveDatasetFinder extends HiveDatasetFinder {
public ConvertibleHiveDatasetFinder(FileSystem fs, Properties properties, EventSubmitter eventSubmitter) throws IOException {
super(fs, properties, eventSubmitter);
}
protected ConvertibleHiveDataset createHiveDataset(Table table, Config config) {
return new ConvertibleHiveDataset(super.fs, super.clientPool, new org.apache.hadoop.hive.ql.metadata.Table(table),
this.properties, config);
}
}
| 2,497 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/source/HiveWorkUnit.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.source;
import java.lang.reflect.Type;
import java.util.List;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import com.google.common.base.Optional;
import com.google.common.reflect.TypeToken;
import com.google.gson.Gson;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.data.management.copy.hive.HiveDataset;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.hadoop.hive.ql.metadata.Partition;
/**
 * A {@link WorkUnit} wrapper for {@link HiveSource}. This class is meant to hide the keys at which workunit values
 * are stored. The source class is supposed to read/write values into the {@link WorkUnit} through getters/setters
 * instead of directly accessing them through {@link #getProp(String, String)}/{@link #setProp(String, Object)}.
 */
public class HiveWorkUnit extends WorkUnit {

  private static final String PREFIX = "hive.source.dataset";

  private static final String HIVE_DATASET_SERIALIZED_KEY = PREFIX + ".serialized";
  private static final String HIVE_TABLE_SCHEMA_URL_KEY = PREFIX + ".table.schemaUrl";
  private static final String HIVE_TABLE_LOCATION_KEY = PREFIX + ".table.location";
  private static final String HIVE_PARTITION_SCHEMA_URL_KEY = PREFIX + ".partition.schemaUrl";
  private static final String HIVE_PARTITION_NAME_KEY = PREFIX + ".partition.name";
  private static final String HIVE_PARTITION_LOCATION_KEY = PREFIX + ".partition.location";
  private static final String HIVE_PARTITION_KEYS = PREFIX + ".partition.keys";

  private static final Gson GSON = new Gson();
  private static final Type FIELD_SCHEMA_TYPE = new TypeToken<List<FieldSchema>>() {}.getType();

  @SuppressWarnings("deprecation")
  public HiveWorkUnit() {
    super();
  }

  @SuppressWarnings("deprecation")
  public HiveWorkUnit(WorkUnit workunit) {
    super(workunit);
  }

  /**
   * Automatically serializes the {@link HiveDataset} by calling {@link #setHiveDataset(HiveDataset)}.
   * The table location is only recorded for materialized tables; virtual views carry no storage location.
   * @param hiveDataset for which the workunit is being created
   */
  @SuppressWarnings("deprecation")
  public HiveWorkUnit(HiveDataset hiveDataset) {
    super();
    setHiveDataset(hiveDataset);
    if (hiveDataset.getTable().getTableType() != TableType.VIRTUAL_VIEW) {
      setTableLocation(hiveDataset.getTable().getSd().getLocation());
    }
  }

  /**
   * Create a workunit for one partition of the given dataset, recording the partition name, location and
   * partition keys in addition to the table-level metadata.
   */
  public HiveWorkUnit(HiveDataset hiveDataset, Partition partition) {
    this(hiveDataset);
    setPartitionName(partition.getName());
    setPartitionLocation(partition.getLocation());
    setPartitionKeys(partition.getTable().getPartitionKeys());
  }

  /**
   * Sets the {@link ConfigurationKeys#DATASET_URN_KEY} key.
   */
  public void setDatasetUrn(String datasetUrn) {
    this.setProp(ConfigurationKeys.DATASET_URN_KEY, datasetUrn);
  }

  /**
   * Gets the {@link ConfigurationKeys#DATASET_URN_KEY} value.
   */
  public String getDatasetUrn() {
    return this.getProp(ConfigurationKeys.DATASET_URN_KEY);
  }

  /**
   * Gets the {@link ConfigurationKeys#DATASET_URN_KEY} value.
   * @param datasetUrn never used; retained only for backward compatibility
   * @deprecated the argument was never read — use {@link #getDatasetUrn()} instead
   */
  @Deprecated
  public String getDatasetUrn(String datasetUrn) {
    return getDatasetUrn();
  }

  /**
   * Serialize the {@link HiveDataset} into this {@link WorkUnit}.
   * Automatically sets the dataset urn by calling {@link #setDatasetUrn(String)}.
   */
  public void setHiveDataset(HiveDataset hiveDataset) {
    this.setProp(HIVE_DATASET_SERIALIZED_KEY, HiveSource.GENERICS_AWARE_GSON.toJson(hiveDataset, HiveDataset.class));
    setDatasetUrn(hiveDataset.getTable().getCompleteName());
  }

  /**
   * Deserialize the {@link HiveDataset} previously stored via {@link #setHiveDataset(HiveDataset)}.
   */
  public HiveDataset getHiveDataset() {
    return HiveSource.GENERICS_AWARE_GSON.fromJson(this.getProp(HIVE_DATASET_SERIALIZED_KEY), HiveDataset.class);
  }

  /**
   * Set the schema url for this table into the {@link WorkUnit}.
   */
  public void setTableSchemaUrl(Path schemaUrl) {
    this.setProp(HIVE_TABLE_SCHEMA_URL_KEY, schemaUrl.toString());
  }

  /**
   * Get the schema url for this table. Assumes {@link #setTableSchemaUrl(Path)} was called first;
   * otherwise the {@link Path} constructor will fail on the missing value.
   */
  public Path getTableSchemaUrl() {
    return new Path(this.getProp(HIVE_TABLE_SCHEMA_URL_KEY));
  }

  /**
   * Set the schema url for a partition into the {@link WorkUnit}.
   */
  public void setPartitionSchemaUrl(Path schemaUrl) {
    this.setProp(HIVE_PARTITION_SCHEMA_URL_KEY, schemaUrl.toString());
  }

  /**
   * Get the schema url path for the partition if this {@link WorkUnit} is for a partitioned table.
   * If not, return {@link Optional#absent()}.
   */
  public Optional<Path> getPartitionSchemaUrl() {
    return StringUtils.isNotBlank(this.getProp(HIVE_PARTITION_SCHEMA_URL_KEY)) ? Optional.<Path> of(new Path(this.getProp(HIVE_PARTITION_SCHEMA_URL_KEY)))
        : Optional.<Path> absent();
  }

  /**
   * Set the name of the partition into the {@link WorkUnit}.
   * @param partitionName name of the partition
   */
  public void setPartitionName(String partitionName) {
    this.setProp(HIVE_PARTITION_NAME_KEY, partitionName);
  }

  /**
   * Get the name for the partition if this {@link WorkUnit} is for a partitioned table.
   * If not, return {@link Optional#absent()}.
   */
  public Optional<String> getPartitionName() {
    return Optional.fromNullable(this.getProp(HIVE_PARTITION_NAME_KEY));
  }

  /**
   * Set the location of the table into the {@link WorkUnit}.
   * @param partitionLocation location of the table data (parameter name kept for backward compatibility)
   */
  public void setTableLocation(String partitionLocation) {
    this.setProp(HIVE_TABLE_LOCATION_KEY, partitionLocation);
  }

  /**
   * Get the location of the table if one was recorded via {@link #setTableLocation(String)}.
   * If not, return {@link Optional#absent()} (e.g. for virtual views).
   */
  public Optional<String> getTableLocation() {
    return Optional.fromNullable(this.getProp(HIVE_TABLE_LOCATION_KEY));
  }

  /**
   * Set the location of the partition into the {@link WorkUnit}.
   * @param partitionLocation location of the partition data
   */
  public void setPartitionLocation(String partitionLocation) {
    this.setProp(HIVE_PARTITION_LOCATION_KEY, partitionLocation);
  }

  /**
   * Get the location for the partition if this {@link WorkUnit} is for a partitioned table.
   * If not, return {@link Optional#absent()}.
   */
  public Optional<String> getPartitionLocation() {
    return Optional.fromNullable(this.getProp(HIVE_PARTITION_LOCATION_KEY));
  }

  /**
   * Set partition keys into the {@link WorkUnit}.
   * @param partitionKeys field schemas describing the partition columns
   */
  public void setPartitionKeys(List<FieldSchema> partitionKeys) {
    this.setProp(HIVE_PARTITION_KEYS, GSON.toJson(partitionKeys, FIELD_SCHEMA_TYPE));
  }

  /**
   * Get the partition keys if this {@link WorkUnit} is for a partitioned table.
   * If not, return {@link Optional#absent()}.
   */
  public Optional<List<FieldSchema>> getPartitionKeys() {
    String serialized = this.getProp(HIVE_PARTITION_KEYS);
    if (serialized == null) {
      return Optional.absent();
    }
    List<FieldSchema> deserialized = GSON.fromJson(serialized, FIELD_SCHEMA_TYPE);
    return Optional.of(deserialized);
  }
}
| 2,498 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/source/HiveAvroToOrcSource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.source;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import java.util.List;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.data.management.conversion.hive.dataset.ConvertibleHiveDataset;
import org.apache.gobblin.data.management.conversion.hive.dataset.ConvertibleHiveDatasetFinder;
import org.apache.gobblin.data.management.conversion.hive.utils.LineageUtils;
import org.apache.gobblin.data.management.copy.hive.HiveDatasetFinder;
import org.apache.gobblin.dataset.DatasetDescriptor;
import org.apache.gobblin.metrics.event.lineage.LineageInfo;
import org.apache.gobblin.source.workunit.WorkUnit;
/**
* An extension to {@link HiveSource} that is used for Avro to ORC conversion jobs.
*/
public class HiveAvroToOrcSource extends HiveSource {
private Optional<LineageInfo> lineageInfo;
@Override
public List<WorkUnit> getWorkunits(SourceState state) {
if (!state.contains(HIVE_SOURCE_DATASET_FINDER_CLASS_KEY)) {
state.setProp(HIVE_SOURCE_DATASET_FINDER_CLASS_KEY, ConvertibleHiveDatasetFinder.class.getName());
}
if (!state.contains(HiveDatasetFinder.HIVE_DATASET_CONFIG_PREFIX_KEY)) {
state.setProp(HiveDatasetFinder.HIVE_DATASET_CONFIG_PREFIX_KEY, "hive.conversion.avro");
}
this.lineageInfo = LineageInfo.getLineageInfo(state.getBroker());
List<WorkUnit> workunits = super.getWorkunits(state);
for (WorkUnit workUnit : workunits) {
if (LineageUtils.shouldSetLineageInfo(workUnit)) {
setSourceLineageInfo(workUnit, this.lineageInfo);
}
}
return workunits;
}
@VisibleForTesting
public void setSourceLineageInfo(WorkUnit workUnit, Optional<LineageInfo> lineageInfo) {
HiveWorkUnit hiveWorkUnit = new HiveWorkUnit(workUnit);
ConvertibleHiveDataset convertibleHiveDataset = (ConvertibleHiveDataset) hiveWorkUnit.getHiveDataset();
DatasetDescriptor sourceDataset = convertibleHiveDataset.getSourceDataset();
if (lineageInfo.isPresent()) {
lineageInfo.get().setSource(sourceDataset, workUnit);
}
}
} | 2,499 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.